borgbackup-1.1.15/0000755000175000017500000000000013771325773013713 5ustar useruser00000000000000borgbackup-1.1.15/setup.cfg0000644000175000017500000000047513771325773015542 0ustar useruser00000000000000[tool:pytest] python_files = testsuite/*.py markers = allow_cache_wipe [flake8] ignore = E122,E123,E125,E126,E127,E128,E226,E402,E722,E731,E741, F401,F405,F811, W504 max-line-length = 255 exclude = build,dist,.git,.idea,.cache,.tox,docs/conf.py,src/borg/algorithms/msgpack [egg_info] tag_build = tag_date = 0 borgbackup-1.1.15/tox.ini0000644000175000017500000000074413771325506015225 0ustar useruser00000000000000# tox configuration - if you change anything here, run this to verify: # fakeroot -u tox --recreate [tox] envlist = py{35,36,37,38,39} [testenv] deps = -rrequirements.d/development.txt -rrequirements.d/fuse.txt commands = py.test -n {env:XDISTN:4} -rs --cov=borg --cov-config=.coveragerc --benchmark-skip --pyargs {posargs:borg.testsuite} # fakeroot -u needs some env vars: passenv = * [testenv:flake8] changedir = deps = flake8 commands = flake8 src scripts conftest.py borgbackup-1.1.15/README.rst0000644000175000017500000001754213771325506015405 0ustar useruser00000000000000|screencast_basic| More screencasts: `installation`_, `advanced usage`_ What is BorgBackup? ------------------- BorgBackup (short: Borg) is a deduplicating backup program. Optionally, it supports compression and authenticated encryption. The main goal of Borg is to provide an efficient and secure way to backup data. The data deduplication technique used makes Borg suitable for daily backups since only changes are stored. The authenticated encryption technique makes it suitable for backups to not fully trusted targets. See the `installation manual`_ or, if you have already downloaded Borg, ``docs/installation.rst`` to get started with Borg. There is also an `offline documentation`_ available, in multiple formats. .. _installation manual: https://borgbackup.readthedocs.org/en/stable/installation.html .. _offline documentation: https://readthedocs.org/projects/borgbackup/downloads Main features ~~~~~~~~~~~~~ **Space efficient storage** Deduplication based on content-defined chunking is used to reduce the number of bytes stored: each file is split into a number of variable length chunks and only chunks that have never been seen before are added to the repository. A chunk is considered duplicate if its id_hash value is identical. A cryptographically strong hash or MAC function is used as id_hash, e.g. (hmac-)sha256. To deduplicate, all the chunks in the same repository are considered, no matter whether they come from different machines, from previous backups, from the same backup or even from the same single file. Compared to other deduplication approaches, this method does NOT depend on: * file/directory names staying the same: So you can move your stuff around without killing the deduplication, even between machines sharing a repo. * complete files or time stamps staying the same: If a big file changes a little, only a few new chunks need to be stored - this is great for VMs or raw disks. * The absolute position of a data chunk inside a file: Stuff may get shifted and will still be found by the deduplication algorithm. 
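
As a rough, illustrative sketch only (this is not Borg's actual chunker, id_hash or
repository format; the plain SHA-256 and the ``store_chunks`` helper below are made
up for illustration), deduplication by chunk id boils down to::

    import hashlib

    def id_hash(chunk: bytes) -> bytes:
        # stand-in for Borg's keyed id_hash (e.g. hmac-sha256)
        return hashlib.sha256(chunk).digest()

    def store_chunks(repo: dict, chunks):
        """Store only chunks that have never been seen before."""
        refs = []
        for chunk in chunks:
            cid = id_hash(chunk)
            if cid not in repo:   # duplicate chunk: nothing new to store
                repo[cid] = chunk
            refs.append(cid)      # the archive just references chunk ids
        return refs
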
**Speed** * performance-critical code (chunking, compression, encryption) is implemented in C/Cython * local caching of files/chunks index data * quick detection of unmodified files **Data encryption** All data can be protected using 256-bit AES encryption, data integrity and authenticity is verified using HMAC-SHA256. Data is encrypted clientside. **Compression** All data can be optionally compressed: * lz4 (super fast, low compression) * zstd (wide range from high speed and low compression to high compression and lower speed) * zlib (medium speed and compression) * lzma (low speed, high compression) **Off-site backups** Borg can store data on any remote host accessible over SSH. If Borg is installed on the remote host, big performance gains can be achieved compared to using a network filesystem (sshfs, nfs, ...). **Backups mountable as filesystems** Backup archives are mountable as userspace filesystems for easy interactive backup examination and restores (e.g. by using a regular file manager). **Easy installation on multiple platforms** We offer single-file binaries that do not require installing anything - you can just run them on these platforms: * Linux * Mac OS X * FreeBSD * OpenBSD and NetBSD (no xattrs/ACLs support or binaries yet) * Cygwin (experimental, no binaries yet) * Linux Subsystem of Windows 10 (experimental) **Free and Open Source Software** * security and functionality can be audited independently * licensed under the BSD (3-clause) license, see `License`_ for the complete license Easy to use ~~~~~~~~~~~ Initialize a new backup repository (see ``borg init --help`` for encryption options):: $ borg init -e repokey /path/to/repo Create a backup archive:: $ borg create /path/to/repo::Saturday1 ~/Documents Now doing another backup, just to show off the great deduplication:: $ borg create -v --stats /path/to/repo::Saturday2 ~/Documents ----------------------------------------------------------------------------- Archive name: Saturday2 Archive fingerprint: 622b7c53c... Time (start): Sat, 2016-02-27 14:48:13 Time (end): Sat, 2016-02-27 14:48:14 Duration: 0.88 seconds Number of files: 163 ----------------------------------------------------------------------------- Original size Compressed size Deduplicated size This archive: 6.85 MB 6.85 MB 30.79 kB <-- ! All archives: 13.69 MB 13.71 MB 6.88 MB Unique chunks Total chunks Chunk index: 167 330 ----------------------------------------------------------------------------- For a graphical frontend refer to our complementary project `BorgWeb `_. Helping, Donations and Bounties, becoming a Patron -------------------------------------------------- Your help is always welcome! Spread the word, give feedback, help with documentation, testing or development. You can also give monetary support to the project, see there for details: https://www.borgbackup.org/support/fund.html Links ----- * `Main Web Site `_ * `Releases `_, `PyPI packages `_ and `ChangeLog `_ * `Offline Documentation `_ * `GitHub `_ and `Issue Tracker `_. * `Web-Chat (IRC) `_ and `Mailing List `_ * `License `_ * `Security contact `_ Compatibility notes ------------------- EXPECT THAT WE WILL BREAK COMPATIBILITY REPEATEDLY WHEN MAJOR RELEASE NUMBER CHANGES (like when going from 0.x.y to 1.0.0 or from 1.x.y to 2.0.0). NOT RELEASED DEVELOPMENT VERSIONS HAVE UNKNOWN COMPATIBILITY PROPERTIES. THIS IS SOFTWARE IN DEVELOPMENT, DECIDE YOURSELF WHETHER IT FITS YOUR NEEDS. 
Security issues should be reported to the `Security contact`_ (or see ``docs/support.rst`` in the source distribution). .. start-badges |doc| |build| |coverage| |bestpractices| |bounties| .. |bounties| image:: https://api.bountysource.com/badge/team?team_id=78284&style=bounties_posted :alt: Bounty Source :target: https://www.bountysource.com/teams/borgbackup .. |doc| image:: https://readthedocs.org/projects/borgbackup/badge/?version=stable :alt: Documentation :target: https://borgbackup.readthedocs.org/en/stable/ .. |build| image:: https://github.com/borgbackup/borg/workflows/CI/badge.svg?branch=1.1-maint :alt: Build Status (1.1-maint) :target: https://github.com/borgbackup/borg/actions .. |coverage| image:: https://codecov.io/github/borgbackup/borg/coverage.svg?branch=1.1-maint :alt: Test Coverage (1.1-maint) :target: https://codecov.io/github/borgbackup/borg?branch=1.1-maint .. |screencast_basic| image:: https://asciinema.org/a/133292.png :alt: BorgBackup Basic Usage :target: https://asciinema.org/a/133292?autoplay=1&speed=1 .. _installation: https://asciinema.org/a/133291?autoplay=1&speed=1 .. _advanced usage: https://asciinema.org/a/133293?autoplay=1&speed=1 .. |bestpractices| image:: https://bestpractices.coreinfrastructure.org/projects/271/badge :alt: Best Practices Score :target: https://bestpractices.coreinfrastructure.org/projects/271 .. end-badges borgbackup-1.1.15/requirements.d/0000755000175000017500000000000013771325773016660 5ustar useruser00000000000000borgbackup-1.1.15/requirements.d/fuse.txt0000644000175000017500000000016513771325506020357 0ustar useruser00000000000000# low-level FUSE support library for "borg mount" # please see the comments in setup.py about llfuse. llfuse >=1.3.4 borgbackup-1.1.15/requirements.d/coala.txt0000644000175000017500000000016613771325506020475 0ustar useruser00000000000000# style and other checks for many languages. # some bears (checkers) have additional requirements. 
coala coala-bears borgbackup-1.1.15/requirements.d/development.lock.txt0000644000175000017500000000035113771325506022663 0ustar useruser00000000000000setuptools==50.3.0 setuptools-scm==4.1.2 pip==20.2.3 virtualenv==20.0.33 importlib-metadata==1.7.0 pluggy==0.13.1 tox==3.20.0 pytest==6.1.1 pytest-xdist==2.1.0 pytest-cov==2.10.1 pytest-benchmark==3.2.3 Cython==0.29.21 twine==1.15.0 borgbackup-1.1.15/requirements.d/development.txt0000644000175000017500000000015213771325506021733 0ustar useruser00000000000000setuptools setuptools_scm pip virtualenv tox pytest pytest-xdist pytest-cov pytest-benchmark Cython twine borgbackup-1.1.15/requirements.d/docs.txt0000644000175000017500000000003313771325506020337 0ustar useruser00000000000000sphinx guzzle_sphinx_theme borgbackup-1.1.15/.coveragerc0000644000175000017500000000074113771325506016030 0ustar useruser00000000000000[run] branch = True disable_warnings = module-not-measured source = src/borg omit = */borg/__init__.py */borg/__main__.py */borg/_version.py */borg/fuse.py */borg/support/* */borg/testsuite/* */borg/hash_sizes.py [report] exclude_lines = pragma: no cover pragma: freebsd only pragma: unknown platform only def __repr__ raise AssertionError raise NotImplementedError if 0: if __name__ == .__main__.: ignore_errors = True borgbackup-1.1.15/setup_lz4.py0000644000175000017500000000466113771325506016217 0ustar useruser00000000000000# Support code for building a C extension with lz4 files # # Copyright (c) 2016-present, Gregory Szorc (original code for zstd) # 2017-present, Thomas Waldmann (mods to make it more generic, code for lz4) # All rights reserved. # # This software may be modified and distributed under the terms # of the BSD license. See the LICENSE file for details. import os # lz4 files, structure as seen in lz4 project repository: lz4_sources = [ 'lib/lz4.c', ] lz4_includes = [ 'lib', ] def lz4_system_prefix(prefixes): for prefix in prefixes: filename = os.path.join(prefix, 'include', 'lz4.h') if os.path.exists(filename): with open(filename, 'rb') as fd: if b'LZ4_compress_default' in fd.read(): # requires lz4 >= 1.7.0 (r129) return prefix def lz4_ext_kwargs(bundled_path, system_prefix=None, system=False, **kwargs): """amend kwargs with lz4 stuff for a distutils.extension.Extension initialization. 
bundled_path: relative (to this file) path to the bundled library source code files system_prefix: where the system-installed library can be found system: True: use the system-installed shared library, False: use the bundled library code kwargs: distutils.extension.Extension kwargs that should be amended returns: amended kwargs """ def multi_join(paths, *path_segments): """apply os.path.join on a list of paths""" return [os.path.join(*(path_segments + (path, ))) for path in paths] use_system = system and system_prefix is not None sources = kwargs.get('sources', []) if not use_system: sources += multi_join(lz4_sources, bundled_path) include_dirs = kwargs.get('include_dirs', []) if use_system: include_dirs += multi_join(['include'], system_prefix) else: include_dirs += multi_join(lz4_includes, bundled_path) library_dirs = kwargs.get('library_dirs', []) if use_system: library_dirs += multi_join(['lib'], system_prefix) libraries = kwargs.get('libraries', []) if use_system: libraries += ['lz4', ] extra_compile_args = kwargs.get('extra_compile_args', []) if not use_system: extra_compile_args += [] # not used yet ret = dict(**kwargs) ret.update(dict(sources=sources, extra_compile_args=extra_compile_args, include_dirs=include_dirs, library_dirs=library_dirs, libraries=libraries)) return ret borgbackup-1.1.15/CHANGES.rst0000644000175000017500000044366113771325506015525 0ustar useruser00000000000000.. _important_notes: Important notes =============== This section provides information about security and corruption issues. .. _hashindex_set_bug: Pre-1.1.11 potential index corruption / data loss issue ------------------------------------------------------- A bug was discovered in our hashtable code, see issue #4829. The code is used for the client-side chunks cache and the server-side repo index. Although borg uses the hashtables very heavily, the index corruption did not happen too frequently, because it needed specific conditions to happen. Data loss required even more specific conditions, so it should be rare (and also detectable via borg check). You might be affected if borg crashed with / complained about: - AssertionError: Corrupted segment reference count - corrupted index or hints - ObjectNotFound: Object with key ... not found in repository ... - Index mismatch for key b'...'. (..., ...) != (-1, -1) - ValueError: stats_against: key contained in self but not in master_index. Advised procedure to fix any related issue in your indexes/caches: - install fixed borg code (on client AND server) - for all of your clients and repos remove the cache by: borg delete --cache-only YOURREPO (later, the cache will be re-built automatically) - for all your repos, rebuild the repo index by: borg check --repair YOURREPO This will also check all archives and detect if there is any data-loss issue. Affected branches / releases: - fd06497 introduced the bug into 1.1-maint branch - it affects all borg 1.1.x since 1.1.0b4. - fd06497 introduced the bug into master branch - it affects all borg 1.2.0 alpha releases. - c5cd882 introduced the bug into 1.0-maint branch - it affects all borg 1.0.x since 1.0.11rc1. The bug was fixed by: - 701159a fixes the bug in 1.1-maint branch - will be released with borg 1.1.11. - fa63150 fixes the bug in master branch - will be released with borg 1.2.0a8. - 7bb90b6 fixes the bug in 1.0-maint branch. Branch is EOL, no new release is planned as of now. .. 
_broken_validator:

Pre-1.1.4 potential data corruption issue
-----------------------------------------

A data corruption bug was discovered in borg check --repair, see issue #3444.

This is a 1.1.x regression, releases < 1.1 (e.g. 1.0.x) are not affected.

To avoid data loss, you must not run borg check --repair using an unfixed
version of borg 1.1.x. The first official release that has the fix is 1.1.4.
Package maintainers may have applied the fix to updated packages of 1.1.x
(x<4) though, see the package maintainer's package changelog to make sure.

If you never had missing item metadata chunks, the bug has not affected you,
even if you did run borg check --repair with an unfixed version.

When borg check --repair tried to repair corrupt archives that miss item
metadata chunks, the resync to valid metadata in still-present item metadata
chunks malfunctioned. This was due to a broken validator that considered all
(even valid) item metadata as invalid. As they were considered invalid, borg
discarded them. Practically, that means the affected files, directories or
other filesystem objects were discarded from the archive.

Due to the malfunction, the process was extremely slow, but if you let it
complete, borg would have created a "repaired" archive that had lost a lot of
items.

If you interrupted borg check --repair because it was so strangely slow
(killing borg somehow, e.g. Ctrl-C), the transaction was rolled back and no
corruption occurred.

The log message indicating the precondition for the bug triggering looks like:

    item metadata chunk missing [chunk: 001056_bdee87d...a3e50d]

If you never had that in your borg check --repair runs, you're not affected.

But if you're unsure or you actually have seen that message, better check your
archives. By just using "borg list repo::archive" you can see if all expected
filesystem items are listed.

.. _tam_vuln:

Pre-1.0.9 manifest spoofing vulnerability (CVE-2016-10099)
----------------------------------------------------------

A flaw in the cryptographic authentication scheme in Borg allowed an attacker
to spoof the manifest. The attack requires an attacker to be able to

1. insert files (with no additional headers) into backups
2. gain write access to the repository

This vulnerability does not disclose plaintext to the attacker, nor does it
affect the authenticity of existing archives.

The vulnerability allows an attacker to create a spoofed manifest (the list
of archives). Creating plausible fake archives may be feasible for small
archives, but is unlikely for large archives.

The fix adds a separate authentication tag to the manifest. For compatibility
with prior versions this authentication tag is *not* required by default for
existing repositories. Repositories created with 1.0.9 and later require it.

Steps you should take:

1. Upgrade all clients to 1.0.9 or later.
2. Run ``borg upgrade --tam `` *on every client* for *each* repository.
3. This will list all archives, including archive IDs, for easy comparison
   with your logs.
4. Done.

Prior versions can still access and modify repositories with this measure
enabled; however, to clients running 1.0.9 or later their modifications are
indiscernible from an attack and will raise an error until the procedure
below is followed. We are aware that this can be annoying in some
circumstances, but we don't see a way to fix the vulnerability otherwise.

In case a version prior to 1.0.9 is used to modify a repository where the
above procedure was completed, and you now get an error message from other
clients:
1. ``borg upgrade --tam --force `` once with *any* client suffices.

This attack is mitigated by:

- Noting/logging ``borg list``, ``borg info``, or ``borg create --stats``,
  which contain the archive IDs.

We are not aware of others having discovered, disclosed or exploited this
vulnerability.

Vulnerability time line:

* 2016-11-14: Vulnerability and fix discovered during review of cryptography
  by Marian Beermann (@enkore)
* 2016-11-20: First patch
* 2016-12-20: Released fixed version 1.0.9
* 2017-01-02: CVE was assigned
* 2017-01-15: Released fixed version 1.1.0b3 (fix was previously only
  available from source)

.. _attic013_check_corruption:

Pre-1.0.9 potential data loss
-----------------------------

If you have archives in your repository that were made with attic <= 0.13
(and later migrated to borg), running borg check would report errors in these
archives. See issue #1837.

The reason for this is an invalid (and useless) metadata key that was always
added due to a bug in these old attic versions.

If you run borg check --repair, things escalate quickly: all archive items
with invalid metadata will be killed. Due to that attic bug, that means all
items in all archives made with these old attic versions.

Pre-1.0.4 potential repo corruption
-----------------------------------

Some external errors (like network or disk I/O errors) could lead to
corruption of the backup repository due to issue #1138.

A sign that this happened is if "E" status was reported for a file that can
not be explained by problems with the source file. If you still have logs
from "borg create -v --list", you can check for "E" status.

Here is what could cause corruption and what you can do now:

1) I/O errors (e.g. repo disk errors) while writing data to repo.

   This could lead to corrupted segment files.

   Fix::

       # check for corrupt chunks / segments:
       borg check -v --repository-only REPO

       # repair the repo:
       borg check -v --repository-only --repair REPO

       # make sure everything is fixed:
       borg check -v --repository-only REPO

2) Unreliable network / unreliable connection to the repo.

   This could lead to archive metadata corruption.

   Fix::

       # check for corrupt archives:
       borg check -v --archives-only REPO

       # delete the corrupt archives:
       borg delete --force REPO::CORRUPT_ARCHIVE

       # make sure everything is fixed:
       borg check -v --archives-only REPO

3) In case you want to do more intensive checking.

   The best check that everything is ok is to run a dry-run extraction::

       borg extract -v --dry-run REPO::ARCHIVE

.. _changelog:

Changelog
=========

Version 1.1.15 (2020-12-25)
---------------------------

Compatibility notes:

- When upgrading from borg 1.0.x to 1.1.x, please note:

  - read all the compatibility notes for 1.1.0*, starting from 1.1.0b1.
  - borg upgrade: you do not need to and you also should not run it.
  - borg might ask some security-related questions once after upgrading.
    You can answer them either manually or via environment variable.
    One known case is if you use unencrypted repositories, then it will ask
    about an unknown unencrypted repository one time.
  - your first backup with 1.1.x might be significantly slower (it might
    completely read, chunk and hash a lot of files) - this is due to the
    --files-cache mode change (and happens every time you change mode).
    You can avoid the one-time slowdown by using the pre-1.1.0rc4-compatible
    mode (but that is less safe for detecting changed files than the
    default). See the --files-cache docs for details.

- 1.1.11 removes WSL autodetection (Windows 10 Subsystem for Linux).
If WSL still has a problem with sync_file_range, you need to set BORG_WORKAROUNDS=basesyncfile in the borg process environment to work around the WSL issue. - 1.1.14 changes return codes due to a bug fix: In case you have scripts expecting rc == 2 for a signal exit, you need to update them to check for >= 128 (as documented since long). - 1.1.15 drops python 3.4 support, minimum requirement is 3.5 now. Fixes: - extract: - improve exception handling when setting xattrs, #5092. - emit a warning message giving the path, xattr key and error message. - continue trying to restore other xattrs and bsdflags of the same file after an exception with xattr-setting happened. - export-tar: - set tar format to GNU_FORMAT explicitly, #5274 - fix memory leak with ssh: remote repository, #5568 - fix potential memory leak with ssh: remote repository with partial extraction - create: fix --dry-run and --stats coexistence, #5415 - use --timestamp for {utcnow} and {now} if given, #5189 New features: - create: implement --stdin-mode, --stdin-user and --stdin-group, #5333 - allow appending the files cache filename with BORG_FILES_CACHE_SUFFIX env var Other changes: - drop python 3.4 support, minimum requirement is 3.5 now. - enable using libxxhash instead of bundled xxh64 code - update llfuse requirements (1.3.8) - set cython language_level in some files to fix warnings - allow EIO with warning when trying to hardlink - PropDict: fail early if internal_dict is not a dict - update shell completions - tests / CI - add a test for the hashindex corruption bug, #5531 #4829 - fix spurious failure in test_cache_files, #5438 - added a github ci workflow - reduce testing on travis, no macOS, no py3x-dev, #5467 - travis: use newer dists, native py on dist - vagrant: - remove jessie and trusty boxes, #5348 #5383 - pyinstaller 4.0, build on py379 - binary build on stretch64, #5348 - remove easy_install based pip installation - docs: - clarify '--one-file-system' for btrfs, #5391 - add example for excluding content using the --pattern cmd line arg - complement the documentation for pattern files and exclude files, #5524 - made ansible playbook more generic, use package instead of pacman. also change state from "latest" to "present". - complete documentation on append-only remote repos, #5497 - internals: rather talk about target size than statistics, #5336 - new compression algorithm policy, #1633 #5505 - faq: add a hint on sleeping computer, #5301 - note requirements for full disk access on macOS Catalina, #5303 - fix/improve description of borg upgrade hardlink usage, #5518 - modernize 1.1 code: - drop code/workarounds only needed to support Python 3.4 - remove workaround for pre-release py37 argparse bug - removed some outdated comments/docstrings - requirements: remove some restrictions, lock on current versions Version 1.1.14 (2020-10-07) --------------------------- Fixes: - check --repair: fix potential data loss when interrupting it, #5325 - exit with 128 + signal number (as documented) when borg is killed by a signal, #5161 - fix hardlinked CACHEDIR.TAG processing, #4911 - create --read-special: .part files also should be regular files, #5217 - llfuse dependency: choose least broken 1.3.6/1.3.7. 1.3.6 is broken on python 3.9, 1.3.7 is broken on FreeBSD. 
Other changes: - upgrade bundled xxhash to 0.7.4 - self test: if it fails, also point to OS and hardware, #5334 - pyinstaller: compute basepath from spec file location - prettier error message when archive gets too big, #5307 - check/recreate are not "experimental" any more (but still potentially dangerous): - recreate: remove extra confirmation - rephrase some warnings, update docs, #5164 - shell completions: - misc. updates / fixes - support repositories in fish tab completion, #5256 - complete $BORG_RECREATE_I_KNOW_WHAT_I_AM_DOING - rewrite zsh completion: - completion for almost all optional and positional arguments - completion for Borg environment variables (parameters) - use "allow/deny list" instead of "white/black list" wording - declare "allow_cache_wipe" marker in setup.cfg to avoid pytest warning - vagrant / tests: - misc. fixes / updates - use python 3.5.10 for binary build - build directory-based binaries additionally to the single file binaries - add libffi-dev, required to build python - use cryptography<3.0, more recent versions break the jessie box - test on python 3.9 - do brew update with /dev/null redirect to avoid "too much log output" on travis-ci - docs: - add ssh-agent pull backup method docs, #5288 - how to approach borg speed issues, #5371 - mention double --force in prune docs - update Homebrew install instructions, #5185 - better description of how cache and rebuilds of it work - point to borg create --list item flags in recreate usage, #5165 - add security faq explaining AES-CTR crypto issues, #5254 - add a note to create from stdin regarding files cache, #5180 - fix borg.1 manpage generation regression, #5211 - clarify how exclude options work in recreate, #5193 - add section for retired contributors - hint about not misusing private email addresses of contributors for borg support Version 1.1.13 (2020-06-06) --------------------------- Fixes: - rebuilt using a current Cython version, compatible with python 3.8, #5214 Version 1.1.12 (2020-06-06) --------------------------- Fixes: - fix preload-related memory leak, #5202. - mount / borgfs (FUSE filesystem): - fix FUSE low linear read speed on large files, #5067 - fix crash on old llfuse without birthtime attrs, #5064 - accidentally we required llfuse >= 1.3. Now also old llfuse works again. - set f_namemax in statfs result, #2684 - update precedence of env vars to set config and cache paths, #4894 - correctly calculate compression ratio, taking header size into account, too New features: - --bypass-lock option to bypass locking with read-only repositories Other changes: - upgrade bundled zstd to 1.4.5 - travis: adding comments and explanations to Travis config / install script, improve macOS builds. - tests: test_delete_force: avoid sporadic test setup issues, #5196 - misc. 
vagrant fixes - the binary for macOS is now built on macOS 10.12 - the binaries for Linux are now built on Debian 8 "Jessie", #3761 - docs: - PlaceholderError not printed as JSON, #4073 - "How important is Borg config?", #4941 - make Sphinx warnings break docs build, #4587 - some markup / warning fixes - add "updating borgbackup.org/releases" to release checklist, #4999 - add "rendering docs" to release checklist, #5000 - clarify borg init's encryption modes - add note about patterns and stored paths, #4160 - add upgrade of tools to pip installation how-to - document one cause of orphaned chunks in check command, #2295 - linked recommended restrictions to ssh public keys on borg servers in faq, #4946 Version 1.1.11 (2020-03-08) --------------------------- Fixes: - fixed potential index corruption / data loss issue due to bug in hashindex_set, #4829. Please read and follow the more detailled notes close to the top of this document. - upgrade bundled xxhash to 0.7.3, #4891. 0.7.2 is the minimum requirement for correct operations on ARMv6 in non-fixup mode, where unaligned memory accesses cause bus errors. 0.7.3 adds some speedups and libxxhash 0.7.3 even has a pkg-config file now. - upgrade bundled lz4 to 1.9.2 - upgrade bundled zstd to 1.4.4 - fix crash when upgrading erroneous hints file, #4922 - extract: - fix KeyError for "partial" extraction, #4607 - fix "partial" extract for hardlinked contentless file types, #4725 - fix preloading for old (0.xx) remote servers, #4652 - fix confusing output of borg extract --list --strip-components, #4934 - delete: after double-force delete, warn about necessary repair, #4704 - create: give invalid repo error msg if repo config not found, #4411 - mount: fix FUSE mount missing st_birthtime, #4763 #4767 - check: do not stumble over invalid item key, #4845 - info: if the archive doesn't exist, print a pretty message, #4793 - SecurityManager.known(): check all files, #4614 - Repository.open: use stat() to check for repo dir, #4695 - Repository.check_can_create_repository: use stat() to check, #4695 - fix invalid archive error message - fix optional/non-optional location arg, #4541 - commit-time free space calc: ignore bad compact map entries, #4796 - ignore EACCES (errno 13) when hardlinking the old config, #4730 - --prefix / -P: fix processing, avoid argparse issue, #4769 New features: - enable placeholder usage in all extra archive arguments - new BORG_WORKAROUNDS mechanism, basesyncfile, #4710 - recreate: support --timestamp option, #4745 - support platforms without os.link (e.g. Android with Termux), #4901. if we don't have os.link, we just extract another copy instead of making a hardlink. - support linux platforms without sync_file_range (e.g. Android 7 with Termux), #4905 Other: - ignore --stats when given with --dry-run, but continue, #4373 - add some ProgressIndicator msgids to code / fix docs, #4935 - elaborate on "Calculating size" message - argparser: always use REPOSITORY in metavar, also use more consistent help phrasing. - check: improve error output for matching index size, see #4829 - docs: - changelog: add advisory about hashindex_set bug #4829 - better describe BORG_SECURITY_DIR, BORG_CACHE_DIR, #4919 - infos about cache security assumptions, #4900 - add FAQ describing difference between a local repo vs. repo on a server. 
- document how to test exclusion patterns without performing an actual backup - timestamps in the files cache are now usually ctime, #4583 - fix bad reference to borg compact (does not exist in 1.1), #4660 - create: borg 1.1 is not future any more - extract: document limitation "needs empty destination", #4598 - how to supply a passphrase, use crypto devices, #4549 - fix osxfuse github link in installation docs - add example of exclude-norecurse rule in help patterns - update macOS Brew link - add note about software for automating backups, #4581 - AUTHORS: mention copyright+license for bundled msgpack - fix various code blocks in the docs, #4708 - updated docs to cover use of temp directory on remote, #4545 - add restore docs, #4670 - add a pull backup / push restore how-to, #1552 - add FAQ how to retain original paths, #4532 - explain difference between --exclude and --pattern, #4118 - add FAQs for SSH connection issues, #3866 - improve password FAQ, #4591 - reiterate that 'file cache names are absolute' in FAQ - tests: - cope with ANY error when importing pytest into borg.testsuite, #4652 - fix broken test that relied on improper zlib assumptions - test_fuse: filter out selinux xattrs, #4574 - travis / vagrant: - misc python versions removed / changed (due to openssl 1.1 compatibility) or added (3.7 and 3.8, for better borg compatibility testing) - binary building is on python 3.5.9 now - vagrant: - add new boxes: ubuntu 18.04 and 20.04, debian 10 - update boxes: openindiana, darwin, netbsd - remove old boxes: centos 6 - darwin: updated osxfuse to 3.10.4 - use debian/ubuntu pip/virtualenv packages - rather use python 3.6.2 than 3.6.0, fixes coverage/sqlite3 issue - use requirements.d/development.lock.txt to avoid compat issues - travis: - darwin: backport some install code / order from master - remove deprecated keyword "sudo" from travis config - allow osx builds to fail, #4955 this is due to travis-ci frequently being so slow that the OS X builds just fail because they exceed 50 minutes and get killed by travis. Version 1.1.10 (2019-05-16) --------------------------- Fixes: - extract: hang on partial extraction with ssh: repo, when hardlink master is not matched/extracted and borg hangs on related slave hardlink, #4350 - lrucache: regularly remove old FDs, #4427 - avoid stale filehandle issues, #3265 - freebsd: make xattr platform code api compatible with linux, #3952 - use whitelist approach for borg serve, #4097 - borg command shall terminate with rc 2 for ImportErrors, #4424 - create: only run stat_simple_attrs() once, this increases backup with lots of unchanged files performance by ~ 5%. - prune: fix incorrect borg prune --stats output with --dry-run, #4373 - key export: emit user-friendly error if repo key is exported to a directory, #4348 New features: - bundle latest supported msgpack-python release (0.5.6), remove msgpack-python from setup.py install_requires - by default we use the bundled code now. optionally, we still support using an external msgpack (see hints in setup.py), but this requires solid requirements management within distributions and is not recommended. borgbackup will break if you upgrade msgpack to an unsupported version. - display msgpack version as part of sysinfo (e.g. 
in tracebacks) - timestamp for borg delete --info added, #4359 - enable placeholder usage in --comment and --glob-archives, #4559, #4495 Other: - serve: do not check python/libc for borg serve, #4483 - shell completions: borg diff second archive - release scripts: signing binaries with Qubes OS support - testing: - vagrant: upgrade openbsd box to 6.4 - travis-ci: lock test env to py 3.4 compatible versions, #4343 - get rid of confusing coverage warning, #2069 - rename test_mount_hardlinks to test_fuse_mount_hardlinks, so both can be excluded by "not test_fuse". - pure-py msgpack warning shall not make a lot of tests fail, #4558 - docs: - add "SSH Configuration" section to "borg serve", #3988, #636, #4485 - README: new URL for funding options - add a sample logging.conf in docs/misc, #4380 - elaborate on append-only mode docs, #3504 - installation: added Alpine Linux to distribution list, #4415 - usage.html: only modify window.location when redirecting, #4133 - add msgpack license to docs/3rd_party/msgpack - vagrant / binary builds: - use python 3.5.7 for builds - use osxfuse 3.8.3 Version 1.1.9 (2019-02-10) -------------------------- Fixes: - security fix: configure FUSE with "default_permissions", #3903 "default_permissions" is now enforced by borg by default to let the kernel check uid/gid/mode based permissions. "ignore_permissions" can be given to not enforce "default_permissions". - make "hostname" short, even on misconfigured systems, #4262 - fix free space calculation on macOS (and others?), #4289 - config: quit with error message when no key is provided, #4223 - recover_segment: handle too small segment files correctly, #4272 - correctly release memoryview, #4243 - avoid diaper pattern in configparser by opening files, #4263 - add "# cython: language_level=3" directive to .pyx files, #4214 - info: consider part files for "This archive" stats, #3522 - work around Microsoft WSL issue #645 (sync_file_range), #1961 New features: - add --rsh command line option to complement BORG_RSH env var, #1701 - init: --make-parent-dirs parent1/parent2/repo_dir, #4235 Other: - add archive name to check --repair output, #3447 - check for unsupported msgpack versions - shell completions: - new shell completions for borg 1.1.9 - more complete shell completions for borg mount -o - added shell completions for borg help - option arguments for zsh tab completion - docs: - add FAQ regarding free disk space check, #3905 - update BORG_PASSCOMMAND example and clarify variable expansion, #4249 - FAQ regarding change of compression settings, #4222 - add note about BSD flags to changelog, #4246 - improve logging in example automation script - add note about files changing during backup, #4081 - work around the backslash issue, #4280 - update release workflow using twine (docs, scripts), #4213 - add warnings on repository copies to avoid future problems, #4272 - tests: - fix the homebrew 1.9 issues on travis-ci, #4254 - fix duplicate test method name, #4311 - test_mount_hardlinks: get rid of fakeroot-caused test fails, #3389 Version 1.1.8 (2018-12-09) -------------------------- Fixes: - enforce storage quota if set by serve-command, #4093 - invalid locations: give err msg containing parsed location, #4179 - list repo: add placeholders for hostname and username, #4130 - on linux, symlinks can't have ACLs, so don't try to set any, #4044 New features: - create: added PATH::archive output on INFO log level - read a passphrase from a file descriptor specified in the BORG_PASSPHRASE_FD environment variable. 
Other: - docs: - option --format is required for some expensive-to-compute values for json borg list by default does not compute expensive values except when they are needed. whether they are needed is determined by the format, in standard mode as well as in --json mode. - tell that our binaries are x86/x64 amd/intel, bauerj has ARM - fixed wrong archive name pattern in CRUD benchmark help - fixed link to cachedir spec in docs, #4140 - tests: - stop using fakeroot on travis, avoids sporadic EISDIR errors, #2482 - xattr key names must start with "user." on linux - fix code so flake8 3.6 does not complain - explicitly convert environment variable to str, #4136 - fix DeprecationWarning: Flags not at the start of the expression, #4137 - support pytest4, #4172 - vagrant: - use python 3.5.6 for builds Version 1.1.7 (2018-08-11) -------------------------- Compatibility notes: - added support for Python 3.7 Fixes: - cache lock: use lock_wait everywhere to fix infinite wait, see #3968 - don't archive tagged dir when recursing an excluded dir, #3991 - py37 argparse: work around bad default in py 3.7.0a/b/rc, #3996 - py37 remove loggerDict.clear() from tearDown method, #3805 - some fixes for bugs which likely did not result in problems in practice: - fixed logic bug in platform module API version check - fixed xattr/acl function prototypes, added missing ones New features: - init: add warning to store both key and passphrase at safe place(s) - BORG_HOST_ID env var to work around all-zero MAC address issue, #3985 - borg debug dump-repo-objs --ghost (dump everything from segment files, including deleted or superceded objects or commit tags) - borg debug search-repo-objs (search in repo objects for hex bytes or strings) Other changes: - add Python 3.7 support - updated shell completions - call socket.gethostname only once - locking: better logging, add some asserts - borg debug dump-repo-objs: - filename layout improvements - use repository.scan() to get on-disk order - docs: - update installation instructions for macOS - added instructions to install fuse via homebrew - improve diff docs - added note that checkpoints inside files requires 1.1+ - add link to tempfile module - remove row/column-spanning from docs source, #4000 #3990 - tests: - fetch less data via os.urandom - add py37 env for tox - travis: add 3.7, remove 3.6-dev (we test with -dev in master) - vagrant / binary builds: - use osxfuse 3.8.2 - use own (uptodate) openindiana box Version 1.1.6 (2018-06-11) -------------------------- Compatibility notes: - 1.1.6 changes: - also allow msgpack-python 0.5.6. Fixes: - fix borg exception handling on ENOSPC error with xattrs, #3808 - prune: fix/improve overall progress display - borg config repo ... 
does not need cache/manifest/key, #3802 - debug dump-repo-objs should not depend on a manifest obj - pypi package: - include .coveragerc, needed by tox.ini - fix package long description, #3854 New features: - mount: add uid, gid, umask mount options - delete: - only commit once, #3823 - implement --dry-run, #3822 - check: - show progress while rebuilding missing manifest, #3787 - more --repair output - borg config --list , #3612 Other changes: - update msgpack requirement, #3753 - update bundled zstd to 1.3.4, #3745 - update bundled lz4 code to 1.8.2, #3870 - docs: - describe what BORG_LIBZSTD_PREFIX does - fix and deduplicate encryption quickstart docs, #3776 - vagrant: - FUSE for macOS: upgrade 3.7.1 to 3.8.0 - exclude macOS High Sierra upgrade on the darwin64 machine - remove borgbackup.egg-info dir in fs_init (after rsync) - use pyenv-based build/test on jessie32/62 - use local 32 and 64bit debian jessie boxes - use "vagrant" as username for new xenial box - travis OS X: use xcode 8.3 (not broken) Version 1.1.5 (2018-04-01) -------------------------- Compatibility notes: - 1.1.5 changes: - require msgpack-python >= 0.4.6 and < 0.5.0. 0.5.0+ dropped python 3.4 testing and also caused some other issues because the python package was renamed to msgpack and emitted some FutureWarning. Fixes: - create --list: fix that it was never showing M status, #3492 - create: fix timing for first checkpoint (read files cache early, init checkpoint timer after that), see #3394 - extract: set rc=1 when extracting damaged files with all-zero replacement chunks or with size inconsistencies, #3448 - diff: consider an empty file as different to a non-existing file, #3688 - files cache: improve exception handling, #3553 - ignore exceptions in scandir_inorder() caused by an implicit stat(), also remove unneeded sort, #3545 - fixed tab completion problem where a space is always added after path even when it shouldn't - build: do .h file content checks in binary mode, fixes build issue for non-ascii header files on pure-ascii locale platforms, #3544 #3639 - borgfs: fix patterns/paths processing, #3551 - config: add some validation, #3566 - repository config: add validation for max_segment_size, #3592 - set cache previous_location on load instead of save - remove platform.uname() call which caused library mismatch issues, #3732 - add exception handler around deprecated platform.linux_distribution() call - use same datetime object for {now} and {utcnow}, #3548 New features: - create: implement --stdin-name, #3533 - add chunker_params to borg archive info (--json) - BORG_SHOW_SYSINFO=no to hide system information from exceptions Other changes: - updated zsh completions for borg 1.1.4 - files cache related code cleanups - be more helpful when parsing invalid --pattern values, #3575 - be more clear in secure-erase warning message, #3591 - improve getpass user experience, #3689 - docs build: unicode problem fixed when using a py27-based sphinx - docs: - security: explicitly note what happens OUTSIDE the attack model - security: add note about combining compression and encryption - security: describe chunk size / proximity issue, #3687 - quickstart: add note about permissions, borg@localhost, #3452 - quickstart: add introduction to repositories & archives, #3620 - recreate --recompress: add missing metavar, clarify description, #3617 - improve logging docs, #3549 - add an example for --pattern usage, #3661 - clarify path semantics when matching, #3598 - link to offline documentation from README, #3502 - add docs on 
how to verify a signed release with GPG, #3634 - chunk seed is generated per repository (not: archive) - better formatting of CPU usage documentation, #3554 - extend append-only repo rollback docs, #3579 - tests: - fix erroneously skipped zstd compressor tests, #3606 - skip a test if argparse is broken, #3705 - vagrant: - xenial64 box now uses username 'vagrant', #3707 - move cleanup steps to fs_init, #3706 - the boxcutter wheezy boxes are 404, use local ones - update to Python 3.5.5 (for binary builds) Version 1.1.4 (2017-12-31) -------------------------- Compatibility notes: - When upgrading from borg 1.0.x to 1.1.x, please note: - read all the compatibility notes for 1.1.0*, starting from 1.1.0b1. - borg upgrade: you do not need to and you also should not run it. - borg might ask some security-related questions once after upgrading. You can answer them either manually or via environment variable. One known case is if you use unencrypted repositories, then it will ask about a unknown unencrypted repository one time. - your first backup with 1.1.x might be significantly slower (it might completely read, chunk, hash a lot files) - this is due to the --files-cache mode change (and happens every time you change mode). You can avoid the one-time slowdown by using the pre-1.1.0rc4-compatible mode (but that is less safe for detecting changed files than the default). See the --files-cache docs for details. - borg 1.1.4 changes: - zstd compression is new in borg 1.1.4, older borg can't handle it. - new minimum requirements for the compression libraries - if the required versions (header and lib) can't be found at build time, bundled code will be used: - added requirement: libzstd >= 1.3.0 (bundled: 1.3.2) - updated requirement: liblz4 >= 1.7.0 / r129 (bundled: 1.8.0) Fixes: - check: data corruption fix: fix for borg check --repair malfunction, #3444. See the more detailled notes close to the top of this document. - delete: also delete security dir when deleting a repo, #3427 - prune: fix building the "borg prune" man page, #3398 - init: use given --storage-quota for local repo, #3470 - init: properly quote repo path in output - fix startup delay with dns-only own fqdn resolving, #3471 New features: - added zstd compression. try it! - added placeholder {reverse-fqdn} for fqdn in reverse notation - added BORG_BASE_DIR environment variable, #3338 Other changes: - list help topics when invalid topic is requested - fix lz4 deprecation warning, requires lz4 >= 1.7.0 (r129) - add parens for C preprocessor macro argument usages (did not cause malfunction) - exclude broken pytest 3.3.0 release - updated fish/bash completions - init: more clear exception messages for borg create, #3465 - docs: - add auto-generated docs for borg config - don't generate HTML docs page for borgfs, #3404 - docs update for lz4 b2 zstd changes - add zstd to compression help, readme, docs - update requirements and install docs about bundled lz4 and zstd - refactored build of the compress and crypto.low_level extensions, #3415: - move some lib/build related code to setup_{zstd,lz4,b2}.py - bundle lz4 1.8.0 (requirement: >= 1.7.0 / r129) - bundle zstd 1.3.2 (requirement: >= 1.3.0) - blake2 was already bundled - rename BORG_LZ4_PREFIX env var to BORG_LIBLZ4_PREFIX for better consistency: we also have BORG_LIBB2_PREFIX and BORG_LIBZSTD_PREFIX now. 
- add prefer_system_lib* = True settings to setup.py - by default the build will prefer a shared library over the bundled code, if library and headers can be found and meet the minimum requirements. Version 1.1.3 (2017-11-27) -------------------------- Fixes: - Security Fix for CVE-2017-15914: Incorrect implementation of access controls allows remote users to override repository restrictions in Borg servers. A user able to access a remote Borg SSH server is able to circumvent access controls post-authentication. Affected releases: 1.1.0, 1.1.1, 1.1.2. Releases 1.0.x are NOT affected. - crc32: deal with unaligned buffer, add tests - this broke borg on older ARM CPUs that can not deal with unaligned 32bit memory accesses and raise a bus error in such cases. the fix might also improve performance on some CPUs as all 32bit memory accesses by the crc32 code are properly aligned now. #3317 - mount: fixed support of --consider-part-files and do not show .borg_part_N files by default in the mounted FUSE filesystem. #3347 - fixed cache/repo timestamp inconsistency message, highlight that information is obtained from security dir (deleting the cache will not bypass this error in case the user knows this is a legitimate repo). - borgfs: don't show sub-command in borgfs help, #3287 - create: show an error when --dry-run and --stats are used together, #3298 New features: - mount: added exclusion group options and paths, #2138 Reused some code to support similar options/paths as borg extract offers - making good use of these to only mount a smaller subset of dirs/files can speed up mounting a lot and also will consume way less memory. borg mount [options] repo_or_archive mountpoint path [paths...] paths: you can just give some "root paths" (like for borg extract) to only partially populate the FUSE filesystem. new options: --exclude[-from], --pattern[s-from], --strip-components - create/extract: support st_birthtime on platforms supporting it, #3272 - add "borg config" command for querying/setting/deleting config values, #3304 Other changes: - clean up and simplify packaging (only package committed files, do not install .c/.h/.pyx files) - docs: - point out tuning options for borg create, #3239 - add instructions for using ntfsclone, zerofree, #81 - move image backup-related FAQ entries to a new page - clarify key aliases for borg list --format, #3111 - mention break-lock in checkpointing FAQ entry, #3328 - document sshfs rename workaround, #3315 - add FAQ about removing files from existing archives - add FAQ about different prune policies - usage and man page for borgfs, #3216 - clarify create --stats duration vs. 
wall time, #3301 - clarify encrypted key format for borg key export, #3296 - update release checklist about security fixes - document good and problematic option placements, fix examples, #3356 - add note about using --nobsdflags to avoid speed penalty related to bsdflags, #3239 - move most of support section to www.borgbackup.org Version 1.1.2 (2017-11-05) -------------------------- Fixes: - fix KeyError crash when talking to borg server < 1.0.7, #3244 - extract: set bsdflags last (include immutable flag), #3263 - create: don't do stat() call on excluded-norecurse directory, fix exception handling for stat() call, #3209 - create --stats: do not count data volume twice when checkpointing, #3224 - recreate: move chunks_healthy when excluding hardlink master, #3228 - recreate: get rid of chunks_healthy when rechunking (does not match), #3218 - check: get rid of already existing not matching chunks_healthy metadata, #3218 - list: fix stdout broken pipe handling, #3245 - list/diff: remove tag-file options (not used), #3226 New features: - bash, zsh and fish shell auto-completions, see scripts/shell_completions/ - added BORG_CONFIG_DIR env var, #3083 Other changes: - docs: - clarify using a blank passphrase in keyfile mode - mention "!" (exclude-norecurse) type in "patterns" help - document to first heal before running borg recreate to re-chunk stuff, because that will have to get rid of chunks_healthy metadata. - more than 23 is not supported for CHUNK_MAX_EXP, #3115 - borg does not respect nodump flag by default any more - clarify same-filesystem requirement for borg upgrade, #2083 - update / rephrase cygwin / WSL status, #3174 - improve docs about --stats, #3260 - vagrant: openindiana new clang package Already contained in 1.1.1 (last minute fix): - arg parsing: fix fallback function, refactor, #3205. This is a fixup for #3155, which was broken on at least python <= 3.4.2. Version 1.1.1 (2017-10-22) -------------------------- Compatibility notes: - The deprecated --no-files-cache is not a global/common option any more, but only available for borg create (it is not needed for anything else). Use --files-cache=disabled instead of --no-files-cache. - The nodump flag ("do not backup this file") is not honoured any more by default because this functionality (esp. if it happened by error or unexpected) was rather confusing and unexplainable at first to users. If you want that "do not backup NODUMP-flagged files" behaviour, use: borg create --exclude-nodump ... - If you are on Linux and do not need bsdflags archived, consider using ``--nobsdflags`` with ``borg create`` to avoid additional syscalls and speed up backup creation. Fixes: - borg recreate: correctly compute part file sizes. fixes cosmetic, but annoying issue as borg check complains about size inconsistencies of part files in affected archives. you can solve that by running borg recreate on these archives, see also #3157. 
- bsdflags support: do not open BLK/CHR/LNK files, avoid crashes and slowness, #3130 - recreate: don't crash on attic archives w/o time_end, #3109 - don't crash on repository filesystems w/o hardlink support, #3107 - don't crash in first part of truncate_and_unlink, #3117 - fix server-side IndexError crash with clients < 1.0.7, #3192 - don't show traceback if only a global option is given, show help, #3142 - cache: use SaveFile for more safety, #3158 - init: fix wrong encryption choices in command line parser, fix missing "authenticated-blake2", #3103 - move --no-files-cache from common to borg create options, #3146 - fix detection of non-local path (failed on ..filename), #3108 - logging with fileConfig: set json attr on "borg" logger, #3114 - fix crash with relative BORG_KEY_FILE, #3197 - show excluded dir with "x" for tagged dirs / caches, #3189 New features: - create: --nobsdflags and --exclude-nodump options, #3160 - extract: --nobsdflags option, #3160 Other changes: - remove annoying hardlinked symlinks warning, #3175 - vagrant: use self-made FreeBSD 10.3 box, #3022 - travis: don't brew update, hopefully fixes #2532 - docs: - readme: -e option is required in borg 1.1 - add example showing --show-version --show-rc - use --format rather than --list-format (deprecated) in example - update docs about hardlinked symlinks limitation Version 1.1.0 (2017-10-07) -------------------------- Compatibility notes: - borg command line: do not put options in between positional arguments This sometimes works (e.g. it worked in borg 1.0.x), but can easily stop working if we make positional arguments optional (like it happened for borg create's "paths" argument in 1.1). There are also places in borg 1.0 where we do that, so it doesn't work there in general either. #3356 Good: borg create -v --stats repo::archive path Good: borg create repo::archive path -v --stats Bad: borg create repo::archive -v --stats path Fixes: - fix LD_LIBRARY_PATH restoration for subprocesses, #3077 - "auto" compression: make sure expensive compression is actually better, otherwise store lz4 compressed data we already computed. Other changes: - docs: - FAQ: we do not implement futile attempts of ETA / progress displays - manpage: fix typos, update homepage - implement simple "issue" role for manpage generation, #3075 Version 1.1.0rc4 (2017-10-01) ----------------------------- Compatibility notes: - A borg server >= 1.1.0rc4 does not support borg clients 1.1.0b3-b5. #3033 - The files cache is now controlled differently and has a new default mode: - the files cache now uses ctime by default for improved file change detection safety. You can still use mtime for more speed and less safety. - --ignore-inode is deprecated (use --files-cache=... without "inode") - --no-files-cache is deprecated (use --files-cache=disabled) New features: - --files-cache - implement files cache mode control, #911 You can now control the files cache mode using this option: --files-cache={ctime,mtime,size,inode,rechunk,disabled} (only some combinations are supported). See the docs for details. 
Fixes: - remote progress/logging: deal with partial lines, #2637 - remote progress: flush json mode output - fix subprocess environments, #3050 (and more) Other changes: - remove client_supports_log_v3 flag, #3033 - exclude broken Cython 0.27(.0) in requirements, #3066 - vagrant: - upgrade to FUSE for macOS 3.7.1 - use Python 3.5.4 to build the binaries - docs: - security: change-passphrase only changes the passphrase, #2990 - fixed/improved borg create --compression examples, #3034 - add note about metadata dedup and --no[ac]time, #2518 - twitter account @borgbackup now, better visible, #2948 - simplified rate limiting wrapper in FAQ Version 1.1.0rc3 (2017-09-10) ----------------------------- New features: - delete: support naming multiple archives, #2958 Fixes: - repo cleanup/write: invalidate cached FDs, #2982 - fix datetime.isoformat() microseconds issues, #2994 - recover_segment: use mmap(), lower memory needs, #2987 Other changes: - with-lock: close segment file before invoking subprocess - keymanager: don't depend on optional readline module, #2976 - docs: - fix macOS keychain integration command - show/link new screencasts in README, #2936 - document utf-8 locale requirement for json mode, #2273 - vagrant: clean up shell profile init, user name, #2977 - test_detect_attic_repo: don't test mount, #2975 - add debug logging for repository cleanup Version 1.1.0rc2 (2017-08-28) ----------------------------- Compatibility notes: - list: corrected mix-up of "isomtime" and "mtime" formats. Previously, "isomtime" was the default but produced a verbose human format, while "mtime" produced a ISO-8601-like format. The behaviours have been swapped (so "mtime" is human, "isomtime" is ISO-like), and the default is now "mtime". "isomtime" is now a real ISO-8601 format ("T" between date and time, not a space). New features: - None. 
Fixes: - list: fix weird mixup of mtime/isomtime - create --timestamp: set start time, #2957 - ignore corrupt files cache, #2939 - migrate locks to child PID when daemonize is used - fix exitcode of borg serve, #2910 - only compare contents when chunker params match, #2899 - umount: try fusermount, then try umount, #2863 Other changes: - JSON: use a more standard ISO 8601 datetime format, #2376 - cache: write_archive_index: truncate_and_unlink on error, #2628 - detect non-upgraded Attic repositories, #1933 - delete various nogil and threading related lines - coala / pylint related improvements - docs: - renew asciinema/screencasts, #669 - create: document exclusion through nodump, #2949 - minor formatting fixes - tar: tarpipe example - improve "with-lock" and "info" docs, #2869 - detail how to use macOS/GNOME/KDE keyrings for repo passwords, #392 - travis: only short-circuit docs-only changes for pull requests - vagrant: - netbsd: bash is already installed - fix netbsd version in PKG_PATH - add exe location to PATH when we build an exe Version 1.1.0rc1 (2017-07-24) ----------------------------- Compatibility notes: - delete: removed short option for --cache-only New features: - support borg list repo --format {comment} {bcomment} {end}, #2081 - key import: allow reading from stdin, #2760 Fixes: - with-lock: avoid creating segment files that might be overwritten later, #1867 - prune: fix checkpoints processing with --glob-archives - FUSE: versions view: keep original file extension at end, #2769 - fix --last, --first: do not accept values <= 0, fix reversed archive ordering with --last - include testsuite data (attic.tar.gz) when installing the package - use limited unpacker for outer key, for manifest (both security precautions), #2174 #2175 - fix bashism in shell scripts, #2820, #2816 - cleanup endianness detection, create _endian.h, fixes build on alpine linux, #2809 - fix crash with --no-cache-sync (give known chunk size to chunk_incref), #2853 Other changes: - FUSE: versions view: linear numbering by archive time - split up interval parsing from filtering for --keep-within, #2610 - add a basic .editorconfig, #2734 - use archive creation time as mtime for FUSE mount, #2834 - upgrade FUSE for macOS (osxfuse) from 3.5.8 to 3.6.3, #2706 - hashindex: speed up by replacing modulo with "if" to check for wraparound - coala checker / pylint: fixed requirements and .coafile, more ignores - borg upgrade: name backup directories as 'before-upgrade', #2811 - add .mailmap - some minor changes suggested by lgtm.com - docs: - better explanation of the --ignore-inode option relevance, #2800 - fix openSUSE command and add openSUSE section - simplify ssh authorized_keys file using "restrict", add legacy note, #2121 - mount: show usage of archive filters - mount: add repository example, #2462 - info: update and add examples, #2765 - prune: include example - improved style / formatting - improved/fixed segments_per_dir docs - recreate: fix wrong "remove unwanted files" example - reference list of status chars in borg recreate --filter description - update source-install docs about doc build dependencies, #2795 - cleanup installation docs - file system requirements, update segs per dir - fix checkpoints/parts reference in FAQ, #2859 - code: - hashindex: don't pass side effect into macro - crypto low_level: don't mutate local bytes() - use dash_open function to open file or "-" for stdin/stdout - archiver: argparse cleanup / refactoring - shellpattern: add match_end arg - tests: added some additional unit 
tests, some fixes, #2700 #2710 - vagrant: fix setup of cygwin, add Debian 9 "stretch" - travis: don't perform full travis build on docs-only changes, #2531 Version 1.1.0b6 (2017-06-18) ---------------------------- Compatibility notes: - Running "borg init" via a "borg serve --append-only" server will *not* create an append-only repository anymore. Use "borg init --append-only" to initialize an append-only repository. - Repositories in the "repokey" and "repokey-blake2" modes with an empty passphrase are now treated as unencrypted repositories for security checks (e.g. BORG_UNKNOWN_UNENCRYPTED_REPO_ACCESS_IS_OK). Previously there would be no prompts nor messages if an unknown repository in one of these modes with an empty passphrase was encountered. This would allow an attacker to swap a repository, if one assumed that the lack of password prompts was due to a set BORG_PASSPHRASE. Since the "trick" does not work if BORG_PASSPHRASE is set, this does generally not affect scripts. - Repositories in the "authenticated" mode are now treated as the unencrypted repositories they are. - The client-side temporary repository cache now holds unencrypted data for better speed. - borg init: removed the short form of --append-only (-a). - borg upgrade: removed the short form of --inplace (-i). New features: - reimplemented the RepositoryCache, size-limited caching of decrypted repo contents, integrity checked via xxh64. #2515 - reduced space usage of chunks.archive.d. Existing caches are migrated during a cache sync. #235 #2638 - integrity checking using xxh64 for important files used by borg, #1101: - repository: index and hints files - cache: chunks and files caches, chunks.archive.d - improve cache sync speed, #1729 - create: new --no-cache-sync option - add repository mandatory feature flags infrastructure, #1806 - Verify most operations against SecurityManager. Location, manifest timestamp and key types are now checked for almost all non-debug commands. #2487 - implement storage quotas, #2517 - serve: add --restrict-to-repository, #2589 - BORG_PASSCOMMAND: use external tool providing the key passphrase, #2573 - borg export-tar, #2519 - list: --json-lines instead of --json for archive contents, #2439 - add --debug-profile option (and also "borg debug convert-profile"), #2473 - implement --glob-archives/-a, #2448 - normalize authenticated key modes for better naming consistency: - rename "authenticated" to "authenticated-blake2" (uses blake2b) - implement "authenticated" mode (uses hmac-sha256) Fixes: - hashindex: read/write indices >2 GiB on 32bit systems, better error reporting, #2496 - repository URLs: implement IPv6 address support and also more informative error message when parsing fails. 
- mount: check whether llfuse is installed before asking for passphrase, #2540 - mount: do pre-mount checks before opening repository, #2541 - FUSE: - fix crash if empty (None) xattr is read, #2534 - fix read(2) caching data in metadata cache - fix negative uid/gid crash (fix crash when mounting archives of external drives made on cygwin), #2674 - redo ItemCache, on top of object cache - use decrypted cache - remove unnecessary normpaths - serve: ignore --append-only when initializing a repository (borg init), #2501 - serve: fix incorrect type of exception_short for Errors, #2513 - fix --exclude and --exclude-from recursing into directories, #2469 - init: don't allow creating nested repositories, #2563 - --json: fix encryption[mode] not being the cmdline name - remote: propagate Error.traceback correctly - fix remote logging and progress, #2241 - implement --debug-topic for remote servers - remote: restore "Remote:" prefix (as used in 1.0.x) - rpc negotiate: enable v3 log protocol only for supported clients - fix --progress and logging in general for remote - fix parse_version, add tests, #2556 - repository: truncate segments (and also some other files) before unlinking, #2557 - recreate: keep timestamps as in original archive, #2384 - recreate: if single archive is not processed, exit 2 - patterns: don't recurse with ! / --exclude for pf:, #2509 - cache sync: fix n^2 behaviour in lookup_name - extract: don't write to disk with --stdout (affected non-regular-file items), #2645 - hashindex: implement KeyError, more tests Other changes: - remote: show path in PathNotAllowed - consider repokey w/o passphrase == unencrypted, #2169 - consider authenticated mode == unencrypted, #2503 - restrict key file names, #2560 - document follow_symlinks requirements, check libc, use stat and chown with follow_symlinks=False, #2507 - support common options on the main command, #2508 - support common options on mid-level commands (e.g. borg *key* export) - make --progress a common option - increase DEFAULT_SEGMENTS_PER_DIR to 1000 - chunker: fix invalid use of types (function only used by tests) - chunker: don't do uint32_t >> 32 - FUSE: - add instrumentation (--debug and SIGUSR1/SIGINFO) - reduced memory usage for repository mounts by lazily instantiating archives - improved archive load times - info: use CacheSynchronizer & HashIndex.stats_against (better performance) - docs: - init: document --encryption as required - security: OpenSSL usage - security: used implementations; note python libraries - security: security track record of OpenSSL and msgpack - patterns: document denial of service (regex, wildcards) - init: note possible denial of service with "none" mode - init: document SHA extension is supported in OpenSSL and thus SHA is faster on AMD Ryzen than blake2b. - book: use A4 format, new builder option format. - book: create appendices - data structures: explain repository compaction - data structures: add chunk layout diagram - data structures: integrity checking - data structures: demingle cache and repo index - Attic FAQ: separate section for attic stuff - FAQ: I get an IntegrityError or similar - what now? 
- FAQ: Can I use Borg on SMR hard drives?, #2252 - FAQ: specify "using inline shell scripts" - add systemd warning regarding placeholders, #2543 - xattr: document API - add docs/misc/borg-data-flow data flow chart - debugging facilities - README: how to help the project, #2550 - README: add bountysource badge, #2558 - fresh new theme + tweaking - logo: vectorized (PDF and SVG) versions - frontends: use headlines - you can link to them - mark --pattern, --patterns-from as experimental - highlight experimental features in online docs - remove regex based pattern examples, #2458 - nanorst for "borg help TOPIC" and --help - split deployment - deployment: hosting repositories - deployment: automated backups to a local hard drive - development: vagrant, windows10 requirements - development: update docs remarks - split usage docs, #2627 - usage: avoid bash highlight, [options] instead of - usage: add benchmark page - helpers: truncate_and_unlink doc - don't suggest to leak BORG_PASSPHRASE - internals: columnize rather long ToC [webkit fixup] internals: manifest & feature flags - internals: more HashIndex details - internals: fix ASCII art equations - internals: edited obj graph related sections a bit - internals: layers image + description - fix way too small figures in pdf - index: disable syntax highlight (bash) - improve options formatting, fix accidental block quotes - testing / checking: - add support for using coala, #1366 - testsuite: add ArchiverCorruptionTestCase - do not test logger name, #2504 - call setup_logging after destroying logging config - testsuite.archiver: normalise pytest.raises vs. assert_raises - add test for preserved intermediate folder permissions, #2477 - key: add round-trip test - remove attic dependency of the tests, #2505 - enable remote tests on cygwin - tests: suppress tar's future timestamp warning - cache sync: add more refcount tests - repository: add tests, including corruption tests - vagrant: - control VM cpus and pytest workers via env vars VMCPUS and XDISTN - update cleaning workdir - fix openbsd shell - add OpenIndiana - packaging: - binaries: don't bundle libssl - setup.py clean to remove compiled files - fail in borg package if version metadata is very broken (setuptools_scm) - repo / code structure: - create borg.algorithms and borg.crypto packages - algorithms: rename crc32 to checksums - move patterns to module, #2469 - gitignore: complete paths for src/ excludes - cache: extract CacheConfig class - implement IntegrityCheckedFile + Detached variant, #2502 #1688 - introduce popen_with_error_handling to handle common user errors Version 1.1.0b5 (2017-04-30) ---------------------------- Compatibility notes: - BORG_HOSTNAME_IS_UNIQUE is now on by default. 
- removed --compression-from feature - recreate: add --recompress flag, unify --always-recompress and --recompress Fixes: - catch exception for os.link when hardlinks are not supported, #2405 - borg rename / recreate: expand placeholders, #2386 - generic support for hardlinks (files, devices, FIFOs), #2324 - extract: also create parent dir for device files, if needed, #2358 - extract: if a hardlink master is not in the to-be-extracted subset, the "x" status was not displayed for it, #2351 - embrace y2038 issue to support 32bit platforms: clamp timestamps to int32, #2347 - verify_data: fix IntegrityError handling for defect chunks, #2442 - allow excluding parent and including child, #2314 Other changes: - refactor compression decision stuff - change global compression default to lz4 as well, to be consistent with --compression defaults. - placeholders: deny access to internals and other unspecified stuff - clearer error message for unrecognized placeholder - more clear exception if borg check does not help, #2427 - vagrant: upgrade FUSE for macOS to 3.5.8, #2346 - linux binary builds: get rid of glibc 2.13 dependency, #2430 - docs: - placeholders: document escaping - serve: env vars in original commands are ignored - tell what kind of hardlinks we support - more docs about compression - LICENSE: use canonical formulation ("copyright holders and contributors" instead of "author") - document borg init behaviour via append-only borg serve, #2440 - be clear about what buzhash is used for, #2390 - add hint about chunker params, #2421 - clarify borg upgrade docs, #2436 - FAQ to explain warning when running borg check --repair, #2341 - repository file system requirements, #2080 - pre-install considerations - misc. formatting / crossref fixes - tests: - enhance travis setuptools_scm situation - add extra test for the hashindex - fix invalid param issue in benchmarks These belong to 1.1.0b4 release, but did not make it into changelog by then: - vagrant: increase memory for parallel testing - lz4 compress: lower max. buffer size, exception handling - add docstring to do_benchmark_crud - patterns help: mention path full-match in intro Version 1.1.0b4 (2017-03-27) ---------------------------- Compatibility notes: - init: the --encryption argument is mandatory now (there are several choices) - moved "borg migrate-to-repokey" to "borg key migrate-to-repokey". - "borg change-passphrase" is deprecated, use "borg key change-passphrase" instead. - the --exclude-if-present option now supports tagging a folder with any filesystem object type (file, folder, etc), instead of expecting only files as tags, #1999 - the --keep-tag-files option has been deprecated in favor of the new --keep-exclude-tags, to account for the change mentioned above. - use lz4 compression by default, #2179 New features: - JSON API to make developing frontends and automation easier (see :ref:`json_output`) - add JSON output to commands: `borg create/list/info --json ...`. - add --log-json option for structured logging output. - add JSON progress information, JSON support for confirmations (yes()). 
- add two new options --pattern and --patterns-from as discussed in #1406 - new path full match pattern style (pf:) for very fast matching, #2334 - add 'debug dump-manifest' and 'debug dump-archive' commands - add 'borg benchmark crud' command, #1788 - new 'borg delete --force --force' to delete severely corrupted archives, #1975 - info: show utilization of maximum archive size, #1452 - list: add dsize and dcsize keys, #2164 - paperkey.html: Add interactive html template for printing key backups. - key export: add qr html export mode - securely erase config file (which might have old encryption key), #2257 - archived file items: add size to metadata, 'borg extract' and 'borg check' do check the file size for consistency, FUSE uses precomputed size from Item. Fixes: - fix remote speed regression introduced in 1.1.0b3, #2185 - fix regression handling timestamps beyond 2262 (revert bigint removal), introduced in 1.1.0b3, #2321 - clamp (nano)second values to unproblematic range, #2304 - hashindex: rebuild hashtable if we have too few empty buckets (performance fix), #2246 - Location regex: fix bad parsing of wrong syntax - ignore posix_fadvise errors in repository.py, #2095 - borg rpc: use limited msgpack.Unpacker (security precaution), #2139 - Manifest: Make sure manifest timestamp is strictly monotonically increasing. - create: handle BackupOSError on a per-path level in one spot - create: clarify -x option / meaning of "same filesystem" - create: don't create hard link refs to failed files - archive check: detect and fix missing all-zero replacement chunks, #2180 - files cache: update inode number when --ignore-inode is used, #2226 - fix decompression exceptions crashing ``check --verify-data`` and others instead of reporting integrity error, #2224 #2221 - extract: warning for unextracted big extended attributes, #2258, #2161 - mount: umount on SIGINT/^C when in foreground - mount: handle invalid hard link refs - mount: fix huge RAM consumption when mounting a repository (saves number of archives * 8 MiB), #2308 - hashindex: detect mingw byte order, #2073 - hashindex: fix wrong skip_hint on hashindex_set when encountering tombstones, the regression was introduced in #1748 - fix ChunkIndex.__contains__ assertion for big-endian archs - fix borg key/debug/benchmark crashing without subcommand, #2240 - Location: accept //servername/share/path - correct/refactor calculation of unique/non-unique chunks - extract: fix missing call to ProgressIndicator.finish - prune: fix error msg, it is --keep-within, not --within - fix "auto" compression mode bug (not compressing), #2331 - fix symlink item fs size computation, #2344 Other changes: - remote repository: improved async exception processing, #2255 #2225 - with --compression auto,C, only use C if lz4 achieves at least 3% compression - PatternMatcher: only normalize path once, #2338 - hashindex: separate endian-dependent defs from endian detection - migrate-to-repokey: ask using canonical_path() as we do everywhere else.
- SyncFile: fix use of fd object after close - make LoggedIO.close_segment reentrant - creating a new segment: use "xb" mode, #2099 - redo key_creator, key_factory, centralise key knowledge, #2272 - add return code functions, #2199 - list: only load cache if needed - list: files->items, clarifications - list: add "name" key for consistency with info cmd - ArchiveFormatter: add "start" key for compatibility with "info" - RemoteRepository: account rx/tx bytes - setup.py build_usage/build_man/build_api fixes - Manifest.in: simplify, exclude .so, .dll and .orig, #2066 - FUSE: get rid of chunk accounting, st_blocks = ceil(size / blocksize). - tests: - help python development by testing 3.6-dev - test for borg delete --force - vagrant: - freebsd: some fixes, #2067 - darwin64: use osxfuse 3.5.4 for tests / to build binaries - darwin64: improve VM settings - use python 3.5.3 to build binaries, #2078 - upgrade pyinstaller from 3.1.1+ to 3.2.1 - pyinstaller: use fixed AND freshly compiled bootloader, #2002 - pyinstaller: automatically builds bootloader if missing - docs: - create really nice man pages - faq: mention --remote-ratelimit in bandwidth limit question - fix caskroom link, #2299 - docs/security: reiterate that RPC in Borg does no networking - docs/security: counter tracking, #2266 - docs/development: update merge remarks - address SSH batch mode in docs, #2202 #2270 - add warning about running build_usage on Python >3.4, #2123 - one link per distro in the installation page - improve --exclude-if-present and --keep-exclude-tags, #2268 - improve automated backup script in doc, #2214 - improve remote-path description - update docs for create -C default change (lz4) - document relative path usage, #1868 - document snapshot usage, #2178 - corrected some stuff in internals+security - internals: move toctree to after the introduction text - clarify metadata kind, manifest ops - key enc: correct / clarify some stuff, link to internals/security - datas: enc: 1.1.x has different MACs - datas: enc: correct factual error -- no nonce involved there. - make internals.rst an index page and edit it a bit - add "Cryptography in Borg" and "Remote RPC protocol security" sections - document BORG_HOSTNAME_IS_UNIQUE, #2087 - FAQ by categories as proposed by @anarcat in #1802 - FAQ: update Which file types, attributes, etc. are *not* preserved? - development: new branching model for git repository - development: define "ours" merge strategy for auto-generated files - create: move --exclude note to main doc - create: move item flags to main doc - fix examples using borg init without -e/--encryption - list: don't print key listings in fat (html + man) - remove Python API docs (were very incomplete, build problems on RTFD) - added FAQ section about backing up root partition Version 1.0.10 (2017-02-13) --------------------------- Bug fixes: - Manifest timestamps are now monotonically increasing, this fixes issues when the system clock jumps backwards or is set inconsistently across computers accessing the same repository, #2115 - Fixed testing regression in 1.0.10rc1 that led to a hard dependency on py.test >= 3.0, #2112 New features: - "key export" can now generate a printable HTML page with both a QR code and a human-readable "paperkey" representation (and custom text) through the ``--qr-html`` option. The same functionality is also available through `paperkey.html `_, which is the same HTML page generated by ``--qr-html``. It works with existing "key export" files and key files.
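A minimal usage sketch for the new export mode (the repository path and output file name are placeholders)::

    $ borg key export --qr-html /path/to/repo encrypted-key-backup.html

The resulting HTML page can be printed and stored offline; it carries the QR code, the human-readable "paperkey" lines and any custom text you add before printing.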
Other changes: - docs: - language clarification - "borg create --one-file-system" option does not respect mount points, but considers different file systems instead, #2141 - setup.py: build_api: sort file list for determinism Version 1.1.0b3 (2017-01-15) ---------------------------- Compatibility notes: - borg init: removed the default of "--encryption/-e", #1979 This was done so users make an informed decision about the -e mode. Bug fixes: - borg recreate: don't rechunkify unless explicitly told so - borg info: fixed bug when called without arguments, #1914 - borg init: fix free space check crashing if disk is full, #1821 - borg debug delete/get obj: fix wrong reference to exception - fix processing of remote ~/ and ~user/ paths (regressed since 1.1.0b1), #1759 - posix platform module: only build / import on non-win32 platforms, #2041 New features: - new CRC32 implementations that are much faster than the zlib one used previously, #1970 - add blake2b key modes (use blake2b as MAC). This links against system libb2, if possible, otherwise uses bundled code - automatically remove stale locks - set BORG_HOSTNAME_IS_UNIQUE env var to enable stale lock killing. If set, stale locks in both cache and repository are deleted. #562 #1253 - borg info : print general repo information, #1680 - borg check --first / --last / --sort / --prefix, #1663 - borg mount --first / --last / --sort / --prefix, #1542 - implement "health" item formatter key, #1749 - BORG_SECURITY_DIR to remember security related infos outside the cache. Key type, location and manifest timestamp checks now survive cache deletion. This also means that you can now delete your cache and avoid previous warnings, since Borg can still tell it's safe. - implement BORG_NEW_PASSPHRASE, #1768 Other changes: - borg recreate: - remove special-cased --dry-run - update --help - remove bloat: interruption blah, autocommit blah, resuming blah - re-use existing checkpoint functionality - archiver tests: add check_cache tool - lints refcounts - fixed cache sync performance regression from 1.1.0b1 onwards, #1940 - syncing the cache without chunks.archive.d (see :ref:`disable_archive_chunks`) now avoids any merges and is thus faster, #1940 - borg check --verify-data: faster due to linear on-disk-order scan - borg debug-xxx commands removed, we use "debug xxx" subcommands now, #1627 - improve metadata handling speed - shortcut hashindex_set by having hashindex_lookup hint about address - improve / add progress displays, #1721 - check for index vs. segment files object count mismatch - make RPC protocol more extensible: use named parameters. - RemoteRepository: misc. code cleanups / refactors - clarify cache/repository README file - docs: - quickstart: add a comment about other (remote) filesystems - quickstart: only give one possible ssh url syntax, all others are documented in usage chapter. - mention file:// - document repo URLs / archive location - clarify borg diff help, #980 - deployment: synthesize alternative --restrict-to-path example - improve cache / index docs, esp. files cache docs, #1825 - document using "git merge 1.0-maint -s recursive -X rename-threshold=20%" for avoiding troubles when merging the 1.0-maint branch into master. - tests: - FUSE tests: catch ENOTSUP on freebsd - FUSE tests: test troublesome xattrs last - fix byte range error in test, #1740 - use monkeypatch to set env vars, but only on pytest based tests.
- point XDG_*_HOME to temp dirs for tests, #1714 - remove all BORG_* env vars from the outer environment Version 1.0.10rc1 (2017-01-29) ------------------------------ Bug fixes: - borg serve: fix transmission data loss of pipe writes, #1268 This affects only the cygwin platform (not Linux, BSD, OS X). - Avoid triggering an ObjectiveFS bug in xattr retrieval, #1992 - When running out of buffer memory when reading xattrs, only skip the current file, #1993 - Fixed "borg upgrade --tam" crashing with unencrypted repositories. Since :ref:`the issue ` is not relevant for unencrypted repositories, it now does nothing and prints an error, #1981. - Fixed change-passphrase crashing with unencrypted repositories, #1978 - Fixed "borg check repo::archive" indicating success if "archive" does not exist, #1997 - borg check: print non-exit-code warning if --last or --prefix aren't fulfilled - fix bad parsing of wrong repo location syntax - create: don't create hard link refs to failed files, mount: handle invalid hard link refs, #2092 - detect mingw byte order, #2073 - creating a new segment: use "xb" mode, #2099 - mount: umount on SIGINT/^C when in foreground, #2082 Other changes: - binary: use fixed AND freshly compiled pyinstaller bootloader, #2002 - xattr: ignore empty names returned by llistxattr(2) et al - Enable the fault handler: install handlers for the SIGSEGV, SIGFPE, SIGABRT, SIGBUS and SIGILL signals to dump the Python traceback. - Also print a traceback on SIGUSR2. - borg change-passphrase: print key location (simplify making a backup of it) - officially support Python 3.6 (setup.py: add Python 3.6 qualifier) - tests: - vagrant / travis / tox: add Python 3.6 based testing - vagrant: fix openbsd repo, #2042 - vagrant: fix the freebsd64 machine, #2037 #2067 - vagrant: use python 3.5.3 to build binaries, #2078 - vagrant: use osxfuse 3.5.4 for tests / to build binaries vagrant: improve darwin64 VM settings - travis: fix osxfuse install (fixes OS X testing on Travis CI) - travis: require succeeding OS X tests, #2028 - travis: use latest pythons for OS X based testing - use pytest-xdist to parallelize testing - fix xattr test race condition, #2047 - setup.cfg: fix pytest deprecation warning, #2050 - docs: - language clarification - VM backup FAQ - borg create: document how to backup stdin, #2013 - borg upgrade: fix incorrect title levels - add CVE numbers for issues fixed in 1.0.9, #2106 - fix typos (taken from Debian package patch) - remote: include data hexdump in "unexpected RPC data" error message - remote: log SSH command line at debug level - API_VERSION: use numberspaces, #2023 - remove .github from pypi package, #2051 - add pip and setuptools to requirements file, #2030 - SyncFile: fix use of fd object after close (cosmetic) - Manifest.in: simplify, exclude \*.{so,dll,orig}, #2066 - ignore posix_fadvise errors in repository.py, #2095 (works around issues with docker on ARM) - make LoggedIO.close_segment reentrant, avoid reentrance Version 1.0.9 (2016-12-20) -------------------------- Security fixes: - A flaw in the cryptographic authentication scheme in Borg allowed an attacker to spoof the manifest. See :ref:`tam_vuln` above for the steps you should take. CVE-2016-10099 was assigned to this vulnerability. - borg check: When rebuilding the manifest (which should only be needed very rarely) duplicate archive names would be handled on a "first come first serve" basis, allowing an attacker to apparently replace archives. CVE-2016-10100 was assigned to this vulnerability. 
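The concrete remediation steps live in the linked section; in rough outline, once all clients are upgraded, each affected repository's manifest gets authenticated via the (then new) ``--tam`` upgrade switch, e.g. (repository path is a placeholder)::

    $ borg upgrade --tam /path/to/repo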
Bug fixes: - borg check: - rebuild manifest if it's corrupted - skip corrupted chunks during manifest rebuild - fix TypeError in integrity error handler, #1903, #1894 - fix location parser for archives with @ char (regression introduced in 1.0.8), #1930 - fix wrong duration/timestamps if system clock jumped during a create - fix progress display not updating if system clock jumps backwards - fix checkpoint interval being incorrect if system clock jumps Other changes: - docs: - add python3-devel as a dependency for cygwin-based installation - clarify extract is relative to current directory - FAQ: fix link to changelog - markup fixes - tests: - test_get\_(cache|keys)_dir: clean env state, #1897 - get back pytest's pretty assertion failures, #1938 - setup.py build_usage: - fixed build_usage not processing all commands - fixed build_usage not generating includes for debug commands Version 1.0.9rc1 (2016-11-27) ----------------------------- Bug fixes: - files cache: fix determination of newest mtime in backup set (which is used in cache cleanup and led to wrong "A" [added] status for unchanged files in next backup), #1860. - borg check: - fix incorrectly reporting attic 0.13 and earlier archives as corrupt - handle repo w/o objects gracefully and also bail out early if repo is *completely* empty, #1815. - fix tox/pybuild in 1.0-maint - at xattr module import time, loggers are not initialized yet New features: - borg umount exposed already existing umount code via the CLI api, so users can use it, which is more consistent than using borg to mount and fusermount -u (or umount) to un-mount, #1855. - implement borg create --noatime --noctime, fixes #1853 Other changes: - docs: - display README correctly on PyPI - improve cache / index docs, esp. files cache docs, fixes #1825 - different pattern matching for --exclude, #1779 - datetime formatting examples for {now} placeholder, #1822 - clarify passphrase mode attic repo upgrade, #1854 - clarify --umask usage, #1859 - clarify how to choose PR target branch - clarify prune behavior for different archive contents, #1824 - fix PDF issues, add logo, fix authors, headings, TOC - move security verification to support section - fix links in standalone README (:ref: tags) - add link to security contact in README - add FAQ about security - move fork differences to FAQ - add more details about resource usage - tests: skip remote tests on cygwin, #1268 - travis: - allow OS X failures until the brew cask osxfuse issue is fixed - caskroom osxfuse-beta gone, it's osxfuse now (3.5.3) - vagrant: - upgrade OSXfuse / FUSE for macOS to 3.5.3 - remove llfuse from tox.ini at a central place - do not try to install llfuse on centos6 - fix FUSE test for darwin, #1546 - add windows virtual machine with cygwin - Vagrantfile cleanup / code deduplication Version 1.1.0b2 (2016-10-01) ---------------------------- Bug fixes: - fix incorrect preservation of delete tags, leading to "object count mismatch" on borg check, #1598. This only occurred with 1.1.0b1 (not with 1.0.x) and is normally fixed by running another borg create/delete/prune. - fix broken --progress for double-cell paths (e.g. 
CJK), #1624 - borg recreate: also catch SIGHUP - FUSE: - fix hardlinks in versions view, #1599 - add parameter check to ItemCache.get to make potential failures more clear New features: - Archiver, RemoteRepository: add --remote-ratelimit (send data) - borg help compression, #1582 - borg check: delete chunks with integrity errors, #1575, so they can be "repaired" immediately and maybe healed later. - archives filters concept (refactoring/unifying older code) - covers --first/--last/--prefix/--sort-by options - currently used for borg list/info/delete Other changes: - borg check --verify-data slightly tuned (use get_many()) - change {utcnow} and {now} to ISO-8601 format ("T" date/time separator) - repo check: log transaction IDs, improve object count mismatch diagnostic - Vagrantfile: use TW's fresh-bootloader pyinstaller branch - fix module names in api.rst - hashindex: bump api_version Version 1.1.0b1 (2016-08-28) ---------------------------- New features: - new commands: - borg recreate: re-create existing archives, #787 #686 #630 #70, also see #757, #770. - selectively remove files/dirs from old archives - re-compress data - re-chunkify data, e.g. to have upgraded Attic / Borg 0.xx archives deduplicate with Borg 1.x archives or to experiment with chunker-params. - borg diff: show differences between archives - borg with-lock: execute a command with the repository locked, #990 - borg create: - Flexible compression with pattern matching on path/filename, and LZ4 heuristic for deciding compressibility, #810, #1007 - visit files in inode order (better speed, esp. for large directories and rotating disks) - in-file checkpoints, #1217 - increased default checkpoint interval to 30 minutes (was 5 minutes), #896 - added uuid archive format tag, #1151 - save mountpoint directories with --one-file-system, makes system restore easier, #1033 - Linux: added support for some BSD flags, #1050 - add 'x' status for excluded paths, #814 - also means files excluded via UF_NODUMP, #1080 - borg check: - will not produce the "Checking segments" output unless new --progress option is passed, #824. - --verify-data to verify data cryptographically on the client, #975 - borg list, #751, #1179 - removed {formatkeys}, see "borg list --help" - --list-format is deprecated, use --format instead - --format now also applies to listing archives, not only archive contents, #1179 - now supports the usual [PATH [PATHS…]] syntax and excludes - new keys: csize, num_chunks, unique_chunks, NUL - supports guaranteed_available hashlib hashes (to avoid varying functionality depending on environment), which includes the SHA1 and SHA2 family as well as MD5 - borg prune: - to better visualize the "thinning out", we now list all archives in reverse time order. rephrase and reorder help text. - implement --keep-last N via --keep-secondly N, also --keep-minutely. 
assuming that there is not more than 1 backup archive made in 1s, --keep-last N and --keep-secondly N are equivalent, #537 - cleanup checkpoints except the latest, #1008 - borg extract: - added --progress, #1449 - Linux: limited support for BSD flags, #1050 - borg info: - output is now more similar to borg create --stats, #977 - borg mount: - provide "borgfs" wrapper for borg mount, enables usage via fstab, #743 - "versions" mount option - when used with a repository mount, this gives a merged, versioned view of the files in all archives, #729 - repository: - added progress information to commit/compaction phase (often takes some time when deleting/pruning), #1519 - automatic recovery for some forms of repository inconsistency, #858 - check free space before going forward with a commit, #1336 - improved write performance (esp. for rotating media), #985 - new IO code for Linux - raised default segment size to approx 512 MiB - improved compaction performance, #1041 - reduced client CPU load and improved performance for remote repositories, #940 - options that imply output (--show-rc, --show-version, --list, --stats, --progress) don't need -v/--info to have that output displayed, #865 - add archive comments (via borg (re)create --comment), #842 - borg list/prune/delete: also output archive id, #731 - --show-version: shows/logs the borg version, #725 - added --debug-topic for granular debug logging, #1447 - use atomic file writing/updating for configuration and key files, #1377 - BORG_KEY_FILE environment variable, #1001 - self-testing module, #970 Bug fixes: - list: fixed default output being produced if --format is given with empty parameter, #1489 - create: fixed overflowing progress line with CJK and similar characters, #1051 - prune: fixed crash if --prefix resulted in no matches, #1029 - init: clean up partial repo if passphrase input is aborted, #850 - info: quote cmdline arguments that have spaces in them - fix hardlinks failing in some cases for extracting subtrees, #761 Other changes: - replace stdlib hmac with OpenSSL, zero-copy decrypt (10-15% increase in performance of hash-lists and extract). - improved chunker performance, #1021 - open repository segment files in exclusive mode (fail-safe), #1134 - improved error logging, #1440 - Source: - pass meta-data around, #765 - move some constants to new constants module - better readability and fewer errors with namedtuples, #823 - moved source tree into src/ subdirectory, #1016 - made borg.platform a package, #1113 - removed dead crypto code, #1032 - improved and ported parts of the test suite to py.test, #912 - created data classes instead of passing dictionaries around, #981, #1158, #1161 - cleaned up imports, #1112 - Docs: - better help texts and sphinx reproduction of usage help: - Group options - Nicer list of options in Sphinx - Deduplicate 'Common options' (including --help) - chunker: added some insights by "Voltara", #903 - clarify what "deduplicated size" means - fix / update / add package list entries - added a SaltStack usage example, #956 - expanded FAQ - new contributors in AUTHORS! - Tests: - vagrant: add ubuntu/xenial 64bit - this box has still some issues - ChunkBuffer: add test for leaving partial chunk in buffer, fixes #945 Version 1.0.8 (2016-10-29) -------------------------- Bug fixes: - RemoteRepository: Fix busy wait in call_many, #940 New features: - implement borgmajor/borgminor/borgpatch placeholders, #1694 {borgversion} was already there (full version string). 
With the new placeholders you can now also get e.g. 1 or 1.0 or 1.0.8. Other changes: - avoid previous_location mismatch, #1741 due to the changed canonicalization for relative paths in PR #1711 / #1655 (implement /./ relpath hack), there would be a changed repo location warning and the user would be asked if this is ok. this would break automation and require manual intervention, which is unwanted. thus, we automatically fix the previous_location config entry, if it only changed in the expected way, but still means the same location. - docs: - deployment.rst: do not use bare variables in ansible snippet - add clarification about append-only mode, #1689 - setup.py: add comment about requiring llfuse, #1726 - update usage.rst / api.rst - repo url / archive location docs + typo fix - quickstart: add a comment about other (remote) filesystems - vagrant / tests: - no chown when rsyncing (fixes boxes w/o vagrant group) - fix FUSE permission issues on linux/freebsd, #1544 - skip FUSE test for borg binary + fakeroot - ignore security.selinux xattrs, fixes tests on centos, #1735 Version 1.0.8rc1 (2016-10-17) ----------------------------- Bug fixes: - fix signal handling (SIGINT, SIGTERM, SIGHUP), #1620 #1593 Fixes e.g. leftover lock files for quickly repeated signals (e.g. Ctrl-C Ctrl-C) or lost connections or systemd sending SIGHUP. - progress display: adapt formatting to narrow screens, do not crash, #1628 - borg create --read-special - fix crash on broken symlink, #1584. also correctly processes broken symlinks. before this regressed to a crash (5b45385) a broken symlink would've been skipped. - process_symlink: fix missing backup_io() Fixes a chmod/chown/chgrp/unlink/rename/... crash race between getting dirents and dispatching to process_symlink. - yes(): abort on wrong answers, saying so, #1622 - fixed exception borg serve raised when connection was closed before repository was opened. add an error message for this. - fix read-from-closed-FD issue, #1551 (this seems not to get triggered in 1.0.x, but was discovered in master) - hashindex: fix iterators (always raise StopIteration when exhausted) (this seems not to get triggered in 1.0.x, but was discovered in master) - enable relative paths in ssh:// repo URLs, via /./relpath hack, #1655 - allow repo paths with colons, #1705 - update changed repo location immediately after acceptance, #1524 - fix debug get-obj / delete-obj crash if object not found and remote repo, #1684 - pyinstaller: use a spec file to build borg.exe binary, exclude osxfuse dylib on Mac OS X (avoids mismatch lib <-> driver), #1619 New features: - add "borg key export" / "borg key import" commands, #1555, so users are able to backup / restore their encryption keys more easily. Supported formats are the keyfile format used by borg internally and a special "paper" format with per-line checksums for printed backups. For the paper format, the import is an interactive process which checks each line as soon as it is input. - add "borg debug-refcount-obj" to determine a repo object's referrer counts, #1352 Other changes: - add "borg debug ..." subcommands (borg debug-* still works, but will be removed in borg 1.1) - setup.py: Add subcommand support to build_usage. - remote: change exception message for unexpected RPC data format to indicate dataflow direction.
- improved messages / error reporting: - IntegrityError: add placeholder for message, so that the message we give appears not only in the traceback, but also in the (short) error message, #1572 - borg.key: include chunk id in exception msgs, #1571 - better messages for cache newer than repo, #1700 - vagrant (testing/build VMs): - upgrade OSXfuse / FUSE for macOS to 3.5.2 - update Debian Wheezy boxes, #1686 - openbsd / netbsd: use own boxes, fixes misc rsync installation and FUSE/llfuse related testing issues, #1695 #1696 #1670 #1671 #1728 - docs: - add docs for "key export" and "key import" commands, #1641 - fix inconsistency in FAQ (pv-wrapper). - fix second block in "Easy to use" section not showing on GitHub, #1576 - add bestpractices badge - link reference docs and faq about BORG_FILES_CACHE_TTL, #1561 - improve borg info --help, explain size infos, #1532 - add release signing key / security contact to README, #1560 - add contribution guidelines for developers - development.rst: add sphinx_rtd_theme to the sphinx install command - adjust border color in borg.css - add debug-info usage help file - internals.rst: fix typos - setup.py: fix build_usage to always process all commands - added docs explaining multiple --restrict-to-path flags, #1602 - add more specific warning about write-access debug commands, #1587 - clarify FAQ regarding backup of virtual machines, #1672 - tests: - work around FUSE xattr test issue with recent fakeroot - simplify repo/hashindex tests - travis: test FUSE-enabled borg, use trusty to have a recent FUSE - re-enable FUSE tests for RemoteArchiver (no deadlocks any more) - clean env for pytest based tests, #1714 - fuse_mount contextmanager: accept any options Version 1.0.7 (2016-08-19) -------------------------- Security fixes: - borg serve: fix security issue with remote repository access, #1428 If you used e.g. --restrict-to-path /path/client1/ (with or without trailing slash does not make a difference), it acted like a path prefix match using /path/client1 (note the missing trailing slash) - the code then also allowed working in e.g. /path/client13 or /path/client1000. As this could accidentally lead to major security/privacy issues depending on the paths you use, the behaviour was changed to be a strict directory match. That means --restrict-to-path /path/client1 (with or without trailing slash does not make a difference) now uses /path/client1/ internally (note the trailing slash here!) for matching and allows precisely that path AND any path below it. So, /path/client1 is allowed, /path/client1/repo1 is allowed, but not /path/client13 or /path/client1000. If you willingly used the undocumented (dangerous) previous behaviour, you may need to rearrange your --restrict-to-path paths now. We are sorry if that causes work for you, but we did not want a potentially dangerous behaviour in the software (not even using a for-backwards-compat option). Bug fixes: - fixed repeated LockTimeout exceptions when borg serve tried to write into an already write-locked repo (e.g. by a borg mount), #502 part b) This was solved by the fix for #1220 in 1.0.7rc1 already.
- fix cosmetics + file leftover for "not a valid borg repository", #1490 - Cache: release lock if cache is invalid, #1501 - borg extract --strip-components: fix leak of preloaded chunk contents - Repository, when an InvalidRepository exception happens: - fix spurious, empty lock.roster - fix repo not closed cleanly New features: - implement borg debug-info, fixes #1122 (just calls already existing code via cli, same output as below tracebacks) Other changes: - skip the O_NOATIME test on GNU Hurd, fixes #1315 (this is a very minor issue and the GNU Hurd project knows the bug) - document using a clean repo to test / build the release Version 1.0.7rc2 (2016-08-13) ----------------------------- Bug fixes: - do not write objects to repository that are bigger than the allowed size, borg will reject reading them, #1451. Important: if you created archives with many millions of files or directories, please verify if you can open them successfully, e.g. try a "borg list REPO::ARCHIVE". - lz4 compression: dynamically enlarge the (de)compression buffer, the static buffer was not big enough for archives with extremely many items, #1453 - larger item metadata stream chunks, raise archive item limit by 8x, #1452 - fix untracked segments made by moved DELETEs, #1442 Impact: Previously (metadata) segments could become untracked when deleting data, these would never be cleaned up. - extended attributes (xattrs) related fixes: - fixed a race condition in xattrs querying that led to the entire file not being backed up (while logging the error, exit code = 1), #1469 - fixed a race condition in xattrs querying that led to a crash, #1462 - raise OSError including the error message derived from errno, deal with path being an integer FD Other changes: - print active env var override by default, #1467 - xattr module: refactor code, deduplicate, clean up - repository: split object size check into too small and too big - add a transaction_id assertion, so borg init on a broken (inconsistent) filesystem does not look like a coding error in borg, but points to the real problem. - explain confusing TypeError caused by compat support for old servers, #1456 - add forgotten usage help file from build_usage - refactor/unify buffer code into helpers.Buffer class, add tests - docs: - document archive limitation, #1452 - improve prune examples Version 1.0.7rc1 (2016-08-05) ----------------------------- Bug fixes: - fix repo lock deadlocks (related to lock upgrade), #1220 - catch unpacker exceptions, resync, #1351 - fix borg break-lock ignoring BORG_REPO env var, #1324 - files cache performance fixes (fixes unnecessary re-reading/chunking/hashing of unmodified files for some use cases): - fix unintended file cache eviction, #1430 - implement BORG_FILES_CACHE_TTL, update FAQ, raise default TTL from 10 to 20, #1338 - FUSE: - cache partially read data chunks (performance), #965, #966 - always create a root dir, #1125 - use an OrderedDict for helptext, making the build reproducible, #1346 - RemoteRepository init: always call close on exceptions, #1370 (cosmetic) - ignore stdout/stderr broken pipe errors (cosmetic), #1116 New features: - better borg versions management support (useful esp.
for borg servers wanting to offer multiple borg versions and for clients wanting to choose a specific server borg version), #1392: - add BORG_VERSION environment variable before executing "borg serve" via ssh - add new placeholder {borgversion} - substitute placeholders in --remote-path - borg init --append-only option (makes using the more secure append-only mode more convenient. when used remotely, this requires 1.0.7+ also on the borg server), #1291. Other changes: - Vagrantfile: - darwin64: upgrade to FUSE for macOS 3.4.1 (aka osxfuse), #1378 - xenial64: use user "ubuntu", not "vagrant" (as usual), #1331 - tests: - fix FUSE tests on OS X, #1433 - docs: - FAQ: add backup using stable filesystem names recommendation - FAQ about glibc compatibility added, #491, glibc-check improved - FAQ: 'A' unchanged file; remove ambiguous entry age sentence. - OS X: install pkg-config to build with FUSE support, fixes #1400 - add notes about shell/sudo pitfalls with env. vars, #1380 - added platform feature matrix - implement borg debug-dump-repo-objs Version 1.0.6 (2016-07-12) -------------------------- Bug fixes: - Linux: handle multiple LD_PRELOAD entries correctly, #1314, #1111 - Fix crash with unclear message if the libc is not found, #1314, #1111 Other changes: - tests: - Fixed O_NOATIME tests for Solaris and GNU Hurd, #1315 - Fixed sparse file tests for (file) systems not supporting it, #1310 - docs: - Fixed syntax highlighting, #1313 - misc docs: added data processing overview picture Version 1.0.6rc1 (2016-07-10) ----------------------------- New features: - borg check --repair: heal damaged files if missing chunks re-appear (e.g. if the previously missing chunk was added again in a later backup archive), #148. (*) Also improved logging. Bug fixes: - sync_dir: silence fsync() failing with EINVAL, #1287 Some network filesystems (like smbfs) don't support this and we use this in repository code. - borg mount (FUSE): - fix directories being shadowed when contained paths were also specified, #1295 - raise I/O Error (EIO) on damaged files (unless -o allow_damaged_files is used), #1302. (*) - borg extract: warn if a damaged file is extracted, #1299. (*) - Added some missing return code checks (ChunkIndex._add, hashindex_resize). - borg check: fix/optimize initial hash table size, avoids resize of the table. Other changes: - tests: - add more FUSE tests, #1284 - deduplicate FUSE (u)mount code - fix borg binary test issues, #862 - docs: - changelog: added release dates to older borg releases - fix some sphinx (docs generator) warnings, #881 Notes: (*) Some features depend on information (chunks_healthy list) added to item metadata when a file with missing chunks was "repaired" using all-zero replacement chunks. The chunks_healthy list is generated since borg 1.0.4, thus borg can't recognize such "repaired" (but content-damaged) files if the repair was done with an older borg version. Version 1.0.5 (2016-07-07) -------------------------- Bug fixes: - borg mount: fix FUSE crash in xattr code on Linux introduced in 1.0.4, #1282 Other changes: - backport some FAQ entries from master branch - add release helper scripts - Vagrantfile: - centos6: no FUSE, don't build binary - add xz for redhat-like dists Version 1.0.4 (2016-07-07) -------------------------- New features: - borg serve --append-only, #1168 This was included because it was a simple change (append-only functionality was already present via repository config file) and makes better security now practically usable. 
- BORG_REMOTE_PATH environment variable, #1258 This was included because it was a simple change (--remote-path cli option was already present) and makes borg much easier to use if you need it. - Repository: cleanup incomplete transaction on "no space left" condition. In many cases, this can avoid a 100% full repo filesystem (which is very problematic as borg always needs free space - even to delete archives). Bug fixes: - Fix wrong handling and reporting of OSErrors in borg create, #1138. This was a serious issue: in the context of "borg create", errors like repository I/O errors (e.g. disk I/O errors, ssh repo connection errors) were handled badly and did not lead to a crash (which would be good for this case, because the repo transaction would be incomplete and trigger a transaction rollback to clean up). Now, error handling for source files is cleanly separated from every other error handling, so only problematic input files are logged and skipped. - Implement fail-safe error handling for borg extract. Note that this isn't nearly as critical as the borg create error handling bug, since nothing is written to the repo. So this was "merely" misleading error reporting. - Add missing error handler in directory attr restore loop. - repo: make sure write data hits disk before the commit tag (#1236) and also sync the containing directory. - FUSE: getxattr fail must use errno.ENOATTR, #1126 (fixes Mac OS X Finder malfunction: "zero bytes" file length, access denied) - borg check --repair: do not lose information about the good/original chunks. If we do not lose the original chunk IDs list when "repairing" a file (replacing missing chunks with all-zero chunks), we have a chance to "heal" the file back into its original state later, in case the chunks re-appear (e.g. in a fresh backup). Healing is not implemented yet, see #148. - fixes for --read-special mode: - ignore known files cache, #1241 - fake regular file mode, #1214 - improve symlinks handling, #1215 - remove passphrase from subprocess environment, #1105 - Ignore empty index file (will trigger index rebuild), #1195 - add missing placeholder support for --prefix, #1027 - improve exception handling for placeholder replacement - catch and format exceptions in arg parsing - helpers: fix "undefined name 'e'" in exception handler - better error handling for missing repo manifest, #1043 - borg delete: - make it possible to delete a repo without manifest - borg delete --forced allows to delete corrupted archives, #1139 - borg check: - make borg check work for empty repo - fix resync and msgpacked item qualifier, #1135 - rebuild_manifest: fix crash if 'name' or 'time' key were missing. - better validation of item metadata dicts, #1130 - better validation of archive metadata dicts - close the repo on exit - even if rollback did not work, #1197. This is rather cosmetic, it avoids repo closing in the destructor. 
- tests: - fix sparse file test, #1170 - flake8: ignore new F405, #1185 - catch "invalid argument" on cygwin, #257 - fix sparseness assertion in test prep, #1264 Other changes: - make borg build/work on OpenSSL 1.0 and 1.1, #1187 - docs / help: - fix / clarify prune help, #1143 - fix "patterns" help formatting - add missing docs / help about placeholders - resources: rename atticmatic to borgmatic - document sshd settings, #545 - more details about checkpoints, add split trick, #1171 - support docs: add freenode web chat link, #1175 - add prune visualization / example, #723 - add note that Fnmatch is default, #1247 - make clear that lzma levels > 6 are a waste of cpu cycles - add a "do not edit" note to auto-generated files, #1250 - update cygwin installation docs - repository interoperability with borg master (1.1dev) branch: - borg check: read item metadata keys from manifest, #1147 - read v2 hints files, #1235 - fix hints file "unknown version" error handling bug - tests: add tests for format_line - llfuse: update version requirement for freebsd - Vagrantfile: - use openbsd 5.9, #716 - do not install llfuse on netbsd (broken) - update OSXfuse to version 3.3.3 - use Python 3.5.2 to build the binaries - glibc compatibility checker: scripts/glibc_check.py - add .eggs to .gitignore Version 1.0.3 (2016-05-20) -------------------------- Bug fixes: - prune: avoid that checkpoints are kept and completed archives are deleted in a prune run, #997 - prune: fix commandline argument validation - some valid command lines were considered invalid (annoying, but harmless), #942 - fix capabilities extraction on Linux (set xattrs last, after chown()), #1069 - repository: fix commit tags being seen in data - when probing key files, do binary reads. avoids crash when non-borg binary files are located in borg's key files directory. - handle SIGTERM and make a clean exit - avoids orphan lock files. - repository cache: don't cache large objects (avoid using lots of temp. disk space), #1063 Other changes: - Vagrantfile: OS X: update osxfuse / install lzma package, #933 - setup.py: add check for platform_darwin.c - setup.py: on freebsd, use an llfuse release that builds ok - docs / help: - update readthedocs URLs, #991 - add missing docs for "borg break-lock", #992 - borg create help: add some words about the archive name - borg create help: document format tags, #894 Version 1.0.2 (2016-04-16) -------------------------- Bug fixes: - fix malfunction and potential corruption on (nowadays rather rare) big-endian architectures or bi-endian archs in (rare) BE mode. #886, #889 cache resync / index merge was malfunctioning due to this, potentially leading to data loss. borg info had cosmetic issues (displayed wrong values). note: all (widespread) little-endian archs (like x86/x64) or bi-endian archs in (widespread) LE mode (like ARMEL, MIPSEL, ...) were NOT affected. - add overflow and range checks for 1st (special) uint32 of the hashindex values, switch from int32 to uint32. - fix so that refcount will never overflow, but just stick to max. value after an overflow would have occurred. - borg delete: fix --cache-only for broken caches, #874 Makes --cache-only idempotent: it won't fail if the cache is already deleted. - fixed borg create --one-file-system erroneously traversing into other filesystems (if starting fs device number was 0), #873 - workaround a bug in Linux fadvise FADV_DONTNEED, #907 Other changes: - better test coverage for hashindex, incl.
overflow testing, checking correct computations so endianness issues would be discovered. - reproducible doc for ProgressIndicator*, make the build reproducible. - use latest llfuse for vagrant machines - docs: - use /path/to/repo in examples, fixes #901 - fix confusing usage of "repo" as archive name (use "arch") Version 1.0.1 (2016-04-08) -------------------------- New features: Usually there are no new features in a bugfix release, but these were added due to their high impact on security/safety/speed or because they are fixes also: - append-only mode for repositories, #809, #36 (see docs) - borg create: add --ignore-inode option to make borg detect unmodified files even if your filesystem does not have stable inode numbers (like sshfs and possibly CIFS). - add options --warning, --error, --critical for missing log levels, #826. it's not recommended to suppress warnings or errors, but the user may decide this on his own. note: --warning is not given to borg serve so a <= 1.0.0 borg will still work as server (it is not needed as it is the default). do not use --error or --critical when using a <= 1.0.0 borg server. Bug fixes: - fix silently skipping EIO, #748 - add context manager for Repository (avoid orphan repository locks), #285 - do not sleep for >60s while waiting for lock, #773 - unpack file stats before passing to FUSE - fix build on illumos - don't try to backup doors or event ports (Solaris and derivates) - remove useless/misleading libc version display, #738 - test suite: reset exit code of persistent archiver, #844 - RemoteRepository: clean up pipe if remote open() fails - Remote: don't print tracebacks for Error exceptions handled downstream, #792 - if BORG_PASSPHRASE is present but wrong, don't prompt for password, but fail instead, #791 - ArchiveChecker: move "orphaned objects check skipped" to INFO log level, #826 - fix capitalization, add ellipses, change log level to debug for 2 messages, #798 Other changes: - update llfuse requirement, llfuse 1.0 works - update OS / dist packages on build machines, #717 - prefer showing --info over -v in usage help, #859 - docs: - fix cygwin requirements (gcc-g++) - document how to debug / file filesystem issues, #664 - fix reproducible build of api docs - RTD theme: CSS !important overwrite, #727 - Document logo font. Recreate logo png. Remove GIMP logo file. Version 1.0.0 (2016-03-05) -------------------------- The major release number change (0.x -> 1.x) indicates bigger incompatible changes, please read the compatibility notes, adapt / test your scripts and check your backup logs. Compatibility notes: - drop support for python 3.2 and 3.3, require 3.4 or 3.5, #221 #65 #490 note: we provide binaries that include python 3.5.1 and everything else needed. they are an option in case you are stuck with < 3.4 otherwise. 
- change encryption to be on by default (using "repokey" mode) - moved keyfile keys from ~/.borg/keys to ~/.config/borg/keys, you can either move them manually or run "borg upgrade <REPO>" - remove support for --encryption=passphrase, use borg migrate-to-repokey to switch to repokey mode, #97 - remove deprecated --compression <N>, use --compression zlib,<N> instead. in case of 0, you could also use --compression none - remove deprecated --hourly/daily/weekly/monthly/yearly, use --keep-hourly/daily/weekly/monthly/yearly instead - remove deprecated --do-not-cross-mountpoints, use --one-file-system instead - disambiguate -p option, #563: - -p is now the same as --progress - -P is now the same as --prefix - remove deprecated "borg verify", use "borg extract --dry-run" instead - cleanup environment variable semantics, #355 the environment variables used to be "yes sayers" when set, this was conceptually generalized to "automatic answerers" and they just give their value as answer (as if you typed in that value when being asked). See the "usage" / "Environment Variables" section of the docs for details. - change the builtin default for --chunker-params, create 2MiB chunks, #343 --chunker-params new default: 19,23,21,4095 - old default: 10,23,16,4095 one of the biggest issues with borg < 1.0 (and also attic) was that it had a default target chunk size of 64kiB, thus it created a lot of chunks and thus also a huge chunk management overhead (high RAM and disk usage). please note that the new default won't change the chunks that you already have in your repository. the new big chunks do not deduplicate with the old small chunks, so expect your repo to grow at least by the size of every changed file and in the worst case (e.g. if your files cache was lost / is not used) by the size of every file (minus any compression you might use). in case you want to immediately see a much lower resource usage (RAM / disk) for chunks management, it might be better to start with a new repo than to continue in the existing repo (with an existing repo, you'd have to wait until all archives with small chunks got pruned to see a lower resource usage). if you used the old --chunker-params default value (or if you did not use the --chunker-params option at all) and you'd like to continue using small chunks (and you accept the huge resource usage that comes with that), just explicitly use borg create --chunker-params=10,23,16,4095. - archive timestamps: the 'time' timestamp now refers to archive creation start time (was: end time), the new 'time_end' timestamp refers to archive creation end time. This might affect prune if your backups take rather long. if you give a timestamp via cli, this is stored into 'time', therefore it now needs to mean archive creation start time.
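To make the chunker-params change above a bit more tangible, here is a minimal illustrative sketch (assumption: the four fields are CHUNK_MIN_EXP, CHUNK_MAX_EXP, HASH_MASK_BITS and HASH_WINDOW_SIZE, so the target chunk size is roughly 2^HASH_MASK_BITS bytes; repo and data paths are placeholders)::

    # old default 10,23,16,4095 -> target chunk size ~ 2^16 bytes = 64 KiB
    # new default 19,23,21,4095 -> target chunk size ~ 2^21 bytes = 2 MiB
    # explicitly keep the old small-chunk behaviour (and its resource overhead):
    borg create --chunker-params=10,23,16,4095 /path/to/repo::archive ~/data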
New features: - implement password roundtrip, #695 Bug fixes: - remote end does not need cache nor keys directories, do not create them, #701 - added retry counter for passwords, #703 Other changes: - fix compiler warnings, #697 - docs: - update README.rst to new changelog location in docs/changes.rst - add Teemu to AUTHORS - changes.rst: fix old chunker params, #698 - FAQ: how to limit bandwidth Version 1.0.0rc2 (2016-02-28) ----------------------------- New features: - format options for location: user, pid, fqdn, hostname, now, utcnow, user - borg list --list-format - borg prune -v --list enables the keep/prune list output, #658 Bug fixes: - fix _open_rb noatime handling, #657 - add a simple archivename validator, #680 - borg create --stats: show timestamps in localtime, use same labels/formatting as borg info, #651 - llfuse compatibility fixes (now compatible with: 0.40, 0.41, 0.42) Other changes: - it is now possible to use "pip install borgbackup[fuse]" to automatically install the llfuse dependency using the correct version requirement for it. you still need to care about having installed the FUSE / build related OS package first, though, so that building llfuse can succeed. - Vagrant: drop Ubuntu Precise (12.04) - does not have Python >= 3.4 - Vagrant: use pyinstaller v3.1.1 to build binaries - docs: - borg upgrade: add to docs that only LOCAL repos are supported - borg upgrade also handles borg 0.xx -> 1.0 - use pip extras or requirements file to install llfuse - fix order in release process - updated usage docs and other minor / cosmetic fixes - verified borg examples in docs, #644 - freebsd dependency installation and FUSE configuration, #649 - add example how to restore a raw device, #671 - add a hint about the dev headers needed when installing from source - add examples for delete (and handle delete after list, before prune), #656 - update example for borg create -v --stats (use iso datetime format), #663 - added example to BORG_RSH docs - "connection closed by remote": add FAQ entry and point to issue #636 Version 1.0.0rc1 (2016-02-07) ----------------------------- New features: - borg migrate-to-repokey ("passphrase" -> "repokey" encryption key mode) - implement --short for borg list REPO, #611 - implement --list for borg extract (consistency with borg create) - borg serve: overwrite client's --restrict-to-path with ssh forced command's option value (but keep everything else from the client commandline), #544 - use $XDG_CONFIG_HOME/keys for keyfile keys (~/.config/borg/keys), #515 - "borg upgrade" moves the keyfile keys to the new location - display both archive creation start and end time in "borg info", #627 Bug fixes: - normalize trailing slashes for the repository path, #606 - Cache: fix exception handling in __init__, release lock, #610 Other changes: - suppress unneeded exception context (PEP 409), simpler tracebacks - removed special code needed to deal with imperfections / incompatibilities / missing stuff in py 3.2/3.3, simplify code that can be done simpler in 3.4 - removed some version requirements that were kept on old versions because newer did not support py 3.2 any more - use some py 3.4+ stdlib code instead of own/openssl/pypi code: - use os.urandom instead of own cython openssl RAND_bytes wrapper, #493 - use hashlib.pbkdf2_hmac from py stdlib instead of own openssl wrapper - use hmac.compare_digest instead of == operator (constant time comparison) - use stat.filemode instead of homegrown code - use "mock" library from stdlib, #145 - remove borg.support 
(with non-broken argparse copy), it is ok in 3.4+, #358 - Vagrant: copy CHANGES.rst as symlink, #592 - cosmetic code cleanups, add flake8 to tox/travis, #4 - docs / help: - make "borg -h" output prettier, #591 - slightly rephrase prune help - add missing example for --list option of borg create - quote exclude line that includes an asterisk to prevent shell expansion - fix dead link to license - delete Ubuntu Vivid, it is not supported anymore (EOL) - OS X binary does not work for older OS X releases, #629 - borg serve's special support for forced/original ssh commands, #544 - misc. updates and fixes Version 0.30.0 (2016-01-23) --------------------------- Compatibility notes: - you may need to use -v (or --info) more often to actually see output emitted at INFO log level (because it is suppressed at the default WARNING log level). See the "general" section in the usage docs. - for borg create, you need --list (additionally to -v) to see the long file list (was needed so you can have e.g. --stats alone without the long list) - see below about BORG_DELETE_I_KNOW_WHAT_I_AM_DOING (was: BORG_CHECK_I_KNOW_WHAT_I_AM_DOING) Bug fixes: - fix crash when using borg create --dry-run --keep-tag-files, #570 - make sure teardown with cleanup happens for Cache and RepositoryCache, avoiding leftover locks and TEMP dir contents, #285 (partially), #548 - fix locking KeyError, partial fix for #502 - log stats consistently, #526 - add abbreviated weekday to timestamp format, fixes #496 - strip whitespace when loading exclusions from file - unset LD_LIBRARY_PATH before invoking ssh, fixes strange OpenSSL library version warning when using the borg binary, #514 - add some error handling/fallback for C library loading, #494 - added BORG_DELETE_I_KNOW_WHAT_I_AM_DOING for check in "borg delete", #503 - remove unused "repair" rpc method name New features: - borg create: implement exclusions using regular expression patterns. - borg create: implement inclusions using patterns. - borg extract: support patterns, #361 - support different styles for patterns: - fnmatch (`fm:` prefix, default when omitted), like borg <= 0.29. - shell (`sh:` prefix) with `*` not matching directory separators and `**/` matching 0..n directories - path prefix (`pp:` prefix, for unifying borg create pp1 pp2 into the patterns system), semantics like in borg <= 0.29 - regular expression (`re:`), new! - --progress option for borg upgrade (#291) and borg delete - update progress indication more often (e.g. for borg create within big files or for borg check repo), #500 - finer chunker granularity for items metadata stream, #547, #487 - borg create --list now used (additionally to -v) to enable the verbose file list output - display borg version below tracebacks, #532 Other changes: - hashtable size (and thus: RAM and disk consumption) follows a growth policy: grows fast while small, grows slower when getting bigger, #527 - Vagrantfile: use pyinstaller 3.1 to build binaries, freebsd sqlite3 fix, fixes #569 - no separate binaries for centos6 any more because the generic linux binaries also work on centos6 (or in general: on systems with a slightly older glibc than debian7 - dev environment: require virtualenv<14.0 so we get a py32 compatible pip - docs: - add space-saving chunks.archive.d trick to FAQ - important: clarify -v and log levels in usage -> general, please read! - sphinx configuration: create a simple man page from usage docs - add a repo server setup example - disable unneeded SSH features in authorized_keys examples for security. 
- borg prune only knows "--keep-within" and not "--within" - add gource video to resources docs, #507 - add netbsd install instructions - authors: make it more clear what refers to borg and what to attic - document standalone binary requirements, #499 - rephrase the mailing list section - development docs: run build_api and build_usage before tagging release - internals docs: hash table max. load factor is 0.75 now - markup, typo, grammar, phrasing, clarifications and other fixes. - add gcc gcc-c++ to redhat/fedora/corora install docs, fixes #583 Version 0.29.0 (2015-12-13) --------------------------- Compatibility notes: - when upgrading to 0.29.0 you need to upgrade client as well as server installations due to the locking and commandline interface changes otherwise you'll get an error msg about a RPC protocol mismatch or a wrong commandline option. if you run a server that needs to support both old and new clients, it is suggested that you have a "borg-0.28.2" and a "borg-0.29.0" command. clients then can choose via e.g. "borg --remote-path=borg-0.29.0 ...". - the default waiting time for a lock changed from infinity to 1 second for a better interactive user experience. if the repo you want to access is currently locked, borg will now terminate after 1s with an error message. if you have scripts that shall wait for the lock for a longer time, use --lock-wait N (with N being the maximum wait time in seconds). Bug fixes: - hash table tuning (better chosen hashtable load factor 0.75 and prime initial size of 1031 gave ~1000x speedup in some scenarios) - avoid creation of an orphan lock for one case, #285 - --keep-tag-files: fix file mode and multiple tag files in one directory, #432 - fixes for "borg upgrade" (attic repo converter), #466 - remove --progress isatty magic (and also --no-progress option) again, #476 - borg init: display proper repo URL - fix format of umask in help pages, #463 New features: - implement --lock-wait, support timeout for UpgradableLock, #210 - implement borg break-lock command, #157 - include system info below traceback, #324 - sane remote logging, remote stderr, #461: - remote log output: intercept it and log it via local logging system, with "Remote: " prefixed to message. log remote tracebacks. - remote stderr: output it to local stderr with "Remote: " prefixed. - add --debug and --info (same as --verbose) to set the log level of the builtin logging configuration (which otherwise defaults to warning), #426 note: there are few messages emitted at DEBUG level currently. - optionally configure logging via env var BORG_LOGGING_CONF - add --filter option for status characters: e.g. to show only the added or modified files (and also errors), use "borg create -v --filter=AME ...". 
- more progress indicators, #394 - use ISO-8601 date and time format, #375 - "borg check --prefix" to restrict archive checking to that name prefix, #206 Other changes: - hashindex_add C implementation (speed up cache re-sync for new archives) - increase FUSE read_size to 1024 (speed up metadata operations) - check/delete/prune --save-space: free unused segments quickly, #239 - increase rpc protocol version to 2 (see also Compatibility notes), #458 - silence borg by default (via default log level WARNING) - get rid of C compiler warnings, #391 - upgrade OS X FUSE to 3.0.9 on the OS X binary build system - use python 3.5.1 to build binaries - docs: - new mailing list borgbackup@python.org, #468 - readthedocs: color and logo improvements - load coverage icons over SSL (avoids mixed content) - more precise binary installation steps - update release procedure docs about OS X FUSE - FAQ entry about unexpected 'A' status for unchanged file(s), #403 - add docs about 'E' file status - add "borg upgrade" docs, #464 - add developer docs about output and logging - clarify encryption, add note about client-side encryption - add resources section, with videos, talks, presentations, #149 - Borg moved to Arch Linux [community] - fix wrong installation instructions for archlinux Version 0.28.2 (2015-11-15) --------------------------- New features: - borg create --exclude-if-present TAGFILE - exclude directories that have the given file from the backup. You can additionally give --keep-tag-files to preserve just the directory roots and the tag-files (but not backup other directory contents), #395, attic #128, attic #142 Other changes: - do not create docs sources at build time (just have them in the repo), completely remove have_cython() hack, do not use the "mock" library at build time, #384 - avoid hidden import, make it easier for PyInstaller, easier fix for #218 - docs: - add description of item flags / status output, fixes #402 - explain how to regenerate usage and API files (build_api or build_usage) and when to commit usage files directly into git, #384 - minor install docs improvements Version 0.28.1 (2015-11-08) --------------------------- Bug fixes: - do not try to build api / usage docs for production install, fixes unexpected "mock" build dependency, #384 Other changes: - avoid using msgpack.packb at import time - fix formatting issue in changes.rst - fix build on readthedocs Version 0.28.0 (2015-11-08) --------------------------- Compatibility notes: - changed return codes (exit codes), see docs. in short: old: 0 = ok, 1 = error. now: 0 = ok, 1 = warning, 2 = error New features: - refactor return codes (exit codes), fixes #61 - add --show-rc option enable "terminating with X status, rc N" output, fixes 58, #351 - borg create backups atime and ctime additionally to mtime, fixes #317 - extract: support atime additionally to mtime - FUSE: support ctime and atime additionally to mtime - support borg --version - emit a warning if we have a slow msgpack installed - borg list --prefix=thishostname- REPO, fixes #205 - Debug commands (do not use except if you know what you do: debug-get-obj, debug-put-obj, debug-delete-obj, debug-dump-archive-items. 
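To illustrate the 0.28.0 return code change noted above, a small shell sketch of how a wrapper script might react to the new exit codes (the policy shown is just an example, not something prescribed by borg; repo and data paths are placeholders)::

    borg create --show-rc /path/to/repo::archive ~/data
    rc=$?
    # exit codes since 0.28.0: 0 = ok, 1 = warning, 2 = error
    if [ "$rc" -ge 2 ]; then
        echo "backup failed (rc=$rc)" >&2
    fi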
Bug fixes: - setup.py: fix bug related to BORG_LZ4_PREFIX processing - fix "check" for repos that have incomplete chunks, fixes #364 - borg mount: fix unlocking of repository at umount time, fixes #331 - fix reading files without touching their atime, #334 - non-ascii ACL fixes for Linux, FreeBSD and OS X, #277 - fix acl_use_local_uid_gid() and add a test for it, attic #359 - borg upgrade: do not upgrade repositories in place by default, #299 - fix cascading failure with the index conversion code, #269 - borg check: implement 'cmdline' archive metadata value decoding, #311 - fix RobustUnpacker, it missed some metadata keys (new atime and ctime keys were missing, but also bsdflags). add check for unknown metadata keys. - create from stdin: also save atime, ctime (cosmetic) - use default_notty=False for confirmations, fixes #345 - vagrant: fix msgpack installation on centos, fixes #342 - deal with unicode errors for symlinks in same way as for regular files and have a helpful warning message about how to fix wrong locale setup, fixes #382 - add ACL keys the RobustUnpacker must know about Other changes: - improve file size displays, more flexible size formatters - explicitly commit to the units standard, #289 - archiver: add E status (means that an error occurred when processing this (single) item - do binary releases via "github releases", closes #214 - create: use -x and --one-file-system (was: --do-not-cross-mountpoints), #296 - a lot of changes related to using "logging" module and screen output, #233 - show progress display if on a tty, output more progress information, #303 - factor out status output so it is consistent, fix surrogates removal, maybe fixes #309 - move away from RawConfigParser to ConfigParser - archive checker: better error logging, give chunk_id and sequence numbers (can be used together with borg debug-dump-archive-items). 
- do not mention the deprecated passphrase mode - emit a deprecation warning for --compression N (giving a just a number) - misc .coverragerc fixes (and coverage measurement improvements), fixes #319 - refactor confirmation code, reduce code duplication, add tests - prettier error messages, fixes #307, #57 - tests: - add a test to find disk-full issues, #327 - travis: also run tests on Python 3.5 - travis: use tox -r so it rebuilds the tox environments - test the generated pyinstaller-based binary by archiver unit tests, #215 - vagrant: tests: announce whether fakeroot is used or not - vagrant: add vagrant user to fuse group for debianoid systems also - vagrant: llfuse install on darwin needs pkgconfig installed - vagrant: use pyinstaller from develop branch, fixes #336 - benchmarks: test create, extract, list, delete, info, check, help, fixes #146 - benchmarks: test with both the binary and the python code - archiver tests: test with both the binary and the python code, fixes #215 - make basic test more robust - docs: - moved docs to borgbackup.readthedocs.org, #155 - a lot of fixes and improvements, use mobile-friendly RTD standard theme - use zlib,6 compression in some examples, fixes #275 - add missing rename usage to docs, closes #279 - include the help offered by borg help in the usage docs, fixes #293 - include a list of major changes compared to attic into README, fixes #224 - add OS X install instructions, #197 - more details about the release process, #260 - fix linux glibc requirement (binaries built on debian7 now) - build: move usage and API generation to setup.py - update docs about return codes, #61 - remove api docs (too much breakage on rtd) - borgbackup install + basics presentation (asciinema) - describe the current style guide in documentation - add section about debug commands - warn about not running out of space - add example for rename - improve chunker params docs, fixes #362 - minor development docs update Version 0.27.0 (2015-10-07) --------------------------- New features: - "borg upgrade" command - attic -> borg one time converter / migration, #21 - temporary hack to avoid using lots of disk space for chunks.archive.d, #235: To use it: rm -rf chunks.archive.d ; touch chunks.archive.d - respect XDG_CACHE_HOME, attic #181 - add support for arbitrary SSH commands, attic #99 - borg delete --cache-only REPO (only delete cache, not REPO), attic #123 Bug fixes: - use Debian 7 (wheezy) to build pyinstaller borgbackup binaries, fixes slow down observed when running the Centos6-built binary on Ubuntu, #222 - do not crash on empty lock.roster, fixes #232 - fix multiple issues with the cache config version check, #234 - fix segment entry header size check, attic #352 plus other error handling improvements / code deduplication there. - always give segment and offset in repo IntegrityErrors Other changes: - stop producing binary wheels, remove docs about it, #147 - docs: - add warning about prune - generate usage include files only as needed - development docs: add Vagrant section - update / improve / reformat FAQ - hint to single-file pyinstaller binaries from README Version 0.26.1 (2015-09-28) --------------------------- This is a minor update, just docs and new pyinstaller binaries. - docs update about python and binary requirements - better docs for --read-special, fix #220 - re-built the binaries, fix #218 and #213 (glibc version issue) - update web site about single-file pyinstaller binaries Note: if you did a python-based installation, there is no need to upgrade. 
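The 0.27.0 "arbitrary SSH commands" item above is, as far as we can tell, the feature exposed through the BORG_RSH environment variable (whose docs example is mentioned under 1.0.0rc2 earlier in this changelog); an illustrative sketch, with key path, host and repo path as placeholders::

    # use a dedicated key and non-interactive ssh when borg connects to the repo
    export BORG_RSH="ssh -i /path/to/backup_key -o BatchMode=yes"
    borg list user@backuphost:/path/to/repo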
Version 0.26.0 (2015-09-19) --------------------------- New features: - Faster cache sync (do all in one pass, remove tar/compression stuff), #163 - BORG_REPO env var to specify the default repo, #168 - read special files as if they were regular files, #79 - implement borg create --dry-run, attic issue #267 - Normalize paths before pattern matching on OS X, #143 - support OpenBSD and NetBSD (except xattrs/ACLs) - support / run tests on Python 3.5 Bug fixes: - borg mount repo: use absolute path, attic #200, attic #137 - chunker: use off_t to get 64bit on 32bit platform, #178 - initialize chunker fd to -1, so it's not equal to STDIN_FILENO (0) - fix reaction to "no" answer at delete repo prompt, #182 - setup.py: detect lz4.h header file location - to support python < 3.2.4, add less buggy argparse lib from 3.2.6 (#194) - fix for obtaining ``char *`` from temporary Python value (old code causes a compile error on Mint 17.2) - llfuse 0.41 install troubles on some platforms, require < 0.41 (UnicodeDecodeError exception due to non-ascii llfuse setup.py) - cython code: add some int types to get rid of unspecific python add / subtract operations (avoid ``undefined symbol FPE_``... error on some platforms) - fix verbose mode display of stdin backup - extract: warn if a include pattern never matched, fixes #209, implement counters for Include/ExcludePatterns - archive names with slashes are invalid, attic issue #180 - chunker: add a check whether the POSIX_FADV_DONTNEED constant is defined - fixes building on OpenBSD. Other changes: - detect inconsistency / corruption / hash collision, #170 - replace versioneer with setuptools_scm, #106 - docs: - pkg-config is needed for llfuse installation - be more clear about pruning, attic issue #132 - unit tests: - xattr: ignore security.selinux attribute showing up - ext3 seems to need a bit more space for a sparse file - do not test lzma level 9 compression (avoid MemoryError) - work around strange mtime granularity issue on netbsd, fixes #204 - ignore st_rdev if file is not a block/char device, fixes #203 - stay away from the setgid and sticky mode bits - use Vagrant to do easy cross-platform testing (#196), currently: - Debian 7 "wheezy" 32bit, Debian 8 "jessie" 64bit - Ubuntu 12.04 32bit, Ubuntu 14.04 64bit - Centos 7 64bit - FreeBSD 10.2 64bit - OpenBSD 5.7 64bit - NetBSD 6.1.5 64bit - Darwin (OS X Yosemite) Version 0.25.0 (2015-08-29) --------------------------- Compatibility notes: - lz4 compression library (liblz4) is a new requirement (#156) - the new compression code is very compatible: as long as you stay with zlib compression, older borg releases will still be able to read data from a repo/archive made with the new code (note: this is not the case for the default "none" compression, use "zlib,0" if you want a "no compression" mode that can be read by older borg). Also the new code is able to read repos and archives made with older borg versions (for all zlib levels 0..9). Deprecations: - --compression N (with N being a number, as in 0.24) is deprecated. We keep the --compression 0..9 for now to not break scripts, but it is deprecated and will be removed later, so better fix your scripts now: --compression 0 (as in 0.24) is the same as --compression zlib,0 (now). BUT: if you do not want compression, you rather want --compression none (which is the default). 
--compression 1 (in 0.24) is the same as --compression zlib,1 (now) --compression 9 (in 0.24) is the same as --compression zlib,9 (now) New features: - create --compression none (default, means: do not compress, just pass through data "as is". this is more efficient than zlib level 0 as used in borg 0.24) - create --compression lz4 (super-fast, but not very high compression) - create --compression zlib,N (slower, higher compression, default for N is 6) - create --compression lzma,N (slowest, highest compression, default N is 6) - honor the nodump flag (UF_NODUMP) and do not backup such items - list --short just outputs a simple list of the files/directories in an archive Bug fixes: - fixed --chunker-params parameter order confusion / malfunction, fixes #154 - close fds of segments we delete (during compaction) - close files which fell out the lrucache - fadvise DONTNEED now is only called for the byte range actually read, not for the whole file, fixes #158. - fix issue with negative "all archives" size, fixes #165 - restore_xattrs: ignore if setxattr fails with EACCES, fixes #162 Other changes: - remove fakeroot requirement for tests, tests run faster without fakeroot (test setup does not fail any more without fakeroot, so you can run with or without fakeroot), fixes #151 and #91. - more tests for archiver - recover_segment(): don't assume we have an fd for segment - lrucache refactoring / cleanup, add dispose function, py.test tests - generalize hashindex code for any key length (less hardcoding) - lock roster: catch file not found in remove() method and ignore it - travis CI: use requirements file - improved docs: - replace hack for llfuse with proper solution (install libfuse-dev) - update docs about compression - update development docs about fakeroot - internals: add some words about lock files / locking system - support: mention BountySource and for what it can be used - theme: use a lighter green - add pypi, wheel, dist package based install docs - split install docs into system-specific preparations and generic instructions Version 0.24.0 (2015-08-09) --------------------------- Incompatible changes (compared to 0.23): - borg now always issues --umask NNN option when invoking another borg via ssh on the repository server. By that, it's making sure it uses the same umask for remote repos as for local ones. Because of this, you must upgrade both server and client(s) to 0.24. - the default umask is 077 now (if you do not specify via --umask) which might be a different one as you used previously. The default umask avoids that you accidentally give access permissions for group and/or others to files created by borg (e.g. the repository). Deprecations: - "--encryption passphrase" mode is deprecated, see #85 and #97. See the new "--encryption repokey" mode for a replacement. New features: - borg create --chunker-params ... to configure the chunker, fixes #16 (attic #302, attic #300, and somehow also #41). This can be used to reduce memory usage caused by chunk management overhead, so borg does not create a huge chunks index/repo index and eats all your RAM if you back up lots of data in huge files (like VM disk images). See docs/misc/create_chunker-params.txt for more information. - borg info now reports chunk counts in the chunk index. - borg create --compression 0..9 to select zlib compression level, fixes #66 (attic #295). 
- borg init --encryption repokey (to store the encryption key into the repo), fixes #85 - improve at-end error logging, always log exceptions and set exit_code=1 - LoggedIO: better error checks / exceptions / exception handling - implement --remote-path to allow non-default-path borg locations, #125 - implement --umask M and use 077 as default umask for better security, #117 - borg check: give a named single archive to it, fixes #139 - cache sync: show progress indication - cache sync: reimplement the chunk index merging in C Bug fixes: - fix segfault that happened for unreadable files (chunker: n needs to be a signed size_t), #116 - fix the repair mode, #144 - repo delete: add destroy to allowed rpc methods, fixes issue #114 - more compatible repository locking code (based on mkdir), maybe fixes #92 (attic #317, attic #201). - better Exception msg if no Borg is installed on the remote repo server, #56 - create a RepositoryCache implementation that can cope with >2GiB, fixes attic #326. - fix Traceback when running check --repair, attic #232 - clarify help text, fixes #73. - add help string for --no-files-cache, fixes #140 Other changes: - improved docs: - added docs/misc directory for misc. writeups that won't be included "as is" into the html docs. - document environment variables and return codes (attic #324, attic #52) - web site: add related projects, fix web site url, IRC #borgbackup - Fedora/Fedora-based install instructions added to docs - Cygwin-based install instructions added to docs - updated AUTHORS - add FAQ entries about redundancy / integrity - clarify that borg extract uses the cwd as extraction target - update internals doc about chunker params, memory usage and compression - added docs about development - add some words about resource usage in general - document how to backup a raw disk - add note about how to run borg from virtual env - add solutions for (ll)fuse installation problems - document what borg check does, fixes #138 - reorganize borgbackup.github.io sidebar, prev/next at top - deduplicate and refactor the docs / README.rst - use borg-tmp as prefix for temporary files / directories - short prune options without "keep-" are deprecated, do not suggest them - improved tox configuration - remove usage of unittest.mock, always use mock from pypi - use entrypoints instead of scripts, for better use of the wheel format and modern installs - add requirements.d/development.txt and modify tox.ini - use travis-ci for testing based on Linux and (new) OS X - use coverage.py, pytest-cov and codecov.io for test coverage support I forgot to list some stuff already implemented in 0.23.0, here they are: New features: - efficient archive list from manifest, meaning a big speedup for slow repo connections and "list ", "delete ", "prune" (attic #242, attic #167) - big speedup for chunks cache sync (esp. 
for slow repo connections), fixes #18 - hashindex: improve error messages Other changes: - explicitly specify binary mode to open binary files - some easy micro optimizations Version 0.23.0 (2015-06-11) --------------------------- Incompatible changes (compared to attic, fork related): - changed sw name and cli command to "borg", updated docs - package name (and name in urls) uses "borgbackup" to have fewer collisions - changed repo / cache internal magic strings from ATTIC* to BORG*, changed cache location to .cache/borg/ - this means that it currently won't accept attic repos (see issue #21 about improving that) Bug fixes: - avoid defect python-msgpack releases, fixes attic #171, fixes attic #185 - fix traceback when trying to do unsupported passphrase change, fixes attic #189 - datetime does not like the year 10.000, fixes attic #139 - fix "info" all archives stats, fixes attic #183 - fix parsing with missing microseconds, fixes attic #282 - fix misleading hint the fuse ImportError handler gave, fixes attic #237 - check unpacked data from RPC for tuple type and correct length, fixes attic #127 - fix Repository._active_txn state when lock upgrade fails - give specific path to xattr.is_enabled(), disable symlink setattr call that always fails - fix test setup for 32bit platforms, partial fix for attic #196 - upgraded versioneer, PEP440 compliance, fixes attic #257 New features: - less memory usage: add global option --no-cache-files - check --last N (only check the last N archives) - check: sort archives in reverse time order - rename repo::oldname newname (rename repository) - create -v output more informative - create --progress (backup progress indicator) - create --timestamp (utc string or reference file/dir) - create: if "-" is given as path, read binary from stdin - extract: if --stdout is given, write all extracted binary data to stdout - extract --sparse (simple sparse file support) - extra debug information for 'fread failed' - delete (deletes whole repo + local cache) - FUSE: reflect deduplication in allocated blocks - only allow whitelisted RPC calls in server mode - normalize source/exclude paths before matching - use posix_fadvise to not spoil the OS cache, fixes attic #252 - toplevel error handler: show tracebacks for better error analysis - sigusr1 / sigint handler to print current file infos - attic PR #286 - RPCError: include the exception args we get from remote Other changes: - source: misc. cleanups, pep8, style - docs and faq improvements, fixes, updates - cleanup crypto.pyx, make it easier to adapt to other AES modes - do os.fsync like recommended in the python docs - source: Let chunker optionally work with os-level file descriptor. 
- source: Linux: remove duplicate os.fsencode calls - source: refactor _open_rb code a bit, so it is more consistent / regular - source: refactor indicator (status) and item processing - source: use py.test for better testing, flake8 for code style checks - source: fix tox >=2.0 compatibility (test runner) - pypi package: add python version classifiers, add FreeBSD to platforms Attic Changelog --------------- Here you can see the full list of changes between each Attic release until Borg forked from Attic: Version 0.17 ~~~~~~~~~~~~ (bugfix release, released on X) - Fix hashindex ARM memory alignment issue (#309) - Improve hashindex error messages (#298) Version 0.16 ~~~~~~~~~~~~ (bugfix release, released on May 16, 2015) - Fix typo preventing the security confirmation prompt from working (#303) - Improve handling of systems with improperly configured file system encoding (#289) - Fix "All archives" output for attic info. (#183) - More user friendly error message when repository key file is not found (#236) - Fix parsing of iso 8601 timestamps with zero microseconds (#282) Version 0.15 ~~~~~~~~~~~~ (bugfix release, released on Apr 15, 2015) - xattr: Be less strict about unknown/unsupported platforms (#239) - Reduce repository listing memory usage (#163). - Fix BrokenPipeError for remote repositories (#233) - Fix incorrect behavior with two character directory names (#265, #268) - Require approval before accessing relocated/moved repository (#271) - Require approval before accessing previously unknown unencrypted repositories (#271) - Fix issue with hash index files larger than 2GB. - Fix Python 3.2 compatibility issue with noatime open() (#164) - Include missing pyx files in dist files (#168) Version 0.14 ~~~~~~~~~~~~ (feature release, released on Dec 17, 2014) - Added support for stripping leading path segments (#95) "attic extract --strip-segments X" - Add workaround for old Linux systems without acl_extended_file_no_follow (#96) - Add MacPorts' path to the default openssl search path (#101) - HashIndex improvements, eliminates unnecessary IO on low memory systems. - Fix "Number of files" output for attic info. (#124) - limit create file permissions so files aren't read while restoring - Fix issue with empty xattr values (#106) Version 0.13 ~~~~~~~~~~~~ (feature release, released on Jun 29, 2014) - Fix sporadic "Resource temporarily unavailable" when using remote repositories - Reduce file cache memory usage (#90) - Faster AES encryption (utilizing AES-NI when available) - Experimental Linux, OS X and FreeBSD ACL support (#66) - Added support for backup and restore of BSDFlags (OSX, FreeBSD) (#56) - Fix bug where xattrs on symlinks were not correctly restored - Added cachedir support. CACHEDIR.TAG compatible cache directories can now be excluded using ``--exclude-caches`` (#74) - Fix crash on extreme mtime timestamps (year 2400+) (#81) - Fix Python 3.2 specific lockf issue (EDEADLK) Version 0.12 ~~~~~~~~~~~~ (feature release, released on April 7, 2014) - Python 3.4 support (#62) - Various documentation improvements a new style - ``attic mount`` now supports mounting an entire repository not only individual archives (#59) - Added option to restrict remote repository access to specific path(s): ``attic serve --restrict-to-path X`` (#51) - Include "all archives" size information in "--stats" output. (#54) - Added ``--stats`` option to ``attic delete`` and ``attic prune`` - Fixed bug where ``attic prune`` used UTC instead of the local time zone when determining which archives to keep. 
- Switch to SI units (Power of 1000 instead 1024) when printing file sizes Version 0.11 ~~~~~~~~~~~~ (feature release, released on March 7, 2014) - New "check" command for repository consistency checking (#24) - Documentation improvements - Fix exception during "attic create" with repeated files (#39) - New "--exclude-from" option for attic create/extract/verify. - Improved archive metadata deduplication. - "attic verify" has been deprecated. Use "attic extract --dry-run" instead. - "attic prune --hourly|daily|..." has been deprecated. Use "attic prune --keep-hourly|daily|..." instead. - Ignore xattr errors during "extract" if not supported by the filesystem. (#46) Version 0.10 ~~~~~~~~~~~~ (bugfix release, released on Jan 30, 2014) - Fix deadlock when extracting 0 sized files from remote repositories - "--exclude" wildcard patterns are now properly applied to the full path not just the file name part (#5). - Make source code endianness agnostic (#1) Version 0.9 ~~~~~~~~~~~ (feature release, released on Jan 23, 2014) - Remote repository speed and reliability improvements. - Fix sorting of segment names to ignore NFS left over files. (#17) - Fix incorrect display of time (#13) - Improved error handling / reporting. (#12) - Use fcntl() instead of flock() when locking repository/cache. (#15) - Let ssh figure out port/user if not specified so we don't override .ssh/config (#9) - Improved libcrypto path detection (#23). Version 0.8.1 ~~~~~~~~~~~~~ (bugfix release, released on Oct 4, 2013) - Fix segmentation fault issue. Version 0.8 ~~~~~~~~~~~ (feature release, released on Oct 3, 2013) - Fix xattr issue when backing up sshfs filesystems (#4) - Fix issue with excessive index file size (#6) - Support access of read only repositories. - New syntax to enable repository encryption: attic init --encryption="none|passphrase|keyfile". - Detect and abort if repository is older than the cache. Version 0.7 ~~~~~~~~~~~ (feature release, released on Aug 5, 2013) - Ported to FreeBSD - Improved documentation - Experimental: Archives mountable as FUSE filesystems. - The "user." prefix is no longer stripped from xattrs on Linux Version 0.6.1 ~~~~~~~~~~~~~ (bugfix release, released on July 19, 2013) - Fixed an issue where mtime was not always correctly restored. Version 0.6 ~~~~~~~~~~~ First public release on July 9, 2013 borgbackup-1.1.15/AUTHORS0000644000175000017500000000363413771325506014763 0ustar useruser00000000000000E-mail addresses listed here are not intended for support, please see the `support section`_ instead. .. support section: https://borgbackup.readthedocs.io/en/stable/support.html Borg authors ("The Borg Collective") ------------------------------------ - Thomas Waldmann - Radek Podgorny - Yuri D'Elia - Michael Hanselmann - Teemu Toivanen - Marian Beermann - Martin Hostettler - Daniel Reichelt - Lauri Niskanen - Abdel-Rahman A. (Abogical) - Gu1nness Retired ``````` - Antoine Beaupré Borg is a fork of Attic. Attic authors ------------- Attic is written and maintained by Jonas Borgström and various contributors: Attic Development Lead `````````````````````` - Jonas Borgström Attic Patches and Suggestions ````````````````````````````` - Brian Johnson - Cyril Roussillon - Dan Christensen - Jeremy Maitin-Shepard - Johann Klähn - Petros Moisiadis - Thomas Waldmann BLAKE2 ------ Borg includes BLAKE2: Copyright 2012, Samuel Neves , licensed under the terms of the CC0, the OpenSSL Licence, or the Apache Public License 2.0. 
Slicing CRC32 ------------- Borg includes a fast slice-by-8 implementation of CRC32, Copyright 2011-2015 Stephan Brumme, licensed under the terms of a zlib license. See http://create.stephan-brumme.com/crc32/ Folding CRC32 ------------- Borg includes an extremely fast folding implementation of CRC32, Copyright 2013 Intel Corporation, licensed under the terms of the zlib license. msgpack ------- Borg includes Python msgpack, Copyright 2008-2011 INADA Naoki licensed under the terms of the Apache License 2.0. xxHash ------ XXH64, a fast non-cryptographic hash algorithm. Copyright 2012-2016 Yann Collet, licensed under a BSD 2-clause license. borgbackup-1.1.15/conftest.py0000644000175000017500000000526613771325506016115 0ustar useruser00000000000000import os import pytest # IMPORTANT keep this above all other borg imports to avoid inconsistent values # for `from borg.constants import PBKDF2_ITERATIONS` (or star import) usages before # this is executed from borg import constants # no fixture-based monkey-patching since star-imports are used for the constants module constants.PBKDF2_ITERATIONS = 1 # needed to get pretty assertion failures in unit tests: if hasattr(pytest, 'register_assert_rewrite'): pytest.register_assert_rewrite('borg.testsuite') import borg.cache from borg.logger import setup_logging # Ensure that the loggers exist for all tests setup_logging() from borg.testsuite import has_lchflags, has_llfuse from borg.testsuite import are_symlinks_supported, are_hardlinks_supported, is_utime_fully_supported from borg.testsuite.platform import fakeroot_detected, are_acls_working from borg import xattr @pytest.fixture(autouse=True) def clean_env(tmpdir_factory, monkeypatch): # avoid that we access / modify the user's normal .config / .cache directory: monkeypatch.setenv('XDG_CONFIG_HOME', str(tmpdir_factory.mktemp('xdg-config-home'))) monkeypatch.setenv('XDG_CACHE_HOME', str(tmpdir_factory.mktemp('xdg-cache-home'))) # also avoid to use anything from the outside environment: keys = [key for key in os.environ if key.startswith('BORG_')] for key in keys: monkeypatch.delenv(key, raising=False) def pytest_report_header(config, startdir): tests = { "BSD flags": has_lchflags, "fuse": has_llfuse, "root": not fakeroot_detected(), "symlinks": are_symlinks_supported(), "hardlinks": are_hardlinks_supported(), "atime/mtime": is_utime_fully_supported(), "modes": "BORG_TESTS_IGNORE_MODES" not in os.environ } enabled = [] disabled = [] for test in tests: if tests[test]: enabled.append(test) else: disabled.append(test) output = "Tests enabled: " + ", ".join(enabled) + "\n" output += "Tests disabled: " + ", ".join(disabled) return output class DefaultPatches: def __init__(self, request): self.org_cache_wipe_cache = borg.cache.LocalCache.wipe_cache def wipe_should_not_be_called(*a, **kw): raise AssertionError("Cache wipe was triggered, if this is part of the test add @pytest.mark.allow_cache_wipe") if 'allow_cache_wipe' not in request.keywords: borg.cache.LocalCache.wipe_cache = wipe_should_not_be_called request.addfinalizer(self.undo) def undo(self): borg.cache.LocalCache.wipe_cache = self.org_cache_wipe_cache @pytest.fixture(autouse=True) def default_patches(request): return DefaultPatches(request) borgbackup-1.1.15/scripts/0000755000175000017500000000000013771325773015402 5ustar useruser00000000000000borgbackup-1.1.15/scripts/borg.exe.spec0000644000175000017500000000376613771325506017775 0ustar useruser00000000000000# -*- mode: python -*- # this pyinstaller spec file is used to build borg binaries on posix 
platforms import os, sys # Note: SPEC contains the spec file argument given to pyinstaller here = os.path.dirname(os.path.abspath(SPEC)) basepath = os.path.abspath(os.path.join(here, '..')) block_cipher = None a = Analysis([os.path.join(basepath, 'src/borg/__main__.py'), ], pathex=[basepath, ], binaries=[], datas=[ ('../src/borg/paperkey.html', 'borg'), ], hiddenimports=['borg.platform.posix'], hookspath=[], runtime_hooks=[], excludes=[ '_ssl', 'ssl', ], win_no_prefer_redirects=False, win_private_assemblies=False, cipher=block_cipher) if sys.platform == 'darwin': # do not bundle the osxfuse libraries, so we do not get a version # mismatch to the installed kernel driver of osxfuse. a.binaries = [b for b in a.binaries if 'libosxfuse' not in b[0]] pyz = PYZ(a.pure, a.zipped_data, cipher=block_cipher) exe = EXE(pyz, a.scripts, a.binaries, a.zipfiles, a.datas, name='borg.exe', debug=False, strip=False, upx=True, console=True ) # Build a directory-based binary in addition to a packed # single file. This allows one to easily look at all included # files (e.g. without having to strace or halt the built binary # and introspect /tmp). Also avoids unpacking all libs when # running the app, which is better for app signing on various OS. slim_exe = EXE(pyz, a.scripts, exclude_binaries=True, name='borg.exe', debug=False, strip=False, upx=False, console=True) coll = COLLECT(slim_exe, a.binaries, a.zipfiles, a.datas, strip=False, upx=False, name='borg-dir') borgbackup-1.1.15/scripts/sign-binaries0000755000175000017500000000051413771325506020054 0ustar useruser00000000000000#!/bin/bash D=$1 if [ "$D" = "" ]; then echo "Usage: sign-binaries 201912312359" exit fi if [ "$QUBES_GPG_DOMAIN" = "" ]; then GPG=gpg else GPG=qubes-gpg-client-wrapper fi for file in dist/borg-*; do $GPG --local-user "Thomas Waldmann" --armor --detach-sign --output $file.asc $file done touch -t $D dist/* borgbackup-1.1.15/scripts/hash_sizes.py0000644000175000017500000000543713771325506020117 0ustar useruser00000000000000""" Compute hashtable sizes with nices properties - prime sizes (for small to medium sizes) - 2 prime-factor sizes (for big sizes) - fast growth for small sizes - slow growth for big sizes Note: this is just a tool for developers. within borgbackup, it is just used to generate hash_sizes definition for _hashindex.c. """ from collections import namedtuple K, M, G = 2**10, 2**20, 2**30 # hash table size (in number of buckets) start, end_p1, end_p2 = 1 * K, 127 * M, 2 * G - 10 * M # stay well below 2^31 - 1 Policy = namedtuple("Policy", "upto grow") policies = [ # which growth factor to use when growing a hashtable of size < upto # grow fast (*2.0) at the start so we do not have to resize too often (expensive). # grow slow (*1.1) for huge hash tables (do not jump too much in memory usage) Policy(256*K, 2.0), Policy(2*M, 1.7), Policy(16*M, 1.4), Policy(128*M, 1.2), Policy(2*G-1, 1.1), ] # slightly modified version of: # http://www.macdevcenter.com/pub/a/python/excerpt/pythonckbk_chap1/index1.html?page=2 def eratosthenes(): """Yields the sequence of prime numbers via the Sieve of Eratosthenes.""" D = {} # map each composite integer to its first-found prime factor q = 2 # q gets 2, 3, 4, 5, ... 
ad infinitum while True: p = D.pop(q, None) if p is None: # q not a key in D, so q is prime, therefore, yield it yield q # mark q squared as not-prime (with q as first-found prime factor) D[q * q] = q else: # let x <- smallest (N*p)+q which wasn't yet known to be composite # we just learned x is composite, with p first-found prime factor, # since p is the first-found prime factor of q -- find and mark it x = p + q while x in D: x += p D[x] = p q += 1 def two_prime_factors(pfix=65537): """Yields numbers with 2 prime factors pfix and p.""" for p in eratosthenes(): yield pfix * p def get_grow_factor(size): for p in policies: if size < p.upto: return p.grow def find_bigger_prime(gen, i): while True: p = next(gen) if p >= i: return p def main(): sizes = [] i = start gen = eratosthenes() while i < end_p1: grow_factor = get_grow_factor(i) p = find_bigger_prime(gen, i) sizes.append(p) i = int(i * grow_factor) gen = two_prime_factors() # for lower ram consumption while i < end_p2: grow_factor = get_grow_factor(i) p = find_bigger_prime(gen, i) sizes.append(p) i = int(i * grow_factor) print("""\ static int hash_sizes[] = { %s }; """ % ', '.join(str(size) for size in sizes)) if __name__ == '__main__': main() borgbackup-1.1.15/scripts/sdist-sign0000755000175000017500000000046713771325506017415 0ustar useruser00000000000000#!/bin/bash R=$1 if [ "$R" = "" ]; then echo "Usage: sdist-sign 1.2.3" exit fi if [ "$QUBES_GPG_DOMAIN" = "" ]; then GPG=gpg else GPG=qubes-gpg-client-wrapper fi python setup.py sdist D=dist/borgbackup-$R.tar.gz $GPG --detach-sign --local-user "Thomas Waldmann" --armor --output $D.asc $D borgbackup-1.1.15/scripts/shell_completions/0000755000175000017500000000000013771325773021125 5ustar useruser00000000000000borgbackup-1.1.15/scripts/shell_completions/zsh/0000755000175000017500000000000013771325773021731 5ustar useruser00000000000000borgbackup-1.1.15/scripts/shell_completions/zsh/_borg0000644000175000017500000015314413771325506022746 0ustar useruser00000000000000#compdef borg borgfs -P -value-,BORG_*,-default- # Zsh completion for Borg Backup 1.1.15. # # Recommended _borg specific settings: # # zstyle -e ':completion:*:*:borg-*:argument-rest:*' tag-order \ # '[[ $words[CURRENT] == -* ]] && reply=( "! archives archive-files" "-" )' # zstyle ':completion:*:*:(borg|-value-,BORG_)*' sort false # zstyle ':completion:*:*:borg-config:argument-2:keys' list-grouped false # zstyle ':completion:*:*:borg-*:*' gain-privileges true # zstyle ':completion:*' fake-parameters 'BORG_REPO:scalar' # # Custom styles: # # archive-description-format # Default: `{archive:<36} {time} [{id}]`. # archive-sort # In which order archive names should be listed. # Possible values are: `inverse`, `timestamp`, `name`, `id`. # file-description-format # Default: `{mode} {user:6} {group:6} {size:8d} {mtime} {path}{extra}`. # path-style-selector # Style selector used to select a path (when Borg would use either `pp` or `pf`). # Default: `fm`. # repository-suffix # This boolean style controls whether to add the `::` auto-removable suffix to a repository. # Default: `true`. 
(( $+functions[_borg_commands] )) || _borg_commands() { local -a commands_=( 'benchmark:benchmark command' 'break-lock:break repository and cache locks' 'change-passphrase:change repository passphrase' 'check:verify repository' 'config:get and set configuration values' 'create:create backup' 'debug:debugging command (not intended for normal use)' 'delete:delete archive' 'diff:find differences in archive contents' 'export-tar:create tarball from archive' 'extract:extract archive contents' 'help:extra help' 'info:show repository or archive information' 'init:initialize empty repository' 'key:manage repository key' 'list:list archive or repository contents' 'mount:mount repository' 'prune:prune archives' 'recreate:re-create archives' 'rename:rename archive' 'serve:start repository server process' 'umount:umount repository' 'upgrade:upgrade repository format' 'with-lock:run user command with lock held' ) _describe -t commands 'borg commands' commands_ } (( $+functions[_borg-benchmark] )) || _borg-benchmark() { local -a common_options __borg_setup_common_options _arguments -s -w -S : \ $common_options \ ':type:(crud)' \ ': :_borg_repository' \ ':PATH:_files' } (( $+functions[_borg-break-lock] )) || _borg-break-lock() { local -a common_options __borg_setup_common_options _arguments -s -w -S : \ $common_options \ ':: :_borg_repository' } (( $+functions[_borg-change-passphrase] )) || _borg-change-passphrase() { local -a common_options __borg_setup_common_options _arguments -s -w -S : \ $common_options \ ':: :_borg_repository' } (( $+functions[_borg-check] )) || _borg-check() { local -a common_options common_archive_filters_options __borg_setup_common_options __borg_setup_common_archive_filters_options _arguments -s -w -S : \ '--repository-only[only perform repository checks]' \ '--archives-only[only perform archives checks]' \ '(--repository-only)--verify-data[perform cryptographic archive data integrity verification]' \ '--repair[attempt to repair any inconsistencies found]' \ '--save-space[work slower, but using less space]' \ $common_archive_filters_options \ $common_options \ '::REPOSITORY_OR_ARCHIVE: _borg_repository_or_archive' } (( $+functions[_borg-config] )) || _borg-config() { local -a common_options __borg_setup_common_options _arguments -s -w -S : \ '(-c --cache)'{-c,--cache}'[get and set values from the repo cache]' \ '(-d --delete)'{-d,--delete}'[delete the key from the config]' \ '(-l --list)'{-l,--list}'[list the configuration of the repo]' \ $common_options \ ': :_borg_repository' \ ': : _borg_config $line[1]' \ '::VALUE' } (( $+functions[_borg-create] )) || _borg-create() { local -a common_options common_create_options __borg_setup_common_options __borg_setup_common_create_options _arguments -s -w -S : \ '*'{-e,--exclude}'=[exclude paths matching PATTERN]: : _borg_style_selector_or_archive_files -f -e "$line[1]" fm "${(@)line[2,-1]}"' \ '*--pattern=[experimental: include/exclude paths matching PATTERN]: : _borg_style_selector_or_archive_files -p -f -e "$line[1]" sh "${(@)line[2,-1]}"' \ $common_create_options \ '(-s --stats)--json[Output stats as JSON. Implies --stats.]' \ '--no-cache-sync[experimental: do not synchronize the cache. 
Implies not using the files cache.]' \ '--no-files-cache[do not load/update the file metadata cache used to detect unchanged files]' \ '--stdin-name=[use NAME in archive for stdin data (default: "stdin")]:NAME' \ '--stdin-user=[set user USER in archive for stdin data (default: root)]:USER:_users' \ '--stdin-group=[set group GROUP in archive for stdin data (default: root)]:GROUP:_groups' \ '--stdin-mode=[set mode to M in archive for stdin data (default: 0660)]: : _borg_guard_numeric_mode "M"' \ '--exclude-nodump[exclude files flagged NODUMP]' \ '(-x --one-file-system)'{-x,--one-file-system}'[stay in the same file system]' \ '--numeric-owner[only store numeric user and group identifiers]' \ '--noatime[do not store atime into archive]' \ '--noctime[do not store ctime into archive]' \ '--nobirthtime[do not store birthtime (creation date) into archive]' \ '--nobsdflags[do not read and store bsdflags (e.g. NODUMP, IMMUTABLE) into archive]' \ '--ignore-inode[ignore inode data in the file metadata cache used to detect unchanged files]' \ '--files-cache=[operate files cache in MODE. default: ctime,size,inode]:MODE:(ctime,size,inode mtime,size,inode ctime,size mtime,size rechunk,ctime rechunk,mtime disabled)' \ '--read-special[open and read block and char device files as well as FIFOs as if they were regular files]' \ $common_options \ ':ARCHIVE: _borg_repository_or_archive -a -p' \ '*:PATH:_files' } (( $+functions[_borg-debug] )) || _borg-debug() { local -a state line common_options local curcontext="$curcontext" state_descr declare -A opt_args local -i ret=1 __borg_setup_common_options _arguments -s -w -C : \ $common_options \ ': :->command' \ '*:: :->option-or-argument' && ret=0 case $state in (command) local -a debug_commands=( 'info:show system infos for debugging / bug reports' 'dump-archive-items:dump archive items (metadata)' 'dump-archive:dump decoded archive metadata' 'dump-manifest:dump decoded repository metadata' 'dump-repo-objs:dump repo objects' 'search-repo-objs:search repo objects' 'get-obj:get object from repository' 'put-obj:put object to repository' 'delete-obj:delete object from repository' 'refcount-obj:show refcount for object from repository' 'convert-profile:convert Borg profile to Python profile' ) _describe -t commands 'command' debug_commands && ret=0 ;; (option-or-argument) curcontext="${curcontext%:*}-$line[1]:" case $line[1] in (info) _arguments -s -w -S : \ $common_options && ret=0 ;; (dump-archive-items) _arguments -s -w -S : \ $common_options \ ':ARCHIVE: _borg_repository_or_archive -a' && ret=0 ;; (dump-archive) _arguments -s -w -S : \ $common_options \ ':ARCHIVE: _borg_repository_or_archive -a' \ ':PATH:_files' && ret=0 ;; (dump-manifest) _arguments -s -w -S : \ $common_options \ ': :_borg_repository' \ ':PATH:_files' && ret=0 ;; (dump-repo-objs) _arguments -s -w -S : \ $common_options \ ': :_borg_repository' && ret=0 ;; (search-repo-objs) _arguments -s -w -S : \ $common_options \ ': :_borg_repository' \ ':WANTED (hex or string):' && ret=0 ;; (get-obj) _arguments -s -w -S : \ $common_options \ ': :_borg_repository' \ ':ID (hex object):' \ ':PATH:_files' && ret=0 ;; (put-obj) _arguments -s -w -S : \ $common_options \ ': :_borg_repository' \ '*:PATH:_files' && ret=0 ;; (delete-obj) _arguments -s -w -S : \ $common_options \ ': :_borg_repository' \ '*:ID (hex object):' && ret=0 ;; (refcount-obj) _arguments -s -w -S : \ $common_options \ ': :_borg_repository' \ '*:ID (hex object):' && ret=0 ;; (convert-profile) _arguments -s -w -S : \ $common_options \ ':INPUT:_files' 
\ ':OUTPUT:_files' && ret=0 ;; (*) if ! _call_function ret _borg_debug_$line[1]; then _default && ret=0 fi ;; esac ;; esac return ret } (( $+functions[_borg-delete] )) || _borg-delete() { local -a common_options common_archive_filters_options common_dry_run_stats_options __borg_setup_common_options __borg_setup_common_archive_filters_options __borg_setup_common_dry_run_stats_options _arguments -s -w -S : \ $common_dry_run_stats_options \ '--cache-only[delete only the local cache for the given repository]' \ '*--force[force deletion of corrupted archives, use "--force --force" in case "--force" does not work]' \ '--save-space[work slower, but using less space]' \ $common_archive_filters_options \ $common_options \ ':REPOSITORY_OR_ARCHIVE: _borg_repository_or_archive' \ '*:ARCHIVE: _borg_archive "${line[1]%%::*}"' } (( $+functions[_borg-diff] )) || _borg-diff() { local -a common_options common_exclude_options __borg_setup_common_options __borg_setup_common_exclude_options _arguments -s -w -S : \ '--numeric-owner[only obey numeric user and group identifiers]' \ '--same-chunker-params[override check of chunker parameters]' \ '--sort[sort the output lines by file path]' \ $common_exclude_options \ $common_options \ ':ARCHIVE1: _borg_repository_or_archive -a' \ ':ARCHIVE2: _borg_archive "${line[1]%%::*}"' \ '*: : _borg_style_selector_or_archive_files -e "$line[1]" pp' } (( $+functions[_borg-export-tar] )) || _borg-export-tar() { local -a common_options common_exclude_extract_options __borg_setup_common_options __borg_setup_common_exclude_extract_options _arguments -s -w -S : \ '--tar-filter[filter program to pipe data through]: :_cmdstring' \ '--list[output verbose list of items (files, dirs, ...)]' \ $common_exclude_extract_options \ $common_options \ ':ARCHIVE: _borg_repository_or_archive -a' \ ':FILE:_files' \ '*: : _borg_style_selector_or_archive_files -e "$line[1]" pp' } (( $+functions[_borg-extract] )) || _borg-extract() { local -a common_options common_exclude_extract_options __borg_setup_common_options __borg_setup_common_exclude_extract_options _arguments -s -w -S : \ '--list[output verbose list of items (files, dirs, ...)]' \ '(-n --dry-run)'{-n,--dry-run}'[do not actually change any files]' \ '--numeric-owner[only obey numeric user and group identifiers]' \ '--nobsdflags[do not extract/set bsdflags (e.g. NODUMP, IMMUTABLE)]' \ '--stdout[write all extracted data to stdout]' \ '--sparse[create holes in output sparse file from all-zero chunks]' \ $common_exclude_extract_options \ $common_options \ ':ARCHIVE: _borg_repository_or_archive -a' \ '*: : _borg_style_selector_or_archive_files -e "$line[1]" pp' } (( $+functions[_borg-help] )) || _borg-help() { local -a common_options __borg_setup_common_options _arguments -s -w -S : \ '--epilog-only' \ '--usage-only' \ $common_options \ ':: : _alternative "topics:TOPIC:(patterns placeholders compression)" ": :_borg_commands"' } (( $+functions[_borg-info] )) || _borg-info() { local -a common_options common_archive_filters_options __borg_setup_common_options __borg_setup_common_archive_filters_options _arguments -s -w -S : \ '--json[format output as JSON]' \ $common_archive_filters_options \ $common_options \ '::REPOSITORY_OR_ARCHIVE: _borg_repository_or_archive' } (( $+functions[_borg-init] )) || _borg-init() { local -i ret=1 local -a common_options common_init_options __borg_setup_common_options __borg_setup_common_init_options # special handling for the required optional argument if (( ! 
${words[(I)(-e|--encryption)(|=*)]} )); then local desc='select encryption key mode' local -a long=( "--encryption:$desc" ) short=( "-e:$desc" ) remove_chars=( -r '= \t\n\-' ) _describe -t required-options 'required option' long -S '=' $remove_chars -- short $remove_chars && ret=0 fi _arguments -s -w -S : \ '(-e --encryption)'{-e,--encryption}'=[select encryption key mode (required)]:MODE:(none keyfile keyfile-blake2 repokey repokey-blake2 authenticated authenticated-blake2)' \ $common_init_options \ '--make-parent-dirs[create parent directories]' \ '::REPOSITORY:_directories' && ret=0 return ret } (( $+functions[_borg-key] )) || _borg-key() { local -a state line common_options local curcontext="$curcontext" state_descr declare -A opt_args local -i ret=1 __borg_setup_common_options _arguments -s -w -C : \ $common_options \ ': :->command' \ '*:: :->option-or-argument' && ret=0 case $state in (command) local -a key_commands=( 'change-passphrase:Change repository key file passphrase' 'export:Export the repository key for backup' 'import:Import the repository key from backup' 'migrate-to-repokey:Migrate passphrase -> repokey' ) _describe -t commands 'command' key_commands && ret=0 ;; (option-or-argument) curcontext="${curcontext%:*}-$line[1]:" case $line[1] in (change-passphrase) _arguments -s -w -S : \ $common_options \ ': :_borg_repository' && ret=0 ;; (export) _arguments -s -w -S : \ '--paper[create an export suitable for printing and later type-in]' \ '--qr-html[create an html file suitable for printing and later type-in or qr scan]' \ $common_options \ ': :_borg_repository' \ '::PATH:_files' && ret=0 ;; (import) _arguments -s -w -S : \ '--paper[interactively import from a backup done with --paper]' \ $common_options \ ': :_borg_repository' \ '::PATH:_files' && ret=0 ;; (migrate-to-repokey) _arguments -s -w -S : \ $common_options \ ':: :_borg_repository' && ret=0 ;; (*) if ! _call_function ret _borg_key_$line[1]; then _default && ret=0 fi ;; esac ;; esac return ret } (( $+functions[_borg-list] )) || _borg-list() { local -a common_options common_exclude_options common_archive_filters_options __borg_setup_common_options __borg_setup_common_exclude_options __borg_setup_common_archive_filters_options _arguments -s -w -S : \ '--short[only print file/directory names, nothing else]' \ {--format,--list-format}'=[specify format for file listing]:FORMAT: _borg_format_keys $line[1]' \ '--json[Only valid for listing repository contents. Format output as JSON.]' \ '--json-lines[Only valid for listing archive contents. 
Format output as JSON Lines.]' \ $common_archive_filters_options \ $common_exclude_options \ $common_options \ ':REPOSITORY_OR_ARCHIVE: _borg_repository_or_archive' \ '*: : _borg_style_selector_or_archive_files -e "$line[1]" pp' } (( $+functions[_borg-mount] )) || _borg-mount() { local -a common_options common_exclude_extract_options common_archive_filters_options __borg_setup_common_options __borg_setup_common_exclude_extract_options __borg_setup_common_archive_filters_options _arguments -s -w -S : \ $* \ '(-f --foreground)'{-f,--foreground}'[stay in foreground, do not daemonize]' \ '-o[mount options]: :_fuse_values "mount options" "versions[merged, versioned view of the files in the archives]" "allow_damaged_files[read damaged files]" "ignore_permissions[not enforce \"default_permissions\"]"' \ $common_archive_filters_options \ $common_exclude_extract_options \ $common_options \ ':REPOSITORY_OR_ARCHIVE: _borg_repository_or_archive' \ ':MOUNTPOINT:_directories' \ '*: : _borg_style_selector_or_archive_files "$line[1]" pp' } (( $+functions[_borg-prune] )) || _borg-prune() { local -a common_options common_prefix_and_glob_archives_filter_options common_dry_run_stats_options __borg_setup_common_options __borg_setup_common_prefix_and_glob_archives_filter_options __borg_setup_common_dry_run_stats_options _arguments -s -w -S : \ $common_dry_run_stats_options \ '*--force[force pruning of corrupted archives, use "--force --force" in case "--force" does not work]' \ '--list[output verbose list of archives it keeps/prunes]' \ '--keep-within[keep all archives within this time interval]: : _borg_guard_unsigned_number "INTERVAL"' \ '(--keep-last --keep-secondly)'{--keep-last,--keep-secondly}'[number of secondly archives to keep]: : _borg_guard_unsigned_number "N"' \ '--keep-minutely[number of minutely archives to keep]: : _borg_guard_unsigned_number "N"' \ '(-H --keep-hourly)'{-H,--keep-hourly}'[number of hourly archives to keep]: : _borg_guard_unsigned_number "N"' \ '(-d --keep-daily)'{-d,--keep-daily}'[number of daily archives to keep]: : _borg_guard_unsigned_number "N"' \ '(-w --keep-weekly)'{-w,--keep-weekly}'[number of weekly archives to keep]: : _borg_guard_unsigned_number "N"' \ '(-m --keep-monthly)'{-m,--keep-monthly}'[number of monthly archives to keep]: : _borg_guard_unsigned_number "N"' \ '(-y --keep-yearly)'{-y,--keep-yearly}'[number of yearly archives to keep]: : _borg_guard_unsigned_number "N"' \ '--save-space[work slower, but using less space]' \ $common_prefix_and_glob_archives_filter_options \ $common_options \ ':: :_borg_repository' } (( $+functions[_borg-recreate] )) || _borg-recreate() { local -a common_options common_create_options __borg_setup_common_options __borg_setup_common_create_options local -a mods=( 'if-different:recompress if current compression is with a different compression algorithm (the level is not considered)' 'always:recompress even if current compression is with the same compression algorithm (use this to change the compression level)' 'never:do not recompress (use this option to explicitly prevent recompression)' ) mods=( ${${(q)mods//\\/\\\\}//:/\\:} ) _arguments -s -w -S : \ $common_create_options \ '--target=[create a new archive with the name ARCHIVE]:ARCHIVE: _borg_placeholder_or_archive "${line[1]%%\:\:*}"' \ '--recompress=[recompress data chunks according to "MODE" and "--compression"]:MODE:'"(($mods))" \ $common_options \ ':REPOSITORY_OR_ARCHIVE: _borg_repository_or_archive' \ '*: : _borg_style_selector_or_archive_files -e "$line[1]" pp' } (( 
$+functions[_borg-rename] )) || _borg-rename() { local -a common_options __borg_setup_common_options _arguments -s -w -S : \ $common_options \ ':ARCHIVE: _borg_repository_or_archive -a' \ ':NEWNAME' } (( $+functions[_borg-serve] )) || _borg-serve() { local -a common_options common_init_options __borg_setup_common_options __borg_setup_common_init_options _arguments -s -w -S : \ $common_init_options \ '*--restrict-to-path=[restrict repository access to PATH]:PATH:_files' \ '*--restrict-to-repository=[restrict repository access]: :_borg_repository' } (( $+functions[_borg-umount] )) || _borg-umount() { local -a common_options __borg_setup_common_options _arguments -s -w -S : \ $common_options \ ':MOUNTPOINT:_umountable' } (( $+functions[_borg-upgrade] )) || _borg-upgrade() { local -a common_options __borg_setup_common_options _arguments -s -w -S : \ '(-n --dry-run)'{-n,--dry-run}'[do not change repository]' \ '--inplace[rewrite repository in place, with no chance of going back to older versions of the repository]' \ '--force[force upgrade]' \ '--tam[enable manifest authentication (in key and cache)]' \ '--disable-tam[disable manifest authentication (in key and cache)]' \ $common_options \ ':: :_borg_repository' } (( $+functions[_borg-with-lock] )) || _borg-with-lock() { local -a state line common_options local curcontext="$curcontext" state_descr declare -A opt_args local -i ret=1 __borg_setup_common_options _arguments -s -w -C -S : \ $common_options \ '(-): :_borg_repository' \ '(-):COMMAND: _command_names -e' \ '(-)*:ARGS:->normal' && ret=0 case $state in (normal) shift 2 words (( CURRENT -= 2 )) _normal && ret=0 ;; esac return ret } (( $+functions[__borg_setup_common_options] )) || __borg_setup_common_options() { typeset -ga common_options=( '(- :)'{-h,--help}'[show this help message and exit]' '--critical[work on log level CRITICAL]' '--error[work on log level ERROR]' '--warning[work on log level WARNING (default)]' '(--info -v --verbose)'{--info,-v,--verbose}'[work on log level INFO]' '--debug[work on log level DEBUG]' '--debug-topic=[enable TOPIC debugging (can be specified multiple times)]:TOPIC' '(-p --progress)'{-p,--progress}'[show progress information]' '--log-json[Output one JSON object per log line instead of formatted text.]' '--lock-wait=[wait at most SECONDS for acquiring a repository/cache lock (default: 1)]: : _borg_guard_unsigned_number "SECONDS"' '--bypass-lock[bypass locking mechanism]' '(- :)--show-version[show/log the borg version]' '--show-rc[show/log the return code (rc)]' '--umask=[set umask to M (local and remote, default: 0077)]: : _borg_guard_numeric_mode "M"' '--remote-path=[set remote path to executable (default: "borg")]: :_cmdstring' '--remote-ratelimit=[set remote network upload rate limit in kiByte/s (default: 0=unlimited)]: : _borg_guard_unsigned_number "RATE"' '--consider-part-files[treat part files like normal files (e.g. 
to list/extract them)]' '--debug-profile=[write execution profile in Borg format into FILE]:FILE:_files' '--rsh=[use COMMAND instead of ssh]: :_cmdstring' ) } (( $+functions[__borg_setup_common_exclude_options] )) || __borg_setup_common_exclude_options() { typeset -ga common_exclude_options=( '*'{-e,--exclude}'=[exclude paths matching PATTERN]: : _borg_style_selector_or_archive_files "$line[1]" fm' '*--exclude-from=[read exclude patterns from EXCLUDEFILE, one per line]:EXCLUDEFILE:_files' '*--pattern=[experimental: include/exclude paths matching PATTERN]: : _borg_style_selector_or_archive_files -p "$line[1]" sh' '*--patterns-from=[experimental: read include/exclude patterns from PATTERNFILE, one per line]:PATTERNFILE:_files' ) } (( $+functions[__borg_setup_common_exclude_extract_options] )) || __borg_setup_common_exclude_extract_options() { local -a common_exclude_options __borg_setup_common_exclude_options typeset -ga common_exclude_extract_options=( $common_exclude_options '--strip-components=[Remove the specified number of leading path elements. Paths with fewer elements will be silently skipped.]: : _borg_guard_unsigned_number "NUMBER"' ) } (( $+functions[__borg_setup_common_prefix_and_glob_archives_filter_options] )) || __borg_setup_common_prefix_and_glob_archives_filter_options() { typeset -ga common_prefix_and_glob_archives_filter_options=( '(-P --prefix -a --glob-archives)'{-P,--prefix}'=[only consider archive names starting with this prefix]:PREFIX: _borg_archive -n "${line[1]%%\:\:*}"' '(-P --prefix)*'{-a,--glob-archives}'=[only consider archive names matching the glob]:GLOB: _borg_archive -n "${line[1]%%\:\:*}"' ) } (( $+functions[__borg_setup_common_archive_filters_options] )) || __borg_setup_common_archive_filters_options() { local -a common_prefix_and_glob_archives_filter_options __borg_setup_common_prefix_and_glob_archives_filter_options typeset -ga common_archive_filters_options=( $common_prefix_and_glob_archives_filter_options '--sort-by=[Comma-separated list of sorting keys, default: timestamp]:KEYS:(timestamp name id)' '(--last)--first=[consider first N archives after other filters were applied]:N: _borg_archive -n "${line[1]%%\:\:*}"' '(--first)--last=[consider last N archives after other filters were applied]:N: _borg_archive -n "${line[1]%%\:\:*}"' ) } (( $+functions[__borg_setup_common_dry_run_stats_options] )) || __borg_setup_common_dry_run_stats_options() { typeset -ga common_dry_run_stats_options=( '(-n --dry-run -s --stats)'{-n,--dry-run}'[do not change anything]' '(-n --dry-run -s --stats)'{-s,--stats}'[print statistics at end]' # NOTE: actual messages for subcommands differ in details ) } (( $+functions[__borg_setup_common_create_options] )) || __borg_setup_common_create_options() { local -a common_dry_run_stats_options common_exclude_options __borg_setup_common_dry_run_stats_options __borg_setup_common_exclude_options typeset -ga common_create_options=( $common_dry_run_stats_options '--list[output verbose list of items (files, dirs, ...)]' '--filter=[only display items with the given status characters]: :_borg_statuschars' $common_exclude_options '--exclude-caches[exclude directories that contain a CACHEDIR.TAG file]' '*--exclude-if-present=[exclude directories that are tagged by containing a filesystem object with the given NAME]:NAME:_files' '--keep-'{exclude-tags,tag-files}'[if tag objects are specified with --exclude-if-present, don'\''t omit the tag objects themselves]' '--comment=[add a comment text to the archive]:COMMENT:_borg_placeholders' 
'--timestamp=[manually specify the archive creation date/time]:TIMESTAMP:_borg_timestamp' '(-c --checkpoint-interval)'{-c,--checkpoint-interval}'=[write checkpoint every SECONDS seconds (default: 1800)]: : _borg_guard_unsigned_number "SECONDS"' '--chunker-params=[specify the chunker parameters]: :_borg_chunker_params' '(-C --compression)'{-C,--compression}'=[select compression algorithm]: :_borg_compression' ) } (( $+functions[__borg_setup_common_init_options] )) || __borg_setup_common_init_options() { local -a common_options __borg_setup_common_options typeset -ga common_init_options=( $common_options '--append-only[only allow appending to repository segment files]' '--storage-quota=[override storage quota of the repository]: :_borg_quota_suffixes' ) } (( $+functions[_borgfs] )) || _borgfs() { _borg-mount '(- :)'{-V,--version}'[show version number and exit]' } (( $+functions[_borg_parameters] )) || _borg_parameters() { local name=$1 shift local -i ret=1 local -a expl case $name in (REPO) local BORG_REPO unset BORG_REPO _borg_repository && ret=0 ;; ((|NEW_)PASSPHRASE) _message -e 'passphrase' && ret=0 ;; (DISPLAY_PASSPHRASE) _message -e 'answer to the "display the passphrase for verification" question' && ret=0 ;; (HOST_ID) _message -e 'unique ID' && ret=0 ;; (FILES_CACHE_SUFFIX) _message -e 'suffix' && ret=0 ;; (FILES_CACHE_TTL) _borg_guard_unsigned_number 'time to live (default: 20)' && ret=0 ;; (MOUNT_DATA_CACHE_ENTRIES) _borg_guard_unsigned_number 'number of cached data chunks' && ret=0 ;; (PASSCOMMAND|RSH|REMOTE_PATH) _cmdstring && ret=0 ;; (HOSTNAME_IS_UNIQUE|SHOW_SYSINFO|(UNKNOWN_UNENCRYPTED|RELOCATED)_REPO_ACCESS_IS_OK) _description values expl 'value' compadd "$expl[@]" yes no && ret=0 ;; ((CHECK|DELETE)_I_KNOW_WHAT_I_AM_DOING) _description values expl 'value' compadd "$expl[@]" YES NO && ret=0 ;; (WORKAROUNDS) _wanted workarounds expl 'workaround' _sequence -n 1 compadd - basesyncfile && ret=0 ;; (KEYS_DIR) _directories && ret=0 ;; (*) _default && ret=0 ;; esac return ret } (( $+functions[_borg_repository] )) || _borg_repository() { local -a alts opts qopts zparseopts -E -a opts S: qopts=( ${(q)opts} ) [[ -n $BORG_REPO ]] && alts+=( "default-repository: : __borg_default_repository $qopts" ) alts+=( "cached-repositories:cached repositories:_borg_cached_repositories $qopts" ) alts+=( 'directories: :_directories -r ":/ \t\n\-"' ) alts+=( 'remote-repositories: : _borg_remote_repositories' ) _alternative $alts } (( $+functions[__borg_default_repository] )) || __borg_default_repository() { local -a opts suf zparseopts -E -a opts S: (( $opts[(I)-S] )) && suf=( -S '' ) local -a default_repository=( "\:\::$BORG_REPO" ) _describe -t default-repository 'default repository' default_repository "$suf[@]" } (( $+functions[_borg_cached_repositories] )) || _borg_cached_repositories() { local -a cached_repos local sed_script='/^previous_location = / { s/// # no port was given /ssh:\/\/[^/:]+:[0-9]+/! { # lstrip the `ssh://` prefix and add a colon before the first slash s!ssh://([^:/]+)/(.*)!\1:/\2! 
} p }' local cachedir=${BORG_CACHE_DIR:-${XDG_CACHE_HOME:-${BORG_BASE_DIR:-$HOME}/.cache}/borg} cached_repos=( ${(f)"$(_call_program -p cached-repositories sed -n -E ${(q)sed_script} \ "${(q)cachedir}/*/config(#qN.om)" 2>/dev/null)"} ) if [[ $compstate[quote] != (\'|\") ]]; then # hide ~BORG_REPO and other scalars local BORG_REPO unset BORG_REPO sed_script cachedir cached_repos=( "${(@D)cached_repos}" ) fi compadd -Q "$@" -r ': \t\n\-' -a cached_repos } (( $+functions[_borg_remote_repositories] )) || _borg_remote_repositories() { local -a match mbegin mend expl alts if compset -P '(#b)ssh://[^/]##@[^/]##:([0-9]##)/'; then _remote_files -/ -- ssh -p $match[1] return fi local -i have_scheme=0 compset -P 'ssh://' && have_scheme=1 if compset -P '*:'; then (( have_scheme )) && alts+=( 'ports: : _borg_guard_unsigned_number "port"' ) alts+=( 'remote-files:remote file: _remote_files -/ -- ssh' ) _alternative $alts elif compset -P 1 '(#b)(*)@'; then local user=$match[1] _wanted -C user-at hosts expl "host for $user" \ _combination -s '[:@]' accounts users-hosts users="$user" hosts -S ':' - elif compset -S '@*'; then _wanted users expl "user" \ _combination -s '[:@]' accounts users-hosts users -q - else alts=( 'users:user:_users -S "@"' 'hosts:host:_hosts -S ":"' ) (( ! have_scheme )) && alts+=( 'prefixes:ssh:compadd -S "" ssh://' ) _alternative $alts fi } # _borg_repository_or_archive [-a] [-p] # # -a archive is mandatory. The suffix `::` will be added to the repository if possible. # -p complete placeholders (( $+functions[_borg_repository_or_archive] )) || _borg_repository_or_archive() { local -A opts zparseopts -A opts -D -E a p if compset -P 1 '*::'; then local qrepo=$IPREFIX[1,-3] local -i def_repo=0 [[ -z $qrepo && -n $BORG_REPO ]] && qrepo=${(q)BORG_REPO} && def_repo=1 if [[ -n $qrepo ]]; then if (( ! def_repo )); then case $compstate[quote] in (\') qrepo=${(qq)qrepo} ;; (\") qrepo=${(qqq)${(e)qrepo}} ;; # NOTE: currently `(e)` don't have any effect, but maybe one day zsh will stop to change the quoting method # of double quoted parameters esac fi if (( $+opts[-p] )); then _borg_placeholder_or_archive $qrepo else _borg_archive $qrepo fi else _message "not a borg repository: ${(Q)qrepo}" return 1 fi else local -a suf if ! compset -S '::*'; then if (( $+opts[-a] )) || zstyle -T ":completion:${curcontext}:repositories" repository-suffix; then suf=( -S '::' ) fi local oqrepo="$PREFIX$SUFFIX" local qrepo=$oqrepo [[ $compstate[quote] != (\'|\") ]] && qrepo=${(Q)qrepo} if __borg_is_borg_repo $qrepo; then qrepo=${oqrepo%%/} [[ -z $SUFFIX ]] && PREFIX=${PREFIX%%/} || SUFFIX=${SUFFIX%%/} compadd -S '::' -r ':/ \t\n\-' -Q -- $qrepo return fi fi _borg_repository "$suf[@]" fi } # _borg_archive [-F] [-n] [qrepo] # # -F don't apply archive filter options on the command line # -n reverse order, disable matchers and don't do menu completion/selection (( $+functions[_borg_archive] )) || _borg_archive() { local -A opts zparseopts -A opts -D -E F n local qrepo=$1 if [[ -z $qrepo ]]; then if [[ -n $BORG_REPO ]]; then qrepo=${(q)BORG_REPO} else _message 'no repository specified' return 1 fi fi local -i ret=1 _tags archives while _tags; do if _requested archives; then local -a expl disp archive_filters local -i reversed_order=1 if (( ! 
$+opts[-F] )); then local -a archive_filter_options=( -P --prefix -a --glob-archives --first --last --sort-by ) tmp local k for k in $archive_filter_options; do if [[ -n $opt_args[$k] ]]; then IFS=: read -A tmp <<<$opt_args[$k] archive_filters+=( $k=${^tmp:#} ) fi done fi if (( $+opts[-n] )); then __borg_skip_pattern_matching || return 1 disp+=( -U ) compstate[insert]='' compstate[list]='list force' reversed_order=0 fi local -a asort zstyle -a ":completion:${curcontext}:archives" archive-sort asort if (( $asort[(I)inverse] )); then (( reversed_order = ! reversed_order )) fi local -a sort_by=( --sort-by=${(M)^asort:#(timestamp|name|id)} ) # NOTE: in case of option repetition, the later one takes precedence if (( ! $+__borg_archives_need_update )); then comppostfuncs+=( __borg_unset_archives_need_update ) typeset -gHi __borg_archives_need_update=1 if (( ! $#archive_filters && ! $+opts[-n] )); then local erepo [[ -n $1 ]] && __borg_expand_path ${(Q)qrepo} erepo local -a newest_file=( $erepo/(hints|index|integrity).<1->(#qN.om[1]) ) if [[ -n $newest_file ]]; then if zmodload -F zsh/stat b:zstat 2>/dev/null; then local -a stats zstat -A stats +mtime $newest_file local -i mtime=$stats[1] if [[ $__borg_prev_repo == $erepo && __borg_prev_mtime -ge mtime && $__borg_prev_order == $reversed_order && $__borg_prev_sort_by == $sort_by ]] then __borg_archives_need_update=0 else typeset -gH __borg_prev_repo=$erepo typeset -gHi __borg_prev_mtime=mtime __borg_prev_order=reversed_order typeset -gHa __borg_prev_sort_by=( $sort_by ) fi fi fi else unset __borg_prev_{repo,mtime,order,sort_by} comppostfuncs+=( __borg_unset_archives ) fi fi if zstyle -t ":completion:${curcontext}:archives" verbose; then if (( __borg_archives_need_update || ! $+__borg_archive_names || ! $+__borg_archive_descriptions )); then __borg_archives_need_update=0 typeset -gHa __borg_archive_names=() __borg_archive_descriptions=() local fmt descfmt name desc zstyle -s ":completion:${curcontext}:archives" archive-description-format descfmt || descfmt='{archive:<36} {time} [{id}]' fmt="{barchive}{NUL}$descfmt{NUL}" _call_program -p archive-descriptions \ ${(q)__borg_command:-borg} list --format=${(q)fmt} ${(q)sort_by} $archive_filters $qrepo 2>/dev/null | while IFS= read -r -d $'\0' name && IFS= read -r -d $'\0' descr; do __borg_archive_names[1,0]=( $name ) __borg_archive_descriptions[1,0]=( "$descr" ) done (( $pipestatus[1] )) && { _message "couldn't list repository: ${(Q)qrepo}" unset __borg_prev_{repo,mtime,order,sort_by} return 1 } (( ! reversed_order )) && __borg_archive_names=( "${(@aO)__borg_archive_names}" ) && __borg_archive_descriptions=( "${(@aO)__borg_archive_descriptions}" ) fi disp+=( -ld __borg_archive_descriptions ) elif (( __borg_archives_need_update || ! $+__borg_archive_names )); then __borg_archives_need_update=0 typeset -gHa __borg_archive_names=() local fmt='{barchive}{NUL}' __borg_archive_names=( ${(@0aO)"$(_call_program -p archives \ ${(q)__borg_command:-borg} list --format=${(q)fmt} ${(q)sort_by} $archive_filters $qrepo 2>/dev/null)"} ) (( $pipestatus[1] )) && { _message "couldn't list repository: ${(Q)qrepo}" unset __borg_prev_{repo,mtime,order,sort_by} return 1 } (( ! 
reversed_order )) && __borg_archive_names=( "${(@aO)__borg_archive_names}" ) fi _all_labels archives expl 'ARCHIVE' compadd "$disp[@]" -a __borg_archive_names && ret=0 fi (( ret )) || return 0 done return 1 } (( $+functions[__borg_unset_archives] )) || __borg_unset_archives() { unset __borg_archive_names __borg_archive_descriptions } (( $+functions[__borg_unset_archives_need_update] )) || __borg_unset_archives_need_update() { unset __borg_archives_need_update } (( $+functions[__borg_is_borg_repo] )) || __borg_is_borg_repo() { local repo=$1 __borg_expand_path $repo repo if [[ -d $repo && -d $repo/data ]]; then local -a files=( $repo/(hints|index|integrity).<1->(#qN.) ) (( $#files >= 3 )) && return 0 fi return 1 } (( $+functions[__borg_expand_path] )) || __borg_expand_path() { local _path=$1 local -a match mbegin mend if [[ $_path == (#b)(\~[^/]#)(|/*) ]]; then local etilde etilde=$~match[1] 2>/dev/null _path="$etilde$match[2]" fi _path=${(e)_path//\\\\/\\\\\\\\} eval typeset -g ${2:-REPLY}=\$_path } (( $+functions[_borg_placeholder_or_archive] )) || _borg_placeholder_or_archive() { local qrepo=$1 shift _alternative \ 'placeholders: :_borg_placeholders' \ "archives: : _borg_archive ${(q)qrepo}" } (( $+functions[_borg_placeholders] )) || _borg_placeholders() { local -a placeholders=( 'hostname:The (short) hostname of the machine.' 'fqdn:The full name of the machine.' 'reverse-fqdn:The full name of the machine in reverse domain name notation.' 'now:The current local date and time, by default in ISO-8601 format. You can also supply your own format string, e.g. {now:%Y-%m-%d_%H:%M:%S}' 'utcnow:The current UTC date and time, by default in ISO-8601 format. You can also supply your own format string, e.g. {utcnow:%Y-%m-%d_%H:%M:%S}' 'user:The user name (or UID, if no name is available) of the user running borg.' 'pid:The current process ID.' 
        'borgversion:The version of borg, e.g.: 1.0.8rc1'
        'borgmajor:The version of borg, only the major version, e.g.: 1'
        'borgminor:The version of borg, only major and minor version, e.g.: 1.0'
        'borgpatch:The version of borg, only major, minor and patch version, e.g.: 1.0.8'
    )
    __borg_complete_keys _describe -t placeholders 'placeholder' placeholders '"$copts[@]"'
}

(( $+functions[_borg_format_keys] )) || _borg_format_keys() {
    local repo_or_arch=${(Q)1}
    local -a keys=( NEWLINE NL NUL SPACE TAB CR LF )
    local -a repository_keys=( archive name barchive comment bcomment id start time end hostname username )
    local -a archive_keys=( type mode uid gid user group path bpath source linktarget flags size csize dsize dcsize
        num_chunks unique_chunks mtime ctime atime isomtime isoctime isoatime blake2b blake2s md5 sha1 sha224 sha256
        sha384 sha3_224 sha3_256 sha3_384 sha3_512 sha512 shake_128 shake_256 archiveid archivename extra health )
    local akeys rkeys
    akeys='archive-keys:archive keys:compadd -a archive_keys'
    rkeys='repository-keys:repository keys:compadd -a repository_keys'
    local -a alts=( 'keys:keys:compadd -a keys' )
    if [[ $repo_or_arch == *::?* ]]; then
        alts+=( $akeys )
    elif [[ -n $repo_or_arch ]]; then
        alts+=( $rkeys )
    else
        alts+=( $rkeys $akeys )
    fi
    __borg_complete_keys _alternative -O copts ${(q)alts}
}

(( $+functions[__borg_complete_keys] )) || __borg_complete_keys() {
    compset -P '*[^A-Za-z]##'
    compset -S '[^A-Za-z]##*'
    [[ -n $ISUFFIX ]] && compstate[to_end]=''
    # NOTE: `[[ -n $ISUFFIX ]]` is a workaround for a bug that causes cursor movement to the right further than it should
    # NOTE: the _oldlist completer doesn't respect compstate[to_end]=''
    local ipref suf
    if [[ $IPREFIX[-1] != '{' ]]; then
        ipref='{'
        [[ $compstate[quote] != (\'|\") ]] && ipref='\{'
    fi
    if [[ $ISUFFIX[1] != (|\\)\} ]]; then
        suf='}'
        [[ $compstate[quote] != (\'|\") ]] && suf='\}'
    fi
    local -a copts=( -i "$ipref" -S "$suf" )
    eval "$@"
}

# _borg_style_selector_or_archive_files [-e] [-p] [-f] archive default_style_selector
#
# -e apply exclusion options on the command line
# -p complete `--pattern`
# -f complete files rather than borg paths
(( $+functions[_borg_style_selector_or_archive_files] )) || _borg_style_selector_or_archive_files() {
    local -A opts
    zparseopts -A opts -D -E e p f
    local arch=$1 default_style_selector=$2
    shift 2
    local -a match mbegin mend expl tags=( style-selectors archive-files ) ss_suf=( -S ':' -r ':' )
    (( $+opts[-f] )) && tags=( style-selectors files )
    local -i ret=1
    if (( $+opts[-p] )); then
        if ! compset -P '(#b)([RP\+\-\!])'; then
            local -a pattern_rules=( 'P:pattern style' 'R:root path' '+:include' '-:exclude' '!:exclude non-recurse' )
            _describe -t pattern-rules 'pattern rule' pattern_rules -S ''
            return
        else
            if [[ $compstate[quote] == (\'|\") ]]; then
                compset -P ' #'
            else
                compset -P '(\\ )#'
            fi
            if [[ $match[1] == 'R' ]]; then
                default_style_selector='pp'
            elif [[ $match[1] == 'P' ]]; then
                tags=( style-selectors )
                ss_suf=()
            fi
        fi
    fi
    _tags $tags
    while _tags; do
        if _requested style-selectors; then
            _all_labels style-selectors expl 'style selector' \
                __borg_style_selectors $default_style_selector "$ss_suf[@]" - && ret=0
        fi
        if _requested archive-files; then
            _all_labels archive-files expl 'PATTERN' \
                __borg_archive_files ${(k)opts} "$arch" $default_style_selector - && ret=0
        fi
        if _requested files; then
            local -a borg_paths=( ${(Q)${(e)${~@}}} )
            _all_labels files expl 'PATH' \
                __borg_pattern_files ${(k)opts} borg_paths - && ret=0
        fi
        (( ret )) || return 0
    done
    return 1
}

(( $+functions[__borg_style_selectors] )) || __borg_style_selectors() {
    local default_style_selector=$1 path_style_selector
    shift
    zstyle -s ":completion:${curcontext}:archive-files" path-style-selector path_style_selector || path_style_selector='fm'
    local -a disp
    local -A style_selectors
    __borg_setup_style_selectors
    if zstyle -T ":completion:${curcontext}:style-selectors" verbose; then
        local -a style_selector_descriptions extra
        local k v sep
        for k v in ${(kv)style_selectors}; do
            extra=()
            [[ $k == $default_style_selector ]] && extra+=( 'default' )
            [[ $k == $path_style_selector ]] && __borg_choose_path_or_pattern "" "$default_style_selector" && extra+=( 'path' )
            (( $#extra )) && v+=" (${(j:, :)extra})"
            style_selector_descriptions+=( "${${k//\\/\\\\}//:/\\:}:$v" )
        done
        zstyle -s ":completion:${curcontext}:style-selectors" list-separator sep || sep=--
        zformat -a style_selector_descriptions " $sep " $style_selector_descriptions
        disp=( -ld style_selector_descriptions )
    fi
    compadd "$disp[@]" "$@" -k style_selectors
}

(( $+functions[__borg_archive_files] )) || __borg_archive_files() {
    local -A opts
    zparseopts -A opts -D e p
    local arch=$1 default_style_selector=$2
    shift 2
    if [[ -z $arch || $arch != *::?* ]]; then
        _message 'no archive specified'
        return 1
    fi
    local -a qargs tmp disp pref match mbegin mend archive_files descs
    local -A style_selectors
    local k cword fmt descfmt style_selector path_style_selector name descr
    # take into account exclude options on the command line
    if (( $+opts[-e] )); then
        local -a exclude_options=( -e --exclude --exclude-from --pattern --patterns-from )
        local -a excludes
        for k in $exclude_options; do
            if [[ -n $opt_args[$k] ]]; then
                IFS=: read -A tmp <<<$opt_args[$k]
                excludes+=( $k="${^tmp[@]}" )
            fi
        done
        [[ -n $excludes ]] && qargs+=( "$excludes[@]" )
    fi
    (( $_matcher_num > 1 )) && return 1
    __borg_skip_pattern_matching || return 1
    cword="$PREFIX$SUFFIX"
    [[ $compstate[quote] != (\'|\") ]] && cword=${(Q)cword}
    [[ -z $cword ]] && return 1
    if zstyle -t ":completion:${curcontext}:archive-files" verbose; then
        zstyle -s ":completion:${curcontext}:archive-files" file-description-format descfmt || descfmt='{mode} {user:6} {group:6} {size:8d} {mtime} {path}{extra}'
        fmt="{bpath}{NUL}$descfmt{NUL}"
    else
        fmt='{bpath}{NUL}'
    fi
    qargs+=( --format=${(q)fmt} )
    qargs+=( $arch )
    __borg_setup_style_selectors
    [[ $cword == (#b)(${~${(j:|:)${(kb)style_selectors}}}):* ]] && style_selector=$match[1]
    local -i path_expected=0
    __borg_choose_path_or_pattern "$style_selector" $default_style_selector $cword && path_expected=1
    if [[ -n $cword
]]; then if (( path_expected )); then [[ -n $style_selector ]] && compset -P "$style_selector:" && pref=( -P "$style_selector:" ) cword="$PREFIX$SUFFIX" [[ $compstate[quote] != (\'|\") ]] && cword=${(Q)cword} zstyle -s ":completion:${curcontext}:archive-files" path-style-selector path_style_selector || path_style_selector='fm' cword="$path_style_selector:$cword" else [[ -z $style_selector ]] && cword="$default_style_selector:$cword" fi qargs+=( ${(q)cword} ) fi if zstyle -t ":completion:${curcontext}:archive-files" verbose; then _call_program -p archive-file-descriptions ${(q)__borg_command:-borg} list $qargs 2>/dev/null | while IFS= read -r -d $'\0' name && IFS= read -r -d $'\0' descr; do archive_files+=( $name ) descs+=( $descr ) done (( $pipestatus[1] )) && { _message "couldn't list archive: ${(Q)arch}"; return 1 } disp=( -ld descs ) else archive_files=( ${(0)"$(_call_program -p archive-files ${(q)__borg_command:-borg} list $qargs 2>/dev/null)"} ) (( $pipestatus[1] )) && { _message "couldn't list archive: ${(Q)arch}"; return 1 } fi if (( $#archive_files )); then if (( path_expected )); then compstate[insert]='automenu' else compstate[insert]='' compstate[list]='list force' fi fi compadd "$pref[@]" -U "$disp[@]" "$@" -a archive_files } (( $+functions[__borg_choose_path_or_pattern] )) || __borg_choose_path_or_pattern() { local ss=$1 defss=$2 cword=$3 shift 2 [[ $ss == (pp|pf) || ( -z $ss && $defss == (pp|pf) ) ]] } # transform borg exclude patterns into zsh ignore patterns and then complete files (( $+functions[__borg_pattern_files] )) || __borg_pattern_files() { local -A opts zparseopts -A opts -D -E e p f local paths_varname=$1 shift local -a args local -A style_selectors __borg_setup_style_selectors local pr_pat='[RP\+\-\!]' ss_pat="(${(j:|:)${(@kb)style_selectors}}):" local prs_pat="$pr_pat #" if (( $+opts[-e] )); then local -a borg_excludes exclude_options=( -e --exclude --pattern ) tmp local k cword local -i i for k in $exclude_options; do if [[ -n $opt_args[$k] ]]; then IFS=: read -A tmp <<<$opt_args[$k] tmp=( ${(Q)tmp} ) # lstrip style selectors and pattern rules [[ $+opts[-p] -gt 0 || $k == --pattern ]] && tmp=( ${tmp#$~prs_pat} ) tmp=( ${tmp#$~ss_pat} ) # don't take into account the word under the cursor cword="$PREFIX$SUFFIX" [[ $compstate[quote] != (\'|\") ]] && cword=${(Q)cword} [[ $+opts[-p] -gt 0 || $k == --pattern ]] && cword=${cword#$~prs_pat} cword=${cword#$~ss_pat} i=$tmp[(I)$cword] (( i )) && tmp=( "${(@)tmp[1,i-1]}" "${(@)tmp[i+1,-1]}" ) borg_excludes+=( "$tmp[@]" ) fi done [[ -n $borg_excludes ]] && args+=( -F borg_excludes ) fi [[ -n ${(P)paths_varname} ]] && args+=( -W $paths_varname ) args+=( "$@" ) # lstrip style selectors and pattern rules if (( $+opts[-p] )); then if [[ $compstate[quote] != (\'|\") ]]; then compset -P $pr_pat compset -P '(\\ )#' else compset -P $prs_pat fi fi compset -P $ss_pat compstate[insert]='' compstate[list]='list force' _path_files "$args[@]" } (( $+functions[__borg_setup_style_selectors] )) || __borg_setup_style_selectors() { typeset -gA style_selectors=( fm 'Fnmatch' sh 'Shell-style patterns' re 'Regular expressions' pp 'Path prefix' pf 'Path full-match' ) } (( $+functions[__borg_skip_pattern_matching] )) || __borg_skip_pattern_matching() { # unset glob_complete [[ $compstate[pattern_match] == '*' ]] && compstate[pattern_match]='' # skip the _match completer [[ -n $compstate[pattern_match] ]] && return 1 return 0 } (( $+functions[_borg_config] )) || _borg_config() { local qrepo=$1 shift if (( ! 
$+__borg_config_sect )); then
        comppostfuncs+=( __borg_unset_config )
        typeset -gH __borg_config_sect=
        typeset -gHa __borg_config_keys=()
        local sect line
        local -a match mbegin mend
        _call_program -p keys ${(q)__borg_command:-borg} config --list $qrepo 2>/dev/null | {
            IFS= read -r sect
            sect=${${sect#\[}%\]}
            __borg_config_sect=$sect
            while IFS= read -r line && [[ $line == (#b)(*)\ =\ (*) ]]; do
                __borg_config_keys+=( "${${match[1]//\\/\\\\}//:/\\:}:(current: $match[2])" )
            done
        }
    fi
    local -a alts=( 'keys:key: _describe -t keys "key" __borg_config_keys' )
    compset -P "${__borg_config_sect}." || alts+=( 'sections:section:compadd -S "." $__borg_config_sect' )
    _alternative $alts
}

(( $+functions[__borg_unset_config] )) || __borg_unset_config() {
    unset __borg_config_sect __borg_config_keys
}

# A simple prefix-oriented completion function for compressors. Can be improved by supporting the suffix.
(( $+functions[_borg_compression] )) || _borg_compression() {
    local -a nolvl=( 'none:do not compress' 'lz4:very high speed, very low compression' )
    local -a havelvl=( 'zstd:("zstandard")' 'zlib:("gz") medium speed, medium compression' 'lzma:("xz") low speed, high compression' )
    local -a auto=( 'auto:compress compressible, otherwise "none"' )
    local -a match mbegin mend
    # NOTE: Zsh's `-prefix` condition is confused by the leading parenthesis in the pattern.
    # Fortunately, we simply need to show a message.
    if compset -P '(#b)(|auto,)(zstd|zlib|lzma),'; then
        local -i from to def
        case $match[2] in
            (zstd) from=1 to=22 def=3 ;;
            (zlib|lzma) from=0 to=9 def=6 ;;
        esac
        _message -e "compression level (from $from to $to, default: $def)"
    elif compset -P 'auto,'; then
        _describe -t compression 'compression' nolvl -- havelvl -qS,
    else
        _describe -t compression 'compression' nolvl -- havelvl -qS, -- auto -S,
    fi
}

(( $+functions[_borg_chunker_params] )) || _borg_chunker_params() {
    if compset -P '*,*,*,'; then
        _message -e 'HASH_WINDOW_SIZE'
    elif compset -P '*,*,'; then
        _message -e 'HASH_MASK_BITS (target chunk size ~= 2^HASH_MASK_BITS B)'
    elif compset -P '*,'; then
        _message -e 'CHUNK_MAX_EXP (maximum chunk size = 2^CHUNK_MAX_EXP B)'
    else
        _message -e 'CHUNK_MIN_EXP (minimum chunk size = 2^CHUNK_MIN_EXP B)'
        local -a params=(
            'default:19,23,21,4095'
            '19,23,21,4095:small amount of chunks (default)'
            '10,23,16,4095:big amount of chunks'
        )
        _describe -t chunker-params 'typical chunker params' params
    fi
}

(( $+functions[_borg_statuschars] )) || _borg_statuschars() {
    _values -s '' 'STATUSCHARS' \
        'A[regular file, added]' \
        'M[regular file, modified]' \
        'U[regular file, unchanged]' \
        'E[regular file, an error happened while accessing/reading this file]' \
        'd[directory]' \
        'b[block device]' \
        'c[char device]' \
        'h[regular file, hardlink (to already seen inodes)]' \
        's[symlink]' \
        'f[fifo]' \
        'i[backup data was read from standard input (stdin)]' \
        '-[dry run, item was not backed up]' \
        'x[excluded, item was not backed up]' \
        '?[missing status code]'
}

(( $+functions[_borg_quota_suffixes] )) || _borg_quota_suffixes() {
    if compset -P '[0-9]##'; then
        local -a suffixes=( 'K:10 ** 3 bytes' 'M:10 ** 6 bytes' 'G:10 ** 9 bytes' 'T:10 ** 12 bytes' 'P:10 ** 15 bytes' )
        # NOTE: tag `suffixes` is already in use (file extensions)
        _describe -t multiplier 'suffix' suffixes
    else
        _message -e 'QUOTA'
    fi
}

(( $+functions[_borg_timestamp] )) || _borg_timestamp() {
    _alternative \
        "dates:TIMESTAMP: _dates -f '%FT%T'" \
        'files:reference:_files'
}

(( $+functions[_borg_guard_unsigned_number] )) || _borg_guard_unsigned_number() {
    local -A opts
    zparseopts -K -D -A opts
M+: J+: V+: 1 2 o+: n F: x+: X+: _guard '[0-9]#' ${1:-number} } (( $+functions[_borg_guard_numeric_mode] )) || _borg_guard_numeric_mode() { local -A opts zparseopts -K -D -A opts M+: J+: V+: 1 2 o+: n F: x+: X+: _guard '[0-7](#c0,4)' ${1:-mode} } _borg() { local -a match mbegin mend line state local curcontext="$curcontext" state_descr typeset -A opt_args local -i ret=1 if [[ $service == 'borg' ]]; then local __borg_command=$words[1] local -a common_options __borg_setup_common_options _arguments -s -w -C : \ '(- :)'{-V,--version}'[show version number and exit]' \ $common_options \ '(-): :->command' \ '(-)*:: :->option-or-argument' && return case $state in (command) _borg_commands && ret=0 ;; (option-or-argument) curcontext="${curcontext%:*:*}:borg-$words[1]:" if ! _call_function ret _borg-$words[1]; then _default && ret=0 fi ;; esac elif [[ $service == (#b)-value-,BORG_(*),-default- ]]; then _borg_parameters $match[1] && ret=0 elif ! _call_function ret _$service; then _default && ret=0 fi return ret } _borg "$@" borgbackup-1.1.15/scripts/shell_completions/fish/0000755000175000017500000000000013771325773022056 5ustar useruser00000000000000borgbackup-1.1.15/scripts/shell_completions/fish/borg.fish0000644000175000017500000007764513771325506023677 0ustar useruser00000000000000# Completions for borg # https://www.borgbackup.org/ # Note: # Listing archives works on password protected repositories only if $BORG_PASSPHRASE is set. # Install: # Copy this file to /usr/share/fish/vendor_completions.d/ # Commands complete -c borg -f -n __fish_is_first_token -a 'init' -d 'Initialize an empty repository' complete -c borg -f -n __fish_is_first_token -a 'create' -d 'Create new archive' complete -c borg -f -n __fish_is_first_token -a 'extract' -d 'Extract archive contents' complete -c borg -f -n __fish_is_first_token -a 'check' -d 'Check repository consistency' complete -c borg -f -n __fish_is_first_token -a 'rename' -d 'Rename an existing archive' complete -c borg -f -n __fish_is_first_token -a 'list' -d 'List archive or repository contents' complete -c borg -f -n __fish_is_first_token -a 'diff' -d 'Find differences between archives' complete -c borg -f -n __fish_is_first_token -a 'delete' -d 'Delete a repository or archive' complete -c borg -f -n __fish_is_first_token -a 'prune' -d 'Prune repository archives' complete -c borg -f -n __fish_is_first_token -a 'info' -d 'Show archive details' complete -c borg -f -n __fish_is_first_token -a 'mount' -d 'Mount archive or a repository' complete -c borg -f -n __fish_is_first_token -a 'umount' -d 'Un-mount the mounted archive' function __fish_borg_seen_key if __fish_seen_subcommand_from key and not __fish_seen_subcommand_from import export change-passphrase return 0 end return 1 end complete -c borg -f -n __fish_is_first_token -a 'key' -d 'Manage a repository key' complete -c borg -f -n __fish_borg_seen_key -a 'import' -d 'Import a repository key' complete -c borg -f -n __fish_borg_seen_key -a 'export' -d 'Export a repository key' complete -c borg -f -n __fish_borg_seen_key -a 'change-passphrase' -d 'Change key file passphrase' complete -c borg -f -n __fish_is_first_token -a 'serve' -d 'Start in server mode' complete -c borg -f -n __fish_is_first_token -a 'upgrade' -d 'Upgrade a repository' complete -c borg -f -n __fish_is_first_token -a 'recreate' -d 'Recreate contents of existing archives' complete -c borg -f -n __fish_is_first_token -a 'export-tar' -d 'Create tarball from an archive' complete -c borg -f -n __fish_is_first_token -a 'with-lock' -d 'Run a command 
while repository lock held' complete -c borg -f -n __fish_is_first_token -a 'break-lock' -d 'Break the repository lock' complete -c borg -f -n __fish_is_first_token -a 'config' -d 'Get/set options in repo/cache config' function __fish_borg_seen_benchmark if __fish_seen_subcommand_from benchmark and not __fish_seen_subcommand_from crud return 0 end return 1 end complete -c borg -f -n __fish_is_first_token -a 'benchmark' -d 'Benchmark borg operations' complete -c borg -f -n __fish_borg_seen_benchmark -a 'crud' -d 'Benchmark borg CRUD operations' function __fish_borg_seen_help if __fish_seen_subcommand_from help and not __fish_seen_subcommand_from patterns placeholders compression return 0 end return 1 end complete -c borg -f -n __fish_is_first_token -a 'help' -d 'Miscellaneous Help' complete -c borg -f -n __fish_borg_seen_help -a 'patterns' -d 'Help for patterns' complete -c borg -f -n __fish_borg_seen_help -a 'placeholders' -d 'Help for placeholders' complete -c borg -f -n __fish_borg_seen_help -a 'compression' -d 'Help for compression' # Common options complete -c borg -f -s h -l 'help' -d 'Show help information' complete -c borg -f -l 'version' -d 'Show version information' complete -c borg -f -l 'critical' -d 'Log level CRITICAL' complete -c borg -f -l 'error' -d 'Log level ERROR' complete -c borg -f -l 'warning' -d 'Log level WARNING (default)' complete -c borg -f -l 'info' -d 'Log level INFO' complete -c borg -f -s v -l 'verbose' -d 'Log level INFO' complete -c borg -f -l 'debug' -d 'Log level DEBUG' complete -c borg -f -l 'debug-topic' -d 'Enable TOPIC debugging' complete -c borg -f -s p -l 'progress' -d 'Show progress information' complete -c borg -f -l 'log-json' -d 'Output one JSON object per log line' complete -c borg -f -l 'lock-wait' -d 'Wait for lock max N seconds [1]' complete -c borg -f -l 'show-version' -d 'Log version information' complete -c borg -f -l 'show-rc' -d 'Log the return code' complete -c borg -f -l 'umask' -d 'Set umask to M [0077]' complete -c borg -l 'remote-path' -d 'Use PATH as remote borg executable' complete -c borg -f -l 'remote-ratelimit' -d 'Set remote network upload RATE limit' complete -c borg -f -l 'consider-part-files' -d 'Treat part files like normal files' complete -c borg -l 'debug-profile' -d 'Write execution profile into FILE' complete -c borg -l 'rsh' -d 'Use COMMAND instead of ssh' # borg init options set -l encryption_modes "none keyfile keyfile-blake2 repokey repokey-blake2 authenticated authenticated-blake2" complete -c borg -f -s e -l 'encryption' -d 'Encryption key MODE' -a "$encryption_modes" -n "__fish_seen_subcommand_from init" complete -c borg -f -l 'append-only' -d 'Create an append-only mode repository' -n "__fish_seen_subcommand_from init" complete -c borg -f -l 'storage-quota' -d 'Set storage QUOTA of the repository' -n "__fish_seen_subcommand_from init" complete -c borg -f -l 'make-parent-dirs' -d 'Create parent directories' -n "__fish_seen_subcommand_from init" # borg create options complete -c borg -f -s n -l 'dry-run' -d 'Do not change the repository' -n "__fish_seen_subcommand_from create" complete -c borg -f -s s -l 'stats' -d 'Print verbose statistics' -n "__fish_seen_subcommand_from create" complete -c borg -f -l 'list' -d 'Print verbose list of items' -n "__fish_seen_subcommand_from create" complete -c borg -f -l 'filter' -d 'Only items with given STATUSCHARS' -n "__fish_seen_subcommand_from create" complete -c borg -f -l 'json' -d 'Print verbose stats as json' -n "__fish_seen_subcommand_from create" complete -c borg -f 
-l 'no-cache-sync' -d 'Do not synchronize the cache' -n "__fish_seen_subcommand_from create"
complete -c borg -f -l 'stdin-name' -d 'Use NAME in archive for stdin data' -n "__fish_seen_subcommand_from create"
complete -c borg -f -l 'stdin-user' -d 'Set user USER in archive for stdin data [root]' -n "__fish_seen_subcommand_from create"
complete -c borg -f -l 'stdin-group' -d 'Set group GROUP in archive for stdin data [root]' -n "__fish_seen_subcommand_from create"
complete -c borg -f -l 'stdin-mode' -d 'Set mode to M in archive for stdin data [0660]' -n "__fish_seen_subcommand_from create"

# Exclusion options
complete -c borg -s e -l 'exclude' -d 'Exclude paths matching PATTERN' -n "__fish_seen_subcommand_from create"
complete -c borg -l 'exclude-from' -d 'Read exclude patterns from EXCLUDEFILE' -n "__fish_seen_subcommand_from create"
complete -c borg -f -l 'pattern' -d 'Include/exclude paths matching PATTERN' -n "__fish_seen_subcommand_from create"
complete -c borg -l 'patterns-from' -d 'Include/exclude paths from PATTERNFILE' -n "__fish_seen_subcommand_from create"
complete -c borg -f -l 'exclude-caches' -d 'Exclude directories tagged as cache' -n "__fish_seen_subcommand_from create"
complete -c borg -l 'exclude-if-present' -d 'Exclude directories that contain FILENAME' -n "__fish_seen_subcommand_from create"
complete -c borg -f -l 'keep-exclude-tags' -d 'Keep tag files of excluded directories' -n "__fish_seen_subcommand_from create"
complete -c borg -f -l 'keep-tag-files' -d 'Keep tag files of excluded directories' -n "__fish_seen_subcommand_from create"
complete -c borg -f -l 'exclude-nodump' -d 'Exclude files flagged NODUMP' -n "__fish_seen_subcommand_from create"

# Filesystem options
complete -c borg -f -s x -l 'one-file-system' -d 'Stay in the same file system' -n "__fish_seen_subcommand_from create"
complete -c borg -f -l 'numeric-owner' -d 'Only store numeric user:group identifiers' -n "__fish_seen_subcommand_from create"
complete -c borg -f -l 'noatime' -d 'Do not store atime' -n "__fish_seen_subcommand_from create"
complete -c borg -f -l 'noctime' -d 'Do not store ctime' -n "__fish_seen_subcommand_from create"
complete -c borg -f -l 'nobirthtime' -d 'Do not store creation date' -n "__fish_seen_subcommand_from create"
complete -c borg -f -l 'nobsdflags' -d 'Do not store bsdflags' -n "__fish_seen_subcommand_from create"
complete -c borg -f -l 'ignore-inode' -d 'Ignore inode data in file metadata cache' -n "__fish_seen_subcommand_from create"
set -l files_cache_mode "ctime,size,inode mtime,size,inode ctime,size mtime,size rechunk,ctime rechunk,mtime disabled"
complete -c borg -f -l 'files-cache' -d 'Operate files cache in MODE' -a "$files_cache_mode" -n "__fish_seen_subcommand_from create"
complete -c borg -f -l 'read-special' -d 'Open device files like regular files' -n "__fish_seen_subcommand_from create"

# Archive options
complete -c borg -f -l 'comment' -d 'Add COMMENT to the archive' -n "__fish_seen_subcommand_from create"
complete -c borg -f -l 'timestamp' -d 'Set creation TIME (yyyy-mm-ddThh:mm:ss)' -n "__fish_seen_subcommand_from create"
complete -c borg -l 'timestamp' -d 'Set creation time by reference FILE' -n "__fish_seen_subcommand_from create"
complete -c borg -f -s c -l 'checkpoint-interval' -d 'Write checkpoint every N seconds [1800]' -n "__fish_seen_subcommand_from create"
complete -c borg -f -l 'chunker-params' -d 'Chunker PARAMETERS [19,23,21,4095]' -n "__fish_seen_subcommand_from create"
set -l compression_methods "none auto lz4 zstd,1 zstd,2 zstd,3 zstd,4 zstd,5
zstd,6 zstd,7 zstd,8 zstd,9 zstd,10 zstd,11 zstd,12 zstd,13 zstd,14 zstd,15 zstd,16 zstd,17 zstd,18 zstd,19 zstd,20 zstd,21 zstd,22 zlib,1 zlib,2 zlib,3 zlib,4 zlib,5 zlib,6 zlib,7 zlib,8 zlib,9 lzma,0 lzma,1 lzma,2 lzma,3 lzma,4 lzma,5 lzma,6 lzma,7 lzma,8 lzma,9" complete -c borg -f -s C -l 'compression' -d 'Select compression ALGORITHM,LEVEL [lz4]' -a "$compression_methods" -n "__fish_seen_subcommand_from create" # borg extract options complete -c borg -f -l 'list' -d 'Print verbose list of items' -n "__fish_seen_subcommand_from extract" complete -c borg -f -s n -l 'dry-run' -d 'Do not actually extract any files' -n "__fish_seen_subcommand_from extract" complete -c borg -f -l 'numeric-owner' -d 'Only obey numeric user:group identifiers' -n "__fish_seen_subcommand_from extract" complete -c borg -f -l 'nobsdflags' -d 'Do not extract/set bsdflags' -n "__fish_seen_subcommand_from extract" complete -c borg -f -l 'stdout' -d 'Write all extracted data to stdout' -n "__fish_seen_subcommand_from extract" complete -c borg -f -l 'sparse' -d 'Create holes in output sparse file' -n "__fish_seen_subcommand_from extract" # Exclusion options complete -c borg -s e -l 'exclude' -d 'Exclude paths matching PATTERN' -n "__fish_seen_subcommand_from extract" complete -c borg -l 'exclude-from' -d 'Read exclude patterns from EXCLUDEFILE' -n "__fish_seen_subcommand_from extract" complete -c borg -l 'pattern' -d 'Include/exclude paths matching PATTERN' -n "__fish_seen_subcommand_from extract" complete -c borg -l 'patterns-from' -d 'Include/exclude paths from PATTERNFILE' -n "__fish_seen_subcommand_from extract" complete -c borg -f -l 'strip-components' -d 'Remove NUMBER of leading path elements' -n "__fish_seen_subcommand_from extract" # borg check options complete -c borg -f -l 'repository-only' -d 'Only perform repository checks' -n "__fish_seen_subcommand_from check" complete -c borg -f -l 'archives-only' -d 'Only perform archives checks' -n "__fish_seen_subcommand_from check" complete -c borg -f -l 'verify-data' -d 'Cryptographic integrity verification' -n "__fish_seen_subcommand_from check" complete -c borg -f -l 'repair' -d 'Attempt to repair found inconsistencies' -n "__fish_seen_subcommand_from check" complete -c borg -f -l 'save-space' -d 'Work slower but using less space' -n "__fish_seen_subcommand_from check" # Archive filters complete -c borg -f -s P -l 'prefix' -d 'Only archive names starting with PREFIX' -n "__fish_seen_subcommand_from check" complete -c borg -f -s a -l 'glob-archives' -d 'Only archive names matching GLOB' -n "__fish_seen_subcommand_from check" set -l sort_keys "timestamp name id" complete -c borg -f -l 'sort-by' -d 'Sorting KEYS [timestamp]' -a "$sort_keys" -n "__fish_seen_subcommand_from check" complete -c borg -f -l 'first' -d 'Only first N archives' -n "__fish_seen_subcommand_from check" complete -c borg -f -l 'last' -d 'Only last N archives' -n "__fish_seen_subcommand_from check" # borg rename # no specific options # borg list options complete -c borg -f -l 'short' -d 'Only print file/directory names' -n "__fish_seen_subcommand_from list" complete -c borg -f -l 'list-format' -d 'Specify FORMAT for file listing' -n "__fish_seen_subcommand_from list" complete -c borg -f -l 'format' -d 'Specify FORMAT for file listing' -n "__fish_seen_subcommand_from list" complete -c borg -f -l 'json' -d 'List contents in json format' -n "__fish_seen_subcommand_from list" complete -c borg -f -l 'json-lines' -d 'List contents in json lines format' -n "__fish_seen_subcommand_from list" # Archive 
filters complete -c borg -f -s P -l 'prefix' -d 'Only archive names starting with PREFIX' -n "__fish_seen_subcommand_from list" complete -c borg -f -s a -l 'glob-archives' -d 'Only archive names matching GLOB' -n "__fish_seen_subcommand_from list" complete -c borg -f -l 'sort-by' -d 'Sorting KEYS [timestamp]' -a "$sort_keys" -n "__fish_seen_subcommand_from list" complete -c borg -f -l 'first' -d 'Only first N archives' -n "__fish_seen_subcommand_from list" complete -c borg -f -l 'last' -d 'Only last N archives' -n "__fish_seen_subcommand_from list" # Exclusion options complete -c borg -s e -l 'exclude' -d 'Exclude paths matching PATTERN' -n "__fish_seen_subcommand_from list" complete -c borg -l 'exclude-from' -d 'Read exclude patterns from EXCLUDEFILE' -n "__fish_seen_subcommand_from list" complete -c borg -f -l 'pattern' -d 'Include/exclude paths matching PATTERN' -n "__fish_seen_subcommand_from list" complete -c borg -l 'patterns-from' -d 'Include/exclude paths from PATTERNFILE' -n "__fish_seen_subcommand_from list" # borg diff options complete -c borg -f -l 'numeric-owner' -d 'Only consider numeric user:group' -n "__fish_seen_subcommand_from diff" complete -c borg -f -l 'same-chunker-params' -d 'Override check of chunker parameters' -n "__fish_seen_subcommand_from diff" complete -c borg -f -l 'sort' -d 'Sort the output lines by file path' -n "__fish_seen_subcommand_from diff" # Exclusion options complete -c borg -s e -l 'exclude' -d 'Exclude paths matching PATTERN' -n "__fish_seen_subcommand_from diff" complete -c borg -l 'exclude-from' -d 'Read exclude patterns from EXCLUDEFILE' -n "__fish_seen_subcommand_from diff" complete -c borg -f -l 'pattern' -d 'Include/exclude paths matching PATTERN' -n "__fish_seen_subcommand_from diff" complete -c borg -l 'patterns-from' -d 'Include/exclude paths from PATTERNFILE' -n "__fish_seen_subcommand_from diff" # borg delete options complete -c borg -f -s n -l 'dry-run' -d 'Do not change the repository' -n "__fish_seen_subcommand_from delete" complete -c borg -f -s s -l 'stats' -d 'Print verbose statistics' -n "__fish_seen_subcommand_from delete" complete -c borg -f -l 'cache-only' -d "Delete only the local cache" -n "__fish_seen_subcommand_from delete" complete -c borg -f -l 'force' -d 'Force deletion of corrupted archives' -n "__fish_seen_subcommand_from delete" complete -c borg -f -l 'save-space' -d 'Work slower but using less space' -n "__fish_seen_subcommand_from delete" # Archive filters complete -c borg -f -s P -l 'prefix' -d 'Only archive names starting with PREFIX' -n "__fish_seen_subcommand_from delete" complete -c borg -f -s a -l 'glob-archives' -d 'Only archive names matching GLOB' -n "__fish_seen_subcommand_from delete" complete -c borg -f -l 'sort-by' -d 'Sorting KEYS [timestamp]' -a "$sort_keys" -n "__fish_seen_subcommand_from delete" complete -c borg -f -l 'first' -d 'Only first N archives' -n "__fish_seen_subcommand_from delete" complete -c borg -f -l 'last' -d 'Only last N archives' -n "__fish_seen_subcommand_from delete" # borg prune options complete -c borg -f -s n -l 'dry-run' -d 'Do not change the repository' -n "__fish_seen_subcommand_from prune" complete -c borg -f -l 'force' -d 'Force pruning of corrupted archives' -n "__fish_seen_subcommand_from prune" complete -c borg -f -s s -l 'stats' -d 'Print verbose statistics' -n "__fish_seen_subcommand_from prune" complete -c borg -f -l 'list' -d 'Print verbose list of items' -n "__fish_seen_subcommand_from prune" complete -c borg -f -l 'keep-within' -d 'Keep archives within time 
INTERVAL' -n "__fish_seen_subcommand_from prune" complete -c borg -f -l 'keep-last' -d 'NUMBER of secondly archives to keep' -n "__fish_seen_subcommand_from prune" complete -c borg -f -l 'keep-secondly' -d 'NUMBER of secondly archives to keep' -n "__fish_seen_subcommand_from prune" complete -c borg -f -l 'keep-minutely' -d 'NUMBER of minutely archives to keep' -n "__fish_seen_subcommand_from prune" complete -c borg -f -s H -l 'keep-hourly' -d 'NUMBER of hourly archives to keep' -n "__fish_seen_subcommand_from prune" complete -c borg -f -s d -l 'keep-daily' -d 'NUMBER of daily archives to keep' -n "__fish_seen_subcommand_from prune" complete -c borg -f -s w -l 'keep-weekly' -d 'NUMBER of weekly archives to keep' -n "__fish_seen_subcommand_from prune" complete -c borg -f -s m -l 'keep-monthly' -d 'NUMBER of monthly archives to keep' -n "__fish_seen_subcommand_from prune" complete -c borg -f -s y -l 'keep-yearly' -d 'NUMBER of yearly archives to keep' -n "__fish_seen_subcommand_from prune" complete -c borg -f -l 'save-space' -d 'Work slower but using less space' -n "__fish_seen_subcommand_from prune" # Archive filters complete -c borg -f -s P -l 'prefix' -d 'Only archive names starting with PREFIX' -n "__fish_seen_subcommand_from prune" complete -c borg -f -s a -l 'glob-archives' -d 'Only archive names matching GLOB' -n "__fish_seen_subcommand_from prune" # borg info options complete -c borg -f -l 'json' -d 'Format output in json format' -n "__fish_seen_subcommand_from info" # Archive filters complete -c borg -f -s P -l 'prefix' -d 'Only archive names starting with PREFIX' -n "__fish_seen_subcommand_from info" complete -c borg -f -s a -l 'glob-archives' -d 'Only archive names matching GLOB' -n "__fish_seen_subcommand_from info" complete -c borg -f -l 'sort-by' -d 'Sorting KEYS [timestamp]' -a "$sort_keys" -n "__fish_seen_subcommand_from info" complete -c borg -f -l 'first' -d 'Only first N archives' -n "__fish_seen_subcommand_from info" complete -c borg -f -l 'last' -d 'Only last N archives' -n "__fish_seen_subcommand_from info" # borg mount options complete -c borg -f -s f -l 'foreground' -d 'Stay in foreground, do not daemonize' -n "__fish_seen_subcommand_from mount" # FIXME This list is probably not full, but I tried to pick only those that are relevant to borg mount -o: set -l fuse_options "ac_attr_timeout= allow_damaged_files allow_other allow_root attr_timeout= auto auto_cache auto_unmount default_permissions entry_timeout= gid= group_id= kernel_cache max_read= negative_timeout= noauto noforget remember= remount rootmode= uid= umask= user user_id= versions" complete -c borg -f -s o -d 'Fuse mount OPTION' -a "$fuse_options" -n "__fish_seen_subcommand_from mount" # Archive filters complete -c borg -f -s P -l 'prefix' -d 'Only archive names starting with PREFIX' -n "__fish_seen_subcommand_from mount" complete -c borg -f -s a -l 'glob-archives' -d 'Only archive names matching GLOB' -n "__fish_seen_subcommand_from mount" complete -c borg -f -l 'sort-by' -d 'Sorting KEYS [timestamp]' -a "$sort_keys" -n "__fish_seen_subcommand_from mount" complete -c borg -f -l 'first' -d 'Only first N archives' -n "__fish_seen_subcommand_from mount" complete -c borg -f -l 'last' -d 'Only last N archives' -n "__fish_seen_subcommand_from mount" # Exclusion options complete -c borg -s e -l 'exclude' -d 'Exclude paths matching PATTERN' -n "__fish_seen_subcommand_from mount" complete -c borg -l 'exclude-from' -d 'Read exclude patterns from EXCLUDEFILE' -n "__fish_seen_subcommand_from mount" complete -c borg -f -l 
'pattern' -d 'Include/exclude paths matching PATTERN' -n "__fish_seen_subcommand_from mount" complete -c borg -l 'patterns-from' -d 'Include/exclude paths from PATTERNFILE' -n "__fish_seen_subcommand_from mount" complete -c borg -f -l 'strip-components' -d 'Remove NUMBER of leading path elements' -n "__fish_seen_subcommand_from mount" # borg umount # no specific options # borg key change-passphrase # no specific options # borg key export complete -c borg -f -l 'paper' -d 'Create an export for printing' -n "__fish_seen_subcommand_from export" complete -c borg -f -l 'qr-html' -d 'Create an html file for printing and qr' -n "__fish_seen_subcommand_from export" # borg key import complete -c borg -f -l 'paper' -d 'Import from a backup done with --paper' -n "__fish_seen_subcommand_from import" # borg upgrade complete -c borg -f -s n -l 'dry-run' -d 'Do not change the repository' -n "__fish_seen_subcommand_from upgrade" complete -c borg -f -l 'inplace' -d 'Rewrite repository in place' -n "__fish_seen_subcommand_from upgrade" complete -c borg -f -l 'force' -d 'Force upgrade' -n "__fish_seen_subcommand_from upgrade" complete -c borg -f -l 'tam' -d 'Enable manifest authentication' -n "__fish_seen_subcommand_from upgrade" complete -c borg -f -l 'disable-tam' -d 'Disable manifest authentication' -n "__fish_seen_subcommand_from upgrade" # borg recreate complete -c borg -f -l 'list' -d 'Print verbose list of items' -n "__fish_seen_subcommand_from recreate" complete -c borg -f -l 'filter' -d 'Only items with given STATUSCHARS' -n "__fish_seen_subcommand_from recreate" complete -c borg -f -s n -l 'dry-run' -d 'Do not change the repository' -n "__fish_seen_subcommand_from recreate" complete -c borg -f -s s -l 'stats' -d 'Print verbose statistics' -n "__fish_seen_subcommand_from recreate" # Exclusion options complete -c borg -s e -l 'exclude' -d 'Exclude paths matching PATTERN' -n "__fish_seen_subcommand_from recreate" complete -c borg -l 'exclude-from' -d 'Read exclude patterns from EXCLUDEFILE' -n "__fish_seen_subcommand_from recreate" complete -c borg -f -l 'pattern' -d 'Include/exclude paths matching PATTERN' -n "__fish_seen_subcommand_from recreate" complete -c borg -l 'patterns-from' -d 'Include/exclude paths from PATTERNFILE' -n "__fish_seen_subcommand_from recreate" complete -c borg -f -l 'exclude-caches' -d 'Exclude directories tagged as cache' -n "__fish_seen_subcommand_from recreate" complete -c borg -l 'exclude-if-present' -d 'Exclude directories that contain FILENAME' -n "__fish_seen_subcommand_from recreate" complete -c borg -f -l 'keep-exclude-tags' -d 'Keep tag files of excluded directories' -n "__fish_seen_subcommand_from recreate" complete -c borg -f -l 'keep-tag-files' -d 'Keep tag files of excluded directories' -n "__fish_seen_subcommand_from recreate" # Archive options complete -c borg -f -l 'target' -d "Create a new ARCHIVE" -n "__fish_seen_subcommand_from recreate" complete -c borg -f -s c -l 'checkpoint-interval' -d 'Write checkpoint every N seconds [1800]' -n "__fish_seen_subcommand_from recreate" complete -c borg -f -l 'comment' -d 'Add COMMENT to the archive' -n "__fish_seen_subcommand_from recreate" complete -c borg -f -l 'timestamp' -d 'Set creation TIME (yyyy-mm-ddThh:mm:ss)' -n "__fish_seen_subcommand_from recreate" complete -c borg -l 'timestamp' -d 'Set creation time using reference FILE' -n "__fish_seen_subcommand_from recreate" complete -c borg -f -s C -l 'compression' -d 'Select compression ALGORITHM,LEVEL [lz4]' -a "$compression_methods" -n "__fish_seen_subcommand_from 
recreate"
set -l recompress_when "if-different always never"
complete -c borg -f -l 'recompress' -d 'Recompress chunks CONDITION' -a "$recompress_when" -n "__fish_seen_subcommand_from recreate"
complete -c borg -f -l 'chunker-params' -d 'Chunker PARAMETERS [19,23,21,4095]' -n "__fish_seen_subcommand_from recreate"

# borg export-tar options
complete -c borg -l 'tar-filter' -d 'Filter program to pipe data through' -n "__fish_seen_subcommand_from export-tar"
complete -c borg -f -l 'list' -d 'Print verbose list of items' -n "__fish_seen_subcommand_from export-tar"
# Exclusion options
complete -c borg -s e -l 'exclude' -d 'Exclude paths matching PATTERN' -n "__fish_seen_subcommand_from export-tar"
complete -c borg -l 'exclude-from' -d 'Read exclude patterns from EXCLUDEFILE' -n "__fish_seen_subcommand_from export-tar"
complete -c borg -f -l 'pattern' -d 'Include/exclude paths matching PATTERN' -n "__fish_seen_subcommand_from export-tar"
complete -c borg -l 'patterns-from' -d 'Include/exclude paths from PATTERNFILE' -n "__fish_seen_subcommand_from export-tar"
complete -c borg -f -l 'strip-components' -d 'Remove NUMBER of leading path elements' -n "__fish_seen_subcommand_from export-tar"

# borg serve
complete -c borg -l 'restrict-to-path' -d 'Restrict repository access to PATH' -n "__fish_seen_subcommand_from serve"
complete -c borg -l 'restrict-to-repository' -d 'Restrict repository access at PATH' -n "__fish_seen_subcommand_from serve"
complete -c borg -f -l 'append-only' -d 'Only allow appending to repository' -n "__fish_seen_subcommand_from serve"
complete -c borg -f -l 'storage-quota' -d 'Override storage QUOTA of the repository' -n "__fish_seen_subcommand_from serve"

# borg config
complete -c borg -f -s c -l 'cache' -d 'Get/set/list values in the repo cache' -n "__fish_seen_subcommand_from config"
complete -c borg -f -s d -l 'delete' -d 'Delete the KEY from the config' -n "__fish_seen_subcommand_from config"
complete -c borg -f -l 'list' -d 'List the configuration of the repo' -n "__fish_seen_subcommand_from config"

# borg with-lock
# no specific options

# borg break-lock
# no specific options

# borg benchmark
# no specific options

# borg help
# no specific options

# List repositories::archives

function __fish_borg_is_argument_n --description 'Test if current argument is on Nth place' --argument n
    set tokens (commandline --current-process --tokenize --cut-at-cursor)
    set -l tokencount 0
    for token in $tokens
        switch $token
            case '-*'
                # ignore command line switches
            case '*'
                set tokencount (math $tokencount+1)
        end
    end
    return (test $tokencount -eq $n)
end

function __fish_borg_is_dir_a_repository
    set -l config_content
    if test -f $argv[1]/README
       and test -f $argv[1]/config
        read config_content < $argv[1]/config ^/dev/null
    end
    return (string match --quiet '[repository]' $config_content)
end

function __fish_borg_list_repos_or_archives
    if string match --quiet --regex '.*::' '"'(commandline --current-token)'"'
        # If the current token contains "::" then list the archives:
        set -l repository_name (string replace --regex '::.*' '' (commandline --current-token))
        borg list --format="$repository_name::{archive}{NEWLINE}" "$repository_name" ^/dev/null
    else
        # Otherwise list the repositories, directories and user@host entries:
        set -l directories (commandline --cut-at-cursor --current-token)*/
        for directoryname in $directories
            if __fish_borg_is_dir_a_repository $directoryname
                printf '%s::\t%s\n' (string trim --right --chars='/' $directoryname) "Repository"
            else
                printf '%s\n' $directoryname
            end
        end
__fish_complete_user_at_hosts | string replace --regex '$' ':' end end complete -c borg -f -n "__fish_borg_is_argument_n 2" -a '(__fish_borg_list_repos_or_archives)' # Second archive listing for borg diff function __fish_borg_is_diff_second_archive return (string match --quiet --regex ' diff .*::[^ ]+ '(commandline --current-token)'$' (commandline)) end function __fish_borg_list_diff_archives set -l repo_matches (string match --regex '([^ ]*)::' (commandline)) borg list --format="{archive}{NEWLINE}" "$repo_matches[2]" ^/dev/null end complete -c borg -f -n __fish_borg_is_diff_second_archive -a '(__fish_borg_list_diff_archives)' borgbackup-1.1.15/scripts/shell_completions/bash/0000755000175000017500000000000013771325773022042 5ustar useruser00000000000000borgbackup-1.1.15/scripts/shell_completions/bash/borg0000644000175000017500000002123513771325506022713 0ustar useruser00000000000000# Completions for borg # https://www.borgbackup.org/ # Note: # Listing archives works on password protected repositories only if $BORG_PASSPHRASE is set. # Install: # Copy this file to /usr/share/bash-completion/completions/ or /etc/bash_completion.d/ _borg() { compopt -o default COMPREPLY=() local cur="${COMP_WORDS[COMP_CWORD]}" local prev="${COMP_WORDS[COMP_CWORD-1]}" local prevprev="${COMP_WORDS[COMP_CWORD-2]}" local common_opts="-h --help --version --critical --error --warning --info -v --verbose --debug --debug-topic -p --progress --log-json --lock-wait --show-version --show-rc --umask --remote-path --remote-ratelimit --consider-part-files --debug-profile --rsh" local opts="${common_opts}" # Commands if [[ ${COMP_CWORD} == 1 ]] ; then local borg_commands="init create extract check rename list diff delete prune info mount umount key serve upgrade recreate export-tar with-lock break-lock config benchmark help" COMPREPLY=( $(compgen -W "${borg_commands}" -- ${cur}) ) compopt +o default return 0 fi case "${prev}" in 'key') COMPREPLY=( $(compgen -W "import export change-passphrase" -- ${cur}) ) return 0 ;; 'benchmark') COMPREPLY=( $(compgen -W "crud" -- ${cur}) ) return 0 ;; 'help') COMPREPLY=( $(compgen -W "patterns placeholders compression" -- ${cur}) ) return 0 ;; '--encryption' | '-e') local encryption_modes="none keyfile keyfile-blake2 repokey repokey-blake2 authenticated authenticated-blake2" COMPREPLY=( $(compgen -W "${encryption_modes}" -- ${cur}) ) return 0 ;; '--files-cache') local files_cache_mode="ctime,size,inode mtime,size,inode ctime,size mtime,size rechunk,ctime rechunk,mtime disabled" COMPREPLY=( $(compgen -W "${files_cache_mode}" -- ${cur}) ) return 0 ;; '--compression' | '-C') local compression_methods="none auto lz4 zstd,1 zstd,2 zstd,3 zstd,4 zstd,5 zstd,6 zstd,7 zstd,8 zstd,9 zstd,10 zstd,11 zstd,12 zstd,13 zstd,14 zstd,15 zstd,16 zstd,17 zstd,18 zstd,19 zstd,20 zstd,21 zstd,22 zlib,1 zlib,2 zlib,3 zlib,4 zlib,5 zlib,6 zlib,7 zlib,8 zlib,9 lzma,0 lzma,1 lzma,2 lzma,3 lzma,4 lzma,5 lzma,6 lzma,7 lzma,8 lzma,9" COMPREPLY=( $(compgen -W "${compression_methods}" -- ${cur}) ) return 0 ;; '--sort-by') local sort_keys="timestamp name id" COMPREPLY=( $(compgen -W "${sort_keys}" -- ${cur}) ) return 0 ;; '-o') # FIXME This list is probably not full, but I tried to pick only those that are relevant to borg mount -o: local fuse_options="ac_attr_timeout= allow_damaged_files allow_other allow_root attr_timeout= auto auto_cache auto_unmount default_permissions entry_timeout= gid= group_id= kernel_cache max_read= negative_timeout= noauto noforget remember= remount rootmode= uid= umask= user user_id= 
versions"
            COMPREPLY=( $(compgen -W "${fuse_options}" -- ${cur}) )
            return 0
            ;;
        '--recompress')
            local recompress_when="if-different always never"
            COMPREPLY=( $(compgen -W "${recompress_when}" -- ${cur}) )
            return 0
            ;;
    esac

    if [[ ${cur} == -* ]] ; then
        case "${COMP_LINE}" in
            *' init '*)
                local opts="-e --encryption --append-only --storage-quota --make-parent-dirs ${common_opts}"
                ;;
            *' create '*)
                local opts="-n --dry-run -s --stats --list --filter --json --no-cache-sync --stdin-name --stdin-user --stdin-group --stdin-mode -e --exclude --exclude-from --pattern --patterns-from --exclude-caches --exclude-if-present --keep-exclude-tags --keep-tag-files --exclude-nodump -x --one-file-system --numeric-owner --noatime --noctime --nobirthtime --nobsdflags --ignore-inode --files-cache --read-special --comment --timestamp -c --checkpoint-interval --chunker-params -C --compression ${common_opts}"
                ;;
            *' extract '*)
                local opts="--list -n --dry-run --numeric-owner --nobsdflags --stdout --sparse -e --exclude --exclude-from --pattern --patterns-from --strip-components ${common_opts}"
                ;;
            *' check '*)
                local opts="--repository-only --archives-only --verify-data --repair --save-space -P --prefix -a --glob-archives --sort-by --first --last ${common_opts}"
                ;;
            # rename
            # no specific options
            *" list "*)
                local opts="--short --list-format --format --json --json-lines -P --prefix -a --glob-archives --sort-by --first --last -e --exclude --exclude-from --pattern --patterns-from ${common_opts}"
                ;;
            *' diff '*)
                local opts="--numeric-owner --same-chunker-params --sort -e --exclude --exclude-from --pattern --patterns-from ${common_opts}"
                ;;
            *' delete '*)
                local opts="-n --dry-run -s --stats --cache-only --force --save-space -P --prefix -a --glob-archives --sort-by --first --last ${common_opts}"
                ;;
            *' prune '*)
                local opts="-n --dry-run --force -s --stats --list --keep-within --keep-last --keep-secondly --keep-minutely -H --keep-hourly -d --keep-daily -w --keep-weekly -m --keep-monthly -y --keep-yearly --save-space -P --prefix -a --glob-archives ${common_opts}"
                ;;
            *' info '*)
                local opts="--json -P --prefix -a --glob-archives --sort-by --first --last ${common_opts}"
                ;;
            *' mount '*)
                local opts="-f --foreground -o -P --prefix -a --glob-archives --sort-by --first --last -e --exclude --exclude-from --pattern --patterns-from --strip-components ${common_opts}"
                ;;
            # umount
            # no specific options
            # key change-passphrase
            # no specific options
            *' export '*)
                local opts="--paper --qr-html ${common_opts}"
                ;;
            *' import '*)
                local opts="--paper ${common_opts}"
                ;;
            *' upgrade '*)
                local opts="-n --dry-run --inplace --force --tam --disable-tam ${common_opts}"
                ;;
            *' recreate '*)
                local opts="--list --filter -n --dry-run -s --stats -e --exclude --exclude-from --pattern --patterns-from --exclude-caches --exclude-if-present --keep-exclude-tags --keep-tag-files --target -c --checkpoint-interval --comment --timestamp -C --compression --recompress --chunker-params ${common_opts}"
                ;;
            *' export-tar '*)
                local opts="--tar-filter --list -e --exclude --exclude-from --pattern --patterns-from --strip-components ${common_opts}"
                ;;
            *' serve '*)
                local opts="--restrict-to-path --restrict-to-repository --append-only --storage-quota ${common_opts}"
                ;;
            *' config '*)
                local opts="-c --cache -d --delete --list ${common_opts}"
                ;;
            # with-lock
            # no specific options
            # break-lock
            # no specific options
            # benchmark crud
            # no specific options
        esac
        COMPREPLY=( $(compgen -W "${opts}" -- ${cur}) )
        return 0
    fi

    # Listing archives.
    # Since "::" is treated as separate word in bash,
    # it is $cur when the cursor is right behind it
    # and $prev if the user has started to type an archive name.
    local repository_name="" # If set, we'll list the archives
    local typed_word=""
    if [[ ${cur} == "::" ]] ; then
        repository_name=${prev}
    fi
    if [[ ${prev} == "::" ]] ; then
        repository_name=${prevprev}
        typed_word=${cur}
    fi

    # Second archive listing for borg diff
    if [[ ${COMP_LINE} =~ ^.*\ diff\ .*::[^\ ]+\ ${cur}$ ]] ; then
        repository_name=`expr match "${COMP_LINE}" "\(.*\)::"`
        repository_name=${repository_name##* }
        typed_word=${cur}
    fi

    if [[ ${repository_name} != "" ]] ; then
        if [[ ${COMP_LINE} == *" ::"* ]] ; then
            # There is a space before the "::"
            # which means that no repository name was typed,
            # so probably $BORG_REPO is set.
            repository_name=""
        fi
        local archive_list=$(borg list --short "${repository_name}" 2>/dev/null)
        COMPREPLY=( $(compgen -W "${archive_list}" -- "${typed_word}" ) )
        return 0
    fi
    return 0
}

complete -F _borg borg
borgbackup-1.1.15/scripts/py36-blake2.py0000644000175000017500000000174613771325506017715 0ustar useruser00000000000000
"""
This script checks compatibility of crypto.blake2b_256 against hashlib.blake2b in CPython 3.6.
"""

import hashlib
import sys


def test_b2(b2_input, b2_output):
    digest = hashlib.blake2b(b2_input, digest_size=32).digest()
    identical = b2_output == digest
    print('Input: ', b2_input.hex())
    print('Expected: ', b2_output.hex())
    print('Calculated:', digest.hex())
    print('Identical: ', identical)
    print()
    if not identical:
        sys.exit(1)


test_b2(
    bytes.fromhex('037fb9b75b20d623f1d5a568050fccde4a1b7c5f5047432925e941a17c7a2d0d7061796c6f6164'),
    bytes.fromhex('a22d4fc81bb61c3846c334a09eaf28d22dd7df08c9a7a41e713ef28d80eebd45')
)
test_b2(
    b'abc',
    bytes.fromhex('bddd813c634239723171ef3fee98579b94964e3bb1cb3e427262c8c068d52319')
)
test_b2(
    bytes.fromhex('e944973af2256d4d670c12dd75304c319f58f4e40df6fb18ef996cb47e063676') + b'1234567890' * 100,
    bytes.fromhex('97ede832378531dd0f4c668685d166e797da27b47d8cd441e885b60abd5e0cb2'),
)
borgbackup-1.1.15/scripts/errorlist.py0000755000175000017500000000062413771325506020000 0ustar useruser00000000000000#!/usr/bin/env python3

from textwrap import indent

import borg.archiver
from borg.helpers import Error, ErrorWithTraceback

classes = Error.__subclasses__() + ErrorWithTraceback.__subclasses__()

for cls in sorted(classes, key=lambda cls: (cls.__module__, cls.__qualname__)):
    if cls is ErrorWithTraceback:
        continue
    print(' ', cls.__qualname__)
    print(indent(cls.__doc__, ' ' * 8))
borgbackup-1.1.15/scripts/glibc_check.py0000755000175000017500000000323113771325506020165 0ustar useruser00000000000000#!/usr/bin/env python3
"""
Check if all given binaries work with the given glibc version.

glibc_check.py 2.11 bin [bin ...]

rc = 0 means "yes", rc = 1 means "no".
"""
import re
import subprocess
import sys

verbose = True
objdump = "objdump -T %s"
glibc_re = re.compile(r'GLIBC_([0-9]\.[0-9]+)')


def parse_version(v):
    major, minor = v.split('.')
    return int(major), int(minor)


def format_version(version):
    return "%d.%d" % version


def main():
    given = parse_version(sys.argv[1])
    filenames = sys.argv[2:]

    overall_versions = set()
    for filename in filenames:
        try:
            output = subprocess.check_output(objdump % filename, shell=True, stderr=subprocess.STDOUT)
            output = output.decode('utf-8')
            versions = set(parse_version(match.group(1)) for match in glibc_re.finditer(output))
            requires_glibc = max(versions)
            overall_versions.add(requires_glibc)
            if verbose:
                print("%s %s" % (filename, format_version(requires_glibc)))
        except subprocess.CalledProcessError:
            if verbose:
                print("%s errored." % filename)
    wanted = max(overall_versions)
    ok = given >= wanted
    if verbose:
        if ok:
            print("The binaries work with the given glibc %s." % format_version(given))
        else:
            print("The binaries do not work with the given glibc %s. "
                  "Minimum is: %s" % (format_version(given), format_version(wanted)))
    return ok


if __name__ == '__main__':
    ok = main()
    sys.exit(0 if ok else 1)
borgbackup-1.1.15/scripts/fuzz-cache-sync/0000755000175000017500000000000013771325773020413 5ustar useruser00000000000000borgbackup-1.1.15/scripts/fuzz-cache-sync/HOWTO0000644000175000017500000000055113771325506021231 0ustar useruser00000000000000- Install AFL and the requirements for LLVM mode (see docs)
- Compile the fuzzing target, e.g.
  AFL_HARDEN=1 afl-clang-fast main.c -o fuzz-target -O3
  (other options, like using ASan or MSan are possible as well)
- Add additional test cases to testcase_dir
- Run afl, easiest (but inefficient) way;
  afl-fuzz -i testcase_dir -o findings_dir ./fuzz-target
borgbackup-1.1.15/scripts/fuzz-cache-sync/main.c0000644000175000017500000000130113771325506021470 0ustar useruser00000000000000
#define BORG_NO_PYTHON

#include "../../src/borg/_hashindex.c"
#include "../../src/borg/cache_sync/cache_sync.c"

#define BUFSZ 32768

int main() {
    char buf[BUFSZ];
    int len, ret;
    CacheSyncCtx *ctx;
    HashIndex *idx;

    /* capacity, key size, value size */
    idx = hashindex_init(0, 32, 12);
    ctx = cache_sync_init(idx);

    while (1) {
        len = read(0, buf, BUFSZ);
        if (!len) {
            break;
        }
        ret = cache_sync_feed(ctx, buf, len);
        if(!ret && cache_sync_error(ctx)) {
            fprintf(stderr, "error: %s\n", cache_sync_error(ctx));
            return 1;
        }
    }
    hashindex_free(idx);
    cache_sync_free(ctx);
    return 0;
}
borgbackup-1.1.15/scripts/fuzz-cache-sync/testcase_dir/0000755000175000017500000000000013771325773023064 5ustar useruser00000000000000borgbackup-1.1.15/scripts/fuzz-cache-sync/testcase_dir/test_simple0000644000175000017500000000022313771325506025326 0ustar useruser00000000000000��foo�bar�baz�ңbar�.�user�chunks�chunks��� 00000000000000000000000000000001�� 00000000000000000000000000000002borgbackup-1.1.15/scripts/upload-pypi0000755000175000017500000000043113771325506017563 0ustar useruser00000000000000#!/bin/bash

R=$1
if [ "$R" = "" ]; then
    echo "Usage: upload-pypi 1.2.3 [test]"
    exit
fi

if [ "$2" = "test" ]; then
    export TWINE_REPOSITORY_URL=https://test.pypi.org/legacy/
else
    export TWINE_REPOSITORY_URL=
fi

D=dist/borgbackup-$R.tar.gz

twine upload $D.asc $D
borgbackup-1.1.15/setup_b2.py0000644000175000017500000000465713771325506016012 0ustar useruser00000000000000# Support code for building a C extension with blake2 files
#
# Copyright (c) 2016-present, Gregory Szorc (original code for zstd)
# 2017-present, Thomas Waldmann (mods to make it more generic, code for
blake2) # All rights reserved. # # This software may be modified and distributed under the terms # of the BSD license. See the LICENSE file for details. import os # b2 files, structure as seen in BLAKE2 (reference implementation) project repository: b2_sources = [ 'ref/blake2b-ref.c', ] b2_includes = [ 'ref', ] def b2_system_prefix(prefixes): for prefix in prefixes: filename = os.path.join(prefix, 'include', 'blake2.h') if os.path.exists(filename): with open(filename, 'rb') as fd: if b'blake2b_init' in fd.read(): return prefix def b2_ext_kwargs(bundled_path, system_prefix=None, system=False, **kwargs): """amend kwargs with b2 stuff for a distutils.extension.Extension initialization. bundled_path: relative (to this file) path to the bundled library source code files system_prefix: where the system-installed library can be found system: True: use the system-installed shared library, False: use the bundled library code kwargs: distutils.extension.Extension kwargs that should be amended returns: amended kwargs """ def multi_join(paths, *path_segments): """apply os.path.join on a list of paths""" return [os.path.join(*(path_segments + (path, ))) for path in paths] use_system = system and system_prefix is not None sources = kwargs.get('sources', []) if not use_system: sources += multi_join(b2_sources, bundled_path) include_dirs = kwargs.get('include_dirs', []) if use_system: include_dirs += multi_join(['include'], system_prefix) else: include_dirs += multi_join(b2_includes, bundled_path) library_dirs = kwargs.get('library_dirs', []) if use_system: library_dirs += multi_join(['lib'], system_prefix) libraries = kwargs.get('libraries', []) if use_system: libraries += ['b2', ] extra_compile_args = kwargs.get('extra_compile_args', []) if not use_system: extra_compile_args += [] # not used yet ret = dict(**kwargs) ret.update(dict(sources=sources, extra_compile_args=extra_compile_args, include_dirs=include_dirs, library_dirs=library_dirs, libraries=libraries)) return ret borgbackup-1.1.15/PKG-INFO0000644000175000017500000002174713771325773015023 0ustar useruser00000000000000Metadata-Version: 2.1 Name: borgbackup Version: 1.1.15 Summary: Deduplicated, encrypted, authenticated and compressed backups Home-page: https://borgbackup.readthedocs.io/ Author: The Borg Collective (see AUTHORS file) Author-email: borgbackup@python.org License: BSD Description: What is BorgBackup? ------------------- BorgBackup (short: Borg) is a deduplicating backup program. Optionally, it supports compression and authenticated encryption. The main goal of Borg is to provide an efficient and secure way to backup data. The data deduplication technique used makes Borg suitable for daily backups since only changes are stored. The authenticated encryption technique makes it suitable for backups to not fully trusted targets. See the `installation manual`_ or, if you have already downloaded Borg, ``docs/installation.rst`` to get started with Borg. There is also an `offline documentation`_ available, in multiple formats. .. _installation manual: https://borgbackup.readthedocs.org/en/stable/installation.html .. _offline documentation: https://readthedocs.org/projects/borgbackup/downloads Main features ~~~~~~~~~~~~~ **Space efficient storage** Deduplication based on content-defined chunking is used to reduce the number of bytes stored: each file is split into a number of variable length chunks and only chunks that have never been seen before are added to the repository. A chunk is considered duplicate if its id_hash value is identical. 
A cryptographically strong hash or MAC function is used as id_hash, e.g. (hmac-)sha256. To deduplicate, all the chunks in the same repository are considered, no matter whether they come from different machines, from previous backups, from the same backup or even from the same single file. Compared to other deduplication approaches, this method does NOT depend on: * file/directory names staying the same: So you can move your stuff around without killing the deduplication, even between machines sharing a repo. * complete files or time stamps staying the same: If a big file changes a little, only a few new chunks need to be stored - this is great for VMs or raw disks. * The absolute position of a data chunk inside a file: Stuff may get shifted and will still be found by the deduplication algorithm. **Speed** * performance-critical code (chunking, compression, encryption) is implemented in C/Cython * local caching of files/chunks index data * quick detection of unmodified files **Data encryption** All data can be protected using 256-bit AES encryption, data integrity and authenticity is verified using HMAC-SHA256. Data is encrypted clientside. **Compression** All data can be optionally compressed: * lz4 (super fast, low compression) * zstd (wide range from high speed and low compression to high compression and lower speed) * zlib (medium speed and compression) * lzma (low speed, high compression) **Off-site backups** Borg can store data on any remote host accessible over SSH. If Borg is installed on the remote host, big performance gains can be achieved compared to using a network filesystem (sshfs, nfs, ...). **Backups mountable as filesystems** Backup archives are mountable as userspace filesystems for easy interactive backup examination and restores (e.g. by using a regular file manager). **Easy installation on multiple platforms** We offer single-file binaries that do not require installing anything - you can just run them on these platforms: * Linux * Mac OS X * FreeBSD * OpenBSD and NetBSD (no xattrs/ACLs support or binaries yet) * Cygwin (experimental, no binaries yet) * Linux Subsystem of Windows 10 (experimental) **Free and Open Source Software** * security and functionality can be audited independently * licensed under the BSD (3-clause) license, see `License`_ for the complete license Easy to use ~~~~~~~~~~~ Initialize a new backup repository (see ``borg init --help`` for encryption options):: $ borg init -e repokey /path/to/repo Create a backup archive:: $ borg create /path/to/repo::Saturday1 ~/Documents Now doing another backup, just to show off the great deduplication:: $ borg create -v --stats /path/to/repo::Saturday2 ~/Documents ----------------------------------------------------------------------------- Archive name: Saturday2 Archive fingerprint: 622b7c53c... Time (start): Sat, 2016-02-27 14:48:13 Time (end): Sat, 2016-02-27 14:48:14 Duration: 0.88 seconds Number of files: 163 ----------------------------------------------------------------------------- Original size Compressed size Deduplicated size This archive: 6.85 MB 6.85 MB 30.79 kB <-- ! All archives: 13.69 MB 13.71 MB 6.88 MB Unique chunks Total chunks Chunk index: 167 330 ----------------------------------------------------------------------------- For a graphical frontend refer to our complementary project `BorgWeb `_. Helping, Donations and Bounties, becoming a Patron -------------------------------------------------- Your help is always welcome! 
Spread the word, give feedback, help with documentation, testing or development. You can also give monetary support to the project, see there for details: https://www.borgbackup.org/support/fund.html Links ----- * `Main Web Site `_ * `Releases `_, `PyPI packages `_ and `ChangeLog `_ * `Offline Documentation `_ * `GitHub `_ and `Issue Tracker `_. * `Web-Chat (IRC) `_ and `Mailing List `_ * `License `_ * `Security contact `_ Compatibility notes ------------------- EXPECT THAT WE WILL BREAK COMPATIBILITY REPEATEDLY WHEN MAJOR RELEASE NUMBER CHANGES (like when going from 0.x.y to 1.0.0 or from 1.x.y to 2.0.0). NOT RELEASED DEVELOPMENT VERSIONS HAVE UNKNOWN COMPATIBILITY PROPERTIES. THIS IS SOFTWARE IN DEVELOPMENT, DECIDE YOURSELF WHETHER IT FITS YOUR NEEDS. Security issues should be reported to the `Security contact`_ (or see ``docs/support.rst`` in the source distribution). Platform: Linux Platform: MacOS X Platform: FreeBSD Platform: OpenBSD Platform: NetBSD Classifier: Development Status :: 4 - Beta Classifier: Environment :: Console Classifier: Intended Audience :: System Administrators Classifier: License :: OSI Approved :: BSD License Classifier: Operating System :: POSIX :: BSD :: FreeBSD Classifier: Operating System :: POSIX :: BSD :: OpenBSD Classifier: Operating System :: POSIX :: BSD :: NetBSD Classifier: Operating System :: MacOS :: MacOS X Classifier: Operating System :: POSIX :: Linux Classifier: Programming Language :: Python Classifier: Programming Language :: Python :: 3 Classifier: Programming Language :: Python :: 3.5 Classifier: Programming Language :: Python :: 3.6 Classifier: Programming Language :: Python :: 3.7 Classifier: Programming Language :: Python :: 3.8 Classifier: Programming Language :: Python :: 3.9 Classifier: Topic :: Security :: Cryptography Classifier: Topic :: System :: Archiving :: Backup Provides-Extra: fuse borgbackup-1.1.15/docs/0000755000175000017500000000000013771325773014643 5ustar useruser00000000000000borgbackup-1.1.15/docs/usage/0000755000175000017500000000000013771325773015747 5ustar useruser00000000000000borgbackup-1.1.15/docs/usage/diff.rst0000644000175000017500000000147013771325506017405 0ustar useruser00000000000000.. include:: diff.rst.inc Examples ~~~~~~~~ :: $ borg init -e=none testrepo $ mkdir testdir $ cd testdir $ echo asdf > file1 $ dd if=/dev/urandom bs=1M count=4 > file2 $ touch file3 $ borg create ../testrepo::archive1 . $ chmod a+x file1 $ echo "something" >> file2 $ borg create ../testrepo::archive2 . $ rm file3 $ touch file4 $ borg create ../testrepo::archive3 . $ cd .. $ borg diff testrepo::archive1 archive2 [-rw-r--r-- -> -rwxr-xr-x] file1 +135 B -252 B file2 $ borg diff testrepo::archive2 archive3 added 0 B file4 removed 0 B file3 $ borg diff testrepo::archive1 archive3 [-rw-r--r-- -> -rwxr-xr-x] file1 +135 B -252 B file2 added 0 B file4 removed 0 B file3 borgbackup-1.1.15/docs/usage/benchmark.rst0000644000175000017500000000004413771325506020423 0ustar useruser00000000000000.. include:: benchmark_crud.rst.inc borgbackup-1.1.15/docs/usage/recreate.rst.inc0000644000175000017500000007462613771325506021054 0ustar useruser00000000000000.. IMPORTANT: this file is auto-generated from borg's built-in help, do not edit! .. _borg_recreate: borg recreate ------------- .. code-block:: none borg [common options] recreate [options] [REPOSITORY_OR_ARCHIVE] [PATH...] .. only:: html .. 
class:: borg-options-table +-------------------------------------------------------+---------------------------------------------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ | **positional arguments** | +-------------------------------------------------------+---------------------------------------------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ | | ``REPOSITORY_OR_ARCHIVE`` | repository or archive to recreate | +-------------------------------------------------------+---------------------------------------------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ | | ``PATH`` | paths to recreate; patterns are supported | +-------------------------------------------------------+---------------------------------------------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ | **optional arguments** | +-------------------------------------------------------+---------------------------------------------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ | | ``--list`` | output verbose list of items (files, dirs, ...) 
| +-------------------------------------------------------+---------------------------------------------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ | | ``--filter STATUSCHARS`` | only display items with the given status characters (listed in borg create --help) | +-------------------------------------------------------+---------------------------------------------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ | | ``-n``, ``--dry-run`` | do not change anything | +-------------------------------------------------------+---------------------------------------------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ | | ``-s``, ``--stats`` | print statistics at end | +-------------------------------------------------------+---------------------------------------------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ | .. 
class:: borg-common-opt-ref | | | | :ref:`common_options` | +-------------------------------------------------------+---------------------------------------------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ | **Exclusion options** | +-------------------------------------------------------+---------------------------------------------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ | | ``-e PATTERN``, ``--exclude PATTERN`` | exclude paths matching PATTERN | +-------------------------------------------------------+---------------------------------------------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ | | ``--exclude-from EXCLUDEFILE`` | read exclude patterns from EXCLUDEFILE, one per line | +-------------------------------------------------------+---------------------------------------------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ | | ``--pattern PATTERN`` | experimental: include/exclude paths matching PATTERN | +-------------------------------------------------------+---------------------------------------------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ | | ``--patterns-from PATTERNFILE`` | experimental: read include/exclude patterns from PATTERNFILE, one per line | +-------------------------------------------------------+---------------------------------------------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ | | ``--exclude-caches`` | exclude directories that contain a CACHEDIR.TAG file (http://www.bford.info/cachedir/spec.html) | 
+-------------------------------------------------------+---------------------------------------------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ | | ``--exclude-if-present NAME`` | exclude directories that are tagged by containing a filesystem object with the given NAME | +-------------------------------------------------------+---------------------------------------------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ | | ``--keep-exclude-tags``, ``--keep-tag-files`` | if tag objects are specified with ``--exclude-if-present``, don't omit the tag objects themselves from the backup archive | +-------------------------------------------------------+---------------------------------------------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ | **Archive options** | +-------------------------------------------------------+---------------------------------------------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ | | ``--target TARGET`` | create a new archive with the name ARCHIVE, do not replace existing archive (only applies for a single archive) | +-------------------------------------------------------+---------------------------------------------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ | | ``-c SECONDS``, ``--checkpoint-interval SECONDS`` | write checkpoint every SECONDS seconds (Default: 1800) | +-------------------------------------------------------+---------------------------------------------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ | | ``--comment COMMENT`` | add a comment text to the archive | 
+-------------------------------------------------------+---------------------------------------------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ | | ``--timestamp TIMESTAMP`` | manually specify the archive creation date/time (UTC, yyyy-mm-ddThh:mm:ss format). alternatively, give a reference file/directory. | +-------------------------------------------------------+---------------------------------------------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ | | ``-C COMPRESSION``, ``--compression COMPRESSION`` | select compression algorithm, see the output of the "borg help compression" command for details. | +-------------------------------------------------------+---------------------------------------------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ | | ``--recompress MODE`` | recompress data chunks according to ``--compression``. MODE `if-different`: recompress if current compression is with a different compression algorithm (the level is not considered). MODE `always`: recompress even if current compression is with the same compression algorithm (use this to change the compression level). MODE `never` (default): do not recompress. | +-------------------------------------------------------+---------------------------------------------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ | | ``--chunker-params PARAMS`` | specify the chunker parameters (CHUNK_MIN_EXP, CHUNK_MAX_EXP, HASH_MASK_BITS, HASH_WINDOW_SIZE) or `default` to use the current defaults. default: 19,23,21,4095 | +-------------------------------------------------------+---------------------------------------------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ .. raw:: html .. only:: latex REPOSITORY_OR_ARCHIVE repository or archive to recreate PATH paths to recreate; patterns are supported optional arguments --list output verbose list of items (files, dirs, ...) 
--filter STATUSCHARS only display items with the given status characters (listed in borg create --help) -n, --dry-run do not change anything -s, --stats print statistics at end :ref:`common_options` | Exclusion options -e PATTERN, --exclude PATTERN exclude paths matching PATTERN --exclude-from EXCLUDEFILE read exclude patterns from EXCLUDEFILE, one per line --pattern PATTERN experimental: include/exclude paths matching PATTERN --patterns-from PATTERNFILE experimental: read include/exclude patterns from PATTERNFILE, one per line --exclude-caches exclude directories that contain a CACHEDIR.TAG file (http://www.bford.info/cachedir/spec.html) --exclude-if-present NAME exclude directories that are tagged by containing a filesystem object with the given NAME --keep-exclude-tags, --keep-tag-files if tag objects are specified with ``--exclude-if-present``, don't omit the tag objects themselves from the backup archive Archive options --target TARGET create a new archive with the name ARCHIVE, do not replace existing archive (only applies for a single archive) -c SECONDS, --checkpoint-interval SECONDS write checkpoint every SECONDS seconds (Default: 1800) --comment COMMENT add a comment text to the archive --timestamp TIMESTAMP manually specify the archive creation date/time (UTC, yyyy-mm-ddThh:mm:ss format). alternatively, give a reference file/directory. -C COMPRESSION, --compression COMPRESSION select compression algorithm, see the output of the "borg help compression" command for details. --recompress MODE recompress data chunks according to ``--compression``. MODE `if-different`: recompress if current compression is with a different compression algorithm (the level is not considered). MODE `always`: recompress even if current compression is with the same compression algorithm (use this to change the compression level). MODE `never` (default): do not recompress. --chunker-params PARAMS specify the chunker parameters (CHUNK_MIN_EXP, CHUNK_MAX_EXP, HASH_MASK_BITS, HASH_WINDOW_SIZE) or `default` to use the current defaults. default: 19,23,21,4095 Description ~~~~~~~~~~~ Recreate the contents of existing archives. recreate is a potentially dangerous function and might lead to data loss (if used wrongly). BE VERY CAREFUL! ``--exclude``, ``--exclude-from``, ``--exclude-if-present``, ``--keep-exclude-tags`` and PATH have the exact same semantics as in "borg create", but they only check for files in the archives and not in the local file system. If PATHs are specified, the resulting archives will only contain files from these PATHs. Note that all paths in an archive are relative, therefore absolute patterns/paths will *not* match (``--exclude``, ``--exclude-from``, PATHs). ``--recompress`` allows to change the compression of existing data in archives. Due to how Borg stores compressed size information this might display incorrect information for archives that were not recreated at the same time. There is no risk of data loss by this. ``--chunker-params`` will re-chunk all files in the archive, this can be used to have upgraded Borg 0.xx or Attic archives deduplicate with Borg 1.x archives. **USE WITH CAUTION.** Depending on the PATHs and patterns given, recreate can be used to permanently delete files from archives. When in doubt, use ``--dry-run --verbose --list`` to see how patterns/PATHS are interpreted. See :ref:`list_item_flags` in ``borg create`` for details. The archive being recreated is only removed after the operation completes. 
The archive that is built during the operation exists at the same time at ".recreate". The new archive will have a different archive ID. With ``--target`` the original archive is not replaced, instead a new archive is created. When rechunking space usage can be substantial, expect at least the entire deduplicated size of the archives using the previous chunker params. When recompressing expect approx. (throughput / checkpoint-interval) in space usage, assuming all chunks are recompressed. If you recently ran borg check --repair and it had to fix lost chunks with all-zero replacement chunks, please first run another backup for the same data and re-run borg check --repair afterwards to heal any archives that had lost chunks which are still generated from the input data. Important: running borg recreate to re-chunk will remove the chunks_healthy metadata of all items with replacement chunks, so healing will not be possible any more after re-chunking (it is also unlikely it would ever work: due to the change of chunking parameters, the missing chunk likely will never be seen again even if you still have the data that produced it).borgbackup-1.1.15/docs/usage/key_import.rst.inc0000644000175000017500000000600613771325506021427 0ustar useruser00000000000000.. IMPORTANT: this file is auto-generated from borg's built-in help, do not edit! .. _borg_key_import: borg key import --------------- .. code-block:: none borg [common options] key import [options] [REPOSITORY] [PATH] .. only:: html .. class:: borg-options-table +-------------------------------------------------------+----------------+----------------------------------------------------------+ | **positional arguments** | +-------------------------------------------------------+----------------+----------------------------------------------------------+ | | ``REPOSITORY`` | | +-------------------------------------------------------+----------------+----------------------------------------------------------+ | | ``PATH`` | path to the backup ('-' to read from stdin) | +-------------------------------------------------------+----------------+----------------------------------------------------------+ | **optional arguments** | +-------------------------------------------------------+----------------+----------------------------------------------------------+ | | ``--paper`` | interactively import from a backup done with ``--paper`` | +-------------------------------------------------------+----------------+----------------------------------------------------------+ | .. class:: borg-common-opt-ref | | | | :ref:`common_options` | +-------------------------------------------------------+----------------+----------------------------------------------------------+ .. raw:: html .. only:: latex REPOSITORY PATH path to the backup ('-' to read from stdin) optional arguments --paper interactively import from a backup done with ``--paper`` :ref:`common_options` | Description ~~~~~~~~~~~ This command allows to restore a key previously backed up with the export command. If the ``--paper`` option is given, the import will be an interactive process in which each line is checked for plausibility before proceeding to the next line. For this format PATH must not be given.borgbackup-1.1.15/docs/usage/general.rst0000644000175000017500000000361513771325506020115 0ustar useruser00000000000000General ------- Borg consists of a number of commands. Each command accepts a number of arguments and options and interprets various environment variables. 
The following sections will describe each command in detail. Commands, options, parameters, paths and such are ``set in fixed-width``. Option values are `underlined`. Borg has few options accepting a fixed set of values (e.g. ``--encryption`` of :ref:`borg_init`). .. container:: experimental Experimental features are marked with red stripes on the sides, like this paragraph. Experimental features are not stable, which means that they may be changed in incompatible ways or even removed entirely without prior notice in following releases. .. include:: usage_general.rst.inc In case you are interested in more details (like formulas), please see :ref:`internals`. For details on the available JSON output, refer to :ref:`json_output`. .. _common_options: Common options ~~~~~~~~~~~~~~ All Borg commands share these options: .. include:: common-options.rst.inc Option ``--bypass-lock`` allows you to access the repository while bypassing borg's locking mechanism. This is necessary if your repository is on a read-only storage where you don't have write permissions or capabilities and therefore cannot create a lock. Examples are repositories stored on a Bluray disc or a read-only network storage. Avoid this option if you are able to use locks as that is the safer way; see the warning below. .. warning:: If you do use ``--bypass-lock``, you are responsible to ensure that no other borg instances have write access to the repository. Otherwise, you might experience errors and read broken data if changes to that repository are being made at the same time. Examples ~~~~~~~~ :: # Create an archive and log: borg version, files list, return code $ borg create --show-version --list --show-rc /path/to/repo::my-files files borgbackup-1.1.15/docs/usage/key.rst0000644000175000017500000000234613771325506017270 0ustar useruser00000000000000.. _borg-change-passphrase: .. include:: key_change-passphrase.rst.inc Examples ~~~~~~~~ :: # Create a key file protected repository $ borg init --encryption=keyfile -v /path/to/repo Initializing repository at "/path/to/repo" Enter new passphrase: Enter same passphrase again: Remember your passphrase. Your data will be inaccessible without it. Key in "/root/.config/borg/keys/mnt_backup" created. Keep this key safe. Your data will be inaccessible without it. Synchronizing chunks cache... Archives: 0, w/ cached Idx: 0, w/ outdated Idx: 0, w/o cached Idx: 0. Done. # Change key file passphrase $ borg key change-passphrase -v /path/to/repo Enter passphrase for key /root/.config/borg/keys/mnt_backup: Enter new passphrase: Enter same passphrase again: Remember your passphrase. Your data will be inaccessible without it. Key updated Fully automated using environment variables: :: $ BORG_NEW_PASSPHRASE=old borg init -e=repokey repo # now "old" is the current passphrase. $ BORG_PASSPHRASE=old BORG_NEW_PASSPHRASE=new borg key change-passphrase repo # now "new" is the current passphrase. .. include:: key_export.rst.inc .. include:: key_import.rst.inc borgbackup-1.1.15/docs/usage/break-lock.rst.inc0000644000175000017500000000350313771325506021256 0ustar useruser00000000000000.. IMPORTANT: this file is auto-generated from borg's built-in help, do not edit! .. _borg_break-lock: borg break-lock --------------- .. code-block:: none borg [common options] break-lock [options] [REPOSITORY] .. only:: html .. 
class:: borg-options-table +-------------------------------------------------------+----------------+-----------------------------------------+ | **positional arguments** | +-------------------------------------------------------+----------------+-----------------------------------------+ | | ``REPOSITORY`` | repository for which to break the locks | +-------------------------------------------------------+----------------+-----------------------------------------+ | .. class:: borg-common-opt-ref | | | | :ref:`common_options` | +-------------------------------------------------------+----------------+-----------------------------------------+ .. raw:: html .. only:: latex REPOSITORY repository for which to break the locks :ref:`common_options` | Description ~~~~~~~~~~~ This command breaks the repository and cache locks. Please use carefully and only while no borg process (on any machine) is trying to access the Cache or the Repository.borgbackup-1.1.15/docs/usage/delete.rst0000644000175000017500000000110313771325506017730 0ustar useruser00000000000000.. include:: delete.rst.inc Examples ~~~~~~~~ :: # delete a single backup archive: $ borg delete /path/to/repo::Monday # delete the whole repository and the related local cache: $ borg delete /path/to/repo You requested to completely DELETE the repository *including* all archives it contains: repo Mon, 2016-02-15 19:26:54 root-2016-02-15 Mon, 2016-02-15 19:36:29 newname Mon, 2016-02-15 19:50:19 Type 'YES' if you understand this and want to continue: YES borgbackup-1.1.15/docs/usage/create.rst.inc0000644000175000017500000010370413771325506020513 0ustar useruser00000000000000.. IMPORTANT: this file is auto-generated from borg's built-in help, do not edit! .. _borg_create: borg create ----------- .. code-block:: none borg [common options] create [options] ARCHIVE [PATH...] .. only:: html .. 
class:: borg-options-table +-------------------------------------------------------+---------------------------------------------------+------------------------------------------------------------------------------------------------------------------------------------------------------+ | **positional arguments** | +-------------------------------------------------------+---------------------------------------------------+------------------------------------------------------------------------------------------------------------------------------------------------------+ | | ``ARCHIVE`` | name of archive to create (must be also a valid directory name) | +-------------------------------------------------------+---------------------------------------------------+------------------------------------------------------------------------------------------------------------------------------------------------------+ | | ``PATH`` | paths to archive | +-------------------------------------------------------+---------------------------------------------------+------------------------------------------------------------------------------------------------------------------------------------------------------+ | **optional arguments** | +-------------------------------------------------------+---------------------------------------------------+------------------------------------------------------------------------------------------------------------------------------------------------------+ | | ``-n``, ``--dry-run`` | do not create a backup archive | +-------------------------------------------------------+---------------------------------------------------+------------------------------------------------------------------------------------------------------------------------------------------------------+ | | ``-s``, ``--stats`` | print statistics for the created archive | +-------------------------------------------------------+---------------------------------------------------+------------------------------------------------------------------------------------------------------------------------------------------------------+ | | ``--list`` | output verbose list of items (files, dirs, ...) | +-------------------------------------------------------+---------------------------------------------------+------------------------------------------------------------------------------------------------------------------------------------------------------+ | | ``--filter STATUSCHARS`` | only display items with the given status characters (see description) | +-------------------------------------------------------+---------------------------------------------------+------------------------------------------------------------------------------------------------------------------------------------------------------+ | | ``--json`` | output stats as JSON. Implies ``--stats``. | +-------------------------------------------------------+---------------------------------------------------+------------------------------------------------------------------------------------------------------------------------------------------------------+ | | ``--no-cache-sync`` | experimental: do not synchronize the cache. Implies not using the files cache. 
| +-------------------------------------------------------+---------------------------------------------------+------------------------------------------------------------------------------------------------------------------------------------------------------+ | | ``--no-files-cache`` | do not load/update the file metadata cache used to detect unchanged files | +-------------------------------------------------------+---------------------------------------------------+------------------------------------------------------------------------------------------------------------------------------------------------------+ | | ``--stdin-name NAME`` | use NAME in archive for stdin data (default: "stdin") | +-------------------------------------------------------+---------------------------------------------------+------------------------------------------------------------------------------------------------------------------------------------------------------+ | | ``--stdin-user USER`` | set user USER in archive for stdin data (default: 'root') | +-------------------------------------------------------+---------------------------------------------------+------------------------------------------------------------------------------------------------------------------------------------------------------+ | | ``--stdin-group GROUP`` | set group GROUP in archive for stdin data (default: 'root') | +-------------------------------------------------------+---------------------------------------------------+------------------------------------------------------------------------------------------------------------------------------------------------------+ | | ``--stdin-mode M`` | set mode to M in archive for stdin data (default: 0660) | +-------------------------------------------------------+---------------------------------------------------+------------------------------------------------------------------------------------------------------------------------------------------------------+ | .. 
class:: borg-common-opt-ref | | | | :ref:`common_options` | +-------------------------------------------------------+---------------------------------------------------+------------------------------------------------------------------------------------------------------------------------------------------------------+ | **Exclusion options** | +-------------------------------------------------------+---------------------------------------------------+------------------------------------------------------------------------------------------------------------------------------------------------------+ | | ``-e PATTERN``, ``--exclude PATTERN`` | exclude paths matching PATTERN | +-------------------------------------------------------+---------------------------------------------------+------------------------------------------------------------------------------------------------------------------------------------------------------+ | | ``--exclude-from EXCLUDEFILE`` | read exclude patterns from EXCLUDEFILE, one per line | +-------------------------------------------------------+---------------------------------------------------+------------------------------------------------------------------------------------------------------------------------------------------------------+ | | ``--pattern PATTERN`` | experimental: include/exclude paths matching PATTERN | +-------------------------------------------------------+---------------------------------------------------+------------------------------------------------------------------------------------------------------------------------------------------------------+ | | ``--patterns-from PATTERNFILE`` | experimental: read include/exclude patterns from PATTERNFILE, one per line | +-------------------------------------------------------+---------------------------------------------------+------------------------------------------------------------------------------------------------------------------------------------------------------+ | | ``--exclude-caches`` | exclude directories that contain a CACHEDIR.TAG file (http://www.bford.info/cachedir/spec.html) | +-------------------------------------------------------+---------------------------------------------------+------------------------------------------------------------------------------------------------------------------------------------------------------+ | | ``--exclude-if-present NAME`` | exclude directories that are tagged by containing a filesystem object with the given NAME | +-------------------------------------------------------+---------------------------------------------------+------------------------------------------------------------------------------------------------------------------------------------------------------+ | | ``--keep-exclude-tags``, ``--keep-tag-files`` | if tag objects are specified with ``--exclude-if-present``, don't omit the tag objects themselves from the backup archive | +-------------------------------------------------------+---------------------------------------------------+------------------------------------------------------------------------------------------------------------------------------------------------------+ | | ``--exclude-nodump`` | exclude files flagged NODUMP | +-------------------------------------------------------+---------------------------------------------------+------------------------------------------------------------------------------------------------------------------------------------------------------+ | 
**Filesystem options** | +-------------------------------------------------------+---------------------------------------------------+------------------------------------------------------------------------------------------------------------------------------------------------------+ | | ``-x``, ``--one-file-system`` | stay in the same file system and do not store mount points of other file systems. This might behave different from your expectations, see the docs. | +-------------------------------------------------------+---------------------------------------------------+------------------------------------------------------------------------------------------------------------------------------------------------------+ | | ``--numeric-owner`` | only store numeric user and group identifiers | +-------------------------------------------------------+---------------------------------------------------+------------------------------------------------------------------------------------------------------------------------------------------------------+ | | ``--noatime`` | do not store atime into archive | +-------------------------------------------------------+---------------------------------------------------+------------------------------------------------------------------------------------------------------------------------------------------------------+ | | ``--noctime`` | do not store ctime into archive | +-------------------------------------------------------+---------------------------------------------------+------------------------------------------------------------------------------------------------------------------------------------------------------+ | | ``--nobirthtime`` | do not store birthtime (creation date) into archive | +-------------------------------------------------------+---------------------------------------------------+------------------------------------------------------------------------------------------------------------------------------------------------------+ | | ``--nobsdflags`` | do not read and store bsdflags (e.g. NODUMP, IMMUTABLE) into archive | +-------------------------------------------------------+---------------------------------------------------+------------------------------------------------------------------------------------------------------------------------------------------------------+ | | ``--ignore-inode`` | ignore inode data in the file metadata cache used to detect unchanged files. | +-------------------------------------------------------+---------------------------------------------------+------------------------------------------------------------------------------------------------------------------------------------------------------+ | | ``--files-cache MODE`` | operate files cache in MODE. default: ctime,size,inode | +-------------------------------------------------------+---------------------------------------------------+------------------------------------------------------------------------------------------------------------------------------------------------------+ | | ``--read-special`` | open and read block and char device files as well as FIFOs as if they were regular files. Also follows symlinks pointing to these kinds of files. 
| +-------------------------------------------------------+---------------------------------------------------+------------------------------------------------------------------------------------------------------------------------------------------------------+ | **Archive options** | +-------------------------------------------------------+---------------------------------------------------+------------------------------------------------------------------------------------------------------------------------------------------------------+ | | ``--comment COMMENT`` | add a comment text to the archive | +-------------------------------------------------------+---------------------------------------------------+------------------------------------------------------------------------------------------------------------------------------------------------------+ | | ``--timestamp TIMESTAMP`` | manually specify the archive creation date/time (UTC, yyyy-mm-ddThh:mm:ss format). Alternatively, give a reference file/directory. | +-------------------------------------------------------+---------------------------------------------------+------------------------------------------------------------------------------------------------------------------------------------------------------+ | | ``-c SECONDS``, ``--checkpoint-interval SECONDS`` | write checkpoint every SECONDS seconds (Default: 1800) | +-------------------------------------------------------+---------------------------------------------------+------------------------------------------------------------------------------------------------------------------------------------------------------+ | | ``--chunker-params PARAMS`` | specify the chunker parameters (CHUNK_MIN_EXP, CHUNK_MAX_EXP, HASH_MASK_BITS, HASH_WINDOW_SIZE). default: 19,23,21,4095 | +-------------------------------------------------------+---------------------------------------------------+------------------------------------------------------------------------------------------------------------------------------------------------------+ | | ``-C COMPRESSION``, ``--compression COMPRESSION`` | select compression algorithm, see the output of the "borg help compression" command for details. | +-------------------------------------------------------+---------------------------------------------------+------------------------------------------------------------------------------------------------------------------------------------------------------+ .. raw:: html .. only:: latex ARCHIVE name of archive to create (must be also a valid directory name) PATH paths to archive optional arguments -n, --dry-run do not create a backup archive -s, --stats print statistics for the created archive --list output verbose list of items (files, dirs, ...) --filter STATUSCHARS only display items with the given status characters (see description) --json output stats as JSON. Implies ``--stats``. --no-cache-sync experimental: do not synchronize the cache. Implies not using the files cache. 
--no-files-cache do not load/update the file metadata cache used to detect unchanged files --stdin-name NAME use NAME in archive for stdin data (default: "stdin") --stdin-user USER set user USER in archive for stdin data (default: 'root') --stdin-group GROUP set group GROUP in archive for stdin data (default: 'root') --stdin-mode M set mode to M in archive for stdin data (default: 0660) :ref:`common_options` | Exclusion options -e PATTERN, --exclude PATTERN exclude paths matching PATTERN --exclude-from EXCLUDEFILE read exclude patterns from EXCLUDEFILE, one per line --pattern PATTERN experimental: include/exclude paths matching PATTERN --patterns-from PATTERNFILE experimental: read include/exclude patterns from PATTERNFILE, one per line --exclude-caches exclude directories that contain a CACHEDIR.TAG file (http://www.bford.info/cachedir/spec.html) --exclude-if-present NAME exclude directories that are tagged by containing a filesystem object with the given NAME --keep-exclude-tags, --keep-tag-files if tag objects are specified with ``--exclude-if-present``, don't omit the tag objects themselves from the backup archive --exclude-nodump exclude files flagged NODUMP Filesystem options -x, --one-file-system stay in the same file system and do not store mount points of other file systems. This might behave different from your expectations, see the docs. --numeric-owner only store numeric user and group identifiers --noatime do not store atime into archive --noctime do not store ctime into archive --nobirthtime do not store birthtime (creation date) into archive --nobsdflags do not read and store bsdflags (e.g. NODUMP, IMMUTABLE) into archive --ignore-inode ignore inode data in the file metadata cache used to detect unchanged files. --files-cache MODE operate files cache in MODE. default: ctime,size,inode --read-special open and read block and char device files as well as FIFOs as if they were regular files. Also follows symlinks pointing to these kinds of files. Archive options --comment COMMENT add a comment text to the archive --timestamp TIMESTAMP manually specify the archive creation date/time (UTC, yyyy-mm-ddThh:mm:ss format). Alternatively, give a reference file/directory. -c SECONDS, --checkpoint-interval SECONDS write checkpoint every SECONDS seconds (Default: 1800) --chunker-params PARAMS specify the chunker parameters (CHUNK_MIN_EXP, CHUNK_MAX_EXP, HASH_MASK_BITS, HASH_WINDOW_SIZE). default: 19,23,21,4095 -C COMPRESSION, --compression COMPRESSION select compression algorithm, see the output of the "borg help compression" command for details. Description ~~~~~~~~~~~ This command creates a backup archive containing all files found while recursively traversing all paths specified. Paths are added to the archive as they are given, that means if relative paths are desired, the command has to be run from the correct directory. When giving '-' as path, borg will read data from standard input and create a file 'stdin' in the created archive from that data. See section *Reading from stdin* below for details. The archive will consume almost no disk space for files or parts of files that have already been stored in other archives. The archive name needs to be unique. It must not end in '.checkpoint' or '.checkpoint.N' (with N being a number), because these names are used for checkpoints and treated in special ways. In the archive name, you may use the following placeholders: {now}, {utcnow}, {fqdn}, {hostname}, {user} and some others. 
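For instance, a minimal sketch of combining such placeholders to get per-host, date-stamped
archive names (the repository path is just an illustration)::

    $ borg create /path/to/repo::{hostname}-{now:%Y-%m-%d} ~/Documents
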
Backup speed is increased by not reprocessing files that are already part of existing archives and weren't modified. The detection of unmodified files is done by comparing multiple file metadata values with previous values kept in the files cache. This comparison can operate in different modes as given by ``--files-cache``: - ctime,size,inode (default) - mtime,size,inode (default behaviour of borg versions older than 1.1.0rc4) - ctime,size (ignore the inode number) - mtime,size (ignore the inode number) - rechunk,ctime (all files are considered modified - rechunk, cache ctime) - rechunk,mtime (all files are considered modified - rechunk, cache mtime) - disabled (disable the files cache, all files considered modified - rechunk) inode number: better safety, but often unstable on network filesystems Normally, detecting file modifications will take inode information into consideration to improve the reliability of file change detection. This is problematic for files located on sshfs and similar network file systems which do not provide stable inode numbers, such files will always be considered modified. You can use modes without `inode` in this case to improve performance, but reliability of change detection might be reduced. ctime vs. mtime: safety vs. speed - ctime is a rather safe way to detect changes to a file (metadata and contents) as it can not be set from userspace. But, a metadata-only change will already update the ctime, so there might be some unnecessary chunking/hashing even without content changes. Some filesystems do not support ctime (change time). E.g. doing a chown or chmod to a file will change its ctime. - mtime usually works and only updates if file contents were changed. But mtime can be arbitrarily set from userspace, e.g. to set mtime back to the same value it had before a content change happened. This can be used maliciously as well as well-meant, but in both cases mtime based cache modes can be problematic. The mount points of filesystems or filesystem snapshots should be the same for every creation of a new archive to ensure fast operation. This is because the file cache that is used to determine changed files quickly uses absolute filenames. If this is not possible, consider creating a bind mount to a stable location. The ``--progress`` option shows (from left to right) Original, Compressed and Deduplicated (O, C and D, respectively), then the Number of files (N) processed so far, followed by the currently processed path. When using ``--stats``, you will get some statistics about how much data was added - the "This Archive" deduplicated size there is most interesting as that is how much your repository will grow. Please note that the "All archives" stats refer to the state after creation. Also, the ``--stats`` and ``--dry-run`` options are mutually exclusive because the data is not actually compressed and deduplicated during a dry run. See the output of the "borg help patterns" command for more help on exclude patterns. See the output of the "borg help placeholders" command for more help on placeholders. .. man NOTES The ``--exclude`` patterns are not like tar. In tar ``--exclude`` .bundler/gems will exclude foo/.bundler/gems. In borg it will not, you need to use ``--exclude`` '\*/.bundler/gems' to get the same effect. See ``borg help patterns`` for more information. In addition to using ``--exclude`` patterns, it is possible to use ``--exclude-if-present`` to specify the name of a filesystem object (e.g. 
a file or folder name) which, when contained within another folder, will
prevent the containing folder from being backed up. By default, the containing
folder and all of its contents will be omitted from the backup. If, however,
you wish to only include the objects specified by ``--exclude-if-present`` in
your backup, and not include any other contents of the containing folder, this
can be enabled by using the ``--keep-exclude-tags`` option.

The ``-x`` or ``--one-file-system`` option excludes directories that are
mountpoints (and everything in them). It detects mountpoints by comparing the
device number from the output of ``stat()`` of the directory and its parent
directory. Specifically, it excludes directories for which ``stat()`` reports a
device number different from the device number of their parent. Be aware that
in Linux (and possibly elsewhere) there are directories with a device number
different from their parent's which the kernel nevertheless does not consider a
mountpoint, and also the other way around. Examples are bind mounts (possibly
same device number, but always a mountpoint) and ALL subvolumes of a btrfs
(different device number from parent but not necessarily a mountpoint).
Therefore when using ``--one-file-system``, one should make doubly sure that
the backup works as intended especially when using btrfs. This is even more
important if the btrfs layout was created by someone else, e.g. a distribution
installer.

.. _list_item_flags:

Item flags
++++++++++

``--list`` outputs a list of all files, directories and other file system items
it considered (no matter whether they had content changes or not). For each
item, it prefixes a single-letter flag that indicates type and/or status of the
item.

If you are interested only in a subset of that output, you can give e.g.
``--filter=AME`` and it will only show regular files with A, M or E status (see
below).

An uppercase character represents the status of a regular file relative to the
"files" cache (not relative to the repo -- this is an issue if the files cache
is not used). Metadata is stored in any case and for 'A' and 'M' also new data
chunks are stored. For 'U' all data chunks refer to already existing chunks.

- 'A' = regular file, added (see also :ref:`a_status_oddity` in the FAQ)
- 'M' = regular file, modified
- 'U' = regular file, unchanged
- 'E' = regular file, an error happened while accessing/reading *this* file

A lowercase character means a file type other than a regular file;
borg usually just stores their metadata:

- 'd' = directory
- 'b' = block device
- 'c' = char device
- 'h' = regular file, hardlink (to already seen inodes)
- 's' = symlink
- 'f' = fifo

Other flags used include:

- 'i' = backup data was read from standard input (stdin)
- '-' = dry run, item was *not* backed up
- 'x' = excluded, item was *not* backed up
- '?' = missing status code (if you see this, please file a bug report!)

Reading from stdin
++++++++++++++++++

To read from stdin, specify ``-`` as path and pipe directly to borg::

    backup-vm --id myvm --stdout | borg create REPO::ARCHIVE -

Note that piping to borg creates an archive even if the command piping
to borg exits with a failure. In this case, **one can end up with
truncated output being backed up**.

Reading from stdin yields just a stream of data without file metadata
associated with it, and the files cache is not needed at all. So it is safe
to disable it via ``--no-files-cache`` and speed up backup creation a bit.

By default, the content read from stdin is stored in a file called 'stdin'.
Use ``--stdin-name`` to change the name.
borgbackup-1.1.15/docs/usage/notes.rst0000644000175000017500000002703613771325506017633 0ustar useruser00000000000000
Additional Notes
----------------

Here are misc. notes about topics that are maybe not covered in enough detail
in the usage section.

.. _chunker-params:

``--chunker-params``
~~~~~~~~~~~~~~~~~~~~

The chunker params influence how input files are cut into pieces (chunks)
which are then considered for deduplication. They also have a big impact on
resource usage (RAM and disk space) as the amount of resources needed is
(also) determined by the total amount of chunks in the repository (see
:ref:`cache-memory-usage` for details).

``--chunker-params=10,23,16,4095`` results in a fine-grained deduplication
and creates a big amount of chunks and thus uses a lot of resources to manage
them. This is good for relatively small data volumes and if the machine has a
good amount of free RAM and disk space.

``--chunker-params=19,23,21,4095`` (default) results in a coarse-grained
deduplication and creates a much smaller amount of chunks and thus uses less
resources. This is good for relatively big data volumes and if the machine has
a relatively low amount of free RAM and disk space.

If you already have made some archives in a repository and you then change
chunker params, this of course impacts deduplication as the chunks will be
cut differently.

In the worst case (all files are big and were touched in between backups), this
will store all content into the repository again.

Usually, it is not that bad though:

- usually most files are not touched, so it will just re-use the old chunks
  it already has in the repo
- files smaller than the (both old and new) minimum chunksize result in only
  one chunk anyway, so the resulting chunks are the same and deduplication
  will apply

If you switch chunker params to save resources for an existing repo that
already has some backup archives, you will see an increasing effect over time,
when more and more files have been touched and stored again using the bigger
chunksize **and** all references to the smaller older chunks have been removed
(by deleting / pruning archives).

If you want to see an immediate big effect on resource usage, it is better to
start a new repository when changing chunker params.

For more details, see :ref:`chunker_details`.

``--noatime / --noctime``
~~~~~~~~~~~~~~~~~~~~~~~~~

You can use these ``borg create`` options to not store the respective timestamp
into the archive, in case you do not really need it.

Besides saving a little space for the not archived timestamp, it might also
affect metadata stream deduplication: if only this timestamp changes between
backups and is stored into the metadata stream, the metadata stream chunks
won't deduplicate just because of that.

``--nobsdflags``
~~~~~~~~~~~~~~~~

You can use this to not query and store (or not extract and set) bsdflags -
in case you don't need them or if they are broken somehow for your fs.

On Linux, dealing with the bsdflags needs some additional syscalls. Especially
when dealing with lots of small files, this causes a noticeable overhead, so
you can use this option also for speeding up operations.

``--umask``
~~~~~~~~~~~

If you use ``--umask``, make sure that all repository-modifying borg commands
(create, delete, prune) that access the repository in question use the same
``--umask`` value.

If multiple machines access the same repository, this should hold true for all
of them.
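A minimal sketch, assuming a hypothetical repository path and a stricter-than-default
umask value::

    # use the same umask for every command that modifies the repository
    $ borg create --umask 0027 /path/to/repo::{hostname}-{now} ~
    $ borg prune  --umask 0027 --keep-daily=7 /path/to/repo
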
``--read-special``
~~~~~~~~~~~~~~~~~~

The ``--read-special`` option is special - you do not want to use it for normal
full-filesystem backups, but rather after carefully picking some targets for it.

The option ``--read-special`` triggers special treatment for block and char
device files as well as FIFOs. Instead of storing them as such a device (or
FIFO), they will get opened, their content will be read and in the backup
archive they will show up like a regular file.

Symlinks will also get special treatment if (and only if) they point to such
a special file: instead of storing them as a symlink, the target special file
will get processed as described above.

One intended use case of this is backing up the contents of one or multiple
block devices, like e.g. LVM snapshots or inactive LVs or disk partitions.

You need to be careful about what you include when using ``--read-special``,
e.g. if you include ``/dev/zero``, your backup will never terminate.

Restoring such files' content is currently only supported one at a time via
the ``--stdout`` option (and you have to redirect stdout to wherever it shall
go, maybe directly into an existing device file of your choice or indirectly
via ``dd``).

To some extent, mounting a backup archive with the backups of special files
via ``borg mount`` and then loop-mounting the image files from inside the
mount point will work. If you plan to access a lot of data in there, it likely
will scale and perform better if you do not work via the FUSE mount.

Example
+++++++

Imagine you have made some snapshots of logical volumes (LVs) you want to back up.

.. note::

    For some scenarios, this is a good method to get "crash-like" consistency
    (I call it crash-like because it is the same as you would get if you just
    hit the reset button or your machine would abruptly and completely crash).
    This is better than no consistency at all and a good method for some use
    cases, but likely not good enough if you have databases running.

Then you create a backup archive of all these snapshots. The backup process will
see a "frozen" state of the logical volumes, while the processes working in the
original volumes continue changing the data stored there.

You also add the output of ``lvdisplay`` to your backup, so you can see the LV
sizes in case you ever need to recreate and restore them.

After the backup has completed, you remove the snapshots again. ::

    $ # create snapshots here
    $ lvdisplay > lvdisplay.txt
    $ borg create --read-special /path/to/repo::arch lvdisplay.txt /dev/vg0/*-snapshot
    $ # remove snapshots here

Now, let's see how to restore some LVs from such a backup. ::

    $ borg extract /path/to/repo::arch lvdisplay.txt
    $ # create empty LVs with correct sizes here (look into lvdisplay.txt).
    $ # we assume that you created an empty root and home LV and overwrite it now:
    $ borg extract --stdout /path/to/repo::arch dev/vg0/root-snapshot > /dev/vg0/root
    $ borg extract --stdout /path/to/repo::arch dev/vg0/home-snapshot > /dev/vg0/home

.. _append_only_mode:

Append-only mode
~~~~~~~~~~~~~~~~

A repository can be made "append-only", which means that Borg will never
overwrite or delete committed data (append-only refers to the segment files,
but borg will also refuse to delete the repository completely). This is useful
for scenarios where a backup client machine backs up remotely to a backup
server using ``borg serve``, since a hacked client machine cannot delete
backups on the server permanently.
To activate append-only mode, set ``append_only`` to 1 in the repository config: ::

    borg config /path/to/repo append_only 1

Note that you can go back-and-forth between normal and append-only operation
with ``borg config``; it's not a "one-way trip."

In append-only mode Borg will create a transaction log in the ``transactions``
file, where each line is a transaction and a UTC timestamp.

In addition, ``borg serve`` can act as if a repository is in append-only mode
with its option ``--append-only``. This can be very useful for fine-tuning
access control in ``.ssh/authorized_keys``: ::

    command="borg serve --append-only ..." ssh-rsa <key used for append-only access>
    command="borg serve ..." ssh-rsa <key used for full access>

Running ``borg init`` via a ``borg serve --append-only`` server will *not*
create an append-only repository. Running ``borg init --append-only`` creates
an append-only repository regardless of server settings.

Example
+++++++

Suppose an attacker remotely deleted all backups, but your repository was in
append-only mode. A transaction log in this situation might look like this: ::

    transaction 1, UTC time 2016-03-31T15:53:27.383532
    transaction 5, UTC time 2016-03-31T15:53:52.588922
    transaction 11, UTC time 2016-03-31T15:54:23.887256
    transaction 12, UTC time 2016-03-31T15:55:54.022540
    transaction 13, UTC time 2016-03-31T15:55:55.472564

From your security logs you conclude the attacker gained access at 15:54:00 and
all the backups were deleted or replaced by compromised backups. From the log
you know that transactions 11 and later are compromised. Note that the
transaction ID is the name of the *last* file in the transaction. For example,
transaction 11 spans files 6 to 11.

In a real attack you'll likely want to keep the compromised repository intact
to analyze what the attacker tried to achieve. It's also a good idea to make a
copy just in case something goes wrong during the recovery. Since recovery is
done by deleting some files, a hard link copy (``cp -al``) is sufficient.

The first step to reset the repository to transaction 5, the last uncompromised
transaction, is to remove the ``hints.N``, ``index.N`` and ``integrity.N``
files in the repository (these files are always expendable). In this example N
is 13.

Then remove or move all segment files from the segment directories in ``data/``
starting with file 6::

    rm data/**/{6..13}

That's all that needs to be done in the repository.

If you want to access this rolled-back repository from a client that already
has a cache for this repository, the cache will reflect a newer repository
state than what you actually have in the repository now, after the rollback.
Thus, you need to clear the cache::

    borg delete --cache-only repo

The cache will get rebuilt automatically. Depending on repo size and archive
count, it may take a while.

You also will need to remove ~/.config/borg/security/REPOID/manifest-timestamp.

Drawbacks
+++++++++

As data is only appended, and nothing removed, commands like ``prune`` or
``delete`` won't free disk space; they merely tag data as deleted in a new
transaction.

Be aware that as soon as you write to the repo in non-append-only mode (e.g.
prune, delete or create archives from an admin machine), it will remove the
deleted objects permanently (including the ones that were already marked as
deleted, but not removed, in append-only mode). Automated edits to the
repository (such as a cron job running ``borg prune``) will render append-only
mode moot if data is deleted.
Even if an archive appears to be available, it is possible an attacker could delete just a few chunks from an archive and silently corrupt its data. While in append-only mode, this is reversible, but ``borg check`` should be run before a writing/pruning operation on an append-only repository to catch accidental or malicious corruption:: # run without append-only mode borg check --verify-data repo Aside from checking repository & archive integrity you may want to also manually check backups to ensure their content seems correct. Further considerations ++++++++++++++++++++++ Append-only mode is not respected by tools other than Borg. ``rm`` still works on the repository. Make sure that backup client machines only get to access the repository via ``borg serve``. Ensure that no remote access is possible if the repository is temporarily set to normal mode for e.g. regular pruning. Further protections can be implemented, but are outside of Borg's scope. For example, file system snapshots or wrapping ``borg serve`` to set special permissions or ACLs on new data files. SSH batch mode ~~~~~~~~~~~~~~ When running Borg using an automated script, ``ssh`` might still ask for a password, even if there is an SSH key for the target server. Use this to make scripts more robust:: export BORG_RSH='ssh -oBatchMode=yes' borgbackup-1.1.15/docs/usage/recreate.rst0000644000175000017500000000247013771325506020270 0ustar useruser00000000000000.. include:: recreate.rst.inc Examples ~~~~~~~~ :: # Make old (Attic / Borg 0.xx) archives deduplicate with Borg 1.x archives. # Archives created with Borg 1.1+ and the default chunker params are skipped # (archive ID stays the same). $ borg recreate /mnt/backup --chunker-params default --progress # Create a backup with little but fast compression $ borg create /mnt/backup::archive /some/files --compression lz4 # Then compress it - this might take longer, but the backup has already completed, # so no inconsistencies from a long-running backup job. $ borg recreate /mnt/backup::archive --recompress --compression zlib,9 # Remove unwanted files from all archives in a repository. # Note the relative path for the --exclude option - archives only contain relative paths. $ borg recreate /mnt/backup --exclude home/icke/Pictures/drunk_photos # Change archive comment $ borg create --comment "This is a comment" /mnt/backup::archivename ~ $ borg info /mnt/backup::archivename Name: archivename Fingerprint: ... Comment: This is a comment ... $ borg recreate --comment "This is a better comment" /mnt/backup::archivename $ borg info /mnt/backup::archivename Name: archivename Fingerprint: ... Comment: This is a better comment ... borgbackup-1.1.15/docs/usage/init.rst.inc0000644000175000017500000002334313771325506020213 0ustar useruser00000000000000.. IMPORTANT: this file is auto-generated from borg's built-in help, do not edit! .. _borg_init: borg init --------- .. code-block:: none borg [common options] init [options] [REPOSITORY] .. only:: html .. 
class:: borg-options-table +-------------------------------------------------------+------------------------------------+---------------------------------------------------------------------------------+ | **positional arguments** | +-------------------------------------------------------+------------------------------------+---------------------------------------------------------------------------------+ | | ``REPOSITORY`` | repository to create | +-------------------------------------------------------+------------------------------------+---------------------------------------------------------------------------------+ | **optional arguments** | +-------------------------------------------------------+------------------------------------+---------------------------------------------------------------------------------+ | | ``-e MODE``, ``--encryption MODE`` | select encryption key mode **(required)** | +-------------------------------------------------------+------------------------------------+---------------------------------------------------------------------------------+ | | ``--append-only`` | create an append-only mode repository | +-------------------------------------------------------+------------------------------------+---------------------------------------------------------------------------------+ | | ``--storage-quota QUOTA`` | Set storage quota of the new repository (e.g. 5G, 1.5T). Default: no quota. | +-------------------------------------------------------+------------------------------------+---------------------------------------------------------------------------------+ | | ``--make-parent-dirs`` | create the parent directories of the repository directory, if they are missing. | +-------------------------------------------------------+------------------------------------+---------------------------------------------------------------------------------+ | .. class:: borg-common-opt-ref | | | | :ref:`common_options` | +-------------------------------------------------------+------------------------------------+---------------------------------------------------------------------------------+ .. raw:: html .. only:: latex REPOSITORY repository to create optional arguments -e MODE, --encryption MODE select encryption key mode **(required)** --append-only create an append-only mode repository --storage-quota QUOTA Set storage quota of the new repository (e.g. 5G, 1.5T). Default: no quota. --make-parent-dirs create the parent directories of the repository directory, if they are missing. :ref:`common_options` | Description ~~~~~~~~~~~ This command initializes an empty repository. A repository is a filesystem directory containing the deduplicated data from zero or more archives. Encryption can be enabled at repository init time. It cannot be changed later. It is not recommended to work without encryption. Repository encryption protects you e.g. against the case that an attacker has access to your backup repository. Borg relies on randomly generated key material and uses that for chunking, id generation, encryption and authentication. The key material is encrypted using the passphrase you give before it is stored on-disk. You need to be careful with the key / the passphrase: If you want "passphrase-only" security, use one of the repokey modes. The key will be stored inside the repository (in its "config" file). In above mentioned attack scenario, the attacker will have the key (but not the passphrase). 
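For instance, a minimal sketch of creating a repokey-mode repository (the repository
path is only an illustration)::

    $ borg init --encryption=repokey /path/to/repo
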
If you want "passphrase and having-the-key" security, use one of the keyfile modes. The key will be stored in your home directory (in .config/borg/keys). In the attack scenario, the attacker who has just access to your repo won't have the key (and also not the passphrase). Make a backup copy of the key file (keyfile mode) or repo config file (repokey mode) and keep it at a safe place, so you still have the key in case it gets corrupted or lost. Also keep the passphrase at a safe place. The backup that is encrypted with that key won't help you with that, of course. Make sure you use a good passphrase. Not too short, not too simple. The real encryption / decryption key is encrypted with / locked by your passphrase. If an attacker gets your key, he can't unlock and use it without knowing the passphrase. Be careful with special or non-ascii characters in your passphrase: - Borg processes the passphrase as unicode (and encodes it as utf-8), so it does not have problems dealing with even the strangest characters. - BUT: that does not necessarily apply to your OS / VM / keyboard configuration. So better use a long passphrase made from simple ascii chars than one that includes non-ascii stuff or characters that are hard/impossible to enter on a different keyboard layout. You can change your passphrase for existing repos at any time, it won't affect the encryption/decryption key or other secrets. Encryption modes ++++++++++++++++ You can choose from the encryption modes seen in the table below on a per-repo basis. The mode determines encryption algorithm, hash/MAC algorithm and also the key storage location. Example: `borg init --encryption repokey ...` .. nanorst: inline-fill +----------+---------------+------------------------+--------------------------+ | Hash/MAC | Not encrypted | Not encrypted, | Encrypted (AEAD w/ AES) | | | no auth | but authenticated | and authenticated | +----------+---------------+------------------------+--------------------------+ | SHA-256 | none | `authenticated` | repokey | | | | | keyfile | +----------+---------------+------------------------+--------------------------+ | BLAKE2b | n/a | `authenticated-blake2` | `repokey-blake2` | | | | | `keyfile-blake2` | +----------+---------------+------------------------+--------------------------+ .. nanorst: inline-replace Modes `marked like this` in the above table are new in Borg 1.1 and are not backwards-compatible with Borg 1.0.x. On modern Intel/AMD CPUs (except very cheap ones), AES is usually hardware-accelerated. BLAKE2b is faster than SHA256 on Intel/AMD 64-bit CPUs (except AMD Ryzen and future CPUs with SHA extensions), which makes `authenticated-blake2` faster than `none` and `authenticated`. On modern ARM CPUs, NEON provides hardware acceleration for SHA256 making it faster than BLAKE2b-256 there. NEON accelerates AES as well. Hardware acceleration is always used automatically when available. `repokey` and `keyfile` use AES-CTR-256 for encryption and HMAC-SHA256 for authentication in an encrypt-then-MAC (EtM) construction. The chunk ID hash is HMAC-SHA256 as well (with a separate key). These modes are compatible with Borg 1.0.x. `repokey-blake2` and `keyfile-blake2` are also authenticated encryption modes, but use BLAKE2b-256 instead of HMAC-SHA256 for authentication. The chunk ID hash is a keyed BLAKE2b-256 hash. These modes are new and *not* compatible with Borg 1.0.x. 
`authenticated` mode uses no encryption, but authenticates repository contents through the same HMAC-SHA256 hash as the `repokey` and `keyfile` modes (it uses it as the chunk ID hash). The key is stored like `repokey`. This mode is new and *not* compatible with Borg 1.0.x. `authenticated-blake2` is like `authenticated`, but uses the keyed BLAKE2b-256 hash from the other blake2 modes. This mode is new and *not* compatible with Borg 1.0.x. `none` mode uses no encryption and no authentication. It uses SHA256 as chunk ID hash. This mode is not recommended, you should rather consider using an authenticated or authenticated/encrypted mode. This mode has possible denial-of-service issues when running ``borg create`` on contents controlled by an attacker. Use it only for new repositories where no encryption is wanted **and** when compatibility with 1.0.x is important. If compatibility with 1.0.x is not important, use `authenticated-blake2` or `authenticated` instead. This mode is compatible with Borg 1.0.x.borgbackup-1.1.15/docs/usage/extract.rst.inc0000644000175000017500000002400613771325506020717 0ustar useruser00000000000000.. IMPORTANT: this file is auto-generated from borg's built-in help, do not edit! .. _borg_extract: borg extract ------------ .. code-block:: none borg [common options] extract [options] ARCHIVE [PATH...] .. only:: html .. class:: borg-options-table +-------------------------------------------------------+---------------------------------------+-----------------------------------------------------------------------------------------------------------+ | **positional arguments** | +-------------------------------------------------------+---------------------------------------+-----------------------------------------------------------------------------------------------------------+ | | ``ARCHIVE`` | archive to extract | +-------------------------------------------------------+---------------------------------------+-----------------------------------------------------------------------------------------------------------+ | | ``PATH`` | paths to extract; patterns are supported | +-------------------------------------------------------+---------------------------------------+-----------------------------------------------------------------------------------------------------------+ | **optional arguments** | +-------------------------------------------------------+---------------------------------------+-----------------------------------------------------------------------------------------------------------+ | | ``--list`` | output verbose list of items (files, dirs, ...) | +-------------------------------------------------------+---------------------------------------+-----------------------------------------------------------------------------------------------------------+ | | ``-n``, ``--dry-run`` | do not actually change any files | +-------------------------------------------------------+---------------------------------------+-----------------------------------------------------------------------------------------------------------+ | | ``--numeric-owner`` | only obey numeric user and group identifiers | +-------------------------------------------------------+---------------------------------------+-----------------------------------------------------------------------------------------------------------+ | | ``--nobsdflags`` | do not extract/set bsdflags (e.g. 
NODUMP, IMMUTABLE) | +-------------------------------------------------------+---------------------------------------+-----------------------------------------------------------------------------------------------------------+ | | ``--stdout`` | write all extracted data to stdout | +-------------------------------------------------------+---------------------------------------+-----------------------------------------------------------------------------------------------------------+ | | ``--sparse`` | create holes in output sparse file from all-zero chunks | +-------------------------------------------------------+---------------------------------------+-----------------------------------------------------------------------------------------------------------+ | .. class:: borg-common-opt-ref | | | | :ref:`common_options` | +-------------------------------------------------------+---------------------------------------+-----------------------------------------------------------------------------------------------------------+ | **Exclusion options** | +-------------------------------------------------------+---------------------------------------+-----------------------------------------------------------------------------------------------------------+ | | ``-e PATTERN``, ``--exclude PATTERN`` | exclude paths matching PATTERN | +-------------------------------------------------------+---------------------------------------+-----------------------------------------------------------------------------------------------------------+ | | ``--exclude-from EXCLUDEFILE`` | read exclude patterns from EXCLUDEFILE, one per line | +-------------------------------------------------------+---------------------------------------+-----------------------------------------------------------------------------------------------------------+ | | ``--pattern PATTERN`` | experimental: include/exclude paths matching PATTERN | +-------------------------------------------------------+---------------------------------------+-----------------------------------------------------------------------------------------------------------+ | | ``--patterns-from PATTERNFILE`` | experimental: read include/exclude patterns from PATTERNFILE, one per line | +-------------------------------------------------------+---------------------------------------+-----------------------------------------------------------------------------------------------------------+ | | ``--strip-components NUMBER`` | Remove the specified number of leading path elements. Paths with fewer elements will be silently skipped. | +-------------------------------------------------------+---------------------------------------+-----------------------------------------------------------------------------------------------------------+ .. raw:: html .. only:: latex ARCHIVE archive to extract PATH paths to extract; patterns are supported optional arguments --list output verbose list of items (files, dirs, ...) -n, --dry-run do not actually change any files --numeric-owner only obey numeric user and group identifiers --nobsdflags do not extract/set bsdflags (e.g. 
NODUMP, IMMUTABLE) --stdout write all extracted data to stdout --sparse create holes in output sparse file from all-zero chunks :ref:`common_options` | Exclusion options -e PATTERN, --exclude PATTERN exclude paths matching PATTERN --exclude-from EXCLUDEFILE read exclude patterns from EXCLUDEFILE, one per line --pattern PATTERN experimental: include/exclude paths matching PATTERN --patterns-from PATTERNFILE experimental: read include/exclude patterns from PATTERNFILE, one per line --strip-components NUMBER Remove the specified number of leading path elements. Paths with fewer elements will be silently skipped. Description ~~~~~~~~~~~ This command extracts the contents of an archive. By default the entire archive is extracted but a subset of files and directories can be selected by passing a list of ``PATHs`` as arguments. The file selection can further be restricted by using the ``--exclude`` option. See the output of the "borg help patterns" command for more help on exclude patterns. By using ``--dry-run``, you can do all extraction steps except actually writing the output data: reading metadata and data chunks from the repo, checking the hash/hmac, decrypting, decompressing. ``--progress`` can be slower than no progress display, since it makes one additional pass over the archive metadata. .. note:: Currently, extract always writes into the current working directory ("."), so make sure you ``cd`` to the right place before calling ``borg extract``.borgbackup-1.1.15/docs/usage/create.rst0000644000175000017500000000546613771325506017751 0ustar useruser00000000000000.. include:: create.rst.inc Examples ~~~~~~~~ :: # Backup ~/Documents into an archive named "my-documents" $ borg create /path/to/repo::my-documents ~/Documents # same, but list all files as we process them $ borg create --list /path/to/repo::my-documents ~/Documents # Backup ~/Documents and ~/src but exclude pyc files $ borg create /path/to/repo::my-files \ ~/Documents \ ~/src \ --exclude '*.pyc' # Backup home directories excluding image thumbnails (i.e. only # /home//.thumbnails is excluded, not /home/*/*/.thumbnails etc.) $ borg create /path/to/repo::my-files /home \ --exclude 'sh:/home/*/.thumbnails' # Backup the root filesystem into an archive named "root-YYYY-MM-DD" # use zlib compression (good, but slow) - default is lz4 (fast, low compression ratio) $ borg create -C zlib,6 --one-file-system /path/to/repo::root-{now:%Y-%m-%d} / # Backup a remote host locally ("pull" style) using sshfs $ mkdir sshfs-mount $ sshfs root@example.com:/ sshfs-mount $ cd sshfs-mount $ borg create /path/to/repo::example.com-root-{now:%Y-%m-%d} . $ cd .. 
$ fusermount -u sshfs-mount # Make a big effort in fine granular deduplication (big chunk management # overhead, needs a lot of RAM and disk space, see formula in internals # docs - same parameters as borg < 1.0 or attic): $ borg create --chunker-params 10,23,16,4095 /path/to/repo::small /smallstuff # Backup a raw device (must not be active/in use/mounted at that time) $ dd if=/dev/sdx bs=10M | borg create /path/to/repo::my-sdx - # No compression (none) $ borg create --compression none /path/to/repo::arch ~ # Super fast, low compression (lz4, default) $ borg create /path/to/repo::arch ~ # Less fast, higher compression (zlib, N = 0..9) $ borg create --compression zlib,N /path/to/repo::arch ~ # Even slower, even higher compression (lzma, N = 0..9) $ borg create --compression lzma,N /path/to/repo::arch ~ # Only compress compressible data with lzma,N (N = 0..9) $ borg create --compression auto,lzma,N /path/to/repo::arch ~ # Use short hostname, user name and current time in archive name $ borg create /path/to/repo::{hostname}-{user}-{now} ~ # Similar, use the same datetime format that is default as of borg 1.1 $ borg create /path/to/repo::{hostname}-{user}-{now:%Y-%m-%dT%H:%M:%S} ~ # As above, but add nanoseconds $ borg create /path/to/repo::{hostname}-{user}-{now:%Y-%m-%dT%H:%M:%S.%f} ~ # Backing up relative paths by moving into the correct directory first $ cd /home/user/Documents # The root directory of the archive will be "projectA" $ borg create /path/to/repo::daily-projectA-{now:%Y-%m-%d} projectA borgbackup-1.1.15/docs/usage/prune.rst0000644000175000017500000000305613771325506017630 0ustar useruser00000000000000.. include:: prune.rst.inc Examples ~~~~~~~~ Be careful, prune is a potentially dangerous command, it will remove backup archives. The default of prune is to apply to **all archives in the repository** unless you restrict its operation to a subset of the archives using ``--prefix``. When using ``--prefix``, be careful to choose a good prefix - e.g. do not use a prefix "foo" if you do not also want to match "foobar". It is strongly recommended to always run ``prune -v --list --dry-run ...`` first so you will see what it would do without it actually doing anything. :: # Keep 7 end of day and 4 additional end of week archives. # Do a dry-run without actually deleting anything. $ borg prune -v --list --dry-run --keep-daily=7 --keep-weekly=4 /path/to/repo # Same as above but only apply to archive names starting with the hostname # of the machine followed by a "-" character: $ borg prune -v --list --keep-daily=7 --keep-weekly=4 --prefix='{hostname}-' /path/to/repo # Keep 7 end of day, 4 additional end of week archives, # and an end of month archive for every month: $ borg prune -v --list --keep-daily=7 --keep-weekly=4 --keep-monthly=-1 /path/to/repo # Keep all backups in the last 10 days, 4 additional end of week archives, # and an end of month archive for every month: $ borg prune -v --list --keep-within=10d --keep-weekly=4 --keep-monthly=-1 /path/to/repo There is also a visualized prune example in ``docs/misc/prune-example.txt``: .. highlight:: none .. include:: ../misc/prune-example.txt :literal: borgbackup-1.1.15/docs/usage/export-tar.rst.inc0000644000175000017500000002205213771325506021351 0ustar useruser00000000000000.. IMPORTANT: this file is auto-generated from borg's built-in help, do not edit! .. _borg_export-tar: borg export-tar --------------- .. code-block:: none borg [common options] export-tar [options] ARCHIVE FILE [PATH...] .. only:: html .. 
class:: borg-options-table +-------------------------------------------------------+---------------------------------------+-----------------------------------------------------------------------------------------------------------+ | **positional arguments** | +-------------------------------------------------------+---------------------------------------+-----------------------------------------------------------------------------------------------------------+ | | ``ARCHIVE`` | archive to export | +-------------------------------------------------------+---------------------------------------+-----------------------------------------------------------------------------------------------------------+ | | ``FILE`` | output tar file. "-" to write to stdout instead. | +-------------------------------------------------------+---------------------------------------+-----------------------------------------------------------------------------------------------------------+ | | ``PATH`` | paths to extract; patterns are supported | +-------------------------------------------------------+---------------------------------------+-----------------------------------------------------------------------------------------------------------+ | **optional arguments** | +-------------------------------------------------------+---------------------------------------+-----------------------------------------------------------------------------------------------------------+ | | ``--tar-filter`` | filter program to pipe data through | +-------------------------------------------------------+---------------------------------------+-----------------------------------------------------------------------------------------------------------+ | | ``--list`` | output verbose list of items (files, dirs, ...) | +-------------------------------------------------------+---------------------------------------+-----------------------------------------------------------------------------------------------------------+ | .. 
class:: borg-common-opt-ref | | | | :ref:`common_options` | +-------------------------------------------------------+---------------------------------------+-----------------------------------------------------------------------------------------------------------+ | **Exclusion options** | +-------------------------------------------------------+---------------------------------------+-----------------------------------------------------------------------------------------------------------+ | | ``-e PATTERN``, ``--exclude PATTERN`` | exclude paths matching PATTERN | +-------------------------------------------------------+---------------------------------------+-----------------------------------------------------------------------------------------------------------+ | | ``--exclude-from EXCLUDEFILE`` | read exclude patterns from EXCLUDEFILE, one per line | +-------------------------------------------------------+---------------------------------------+-----------------------------------------------------------------------------------------------------------+ | | ``--pattern PATTERN`` | experimental: include/exclude paths matching PATTERN | +-------------------------------------------------------+---------------------------------------+-----------------------------------------------------------------------------------------------------------+ | | ``--patterns-from PATTERNFILE`` | experimental: read include/exclude patterns from PATTERNFILE, one per line | +-------------------------------------------------------+---------------------------------------+-----------------------------------------------------------------------------------------------------------+ | | ``--strip-components NUMBER`` | Remove the specified number of leading path elements. Paths with fewer elements will be silently skipped. | +-------------------------------------------------------+---------------------------------------+-----------------------------------------------------------------------------------------------------------+ .. raw:: html .. only:: latex ARCHIVE archive to export FILE output tar file. "-" to write to stdout instead. PATH paths to extract; patterns are supported optional arguments --tar-filter filter program to pipe data through --list output verbose list of items (files, dirs, ...) :ref:`common_options` | Exclusion options -e PATTERN, --exclude PATTERN exclude paths matching PATTERN --exclude-from EXCLUDEFILE read exclude patterns from EXCLUDEFILE, one per line --pattern PATTERN experimental: include/exclude paths matching PATTERN --patterns-from PATTERNFILE experimental: read include/exclude patterns from PATTERNFILE, one per line --strip-components NUMBER Remove the specified number of leading path elements. Paths with fewer elements will be silently skipped. Description ~~~~~~~~~~~ This command creates a tarball from an archive. When giving '-' as the output FILE, Borg will write a tar stream to standard output. By default (``--tar-filter=auto``) Borg will detect whether the FILE should be compressed based on its file extension and pipe the tarball through an appropriate filter before writing it to FILE: - .tar.gz: gzip - .tar.bz2: bzip2 - .tar.xz: xz Alternatively a ``--tar-filter`` program may be explicitly specified. It should read the uncompressed tar stream from stdin and write a compressed/filtered tar stream to stdout. The generated tarball uses the GNU tar format. export-tar is a lossy conversion: BSD flags, ACLs, extended attributes (xattrs), atime and ctime are not exported. 
Timestamp resolution is limited to whole seconds, not the nanosecond resolution otherwise supported by Borg. A ``--sparse`` option (as found in borg extract) is not supported. By default the entire archive is extracted but a subset of files and directories can be selected by passing a list of ``PATHs`` as arguments. The file selection can further be restricted by using the ``--exclude`` option. See the output of the "borg help patterns" command for more help on exclude patterns. ``--progress`` can be slower than no progress display, since it makes one additional pass over the archive metadata.borgbackup-1.1.15/docs/usage/upgrade.rst.inc0000644000175000017500000002003413771325506020671 0ustar useruser00000000000000.. IMPORTANT: this file is auto-generated from borg's built-in help, do not edit! .. _borg_upgrade: borg upgrade ------------ .. code-block:: none borg [common options] upgrade [options] [REPOSITORY] .. only:: html .. class:: borg-options-table +-------------------------------------------------------+-----------------------+------------------------------------------------------------------------------------------------+ | **positional arguments** | +-------------------------------------------------------+-----------------------+------------------------------------------------------------------------------------------------+ | | ``REPOSITORY`` | path to the repository to be upgraded | +-------------------------------------------------------+-----------------------+------------------------------------------------------------------------------------------------+ | **optional arguments** | +-------------------------------------------------------+-----------------------+------------------------------------------------------------------------------------------------+ | | ``-n``, ``--dry-run`` | do not change repository | +-------------------------------------------------------+-----------------------+------------------------------------------------------------------------------------------------+ | | ``--inplace`` | rewrite repository in place, with no chance of going back to older versions of the repository. | +-------------------------------------------------------+-----------------------+------------------------------------------------------------------------------------------------+ | | ``--force`` | Force upgrade | +-------------------------------------------------------+-----------------------+------------------------------------------------------------------------------------------------+ | | ``--tam`` | Enable manifest authentication (in key and cache) (Borg 1.0.9 and later). | +-------------------------------------------------------+-----------------------+------------------------------------------------------------------------------------------------+ | | ``--disable-tam`` | Disable manifest authentication (in key and cache). | +-------------------------------------------------------+-----------------------+------------------------------------------------------------------------------------------------+ | .. class:: borg-common-opt-ref | | | | :ref:`common_options` | +-------------------------------------------------------+-----------------------+------------------------------------------------------------------------------------------------+ .. raw:: html .. 
only:: latex REPOSITORY path to the repository to be upgraded optional arguments -n, --dry-run do not change repository --inplace rewrite repository in place, with no chance of going back to older versions of the repository. --force Force upgrade --tam Enable manifest authentication (in key and cache) (Borg 1.0.9 and later). --disable-tam Disable manifest authentication (in key and cache). :ref:`common_options` | Description ~~~~~~~~~~~ Upgrade an existing, local Borg repository. When you do not need borg upgrade +++++++++++++++++++++++++++++++++ Not every change requires that you run ``borg upgrade``. You do **not** need to run it when: - moving your repository to a different place - upgrading to another point release (like 1.0.x to 1.0.y), except when noted otherwise in the changelog - upgrading from 1.0.x to 1.1.x, except when noted otherwise in the changelog Borg 1.x.y upgrades +++++++++++++++++++ Use ``borg upgrade --tam REPO`` to require manifest authentication introduced with Borg 1.0.9 to address security issues. This means that modifying the repository after doing this with a version prior to 1.0.9 will raise a validation error, so only perform this upgrade after updating all clients using the repository to 1.0.9 or newer. This upgrade should be done on each client for safety reasons. If a repository is accidentally modified with a pre-1.0.9 client after this upgrade, use ``borg upgrade --tam --force REPO`` to remedy it. If you routinely do this you might not want to enable this upgrade (which will leave you exposed to the security issue). You can reverse the upgrade by issuing ``borg upgrade --disable-tam REPO``. See https://borgbackup.readthedocs.io/en/stable/changes.html#pre-1-0-9-manifest-spoofing-vulnerability for details. Attic and Borg 0.xx to Borg 1.x +++++++++++++++++++++++++++++++ This currently supports converting an Attic repository to Borg and also helps with converting Borg 0.xx to 1.0. Currently, only LOCAL repositories can be upgraded (issue #465). Please note that ``borg create`` (since 1.0.0) uses bigger chunks by default than old borg or attic did, so the new chunks won't deduplicate with the old chunks in the upgraded repository. See ``--chunker-params`` option of ``borg create`` and ``borg recreate``. ``borg upgrade`` will change the magic strings in the repository's segments to match the new Borg magic strings. The keyfiles found in $ATTIC_KEYS_DIR or ~/.attic/keys/ will also be converted and copied to $BORG_KEYS_DIR or ~/.config/borg/keys. The cache files are converted, from $ATTIC_CACHE_DIR or ~/.cache/attic to $BORG_CACHE_DIR or ~/.cache/borg, but the cache layout between Borg and Attic changed, so it is possible the first backup after the conversion takes longer than expected due to the cache resync. Upgrade should be able to resume if interrupted, although it will still iterate over all segments. If you want to start from scratch, use `borg delete` over the copied repository to make sure the cache files are also removed:: borg delete borg Unless ``--inplace`` is specified, the upgrade process first creates a backup copy of the repository, in REPOSITORY.before-upgrade-DATETIME, using hardlinks. This requires that the repository and its parent directory reside on same filesystem so the hardlink copy can work. This takes longer than in place upgrades, but is much safer and gives progress information (as opposed to ``cp -al``). Once you are satisfied with the conversion, you can safely destroy the backup copy. 
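For illustration, the end of that procedure could look roughly like this; the repository path is a placeholder and the actual name of the backup copy contains the date/time of the upgrade::

    # verify that the upgraded repository is usable
    $ borg check /path/to/repo
    $ borg list /path/to/repo

    # once satisfied, remove the hardlink backup copy made by borg upgrade
    $ rm -rf /path/to/repo.before-upgrade-DATETIME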
WARNING: Running the upgrade in place will make the current copy unusable with older version, with no way of going back to previous versions. This can PERMANENTLY DAMAGE YOUR REPOSITORY! Attic CAN NOT READ BORG REPOSITORIES, as the magic strings have changed. You have been warned.borgbackup-1.1.15/docs/usage/check.rst0000644000175000017500000000003313771325506017544 0ustar useruser00000000000000.. include:: check.rst.inc borgbackup-1.1.15/docs/usage/prune.rst.inc0000644000175000017500000003766713771325506020417 0ustar useruser00000000000000.. IMPORTANT: this file is auto-generated from borg's built-in help, do not edit! .. _borg_prune: borg prune ---------- .. code-block:: none borg [common options] prune [options] [REPOSITORY] .. only:: html .. class:: borg-options-table +-----------------------------------------------------------------------------+---------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------+ | **positional arguments** | +-----------------------------------------------------------------------------+---------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------+ | | ``REPOSITORY`` | repository to prune | +-----------------------------------------------------------------------------+---------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------+ | **optional arguments** | +-----------------------------------------------------------------------------+---------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------+ | | ``-n``, ``--dry-run`` | do not change repository | +-----------------------------------------------------------------------------+---------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------+ | | ``--force`` | force pruning of corrupted archives, use ``--force --force`` in case ``--force`` does not work. 
| +-----------------------------------------------------------------------------+---------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------+ | | ``-s``, ``--stats`` | print statistics for the deleted archive | +-----------------------------------------------------------------------------+---------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------+ | | ``--list`` | output verbose list of archives it keeps/prunes | +-----------------------------------------------------------------------------+---------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------+ | | ``--keep-within INTERVAL`` | keep all archives within this time interval | +-----------------------------------------------------------------------------+---------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------+ | | ``--keep-last``, ``--keep-secondly`` | number of secondly archives to keep | +-----------------------------------------------------------------------------+---------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------+ | | ``--keep-minutely`` | number of minutely archives to keep | +-----------------------------------------------------------------------------+---------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------+ | | ``-H``, ``--keep-hourly`` | number of hourly archives to keep | +-----------------------------------------------------------------------------+---------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------+ | | ``-d``, ``--keep-daily`` | number of daily archives to keep | +-----------------------------------------------------------------------------+---------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------+ | | ``-w``, ``--keep-weekly`` | number of weekly archives to keep | +-----------------------------------------------------------------------------+---------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------+ | | ``-m``, ``--keep-monthly`` | number of monthly archives to keep | +-----------------------------------------------------------------------------+---------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------+ | | ``-y``, ``--keep-yearly`` | number of yearly archives to keep | 
+-----------------------------------------------------------------------------+---------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------+ | | ``--save-space`` | work slower, but using less space | +-----------------------------------------------------------------------------+---------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------+ | .. class:: borg-common-opt-ref | | | | :ref:`common_options` | +-----------------------------------------------------------------------------+---------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------+ | **Archive filters** — Archive filters can be applied to repository targets. | +-----------------------------------------------------------------------------+---------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------+ | | ``-P PREFIX``, ``--prefix PREFIX`` | only consider archive names starting with this prefix. | +-----------------------------------------------------------------------------+---------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------+ | | ``-a GLOB``, ``--glob-archives GLOB`` | only consider archive names matching the glob. sh: rules apply, see "borg help patterns". ``--prefix`` and ``--glob-archives`` are mutually exclusive. | +-----------------------------------------------------------------------------+---------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------+ .. raw:: html .. only:: latex REPOSITORY repository to prune optional arguments -n, --dry-run do not change repository --force force pruning of corrupted archives, use ``--force --force`` in case ``--force`` does not work. -s, --stats print statistics for the deleted archive --list output verbose list of archives it keeps/prunes --keep-within INTERVAL keep all archives within this time interval --keep-last, --keep-secondly number of secondly archives to keep --keep-minutely number of minutely archives to keep -H, --keep-hourly number of hourly archives to keep -d, --keep-daily number of daily archives to keep -w, --keep-weekly number of weekly archives to keep -m, --keep-monthly number of monthly archives to keep -y, --keep-yearly number of yearly archives to keep --save-space work slower, but using less space :ref:`common_options` | Archive filters -P PREFIX, --prefix PREFIX only consider archive names starting with this prefix. -a GLOB, --glob-archives GLOB only consider archive names matching the glob. sh: rules apply, see "borg help patterns". ``--prefix`` and ``--glob-archives`` are mutually exclusive. Description ~~~~~~~~~~~ The prune command prunes a repository by deleting all archives not matching any of the specified retention options. This command is normally used by automated backup scripts wanting to keep a certain number of historic backups. 
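For illustration (this example is not part of the generated option listing above), such a script could apply a retention policy like the following; the repository path and the prefix are placeholders::

    # keep 7 daily, 4 weekly and 6 monthly archives of this machine,
    # considering only archives whose names start with its hostname
    $ borg prune --list --prefix '{hostname}-' \
          --keep-daily=7 --keep-weekly=4 --keep-monthly=6 /path/to/repo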
Also, prune automatically removes checkpoint archives (incomplete archives left behind by interrupted backup runs) except if the checkpoint is the latest archive (and thus still needed). Checkpoint archives are not considered when comparing archive counts against the retention limits (``--keep-X``). If a prefix is set with -P, then only archives that start with the prefix are considered for deletion and only those archives count towards the totals specified by the rules. Otherwise, *all* archives in the repository are candidates for deletion! There is no automatic distinction between archives representing different contents. These need to be distinguished by specifying matching prefixes. If you have multiple sequences of archives with different data sets (e.g. from different machines) in one shared repository, use one prune call per data set that matches only the respective archives using the -P option. The ``--keep-within`` option takes an argument of the form "<int><char>", where char is "H", "d", "w", "m", "y". For example, ``--keep-within 2d`` means to keep all archives that were created within the past 48 hours. "1m" is taken to mean "31d". The archives kept with this option do not count towards the totals specified by any other options. A good procedure is to thin out your backups more and more the older they get. As an example, ``--keep-daily 7`` means to keep the latest backup on each day, up to 7 most recent days with backups (days without backups do not count). The rules are applied from secondly to yearly, and backups selected by previous rules do not count towards those of later rules. The time that each backup starts is used for pruning purposes. Dates and times are interpreted in the local timezone, and weeks go from Monday to Sunday. Specifying a negative number of archives to keep means that there is no limit. The ``--keep-last N`` option does the same as ``--keep-secondly N`` (and it will keep the last N archives under the assumption that you do not create more than one backup archive in the same second). When using ``--stats``, you will get some statistics about how much data was deleted - the "Deleted data" deduplicated size there is most interesting as that is how much your repository will shrink. Please note that the "All archives" stats refer to the state after pruning.borgbackup-1.1.15/docs/usage/lock.rst0000644000175000017500000000010013771325506017422 0ustar useruser00000000000000.. include:: with-lock.rst.inc .. include:: break-lock.rst.inc borgbackup-1.1.15/docs/usage/serve.rst.inc0000644000175000017500000002336413771325506020377 0ustar useruser00000000000000.. IMPORTANT: this file is auto-generated from borg's built-in help, do not edit! .. _borg_serve: borg serve ---------- .. code-block:: none borg [common options] serve [options] .. only:: html ..
class:: borg-options-table +-------------------------------------------------------+-----------------------------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ | **optional arguments** | +-------------------------------------------------------+-----------------------------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ | | ``--restrict-to-path PATH`` | restrict repository access to PATH. Can be specified multiple times to allow the client access to several directories. Access to all sub-directories is granted implicitly; PATH doesn't need to directly point to a repository. | +-------------------------------------------------------+-----------------------------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ | | ``--restrict-to-repository PATH`` | restrict repository access. Only the repository located at PATH (no sub-directories are considered) is accessible. Can be specified multiple times to allow the client access to several repositories. Unlike ``--restrict-to-path`` sub-directories are not accessible; PATH needs to directly point at a repository location. PATH may be an empty directory or the last element of PATH may not exist, in which case the client may initialize a repository there. 
| +-------------------------------------------------------+-----------------------------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ | | ``--append-only`` | only allow appending to repository segment files | +-------------------------------------------------------+-----------------------------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ | | ``--storage-quota QUOTA`` | Override storage quota of the repository (e.g. 5G, 1.5T). When a new repository is initialized, sets the storage quota on the new repository as well. Default: no quota. | +-------------------------------------------------------+-----------------------------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ | .. class:: borg-common-opt-ref | | | | :ref:`common_options` | +-------------------------------------------------------+-----------------------------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ .. raw:: html .. only:: latex optional arguments --restrict-to-path PATH restrict repository access to PATH. Can be specified multiple times to allow the client access to several directories. Access to all sub-directories is granted implicitly; PATH doesn't need to directly point to a repository. --restrict-to-repository PATH restrict repository access. Only the repository located at PATH (no sub-directories are considered) is accessible. Can be specified multiple times to allow the client access to several repositories. Unlike ``--restrict-to-path`` sub-directories are not accessible; PATH needs to directly point at a repository location. PATH may be an empty directory or the last element of PATH may not exist, in which case the client may initialize a repository there. --append-only only allow appending to repository segment files --storage-quota QUOTA Override storage quota of the repository (e.g. 5G, 1.5T). 
When a new repository is initialized, sets the storage quota on the new repository as well. Default: no quota. :ref:`common_options` | Description ~~~~~~~~~~~ This command starts a repository server process. This command is usually not used manually.borgbackup-1.1.15/docs/usage/config.rst.inc0000644000175000017500000001015013771325506020505 0ustar useruser00000000000000.. IMPORTANT: this file is auto-generated from borg's built-in help, do not edit! .. _borg_config: borg config ----------- .. code-block:: none borg [common options] config [options] [REPOSITORY] [NAME] [VALUE] .. only:: html .. class:: borg-options-table +-------------------------------------------------------+----------------------+----------------------------------------+ | **positional arguments** | +-------------------------------------------------------+----------------------+----------------------------------------+ | | ``REPOSITORY`` | repository to configure | +-------------------------------------------------------+----------------------+----------------------------------------+ | | ``NAME`` | name of config key | +-------------------------------------------------------+----------------------+----------------------------------------+ | | ``VALUE`` | new value for key | +-------------------------------------------------------+----------------------+----------------------------------------+ | **optional arguments** | +-------------------------------------------------------+----------------------+----------------------------------------+ | | ``-c``, ``--cache`` | get and set values from the repo cache | +-------------------------------------------------------+----------------------+----------------------------------------+ | | ``-d``, ``--delete`` | delete the key from the config file | +-------------------------------------------------------+----------------------+----------------------------------------+ | | ``-l``, ``--list`` | list the configuration of the repo | +-------------------------------------------------------+----------------------+----------------------------------------+ | .. class:: borg-common-opt-ref | | | | :ref:`common_options` | +-------------------------------------------------------+----------------------+----------------------------------------+ .. raw:: html .. only:: latex REPOSITORY repository to configure NAME name of config key VALUE new value for key optional arguments -c, --cache get and set values from the repo cache -d, --delete delete the key from the config file -l, --list list the configuration of the repo :ref:`common_options` | Description ~~~~~~~~~~~ This command gets and sets options in a local repository or cache config file. For security reasons, this command only works on local repositories. To delete a config value entirely, use ``--delete``. To list the values of the configuration file or the default values, use ``--list``. To get and existing key, pass only the key name. To set a key, pass both the key name and the new value. Keys can be specified in the format "section.name" or simply "name"; the section will default to "repository" and "cache" for the repo and cache configs, respectively. By default, borg config manipulates the repository config file. Using ``--cache`` edits the repository cache's config file instead.borgbackup-1.1.15/docs/usage/upgrade.rst0000644000175000017500000000167513771325506020133 0ustar useruser00000000000000.. include:: upgrade.rst.inc Examples ~~~~~~~~ :: # Upgrade the borg repository to the most recent version. 
$ borg upgrade -v /path/to/repo making a hardlink copy in /path/to/repo.before-upgrade-2016-02-15-20:51:55 opening attic repository with borg and converting no key file found for repository converting repo index /path/to/repo/index.0 converting 1 segments... converting borg 0.xx to borg current no key file found for repository .. _borg_key_migrate-to-repokey: Upgrading a passphrase encrypted attic repo ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ attic offered a "passphrase" encryption mode, but this was removed in borg 1.0 and replaced by the "repokey" mode (which stores the passphrase-protected encryption key into the repository config). Thus, to upgrade a "passphrase" attic repo to a "repokey" borg repo, 2 steps are needed, in this order: - borg upgrade repo - borg key migrate-to-repokey repo borgbackup-1.1.15/docs/usage/list.rst.inc0000644000175000017500000005210313771325506020217 0ustar useruser00000000000000.. IMPORTANT: this file is auto-generated from borg's built-in help, do not edit! .. _borg_list: borg list --------- .. code-block:: none borg [common options] list [options] [REPOSITORY_OR_ARCHIVE] [PATH...] .. only:: html .. class:: borg-options-table +-----------------------------------------------------------------------------+-----------------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ | **positional arguments** | +-----------------------------------------------------------------------------+-----------------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ | | ``REPOSITORY_OR_ARCHIVE`` | repository or archive to list contents of | +-----------------------------------------------------------------------------+-----------------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ | | ``PATH`` | paths to list; patterns are supported | +-----------------------------------------------------------------------------+-----------------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ | **optional arguments** | +-----------------------------------------------------------------------------+-----------------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ | | ``--short`` | only print file/directory names, nothing else | 
+-----------------------------------------------------------------------------+-----------------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ | | ``--format FORMAT``, ``--list-format FORMAT`` | specify format for file listing (default: "{mode} {user:6} {group:6} {size:8d} {mtime} {path}{extra}{NL}") | +-----------------------------------------------------------------------------+-----------------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ | | ``--json`` | Only valid for listing repository contents. Format output as JSON. The form of ``--format`` is ignored, but keys used in it are added to the JSON output. Some keys are always present. Note: JSON can only represent text. A "barchive" key is therefore not available. | +-----------------------------------------------------------------------------+-----------------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ | | ``--json-lines`` | Only valid for listing archive contents. Format output as JSON Lines. The form of ``--format`` is ignored, but keys used in it are added to the JSON output. Some keys are always present. Note: JSON can only represent text. A "bpath" key is therefore not available. | +-----------------------------------------------------------------------------+-----------------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ | .. class:: borg-common-opt-ref | | | | :ref:`common_options` | +-----------------------------------------------------------------------------+-----------------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ | **Archive filters** — Archive filters can be applied to repository targets. | +-----------------------------------------------------------------------------+-----------------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ | | ``-P PREFIX``, ``--prefix PREFIX`` | only consider archive names starting with this prefix. 
| +-----------------------------------------------------------------------------+-----------------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ | | ``-a GLOB``, ``--glob-archives GLOB`` | only consider archive names matching the glob. sh: rules apply, see "borg help patterns". ``--prefix`` and ``--glob-archives`` are mutually exclusive. | +-----------------------------------------------------------------------------+-----------------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ | | ``--sort-by KEYS`` | Comma-separated list of sorting keys; valid keys are: timestamp, name, id; default is: timestamp | +-----------------------------------------------------------------------------+-----------------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ | | ``--first N`` | consider first N archives after other filters were applied | +-----------------------------------------------------------------------------+-----------------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ | | ``--last N`` | consider last N archives after other filters were applied | +-----------------------------------------------------------------------------+-----------------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ | **Exclusion options** | +-----------------------------------------------------------------------------+-----------------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ | | ``-e PATTERN``, ``--exclude PATTERN`` | exclude paths matching PATTERN | +-----------------------------------------------------------------------------+-----------------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ | | ``--exclude-from EXCLUDEFILE`` | read exclude patterns from EXCLUDEFILE, one per line | 
+-----------------------------------------------------------------------------+-----------------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ | | ``--pattern PATTERN`` | experimental: include/exclude paths matching PATTERN | +-----------------------------------------------------------------------------+-----------------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ | | ``--patterns-from PATTERNFILE`` | experimental: read include/exclude patterns from PATTERNFILE, one per line | +-----------------------------------------------------------------------------+-----------------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ .. raw:: html .. only:: latex REPOSITORY_OR_ARCHIVE repository or archive to list contents of PATH paths to list; patterns are supported optional arguments --short only print file/directory names, nothing else --format FORMAT, --list-format FORMAT specify format for file listing (default: "{mode} {user:6} {group:6} {size:8d} {mtime} {path}{extra}{NL}") --json Only valid for listing repository contents. Format output as JSON. The form of ``--format`` is ignored, but keys used in it are added to the JSON output. Some keys are always present. Note: JSON can only represent text. A "barchive" key is therefore not available. --json-lines Only valid for listing archive contents. Format output as JSON Lines. The form of ``--format`` is ignored, but keys used in it are added to the JSON output. Some keys are always present. Note: JSON can only represent text. A "bpath" key is therefore not available. :ref:`common_options` | Archive filters -P PREFIX, --prefix PREFIX only consider archive names starting with this prefix. -a GLOB, --glob-archives GLOB only consider archive names matching the glob. sh: rules apply, see "borg help patterns". ``--prefix`` and ``--glob-archives`` are mutually exclusive. --sort-by KEYS Comma-separated list of sorting keys; valid keys are: timestamp, name, id; default is: timestamp --first N consider first N archives after other filters were applied --last N consider last N archives after other filters were applied Exclusion options -e PATTERN, --exclude PATTERN exclude paths matching PATTERN --exclude-from EXCLUDEFILE read exclude patterns from EXCLUDEFILE, one per line --pattern PATTERN experimental: include/exclude paths matching PATTERN --patterns-from PATTERNFILE experimental: read include/exclude patterns from PATTERNFILE, one per line Description ~~~~~~~~~~~ This command lists the contents of a repository or an archive. See the "borg help patterns" command for more help on exclude patterns. .. 
man NOTES The following keys are available for ``--format``: - NEWLINE: OS dependent line separator - NL: alias of NEWLINE - NUL: NUL character for creating print0 / xargs -0 like output, see barchive/bpath - SPACE - TAB - CR - LF Keys for listing repository archives: - archive: archive name interpreted as text (might be missing non-text characters, see barchive) - name: alias of "archive" - barchive: verbatim archive name, can contain any character except NUL - comment: archive comment interpreted as text (might be missing non-text characters, see bcomment) - bcomment: verbatim archive comment, can contain any character except NUL - id: internal ID of the archive - start: time (start) of creation of the archive - time: alias of "start" - end: time (end) of creation of the archive - hostname: hostname of host on which this archive was created - username: username of user who created this archive Keys for listing archive files: - type - mode - uid - gid - user - group - path: path interpreted as text (might be missing non-text characters, see bpath) - bpath: verbatim POSIX path, can contain any character except NUL - source: link target for links (identical to linktarget) - linktarget - flags - size - csize: compressed size - dsize: deduplicated size - dcsize: deduplicated compressed size - num_chunks: number of chunks in this file - unique_chunks: number of unique chunks in this file - mtime - ctime - atime - isomtime - isoctime - isoatime - md5 - sha1 - sha224 - sha256 - sha384 - sha512 - archiveid - archivename - extra: prepends {source} with " -> " for soft links and " link to " for hard links - health: either "healthy" (file ok) or "broken" (if file has all-zero replacement chunks) borgbackup-1.1.15/docs/usage/general/0000755000175000017500000000000013771325773017364 5ustar useruser00000000000000borgbackup-1.1.15/docs/usage/general/date-time.rst.inc0000644000175000017500000000045113771325506022531 0ustar useruser00000000000000Date and Time ~~~~~~~~~~~~~ We format date and time conforming to ISO-8601, that is: YYYY-MM-DD and HH:MM:SS (24h clock). For more information about that, see: https://xkcd.com/1179/ Unless otherwise noted, we display local date and time. Internally, we store and process date and time as UTC. borgbackup-1.1.15/docs/usage/general/resources.rst.inc0000644000175000017500000001101413771325506022667 0ustar useruser00000000000000Resource Usage ~~~~~~~~~~~~~~ Borg might use a lot of resources depending on the size of the data set it is dealing with. If one uses Borg in a client/server way (with a ssh: repository), the resource usage occurs in part on the client and in another part on the server. If one uses Borg as a single process (with a filesystem repo), all the resource usage occurs in that one process, so just add up client + server to get the approximate resource usage. CPU client: - **borg create:** does chunking, hashing, compression, crypto (high CPU usage) - **chunks cache sync:** quite heavy on CPU, doing lots of hashtable operations. - **borg extract:** crypto, decompression (medium to high CPU usage) - **borg check:** similar to extract, but depends on options given. - **borg prune / borg delete archive:** low to medium CPU usage - **borg delete repo:** done on the server It won't go beyond 100% of 1 core as the code is currently single-threaded. Especially higher zlib and lzma compression levels use significant amounts of CPU cycles. Crypto might be cheap on the CPU (if hardware accelerated) or expensive (if not). 
CPU server: It usually doesn't need much CPU, it just deals with the key/value store (repository) and uses the repository index for that. borg check: the repository check computes the checksums of all chunks (medium CPU usage) borg delete repo: low CPU usage CPU (only for client/server operation): When using borg in a client/server way with an ssh:-type repo, the ssh processes used for the transport layer will need some CPU on the client and on the server due to the crypto they are doing - esp. if you are pumping big amounts of data. Memory (RAM) client: The chunks index and the files index are read into memory for performance reasons. Might need big amounts of memory (see below). Compression, esp. lzma compression with high levels might need substantial amounts of memory. Memory (RAM) server: The server process will load the repository index into memory. Might need considerable amounts of memory, but less than on the client (see below). Chunks index (client only): Proportional to the amount of data chunks in your repo. Lots of chunks in your repo imply a big chunks index. It is possible to tweak the chunker params (see create options). Files index (client only): Proportional to the amount of files in your last backups. Can be switched off (see create options), but next backup might be much slower if you do. The speed benefit of using the files cache is proportional to file size. Repository index (server only): Proportional to the amount of data chunks in your repo. Lots of chunks in your repo imply a big repository index. It is possible to tweak the chunker params (see create options) to influence the amount of chunks being created. Temporary files (client): Reading data and metadata from a FUSE mounted repository will consume up to the size of all deduplicated, small chunks in the repository. Big chunks won't be locally cached. Temporary files (server): A non-trivial amount of data will be stored on the remote temp directory for each client that connects to it. For some remotes, this can fill the default temporary directory at /tmp. This can be remediated by ensuring the $TMPDIR, $TEMP, or $TMP environment variable is properly set for the sshd process. For some OSes, this can be done just by setting the correct value in the .bashrc (or equivalent login config file for other shells), however in other cases it may be necessary to first enable ``PermitUserEnvironment yes`` in your ``sshd_config`` file, then add ``environment="TMPDIR=/my/big/tmpdir"`` at the start of the public key to be used in the ``authorized_keys`` file (a sketch follows at the end of this section). Cache files (client only): Contains the chunks index and files index (plus a collection of single-archive chunk indexes which might need huge amounts of disk space, depending on archive count and size - see FAQ about how to reduce). Network (only for client/server operation): If your repository is remote, all deduplicated (and optionally compressed/encrypted) data of course has to go over the connection (``ssh://`` repo url). If you use a locally mounted network filesystem, additionally some copy operations used for transaction support also go over the connection. If you backup multiple sources to one target repository, additional traffic happens for cache resynchronization.
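Here is a rough sketch of the sshd setup mentioned under "Temporary files (server)" above; the key type, key data and temp directory path are placeholders::

    # /etc/ssh/sshd_config on the repository server
    PermitUserEnvironment yes

    # one line in ~/.ssh/authorized_keys of the repository user (key data shortened)
    environment="TMPDIR=/my/big/tmpdir" ssh-ed25519 AAAA... backup-client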
borgbackup-1.1.15/docs/usage/general/file-systems.rst.inc0000644000175000017500000000251713771325506023311 0ustar useruser00000000000000File systems ~~~~~~~~~~~~ We strongly recommend against using Borg (or any other database-like software) on non-journaling file systems like FAT, since it is not possible to assume any consistency in case of power failures (or a sudden disconnect of an external drive or similar failures). While Borg uses a data store that is resilient against these failures when used on journaling file systems, it is not possible to guarantee this with some hardware -- independent of the software used. We don't know a list of affected hardware. If you are suspicious whether your Borg repository is still consistent and readable after one of the failures mentioned above occurred, run ``borg check --verify-data`` to make sure it is consistent. .. rubric:: Requirements for Borg repository file systems - Long file names - At least three directory levels with short names - Typically, file sizes up to a few hundred MB. Large repositories may require large files (>2 GB). - Up to 1000 files per directory (10000 for repositories initialized with Borg 1.0) - mkdir(2) should be atomic, since it is used for locking - Hardlinks are needed for :ref:`borg_upgrade` (if ``--inplace`` option is not used). Also hardlinks are used for more safe and secure file updating (e.g. of the repo config file), but the code tries to work also if hardlinks are not supported. borgbackup-1.1.15/docs/usage/general/return-codes.rst.inc0000644000175000017500000000117213771325506023273 0ustar useruser00000000000000Return codes ~~~~~~~~~~~~ Borg can exit with the following return codes (rc): =========== ======= Return code Meaning =========== ======= 0 success (logged as INFO) 1 warning (operation reached its normal end, but there were warnings -- you should check the log, logged as WARNING) 2 error (like a fatal error, a local or remote exception, the operation did not reach its normal end, logged as ERROR) 128+N killed by signal N (e.g. 137 == kill -9) =========== ======= If you use ``--show-rc``, the return code is also logged at the indicated level as the last log entry. borgbackup-1.1.15/docs/usage/general/environment.rst.inc0000644000175000017500000002230013771325506023221 0ustar useruser00000000000000Environment Variables ~~~~~~~~~~~~~~~~~~~~~ Borg uses some environment variables for automation: General: BORG_REPO When set, use the value to give the default repository location. If a command needs an archive parameter, you can abbreviate as ``::archive``. If a command needs a repository parameter, you can either leave it away or abbreviate as ``::``, if a positional parameter is required. BORG_PASSPHRASE When set, use the value to answer the passphrase question for encrypted repositories. It is used when a passphrase is needed to access an encrypted repo as well as when a new passphrase should be initially set when initializing an encrypted repo. See also BORG_NEW_PASSPHRASE. BORG_PASSCOMMAND When set, use the standard output of the command (trailing newlines are stripped) to answer the passphrase question for encrypted repositories. It is used when a passphrase is needed to access an encrypted repo as well as when a new passphrase should be initially set when initializing an encrypted repo. Note that the command is executed without a shell. So variables, like ``$HOME`` will work, but ``~`` won't. If BORG_PASSPHRASE is also set, it takes precedence. See also BORG_NEW_PASSPHRASE. 
BORG_PASSPHRASE_FD
    When set, specifies a file descriptor to read a passphrase from.
    Programs starting borg may choose to open an anonymous pipe and use it to
    pass a passphrase. This is safer than passing via BORG_PASSPHRASE, because
    on some systems (e.g. Linux) the environment can be examined by other
    processes.
    If BORG_PASSPHRASE or BORG_PASSCOMMAND are also set, they take precedence.
BORG_NEW_PASSPHRASE
    When set, use the value to answer the passphrase question when a **new**
    passphrase is asked for.
    This variable is checked first. If it is not set, BORG_PASSPHRASE and
    BORG_PASSCOMMAND will also be checked.
    The main use case for this is to fully automate ``borg change-passphrase``.
BORG_DISPLAY_PASSPHRASE
    When set, use the value to answer the "display the passphrase for
    verification" question when defining a new passphrase for encrypted
    repositories.
BORG_HOSTNAME_IS_UNIQUE=no
    Borg assumes that it can derive a unique hostname / identity (see
    ``borg debug info``). If this is not the case or you do not want Borg to
    automatically remove stale locks, set this to *no*.
BORG_HOST_ID
    Borg usually computes a host id from the FQDN plus the results of
    ``uuid.getnode()`` (which usually returns a unique id based on the MAC
    address of the network interface). If that MAC happens to be all-zero,
    it returns a random value instead, which is not what we want (because it
    kills automatic stale lock removal).
    So, if you have an all-zero MAC address or other reasons to control the
    host id externally, just set this environment variable to a unique value.
    If all your FQDNs are unique, you can just use the FQDN. If not, use
    fqdn@uniqueid.
BORG_LOGGING_CONF
    When set, use the given filename as INI_-style logging configuration.
    A basic example conf can be found at ``docs/misc/logging.conf``.
BORG_RSH
    When set, use this command instead of ``ssh``. This can be used to specify
    ssh options, such as a custom identity file ``ssh -i /path/to/private/key``.
    See ``man ssh`` for other options. Using the ``--rsh CMD`` commandline
    option overrides the environment variable.
BORG_REMOTE_PATH
    When set, use the given path as borg executable on the remote (defaults to
    "borg" if unset). Using the ``--remote-path PATH`` commandline option
    overrides the environment variable.
BORG_FILES_CACHE_SUFFIX
    When set to a value at least one character long, instructs borg to use a
    specifically named (based on the suffix) alternative files cache. This can
    be used to avoid loading and saving cache entries for backup sources other
    than the current sources.
BORG_FILES_CACHE_TTL
    When set to a numeric value, this determines the maximum "time to live"
    for the files cache entries (default: 20). The files cache is used to
    quickly determine whether a file is unchanged. The FAQ explains this in
    more detail: :ref:`always_chunking`
BORG_SHOW_SYSINFO
    When set to no (default: yes), system information (like OS, Python
    version, ...) in exceptions is not shown.
    Please only use for good reasons as it makes issues harder to analyze.
BORG_WORKAROUNDS
    A list of comma-separated strings that trigger workarounds in borg,
    e.g. to work around bugs in other software.

    Currently known strings are:

    basesyncfile
        Use the simpler BaseSyncFile code to avoid issues with
        sync_file_range. You might need this to run borg on WSL (Windows
        Subsystem for Linux) or in systemd.nspawn containers on some
        architectures (e.g. ARM).
        Using this does not affect data safety, but might result in a more
        bursty write-to-disk behaviour (not continuously streaming to disk).
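
Several of these variables are typically combined in a small wrapper script.
A minimal sketch (the repository URL, identity file and passphrase file below
are placeholders - adjust them to your setup)::

    export BORG_REPO='ssh://backup@backup.example.org/./borg-repo'
    export BORG_RSH='ssh -i /home/user/.ssh/id_borg_backup'
    export BORG_PASSCOMMAND='cat /home/user/.config/borg/passphrase'

    # with BORG_REPO set, "::" can be used instead of repeating the repo URL
    borg create ::{hostname}-{now:%Y-%m-%d} ~/Documents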
Some automatic "answerers" (if set, they automatically answer confirmation
questions):

BORG_UNKNOWN_UNENCRYPTED_REPO_ACCESS_IS_OK=no (or =yes)
    For "Warning: Attempting to access a previously unknown unencrypted repository"
BORG_RELOCATED_REPO_ACCESS_IS_OK=no (or =yes)
    For "Warning: The repository at location ... was previously located at ..."
BORG_CHECK_I_KNOW_WHAT_I_AM_DOING=NO (or =YES)
    For "This is a potentially dangerous function..." (check --repair)
BORG_DELETE_I_KNOW_WHAT_I_AM_DOING=NO (or =YES)
    For "You requested to completely DELETE the repository *including* all archives it contains:"

Note: answers are case sensitive. Setting an invalid answer value might either
give the default answer or ask you interactively, depending on whether retries
are allowed (they are allowed by default). So please test your scripts
interactively before running them non-interactively.

.. _XDG env var: https://specifications.freedesktop.org/basedir-spec/0.6/ar01s03.html

Directories and files:

BORG_BASE_DIR
    Defaults to ``$HOME`` or ``~$USER`` or ``~`` (in that order).
    If you want to move all borg-specific folders to a custom path at once, all
    you need to do is to modify ``BORG_BASE_DIR``: the other paths for cache,
    config etc. will adapt accordingly (assuming you didn't set them to a
    different custom value).
BORG_CACHE_DIR
    Defaults to ``$BORG_BASE_DIR/.cache/borg``. If ``BORG_BASE_DIR`` is not
    explicitly set while `XDG env var`_ ``XDG_CACHE_HOME`` is set, then
    ``$XDG_CACHE_HOME/borg`` is used instead. This directory contains the local
    cache and might need a lot of space for dealing with big repositories.
    Make sure you're aware of the associated security aspects of the cache
    location: :ref:`cache_security`
BORG_CONFIG_DIR
    Defaults to ``$BORG_BASE_DIR/.config/borg``. If ``BORG_BASE_DIR`` is not
    explicitly set while `XDG env var`_ ``XDG_CONFIG_HOME`` is set, then
    ``$XDG_CONFIG_HOME/borg`` is used instead. This directory contains all borg
    configuration directories, see the FAQ for a security advisory about the
    data in this directory: :ref:`home_config_borg`
BORG_SECURITY_DIR
    Defaults to ``$BORG_CONFIG_DIR/security``.
    This directory contains information borg uses to track its usage of NONCES
    ("numbers used once" - usually in encryption context) and other security
    relevant data.
BORG_KEYS_DIR
    Defaults to ``$BORG_CONFIG_DIR/keys``.
    This directory contains keys for encrypted repositories.
BORG_KEY_FILE
    When set, use the given filename as repository key file.
TMPDIR
    This is where temporary files are stored (might need a lot of temporary
    space for some operations), see tempfile_ for details.

Building:

BORG_OPENSSL_PREFIX
    Adds given OpenSSL header file directory to the default locations (setup.py).
BORG_LIBLZ4_PREFIX
    Adds given prefix directory to the default locations. If an 'include/lz4.h'
    is found Borg will be linked against the system liblz4 instead of a bundled
    implementation. (setup.py)
BORG_LIBB2_PREFIX
    Adds given prefix directory to the default locations. If an 'include/blake2.h'
    is found Borg will be linked against the system libb2 instead of a bundled
    implementation. (setup.py)
BORG_LIBZSTD_PREFIX
    Adds given prefix directory to the default locations. If an 'include/zstd.h'
    is found Borg will be linked against the system libzstd instead of a bundled
    implementation. (setup.py)

Please note:

- Be very careful when using the "yes" sayers, the warnings with prompt exist
  for your / your data's security/safety.
- Also be very careful when putting your passphrase into a script, make sure it has appropriate file permissions (e.g. mode 600, root:root). .. _INI: https://docs.python.org/3/library/logging.config.html#configuration-file-format .. _tempfile: https://docs.python.org/3/library/tempfile.html#tempfile.gettempdir borgbackup-1.1.15/docs/usage/general/repository-locations.rst.inc0000644000175000017500000000112513771325506025067 0ustar useruser00000000000000Repository / Archive Locations ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Many commands want either a repository (just give the repo URL, see above) or an archive location, which is a repo URL followed by ``::archive_name``. Archive names must not contain the ``/`` (slash) character. For simplicity, maybe also avoid blanks or other characters that have special meaning on the shell or in a filesystem (borg mount will use the archive name as directory name). If you have set BORG_REPO (see above) and an archive location is needed, use ``::archive_name`` - the repo URL part is then read from BORG_REPO. borgbackup-1.1.15/docs/usage/general/units.rst.inc0000644000175000017500000000076313771325506022030 0ustar useruser00000000000000Units ~~~~~ To display quantities, Borg takes care of respecting the usual conventions of scale. Disk sizes are displayed in `decimal `_, using powers of ten (so ``kB`` means 1000 bytes). For memory usage, `binary prefixes `_ are used, and are indicated using the `IEC binary prefixes `_, using powers of two (so ``KiB`` means 1024 bytes). borgbackup-1.1.15/docs/usage/general/file-metadata.rst.inc0000644000175000017500000000577413771325506023372 0ustar useruser00000000000000Support for file metadata ~~~~~~~~~~~~~~~~~~~~~~~~~ Besides regular file and directory structures, Borg can preserve * symlinks (stored as symlink, the symlink is not followed) * special files: * character and block device files (restored via mknod) * FIFOs ("named pipes") * special file *contents* can be backed up in ``--read-special`` mode. By default the metadata to create them with mknod(2), mkfifo(2) etc. is stored. * hardlinked regular files, devices, FIFOs (considering all items in the same archive) * timestamps in nanosecond precision: mtime, atime, ctime * other timestamps: birthtime (on platforms supporting it) * permissions: * IDs of owning user and owning group * names of owning user and owning group (if the IDs can be resolved) * Unix Mode/Permissions (u/g/o permissions, suid, sgid, sticky) On some platforms additional features are supported: .. Yes/No's are grouped by reason/mechanism/reference. 
+-------------------------+----------+-----------+------------+ | Platform | ACLs | xattr | Flags | | | [#acls]_ | [#xattr]_ | [#flags]_ | +=========================+==========+===========+============+ | Linux | Yes | Yes | Yes [1]_ | +-------------------------+----------+-----------+------------+ | Mac OS X | Yes | Yes | Yes (all) | +-------------------------+----------+-----------+------------+ | FreeBSD | Yes | Yes | Yes (all) | +-------------------------+----------+-----------+------------+ | OpenBSD | n/a | n/a | Yes (all) | +-------------------------+----------+-----------+------------+ | NetBSD | n/a | No [2]_ | Yes (all) | +-------------------------+----------+-----------+------------+ | Solaris and derivatives | No [3]_ | No [3]_ | n/a | +-------------------------+----------+-----------+------------+ | Windows (cygwin) | No [4]_ | No | No | +-------------------------+----------+-----------+------------+ Other Unix-like operating systems may work as well, but have not been tested at all. Note that most of the platform-dependent features also depend on the file system. For example, ntfs-3g on Linux isn't able to convey NTFS ACLs. .. [1] Only "nodump", "immutable", "compressed" and "append" are supported. Feature request :issue:`618` for more flags. .. [2] Feature request :issue:`1332` .. [3] Feature request :issue:`1337` .. [4] Cygwin tries to map NTFS ACLs to permissions with varying degrees of success. .. [#acls] The native access control list mechanism of the OS. This normally limits access to non-native ACLs. For example, NTFS ACLs aren't completely accessible on Linux with ntfs-3g. .. [#xattr] extended attributes; key-value pairs attached to a file, mainly used by the OS. This includes resource forks on Mac OS X. .. [#flags] aka *BSD flags*. The Linux set of flags [1]_ is portable across platforms. The BSDs define additional flags. borgbackup-1.1.15/docs/usage/general/repository-urls.rst.inc0000644000175000017500000000350613771325506024066 0ustar useruser00000000000000Repository URLs ~~~~~~~~~~~~~~~ **Local filesystem** (or locally mounted network filesystem): ``/path/to/repo`` - filesystem path to repo directory, absolute path ``path/to/repo`` - filesystem path to repo directory, relative path Also, stuff like ``~/path/to/repo`` or ``~other/path/to/repo`` works (this is expanded by your shell). Note: you may also prepend a ``file://`` to a filesystem path to get URL style. **Remote repositories** accessed via ssh user@host: ``user@host:/path/to/repo`` - remote repo, absolute path ``ssh://user@host:port/path/to/repo`` - same, alternative syntax, port can be given **Remote repositories with relative paths** can be given using this syntax: ``user@host:path/to/repo`` - path relative to current directory ``user@host:~/path/to/repo`` - path relative to user's home directory ``user@host:~other/path/to/repo`` - path relative to other's home directory Note: giving ``user@host:/./path/to/repo`` or ``user@host:/~/path/to/repo`` or ``user@host:/~other/path/to/repo`` is also supported, but not required here. 
**Remote repositories with relative paths, alternative syntax with port**: ``ssh://user@host:port/./path/to/repo`` - path relative to current directory ``ssh://user@host:port/~/path/to/repo`` - path relative to user's home directory ``ssh://user@host:port/~other/path/to/repo`` - path relative to other's home directory If you frequently need the same repo URL, it is a good idea to set the ``BORG_REPO`` environment variable to set a default for the repo URL: :: export BORG_REPO='ssh://user@host:port/path/to/repo' Then just leave away the repo URL if only a repo URL is needed and you want to use the default - it will be read from BORG_REPO then. Use ``::`` syntax to give the repo URL when syntax requires giving a positional argument for the repo (e.g. ``borg mount :: /mnt``). borgbackup-1.1.15/docs/usage/general/positional-arguments.rst.inc0000644000175000017500000000120513771325506025042 0ustar useruser00000000000000Positional Arguments and Options: Order matters ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Borg only supports taking options (``-s`` and ``--progress`` in the example) to the left or right of all positional arguments (``repo::archive`` and ``path`` in the example), but not in between them: :: borg create -s --progress repo::archive path # good and preferred borg create repo::archive path -s --progress # also works borg create -s repo::archive path --progress # works, but ugly borg create repo::archive -s --progress path # BAD This is due to a problem in the argparse module: https://bugs.python.org/issue15112 borgbackup-1.1.15/docs/usage/general/logging.rst.inc0000644000175000017500000000320213771325506022303 0ustar useruser00000000000000Logging ~~~~~~~ Borg writes all log output to stderr by default. But please note that something showing up on stderr does *not* indicate an error condition just because it is on stderr. Please check the log levels of the messages and the return code of borg for determining error, warning or success conditions. If you want to capture the log output to a file, just redirect it: :: borg create repo::archive myfiles 2>> logfile Custom logging configurations can be implemented via BORG_LOGGING_CONF. The log level of the builtin logging configuration defaults to WARNING. This is because we want Borg to be mostly silent and only output warnings, errors and critical messages, unless output has been requested by supplying an option that implies output (e.g. ``--list`` or ``--progress``). Log levels: DEBUG < INFO < WARNING < ERROR < CRITICAL Use ``--debug`` to set DEBUG log level - to get debug, info, warning, error and critical level output. Use ``--info`` (or ``-v`` or ``--verbose``) to set INFO log level - to get info, warning, error and critical level output. Use ``--warning`` (default) to set WARNING log level - to get warning, error and critical level output. Use ``--error`` to set ERROR log level - to get error and critical level output. Use ``--critical`` to set CRITICAL log level - to get critical level output. While you can set misc. log levels, do not expect that every command will give different output on different log levels - it's just a possibility. .. warning:: Options ``--critical`` and ``--error`` are provided for completeness, their usage is not recommended as you might miss important information. borgbackup-1.1.15/docs/usage/rename.rst.inc0000644000175000017500000000351413771325506020515 0ustar useruser00000000000000.. IMPORTANT: this file is auto-generated from borg's built-in help, do not edit! .. 
_borg_rename: borg rename ----------- .. code-block:: none borg [common options] rename [options] ARCHIVE NEWNAME .. only:: html .. class:: borg-options-table +-------------------------------------------------------+-------------+-----------------------------+ | **positional arguments** | +-------------------------------------------------------+-------------+-----------------------------+ | | ``ARCHIVE`` | archive to rename | +-------------------------------------------------------+-------------+-----------------------------+ | | ``NEWNAME`` | the new archive name to use | +-------------------------------------------------------+-------------+-----------------------------+ | .. class:: borg-common-opt-ref | | | | :ref:`common_options` | +-------------------------------------------------------+-------------+-----------------------------+ .. raw:: html .. only:: latex ARCHIVE archive to rename NEWNAME the new archive name to use :ref:`common_options` | Description ~~~~~~~~~~~ This command renames an archive in the repository. This results in a different archive ID.borgbackup-1.1.15/docs/usage/borgfs.rst0000644000175000017500000000004613771325506017755 0ustar useruser00000000000000:orphan: .. include:: borgfs.rst.inc borgbackup-1.1.15/docs/usage/benchmark_crud.rst.inc0000644000175000017500000001021113771325506022205 0ustar useruser00000000000000.. IMPORTANT: this file is auto-generated from borg's built-in help, do not edit! .. _borg_benchmark_crud: borg benchmark crud ------------------- .. code-block:: none borg [common options] benchmark crud [options] REPOSITORY PATH .. only:: html .. class:: borg-options-table +-------------------------------------------------------+----------------+----------------------------------------------+ | **positional arguments** | +-------------------------------------------------------+----------------+----------------------------------------------+ | | ``REPOSITORY`` | repository to use for benchmark (must exist) | +-------------------------------------------------------+----------------+----------------------------------------------+ | | ``PATH`` | path were to create benchmark input data | +-------------------------------------------------------+----------------+----------------------------------------------+ | .. class:: borg-common-opt-ref | | | | :ref:`common_options` | +-------------------------------------------------------+----------------+----------------------------------------------+ .. raw:: html .. only:: latex REPOSITORY repository to use for benchmark (must exist) PATH path were to create benchmark input data :ref:`common_options` | Description ~~~~~~~~~~~ This command benchmarks borg CRUD (create, read, update, delete) operations. It creates input data below the given PATH and backups this data into the given REPO. The REPO must already exist (it could be a fresh empty repo or an existing repo, the command will create / read / update / delete some archives named borg-benchmark-crud\* there. Make sure you have free space there, you'll need about 1GB each (+ overhead). If your repository is encrypted and borg needs a passphrase to unlock the key, use:: BORG_PASSPHRASE=mysecret borg benchmark crud REPO PATH Measurements are done with different input file sizes and counts. The file contents are very artificial (either all zero or all random), thus the measurement results do not necessarily reflect performance with real data. Also, due to the kind of content used, no compression is used in these benchmarks. 
C- == borg create (1st archive creation, no compression, do not use files cache) C-Z- == all-zero files. full dedup, this is primarily measuring reader/chunker/hasher. C-R- == random files. no dedup, measuring throughput through all processing stages. R- == borg extract (extract archive, dry-run, do everything, but do not write files to disk) R-Z- == all zero files. Measuring heavily duplicated files. R-R- == random files. No duplication here, measuring throughput through all processing stages, except writing to disk. U- == borg create (2nd archive creation of unchanged input files, measure files cache speed) The throughput value is kind of virtual here, it does not actually read the file. U-Z- == needs to check the 2 all-zero chunks' existence in the repo. U-R- == needs to check existence of a lot of different chunks in the repo. D- == borg delete archive (delete last remaining archive, measure deletion + compaction) D-Z- == few chunks to delete / few segments to compact/remove. D-R- == many chunks to delete / many segments to compact/remove. Please note that there might be quite some variance in these measurements. Try multiple measurements and having a otherwise idle machine (and network, if you use it).borgbackup-1.1.15/docs/usage/info.rst0000644000175000017500000000521213771325506017426 0ustar useruser00000000000000.. include:: info.rst.inc Examples ~~~~~~~~ :: $ borg info /path/to/repo::2017-06-29T11:00-srv Archive name: 2017-06-29T11:00-srv Archive fingerprint: b2f1beac2bd553b34e06358afa45a3c1689320d39163890c5bbbd49125f00fe5 Comment: Hostname: myhostname Username: root Time (start): Thu, 2017-06-29 11:03:07 Time (end): Thu, 2017-06-29 11:03:13 Duration: 5.66 seconds Number of files: 17037 Command line: /usr/sbin/borg create /path/to/repo::2017-06-29T11:00-srv /srv Utilization of max. archive size: 0% ------------------------------------------------------------------------------ Original size Compressed size Deduplicated size This archive: 12.53 GB 12.49 GB 1.62 kB All archives: 121.82 TB 112.41 TB 215.42 GB Unique chunks Total chunks Chunk index: 1015213 626934122 $ borg info /path/to/repo --last 1 Archive name: 2017-06-29T11:00-srv Archive fingerprint: b2f1beac2bd553b34e06358afa45a3c1689320d39163890c5bbbd49125f00fe5 Comment: Hostname: myhostname Username: root Time (start): Thu, 2017-06-29 11:03:07 Time (end): Thu, 2017-06-29 11:03:13 Duration: 5.66 seconds Number of files: 17037 Command line: /usr/sbin/borg create /path/to/repo::2017-06-29T11:00-srv /srv Utilization of max. archive size: 0% ------------------------------------------------------------------------------ Original size Compressed size Deduplicated size This archive: 12.53 GB 12.49 GB 1.62 kB All archives: 121.82 TB 112.41 TB 215.42 GB Unique chunks Total chunks Chunk index: 1015213 626934122 $ borg info /path/to/repo Repository ID: d857ce5788c51272c61535062e89eac4e8ef5a884ffbe976e0af9d8765dedfa5 Location: /path/to/repo Encrypted: Yes (repokey) Cache: /root/.cache/borg/d857ce5788c51272c61535062e89eac4e8ef5a884ffbe976e0af9d8765dedfa5 Security dir: /root/.config/borg/security/d857ce5788c51272c61535062e89eac4e8ef5a884ffbe976e0af9d8765dedfa5 ------------------------------------------------------------------------------ Original size Compressed size Deduplicated size All archives: 121.82 TB 112.41 TB 215.42 GB Unique chunks Total chunks Chunk index: 1015213 626934122 borgbackup-1.1.15/docs/usage/extract.rst0000644000175000017500000000140213771325506020142 0ustar useruser00000000000000.. 
include:: extract.rst.inc Examples ~~~~~~~~ :: # Extract entire archive $ borg extract /path/to/repo::my-files # Extract entire archive and list files while processing $ borg extract --list /path/to/repo::my-files # Verify whether an archive could be successfully extracted, but do not write files to disk $ borg extract --dry-run /path/to/repo::my-files # Extract the "src" directory $ borg extract /path/to/repo::my-files home/USERNAME/src # Extract the "src" directory but exclude object files $ borg extract /path/to/repo::my-files home/USERNAME/src --exclude '*.o' # Restore a raw device (must not be active/in use/mounted at that time) $ borg extract --stdout /path/to/repo::my-sdx | dd of=/dev/sdx bs=10M borgbackup-1.1.15/docs/usage/with-lock.rst.inc0000644000175000017500000000510013771325506021140 0ustar useruser00000000000000.. IMPORTANT: this file is auto-generated from borg's built-in help, do not edit! .. _borg_with-lock: borg with-lock -------------- .. code-block:: none borg [common options] with-lock [options] REPOSITORY COMMAND [ARGS...] .. only:: html .. class:: borg-options-table +-------------------------------------------------------+----------------+--------------------+ | **positional arguments** | +-------------------------------------------------------+----------------+--------------------+ | | ``REPOSITORY`` | repository to lock | +-------------------------------------------------------+----------------+--------------------+ | | ``COMMAND`` | command to run | +-------------------------------------------------------+----------------+--------------------+ | | ``ARGS`` | command arguments | +-------------------------------------------------------+----------------+--------------------+ | .. class:: borg-common-opt-ref | | | | :ref:`common_options` | +-------------------------------------------------------+----------------+--------------------+ .. raw:: html .. only:: latex REPOSITORY repository to lock COMMAND command to run ARGS command arguments :ref:`common_options` | Description ~~~~~~~~~~~ This command runs a user-specified command while the repository lock is held. It will first try to acquire the lock (make sure that no other operation is running in the repo), then execute the given command as a subprocess and wait for its termination, release the lock and return the user command's return code as borg's return code. .. note:: If you copy a repository with the lock held, the lock will be present in the copy. Thus, before using borg on the copy from a different host, you need to use "borg break-lock" on the copied repository, because Borg is cautious and does not automatically remove stale locks made by a different host.borgbackup-1.1.15/docs/usage/mount.rst0000644000175000017500000000433013771325506017635 0ustar useruser00000000000000.. include:: mount.rst.inc .. include:: umount.rst.inc Examples ~~~~~~~~ :: # Mounting the repository shows all archives. # Archives are loaded lazily, expect some delay when navigating to an archive # for the first time. $ borg mount /path/to/repo /tmp/mymountpoint $ ls /tmp/mymountpoint root-2016-02-14 root-2016-02-15 $ borg umount /tmp/mymountpoint # Mounting a specific archive is possible as well. $ borg mount /path/to/repo::root-2016-02-15 /tmp/mymountpoint $ ls /tmp/mymountpoint bin boot etc home lib lib64 lost+found media mnt opt root sbin srv tmp usr var $ borg umount /tmp/mymountpoint # The experimental "versions view" merges all archives in the repository # and provides a versioned view on files. 
$ borg mount -o versions /path/to/repo /tmp/mymountpoint $ ls -l /tmp/mymountpoint/home/user/doc.txt/ total 24 -rw-rw-r-- 1 user group 12357 Aug 26 21:19 doc.cda00bc9.txt -rw-rw-r-- 1 user group 12204 Aug 26 21:04 doc.fa760f28.txt $ borg umount /tmp/mymountpoint # Archive filters are supported. # These are especially handy for the "versions view", # which does not support lazy processing of archives. $ borg mount -o versions --glob-archives '*-my-home' --last 10 /path/to/repo /tmp/mymountpoint # Exclusion options are supported. # These can speed up mounting and lower memory needs significantly. $ borg mount /path/to/repo /tmp/mymountpoint only/that/path $ borg mount --exclude '...' /path/to/repo /tmp/mymountpoint borgfs ++++++ :: $ echo '/mnt/backup /tmp/myrepo fuse.borgfs defaults,noauto 0 0' >> /etc/fstab $ echo '/mnt/backup::root-2016-02-15 /tmp/myarchive fuse.borgfs defaults,noauto 0 0' >> /etc/fstab $ mount /tmp/myrepo $ mount /tmp/myarchive $ ls /tmp/myrepo root-2016-02-01 root-2016-02-2015 $ ls /tmp/myarchive bin boot etc home lib lib64 lost+found media mnt opt root sbin srv tmp usr var .. Note:: ``borgfs`` will be automatically provided if you used a distribution package, ``pip`` or ``setup.py`` to install Borg. Users of the standalone binary will have to manually create a symlink (see :ref:`pyinstaller-binary`). borgbackup-1.1.15/docs/usage/delete.rst.inc0000644000175000017500000002714513771325506020516 0ustar useruser00000000000000.. IMPORTANT: this file is auto-generated from borg's built-in help, do not edit! .. _borg_delete: borg delete ----------- .. code-block:: none borg [common options] delete [options] [REPOSITORY_OR_ARCHIVE] [ARCHIVE...] .. only:: html .. class:: borg-options-table +-----------------------------------------------------------------------------+---------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------+ | **positional arguments** | +-----------------------------------------------------------------------------+---------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------+ | | ``REPOSITORY_OR_ARCHIVE`` | repository or archive to delete | +-----------------------------------------------------------------------------+---------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------+ | | ``ARCHIVE`` | archives to delete | +-----------------------------------------------------------------------------+---------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------+ | **optional arguments** | +-----------------------------------------------------------------------------+---------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------+ | | ``-n``, ``--dry-run`` | do not change repository | 
+-----------------------------------------------------------------------------+---------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------+ | | ``-s``, ``--stats`` | print statistics for the deleted archive | +-----------------------------------------------------------------------------+---------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------+ | | ``--cache-only`` | delete only the local cache for the given repository | +-----------------------------------------------------------------------------+---------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------+ | | ``--force`` | force deletion of corrupted archives, use ``--force --force`` in case ``--force`` does not work. | +-----------------------------------------------------------------------------+---------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------+ | | ``--save-space`` | work slower, but using less space | +-----------------------------------------------------------------------------+---------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------+ | .. class:: borg-common-opt-ref | | | | :ref:`common_options` | +-----------------------------------------------------------------------------+---------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------+ | **Archive filters** — Archive filters can be applied to repository targets. | +-----------------------------------------------------------------------------+---------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------+ | | ``-P PREFIX``, ``--prefix PREFIX`` | only consider archive names starting with this prefix. | +-----------------------------------------------------------------------------+---------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------+ | | ``-a GLOB``, ``--glob-archives GLOB`` | only consider archive names matching the glob. sh: rules apply, see "borg help patterns". ``--prefix`` and ``--glob-archives`` are mutually exclusive. 
| +-----------------------------------------------------------------------------+---------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------+ | | ``--sort-by KEYS`` | Comma-separated list of sorting keys; valid keys are: timestamp, name, id; default is: timestamp | +-----------------------------------------------------------------------------+---------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------+ | | ``--first N`` | consider first N archives after other filters were applied | +-----------------------------------------------------------------------------+---------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------+ | | ``--last N`` | consider last N archives after other filters were applied | +-----------------------------------------------------------------------------+---------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------+ .. raw:: html .. only:: latex REPOSITORY_OR_ARCHIVE repository or archive to delete ARCHIVE archives to delete optional arguments -n, --dry-run do not change repository -s, --stats print statistics for the deleted archive --cache-only delete only the local cache for the given repository --force force deletion of corrupted archives, use ``--force --force`` in case ``--force`` does not work. --save-space work slower, but using less space :ref:`common_options` | Archive filters -P PREFIX, --prefix PREFIX only consider archive names starting with this prefix. -a GLOB, --glob-archives GLOB only consider archive names matching the glob. sh: rules apply, see "borg help patterns". ``--prefix`` and ``--glob-archives`` are mutually exclusive. --sort-by KEYS Comma-separated list of sorting keys; valid keys are: timestamp, name, id; default is: timestamp --first N consider first N archives after other filters were applied --last N consider last N archives after other filters were applied Description ~~~~~~~~~~~ This command deletes an archive from the repository or the complete repository. Disk space is reclaimed accordingly. If you delete the complete repository, the local cache for it (if any) is also deleted. When using ``--stats``, you will get some statistics about how much data was deleted - the "Deleted data" deduplicated size there is most interesting as that is how much your repository will shrink. Please note that the "All archives" stats refer to the state after deletion.borgbackup-1.1.15/docs/usage/help.rst.inc0000644000175000017500000003302713771325506020200 0ustar useruser00000000000000.. IMPORTANT: this file is auto-generated from borg's built-in help, do not edit! .. _borg_patterns: borg help patterns ~~~~~~~~~~~~~~~~~~ The path/filenames used as input for the pattern matching start from the currently active recursion root. You usually give the recursion root(s) when invoking borg and these can be either relative or absolute paths. So, when you give `relative/` as root, the paths going into the matcher will look like `relative/.../file.ext`. When you give `/absolute/` as root, they will look like `/absolute/.../file.ext`. 
This is meant when we talk about "full path" below. File paths in Borg archives are always stored normalized and relative. This means that e.g. ``borg create /path/to/repo ../some/path`` will store all files as `some/path/.../file.ext` and ``borg create /path/to/repo /home/user`` will store all files as `home/user/.../file.ext`. Therefore, always use relative paths in your patterns when matching archive content in commands like ``extract`` or ``mount``. Starting with Borg 1.2 this behaviour will be changed to accept both absolute and relative paths. File patterns support these styles: fnmatch, shell, regular expressions, path prefixes and path full-matches. By default, fnmatch is used for ``--exclude`` patterns and shell-style is used for the experimental ``--pattern`` option. If followed by a colon (':') the first two characters of a pattern are used as a style selector. Explicit style selection is necessary when a non-default style is desired or when the desired pattern starts with two alphanumeric characters followed by a colon (i.e. `aa:something/*`). `Fnmatch `_, selector `fm:` This is the default style for ``--exclude`` and ``--exclude-from``. These patterns use a variant of shell pattern syntax, with '\*' matching any number of characters, '?' matching any single character, '[...]' matching any single character specified, including ranges, and '[!...]' matching any character not specified. For the purpose of these patterns, the path separator (backslash for Windows and '/' on other systems) is not treated specially. Wrap meta-characters in brackets for a literal match (i.e. `[?]` to match the literal character `?`). For a path to match a pattern, the full path must match, or it must match from the start of the full path to just before a path separator. Except for the root path, paths will never end in the path separator when matching is attempted. Thus, if a given pattern ends in a path separator, a '\*' is appended before matching is attempted. Shell-style patterns, selector `sh:` This is the default style for ``--pattern`` and ``--patterns-from``. Like fnmatch patterns these are similar to shell patterns. The difference is that the pattern may include `**/` for matching zero or more directory levels, `*` for matching zero or more arbitrary characters with the exception of any path separator. Regular expressions, selector `re:` Regular expressions similar to those found in Perl are supported. Unlike shell patterns regular expressions are not required to match the full path and any substring match is sufficient. It is strongly recommended to anchor patterns to the start ('^'), to the end ('$') or both. Path separators (backslash for Windows and '/' on other systems) in paths are always normalized to a forward slash ('/') before applying a pattern. The regular expression syntax is described in the `Python documentation for the re module `_. Path prefix, selector `pp:` This pattern style is useful to match whole sub-directories. The pattern `pp:root/somedir` matches `root/somedir` and everything therein. Path full-match, selector `pf:` This pattern style is (only) useful to match full paths. This is kind of a pseudo pattern as it can not have any variable or unspecified parts - the full path must be given. `pf:root/file.ext` matches `root/file.ext` only. Implementation note: this is implemented via very time-efficient O(1) hashtable lookups (this means you can have huge amounts of such patterns without impacting performance much). 
Due to that, this kind of pattern does not respect any context or order. If you use such a pattern to include a file, it will always be included (if the directory recursion encounters it). Other include/exclude patterns that would normally match will be ignored. Same logic applies for exclude. .. note:: `re:`, `sh:` and `fm:` patterns are all implemented on top of the Python SRE engine. It is very easy to formulate patterns for each of these types which requires an inordinate amount of time to match paths. If untrusted users are able to supply patterns, ensure they cannot supply `re:` patterns. Further, ensure that `sh:` and `fm:` patterns only contain a handful of wildcards at most. Exclusions can be passed via the command line option ``--exclude``. When used from within a shell the patterns should be quoted to protect them from expansion. The ``--exclude-from`` option permits loading exclusion patterns from a text file with one pattern per line. Lines empty or starting with the number sign ('#') after removing whitespace on both ends are ignored. The optional style selector prefix is also supported for patterns loaded from a file. Due to whitespace removal paths with whitespace at the beginning or end can only be excluded using regular expressions. To test your exclusion patterns without performing an actual backup you can run ``borg create --list --dry-run ...``. Examples:: # Exclude '/home/user/file.o' but not '/home/user/file.odt': $ borg create -e '*.o' backup / # Exclude '/home/user/junk' and '/home/user/subdir/junk' but # not '/home/user/importantjunk' or '/etc/junk': $ borg create -e '/home/*/junk' backup / # Exclude the contents of '/home/user/cache' but not the directory itself: $ borg create -e /home/user/cache/ backup / # The file '/home/user/cache/important' is *not* backed up: $ borg create -e /home/user/cache/ backup / /home/user/cache/important # The contents of directories in '/home' are not backed up when their name # ends in '.tmp' $ borg create --exclude 're:^/home/[^/]+\.tmp/' backup / # Load exclusions from file $ cat >exclude.txt <`_, e.g. {now:%Y-%m-%d_%H:%M:%S} {utcnow} The current UTC date and time, by default in ISO-8601 format. You can also supply your own `format string `_, e.g. {utcnow:%Y-%m-%d_%H:%M:%S} {user} The user name (or UID, if no name is available) of the user running borg. {pid} The current process ID. {borgversion} The version of borg, e.g.: 1.0.8rc1 {borgmajor} The version of borg, only the major version, e.g.: 1 {borgminor} The version of borg, only major and minor version, e.g.: 1.0 {borgpatch} The version of borg, only major, minor and patch version, e.g.: 1.0.8 If literal curly braces need to be used, double them for escaping:: borg create /path/to/repo::{{literal_text}} Examples:: borg create /path/to/repo::{hostname}-{user}-{utcnow} ... borg create /path/to/repo::{hostname}-{now:%Y-%m-%d_%H:%M:%S} ... borg prune --prefix '{hostname}-' ... .. note:: systemd uses a difficult, non-standard syntax for command lines in unit files (refer to the `systemd.unit(5)` manual page). When invoking borg from unit files, pay particular attention to escaping, especially when using the now/utcnow placeholders, since systemd performs its own %-based variable replacement even in quoted text. To avoid interference from systemd, double all percent signs (``{hostname}-{now:%Y-%m-%d_%H:%M:%S}`` becomes ``{hostname}-{now:%%Y-%%m-%%d_%%H:%%M:%%S}``). .. 
_borg_compression:

borg help compression
~~~~~~~~~~~~~~~~~~~~~


It is no problem to mix different compression methods in one repo;
deduplication is done on the source data chunks (not on the compressed or
encrypted data).

If some specific chunk was once compressed and stored into the repo, creating
another backup that also uses this chunk will not change the stored chunk.
So if you use different compression specs for the backups, whichever stores a
chunk first determines its compression. See also borg recreate.

Compression is lz4 by default. If you want something else, you have to specify
what you want.

Valid compression specifiers are:

none
    Do not compress.

lz4
    Use lz4 compression. Very high speed, very low compression. (default)

zstd[,L]
    Use zstd ("zstandard") compression, a modern wide-range algorithm.
    If you do not explicitly give the compression level L (ranging from 1
    to 22), it will use level 3.
    Archives compressed with zstd are not compatible with borg < 1.1.4.

zlib[,L]
    Use zlib ("gz") compression. Medium speed, medium compression.
    If you do not explicitly give the compression level L (ranging from 0
    to 9), it will use level 6.
    Giving level 0 (which means "no compression" but still has zlib protocol
    overhead) is usually pointless; use "none" compression instead.

lzma[,L]
    Use lzma ("xz") compression. Low speed, high compression.
    If you do not explicitly give the compression level L (ranging from 0
    to 9), it will use level 6.
    Giving levels above 6 is pointless and counterproductive because it does
    not compress better due to the buffer size used by borg - but it wastes
    lots of CPU cycles and RAM.

auto,C[,L]
    Use a built-in heuristic to decide per chunk whether to compress or not.
    The heuristic tries with lz4 whether the data is compressible.
    For incompressible data, it will not use compression (uses "none").
    For compressible data, it uses the given C[,L] compression - with C[,L]
    being any valid compression specifier.

Examples::

    borg create --compression lz4 REPO::ARCHIVE data
    borg create --compression zstd REPO::ARCHIVE data
    borg create --compression zstd,10 REPO::ARCHIVE data
    borg create --compression zlib REPO::ARCHIVE data
    borg create --compression zlib,1 REPO::ARCHIVE data
    borg create --compression auto,lzma,6 REPO::ARCHIVE data
    borg create --compression auto,lzma ...
borgbackup-1.1.15/docs/usage/diff.rst.inc0000644000175000017500000001702013771325506020153 0ustar useruser00000000000000.. IMPORTANT: this file is auto-generated from borg's built-in help, do not edit!

.. _borg_diff:

borg diff
---------
.. code-block:: none

    borg [common options] diff [options] REPO::ARCHIVE1 ARCHIVE2 [PATH...]

.. only:: html

   .. 
class:: borg-options-table +-------------------------------------------------------+---------------------------------------+----------------------------------------------------------------------------+ | **positional arguments** | +-------------------------------------------------------+---------------------------------------+----------------------------------------------------------------------------+ | | ``REPO::ARCHIVE1`` | repository location and ARCHIVE1 name | +-------------------------------------------------------+---------------------------------------+----------------------------------------------------------------------------+ | | ``ARCHIVE2`` | ARCHIVE2 name (no repository location allowed) | +-------------------------------------------------------+---------------------------------------+----------------------------------------------------------------------------+ | | ``PATH`` | paths of items inside the archives to compare; patterns are supported | +-------------------------------------------------------+---------------------------------------+----------------------------------------------------------------------------+ | **optional arguments** | +-------------------------------------------------------+---------------------------------------+----------------------------------------------------------------------------+ | | ``--numeric-owner`` | only consider numeric user and group identifiers | +-------------------------------------------------------+---------------------------------------+----------------------------------------------------------------------------+ | | ``--same-chunker-params`` | Override check of chunker parameters. | +-------------------------------------------------------+---------------------------------------+----------------------------------------------------------------------------+ | | ``--sort`` | Sort the output lines by file path. | +-------------------------------------------------------+---------------------------------------+----------------------------------------------------------------------------+ | .. 
class:: borg-common-opt-ref | | | | :ref:`common_options` | +-------------------------------------------------------+---------------------------------------+----------------------------------------------------------------------------+ | **Exclusion options** | +-------------------------------------------------------+---------------------------------------+----------------------------------------------------------------------------+ | | ``-e PATTERN``, ``--exclude PATTERN`` | exclude paths matching PATTERN | +-------------------------------------------------------+---------------------------------------+----------------------------------------------------------------------------+ | | ``--exclude-from EXCLUDEFILE`` | read exclude patterns from EXCLUDEFILE, one per line | +-------------------------------------------------------+---------------------------------------+----------------------------------------------------------------------------+ | | ``--pattern PATTERN`` | experimental: include/exclude paths matching PATTERN | +-------------------------------------------------------+---------------------------------------+----------------------------------------------------------------------------+ | | ``--patterns-from PATTERNFILE`` | experimental: read include/exclude patterns from PATTERNFILE, one per line | +-------------------------------------------------------+---------------------------------------+----------------------------------------------------------------------------+ .. raw:: html .. only:: latex REPO::ARCHIVE1 repository location and ARCHIVE1 name ARCHIVE2 ARCHIVE2 name (no repository location allowed) PATH paths of items inside the archives to compare; patterns are supported optional arguments --numeric-owner only consider numeric user and group identifiers --same-chunker-params Override check of chunker parameters. --sort Sort the output lines by file path. :ref:`common_options` | Exclusion options -e PATTERN, --exclude PATTERN exclude paths matching PATTERN --exclude-from EXCLUDEFILE read exclude patterns from EXCLUDEFILE, one per line --pattern PATTERN experimental: include/exclude paths matching PATTERN --patterns-from PATTERNFILE experimental: read include/exclude patterns from PATTERNFILE, one per line Description ~~~~~~~~~~~ This command finds differences (file contents, user/group/mode) between archives. A repository location and an archive name must be specified for REPO::ARCHIVE1. ARCHIVE2 is just another archive name in same repository (no repository location allowed). For archives created with Borg 1.1 or newer diff automatically detects whether the archives are created with the same chunker params. If so, only chunk IDs are compared, which is very fast. For archives prior to Borg 1.1 chunk contents are compared by default. If you did not create the archives with different chunker params, pass ``--same-chunker-params``. Note that the chunker params changed from Borg 0.xx to 1.0. See the output of the "borg help patterns" command for more help on exclude patterns.borgbackup-1.1.15/docs/usage/mount.rst.inc0000644000175000017500000003657013771325506020420 0ustar useruser00000000000000.. IMPORTANT: this file is auto-generated from borg's built-in help, do not edit! .. _borg_mount: borg mount ---------- .. code-block:: none borg [common options] mount [options] REPOSITORY_OR_ARCHIVE MOUNTPOINT [PATH...] .. only:: html .. 
class:: borg-options-table +-----------------------------------------------------------------------------+---------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------+ | **positional arguments** | +-----------------------------------------------------------------------------+---------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------+ | | ``REPOSITORY_OR_ARCHIVE`` | repository or archive to mount | +-----------------------------------------------------------------------------+---------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------+ | | ``MOUNTPOINT`` | where to mount filesystem | +-----------------------------------------------------------------------------+---------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------+ | | ``PATH`` | paths to extract; patterns are supported | +-----------------------------------------------------------------------------+---------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------+ | **optional arguments** | +-----------------------------------------------------------------------------+---------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------+ | | ``-f``, ``--foreground`` | stay in foreground, do not daemonize | +-----------------------------------------------------------------------------+---------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------+ | | ``-o`` | Extra mount options | +-----------------------------------------------------------------------------+---------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------+ | .. class:: borg-common-opt-ref | | | | :ref:`common_options` | +-----------------------------------------------------------------------------+---------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------+ | **Archive filters** — Archive filters can be applied to repository targets. | +-----------------------------------------------------------------------------+---------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------+ | | ``-P PREFIX``, ``--prefix PREFIX`` | only consider archive names starting with this prefix. 
| +-----------------------------------------------------------------------------+---------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------+ | | ``-a GLOB``, ``--glob-archives GLOB`` | only consider archive names matching the glob. sh: rules apply, see "borg help patterns". ``--prefix`` and ``--glob-archives`` are mutually exclusive. | +-----------------------------------------------------------------------------+---------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------+ | | ``--sort-by KEYS`` | Comma-separated list of sorting keys; valid keys are: timestamp, name, id; default is: timestamp | +-----------------------------------------------------------------------------+---------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------+ | | ``--first N`` | consider first N archives after other filters were applied | +-----------------------------------------------------------------------------+---------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------+ | | ``--last N`` | consider last N archives after other filters were applied | +-----------------------------------------------------------------------------+---------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------+ | **Exclusion options** | +-----------------------------------------------------------------------------+---------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------+ | | ``-e PATTERN``, ``--exclude PATTERN`` | exclude paths matching PATTERN | +-----------------------------------------------------------------------------+---------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------+ | | ``--exclude-from EXCLUDEFILE`` | read exclude patterns from EXCLUDEFILE, one per line | +-----------------------------------------------------------------------------+---------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------+ | | ``--pattern PATTERN`` | experimental: include/exclude paths matching PATTERN | +-----------------------------------------------------------------------------+---------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------+ | | ``--patterns-from PATTERNFILE`` | experimental: read include/exclude patterns from PATTERNFILE, one per line | 
+-----------------------------------------------------------------------------+---------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------+ | | ``--strip-components NUMBER`` | Remove the specified number of leading path elements. Paths with fewer elements will be silently skipped. | +-----------------------------------------------------------------------------+---------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------+ .. raw:: html .. only:: latex REPOSITORY_OR_ARCHIVE repository or archive to mount MOUNTPOINT where to mount filesystem PATH paths to extract; patterns are supported optional arguments -f, --foreground stay in foreground, do not daemonize -o Extra mount options :ref:`common_options` | Archive filters -P PREFIX, --prefix PREFIX only consider archive names starting with this prefix. -a GLOB, --glob-archives GLOB only consider archive names matching the glob. sh: rules apply, see "borg help patterns". ``--prefix`` and ``--glob-archives`` are mutually exclusive. --sort-by KEYS Comma-separated list of sorting keys; valid keys are: timestamp, name, id; default is: timestamp --first N consider first N archives after other filters were applied --last N consider last N archives after other filters were applied Exclusion options -e PATTERN, --exclude PATTERN exclude paths matching PATTERN --exclude-from EXCLUDEFILE read exclude patterns from EXCLUDEFILE, one per line --pattern PATTERN experimental: include/exclude paths matching PATTERN --patterns-from PATTERNFILE experimental: read include/exclude patterns from PATTERNFILE, one per line --strip-components NUMBER Remove the specified number of leading path elements. Paths with fewer elements will be silently skipped. Description ~~~~~~~~~~~ This command mounts an archive as a FUSE filesystem. This can be useful for browsing an archive or restoring individual files. Unless the ``--foreground`` option is given the command will run in the background until the filesystem is ``umounted``. The command ``borgfs`` provides a wrapper for ``borg mount``. This can also be used in fstab entries: ``/path/to/repo /mnt/point fuse.borgfs defaults,noauto 0 0`` To allow a regular user to use fstab entries, add the ``user`` option: ``/path/to/repo /mnt/point fuse.borgfs defaults,noauto,user 0 0`` For FUSE configuration and mount options, see the mount.fuse(8) manual page. Additional mount options supported by borg: - versions: when used with a repository mount, this gives a merged, versioned view of the files in the archives. EXPERIMENTAL, layout may change in future. - allow_damaged_files: by default damaged files (where missing chunks were replaced with runs of zeros by borg check ``--repair``) are not readable and return EIO (I/O error). Set this option to read such files. - ignore_permissions: for security reasons the "default_permissions" mount option is internally enforced by borg. "ignore_permissions" can be given to not enforce "default_permissions". The BORG_MOUNT_DATA_CACHE_ENTRIES environment variable is meant for advanced users to tweak the performance. It sets the number of cached data chunks; additional memory usage can be up to ~8 MiB times this number. The default is the number of CPU cores. When the daemonized process receives a signal or crashes, it does not unmount. 
Unmounting in these cases could cause an active rsync or similar process to unintentionally delete data. When running in the foreground ^C/SIGINT unmounts cleanly, but other signals or crashes do not.borgbackup-1.1.15/docs/usage/umount.rst.inc0000644000175000017500000000352113771325506020573 0ustar useruser00000000000000.. IMPORTANT: this file is auto-generated from borg's built-in help, do not edit! .. _borg_umount: borg umount ----------- .. code-block:: none borg [common options] umount [options] MOUNTPOINT .. only:: html .. class:: borg-options-table +-------------------------------------------------------+----------------+----------------------------------------+ | **positional arguments** | +-------------------------------------------------------+----------------+----------------------------------------+ | | ``MOUNTPOINT`` | mountpoint of the filesystem to umount | +-------------------------------------------------------+----------------+----------------------------------------+ | .. class:: borg-common-opt-ref | | | | :ref:`common_options` | +-------------------------------------------------------+----------------+----------------------------------------+ .. raw:: html .. only:: latex MOUNTPOINT mountpoint of the filesystem to umount :ref:`common_options` | Description ~~~~~~~~~~~ This command un-mounts a FUSE filesystem that was mounted with ``borg mount``. This is a convenience wrapper that just calls the platform-specific shell command - usually this is either umount or fusermount -u.borgbackup-1.1.15/docs/usage/list.rst0000644000175000017500000000303313771325506017445 0ustar useruser00000000000000.. include:: list.rst.inc Examples ~~~~~~~~ :: $ borg list /path/to/repo Monday Mon, 2016-02-15 19:15:11 repo Mon, 2016-02-15 19:26:54 root-2016-02-15 Mon, 2016-02-15 19:36:29 newname Mon, 2016-02-15 19:50:19 ... $ borg list /path/to/repo::root-2016-02-15 drwxr-xr-x root root 0 Mon, 2016-02-15 17:44:27 . drwxrwxr-x root root 0 Mon, 2016-02-15 19:04:49 bin -rwxr-xr-x root root 1029624 Thu, 2014-11-13 00:08:51 bin/bash lrwxrwxrwx root root 0 Fri, 2015-03-27 20:24:26 bin/bzcmp -> bzdiff -rwxr-xr-x root root 2140 Fri, 2015-03-27 20:24:22 bin/bzdiff ... $ borg list /path/to/repo::root-2016-02-15 --pattern "- bin/ba*" drwxr-xr-x root root 0 Mon, 2016-02-15 17:44:27 . drwxrwxr-x root root 0 Mon, 2016-02-15 19:04:49 bin lrwxrwxrwx root root 0 Fri, 2015-03-27 20:24:26 bin/bzcmp -> bzdiff -rwxr-xr-x root root 2140 Fri, 2015-03-27 20:24:22 bin/bzdiff ... $ borg list /path/to/repo::archiveA --format="{mode} {user:6} {group:6} {size:8d} {isomtime} {path}{extra}{NEWLINE}" drwxrwxr-x user user 0 Sun, 2015-02-01 11:00:00 . drwxrwxr-x user user 0 Sun, 2015-02-01 11:00:00 code drwxrwxr-x user user 0 Sun, 2015-02-01 11:00:00 code/myproject -rw-rw-r-- user user 1416192 Sun, 2015-02-01 11:00:00 code/myproject/file.ext ... borgbackup-1.1.15/docs/usage/info.rst.inc0000644000175000017500000002152013771325506020176 0ustar useruser00000000000000.. IMPORTANT: this file is auto-generated from borg's built-in help, do not edit! .. _borg_info: borg info --------- .. code-block:: none borg [common options] info [options] [REPOSITORY_OR_ARCHIVE] .. only:: html .. 
class:: borg-options-table +-----------------------------------------------------------------------------+---------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------+ | **positional arguments** | +-----------------------------------------------------------------------------+---------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------+ | | ``REPOSITORY_OR_ARCHIVE`` | repository or archive to display information about | +-----------------------------------------------------------------------------+---------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------+ | **optional arguments** | +-----------------------------------------------------------------------------+---------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------+ | | ``--json`` | format output as JSON | +-----------------------------------------------------------------------------+---------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------+ | .. class:: borg-common-opt-ref | | | | :ref:`common_options` | +-----------------------------------------------------------------------------+---------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------+ | **Archive filters** — Archive filters can be applied to repository targets. | +-----------------------------------------------------------------------------+---------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------+ | | ``-P PREFIX``, ``--prefix PREFIX`` | only consider archive names starting with this prefix. | +-----------------------------------------------------------------------------+---------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------+ | | ``-a GLOB``, ``--glob-archives GLOB`` | only consider archive names matching the glob. sh: rules apply, see "borg help patterns". ``--prefix`` and ``--glob-archives`` are mutually exclusive. 
| +-----------------------------------------------------------------------------+---------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------+ | | ``--sort-by KEYS`` | Comma-separated list of sorting keys; valid keys are: timestamp, name, id; default is: timestamp | +-----------------------------------------------------------------------------+---------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------+ | | ``--first N`` | consider first N archives after other filters were applied | +-----------------------------------------------------------------------------+---------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------+ | | ``--last N`` | consider last N archives after other filters were applied | +-----------------------------------------------------------------------------+---------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------+ .. raw:: html .. only:: latex REPOSITORY_OR_ARCHIVE repository or archive to display information about optional arguments --json format output as JSON :ref:`common_options` | Archive filters -P PREFIX, --prefix PREFIX only consider archive names starting with this prefix. -a GLOB, --glob-archives GLOB only consider archive names matching the glob. sh: rules apply, see "borg help patterns". ``--prefix`` and ``--glob-archives`` are mutually exclusive. --sort-by KEYS Comma-separated list of sorting keys; valid keys are: timestamp, name, id; default is: timestamp --first N consider first N archives after other filters were applied --last N consider last N archives after other filters were applied Description ~~~~~~~~~~~ This command displays detailed information about the specified archive or repository. Please note that the deduplicated sizes of the individual archives do not add up to the deduplicated size of the repository ("all archives"), because the two are meaning different things: This archive / deduplicated size = amount of data stored ONLY for this archive = unique chunks of this archive. All archives / deduplicated size = amount of data stored in the repo = all chunks in the repository. Borg archives can only contain a limited amount of file metadata. The size of an archive relative to this limit depends on a number of factors, mainly the number of files, the lengths of paths and other metadata stored for files. This is shown as *utilization of maximum supported archive size*.borgbackup-1.1.15/docs/usage/serve.rst0000644000175000017500000000774413771325506017633 0ustar useruser00000000000000.. include:: serve.rst.inc Examples ~~~~~~~~ borg serve has special support for ssh forced commands (see ``authorized_keys`` example below): it will detect that you use such a forced command and extract the value of the ``--restrict-to-path`` option(s). It will then parse the original command that came from the client, makes sure that it is also ``borg serve`` and enforce path restriction(s) as given by the forced command. 
That way, other options given by the client (like ``--info`` or ``--umask``) are preserved (and are not fixed by the forced command). Environment variables (such as BORG_HOSTNAME_IS_UNIQUE) contained in the original command sent by the client are *not* interpreted, but ignored. If BORG_XXX environment variables should be set on the ``borg serve`` side, then these must be set in system-specific locations like ``/etc/environment`` or in the forced command itself (example below). :: # Allow an SSH keypair to only run borg, and only have access to /path/to/repo. # Use key options to disable unneeded and potentially dangerous SSH functionality. # This will help to secure an automated remote backup system. $ cat ~/.ssh/authorized_keys command="borg serve --restrict-to-path /path/to/repo",restrict ssh-rsa AAAAB3[...] # Set a BORG_XXX environment variable on the "borg serve" side $ cat ~/.ssh/authorized_keys command="export BORG_XXX=value; borg serve [...]",restrict ssh-rsa [...] .. note:: The examples above use the ``restrict`` directive. This does automatically block potential dangerous ssh features, even when they are added in a future update. Thus, this option should be preferred. If you're using openssh-server < 7.2, however, you have to explicitly specify the ssh features to restrict and cannot simply use the restrict option as it has been introduced in v7.2. We recommend to use ``no-port-forwarding,no-X11-forwarding,no-pty,no-agent-forwarding,no-user-rc`` in this case. SSH Configuration ~~~~~~~~~~~~~~~~~ ``borg serve``'s pipes (``stdin``/``stdout``/``stderr``) are connected to the ``sshd`` process on the server side. In the event that the SSH connection between ``borg serve`` and the client is disconnected or stuck abnormally (for example, due to a network outage), it can take a long time for ``sshd`` to notice the client is disconnected. In the meantime, ``sshd`` continues running, and as a result so does the ``borg serve`` process holding the lock on the repository. This can cause subsequent ``borg`` operations on the remote repository to fail with the error: ``Failed to create/acquire the lock``. In order to avoid this, it is recommended to perform the following additional SSH configuration: Either in the client side's ``~/.ssh/config`` file, or in the client's ``/etc/ssh/ssh_config`` file: :: Host backupserver ServerAliveInterval 10 ServerAliveCountMax 30 Replacing ``backupserver`` with the hostname, FQDN or IP address of the borg server. This will cause the client to send a keepalive to the server every 10 seconds. If 30 consecutive keepalives are sent without a response (a time of 300 seconds), the ssh client process will be terminated, causing the borg process to terminate gracefully. On the server side's ``sshd`` configuration file (typically ``/etc/ssh/sshd_config``): :: ClientAliveInterval 10 ClientAliveCountMax 30 This will cause the server to send a keep alive to the client every 10 seconds. If 30 consecutive keepalives are sent without a response (a time of 300 seconds), the server's sshd process will be terminated, causing the ``borg serve`` process to terminate gracefully and release the lock on the repository. If you then run borg commands with ``--lock-wait 600``, this gives sufficient time for the borg serve processes to terminate after the SSH connection is torn down after the 300 second wait for the keepalives to fail. You may, of course, modify the timeout values demonstrated above to values that suit your environment and use case. 
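As an illustration, a client-side invocation that combines the keepalive settings
above with the longer lock wait might look like this (a sketch; the ``backupserver``
host alias and the repository path are placeholders)::

    $ borg --lock-wait 600 create backupserver:/path/to/repo::{hostname}-{now} ~/Documents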
borgbackup-1.1.15/docs/usage/help.rst0000644000175000017500000000010113771325506017413 0ustar useruser00000000000000Miscellaneous Help ------------------ .. include:: help.rst.inc borgbackup-1.1.15/docs/usage/rename.rst0000644000175000017500000000053013771325506017740 0ustar useruser00000000000000.. include:: rename.rst.inc Examples ~~~~~~~~ :: $ borg create /path/to/repo::archivename ~ $ borg list /path/to/repo archivename Mon, 2016-02-15 19:50:19 $ borg rename /path/to/repo::archivename newname $ borg list /path/to/repo newname Mon, 2016-02-15 19:50:19 borgbackup-1.1.15/docs/usage/tar.rst0000644000175000017500000000130213771325506017255 0ustar useruser00000000000000.. include:: export-tar.rst.inc Examples ~~~~~~~~ :: # export as uncompressed tar $ borg export-tar /path/to/repo::Monday Monday.tar # exclude some types, compress using gzip $ borg export-tar /path/to/repo::Monday Monday.tar.gz --exclude '*.so' # use higher compression level with gzip $ borg export-tar --tar-filter="gzip -9" testrepo::linux Monday.tar.gz # export a tar, but instead of storing it on disk, # upload it to a remote site using curl. $ borg export-tar /path/to/repo::Monday - | curl --data-binary @- https://somewhere/to/POST # remote extraction via "tarpipe" $ borg export-tar /path/to/repo::Monday - | ssh somewhere "cd extracted; tar x" borgbackup-1.1.15/docs/usage/common-options.rst.inc0000644000175000017500000000260513771325506022227 0ustar useruser00000000000000-h, --help show this help message and exit --critical work on log level CRITICAL --error work on log level ERROR --warning work on log level WARNING (default) --info, -v, --verbose work on log level INFO --debug enable debug output, work on log level DEBUG --debug-topic TOPIC enable TOPIC debugging (can be specified multiple times). The logger path is borg.debug. if TOPIC is not fully qualified. -p, --progress show progress information --log-json Output one JSON object per log line instead of formatted text. --lock-wait SECONDS wait at most SECONDS for acquiring a repository/cache lock (default: 1). --bypass-lock Bypass locking mechanism --show-version show/log the borg version --show-rc show/log the return code (rc) --umask M set umask to M (local and remote, default: 0077) --remote-path PATH use PATH as borg executable on the remote (default: "borg") --remote-ratelimit RATE set remote network upload rate limit in kiByte/s (default: 0=unlimited) --consider-part-files treat part files like normal files (e.g. to list/extract them) --debug-profile FILE Write execution profile in Borg format into FILE. For local use a Python-compatible file can be generated by suffixing FILE with ".pyprof". --rsh RSH Use this command to connect to the 'borg serve' process (default: 'ssh') borgbackup-1.1.15/docs/usage/check.rst.inc0000644000175000017500000003225513771325506020327 0ustar useruser00000000000000.. IMPORTANT: this file is auto-generated from borg's built-in help, do not edit! .. _borg_check: borg check ---------- .. code-block:: none borg [common options] check [options] [REPOSITORY_OR_ARCHIVE] .. only:: html .. 
class:: borg-options-table +-----------------------------------------------------------------------------+---------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------+ | **positional arguments** | +-----------------------------------------------------------------------------+---------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------+ | | ``REPOSITORY_OR_ARCHIVE`` | repository or archive to check consistency of | +-----------------------------------------------------------------------------+---------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------+ | **optional arguments** | +-----------------------------------------------------------------------------+---------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------+ | | ``--repository-only`` | only perform repository checks | +-----------------------------------------------------------------------------+---------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------+ | | ``--archives-only`` | only perform archives checks | +-----------------------------------------------------------------------------+---------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------+ | | ``--verify-data`` | perform cryptographic archive data integrity verification (conflicts with ``--repository-only``) | +-----------------------------------------------------------------------------+---------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------+ | | ``--repair`` | attempt to repair any inconsistencies found | +-----------------------------------------------------------------------------+---------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------+ | | ``--save-space`` | work slower, but using less space | +-----------------------------------------------------------------------------+---------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------+ | .. class:: borg-common-opt-ref | | | | :ref:`common_options` | +-----------------------------------------------------------------------------+---------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------+ | **Archive filters** — Archive filters can be applied to repository targets. 
| +-----------------------------------------------------------------------------+---------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------+ | | ``-P PREFIX``, ``--prefix PREFIX`` | only consider archive names starting with this prefix. | +-----------------------------------------------------------------------------+---------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------+ | | ``-a GLOB``, ``--glob-archives GLOB`` | only consider archive names matching the glob. sh: rules apply, see "borg help patterns". ``--prefix`` and ``--glob-archives`` are mutually exclusive. | +-----------------------------------------------------------------------------+---------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------+ | | ``--sort-by KEYS`` | Comma-separated list of sorting keys; valid keys are: timestamp, name, id; default is: timestamp | +-----------------------------------------------------------------------------+---------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------+ | | ``--first N`` | consider first N archives after other filters were applied | +-----------------------------------------------------------------------------+---------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------+ | | ``--last N`` | consider last N archives after other filters were applied | +-----------------------------------------------------------------------------+---------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------+ .. raw:: html .. only:: latex REPOSITORY_OR_ARCHIVE repository or archive to check consistency of optional arguments --repository-only only perform repository checks --archives-only only perform archives checks --verify-data perform cryptographic archive data integrity verification (conflicts with ``--repository-only``) --repair attempt to repair any inconsistencies found --save-space work slower, but using less space :ref:`common_options` | Archive filters -P PREFIX, --prefix PREFIX only consider archive names starting with this prefix. -a GLOB, --glob-archives GLOB only consider archive names matching the glob. sh: rules apply, see "borg help patterns". ``--prefix`` and ``--glob-archives`` are mutually exclusive. --sort-by KEYS Comma-separated list of sorting keys; valid keys are: timestamp, name, id; default is: timestamp --first N consider first N archives after other filters were applied --last N consider last N archives after other filters were applied Description ~~~~~~~~~~~ The check command verifies the consistency of a repository and the corresponding archives. check --repair is a potentially dangerous function and might lead to data loss (for kinds of corruption it is not capable of dealing with). BE VERY CAREFUL! 
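As a sketch of a cautious workflow (the repository path is a placeholder), run a
read-only check first and only attempt a repair after reviewing its output::

    # read-only consistency check of repository and archives:
    borg check -v /path/to/repo

    # only if problems were reported and you accept the risks:
    borg check -v --repair /path/to/repo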
First, the underlying repository data files are checked: - For all segments, the segment magic header is checked. - For all objects stored in the segments, all metadata (e.g. CRC and size) and all data is read. The read data is checked by size and CRC. Bit rot and other types of accidental damage can be detected this way. - In repair mode, if an integrity error is detected in a segment, try to recover as many objects from the segment as possible. - In repair mode, make sure that the index is consistent with the data stored in the segments. - If checking a remote repo via ``ssh:``, the repo check is executed on the server without causing significant network traffic. - The repository check can be skipped using the ``--archives-only`` option. Second, the consistency and correctness of the archive metadata is verified: - Is the repo manifest present? If not, it is rebuilt from archive metadata chunks (this requires reading and decrypting of all metadata and data). - Check if archive metadata chunk is present; if not, remove archive from manifest. - For all files (items) in the archive, for all chunks referenced by these files, check if chunk is present. In repair mode, if a chunk is not present, replace it with a same-size replacement chunk of zeroes. If a previously lost chunk reappears (e.g. via a later backup), in repair mode the all-zero replacement chunk will be replaced by the correct chunk. This requires reading of archive and file metadata, but not data. - In repair mode, when all the archives were checked, orphaned chunks are deleted from the repo. One cause of orphaned chunks are input file related errors (like read errors) in the archive creation process. - If checking a remote repo via ``ssh:``, the archive check is executed on the client machine because it requires decryption, and this is always done client-side as key access is needed. - The archive checks can be time consuming; they can be skipped using the ``--repository-only`` option. The ``--verify-data`` option will perform a full integrity verification (as opposed to checking the CRC32 of the segment) of data, which means reading the data from the repository, decrypting and decompressing it. This is a cryptographic verification, which will detect (accidental) corruption. For encrypted repositories it is tamper-resistant as well, unless the attacker has access to the keys. It is also very slow.borgbackup-1.1.15/docs/usage/key_export.rst.inc0000644000175000017500000001051713771325506021440 0ustar useruser00000000000000.. IMPORTANT: this file is auto-generated from borg's built-in help, do not edit! .. _borg_key_export: borg key export --------------- .. code-block:: none borg [common options] key export [options] [REPOSITORY] [PATH] .. only:: html .. 
class:: borg-options-table +-------------------------------------------------------+----------------+------------------------------------------------------------------------+ | **positional arguments** | +-------------------------------------------------------+----------------+------------------------------------------------------------------------+ | | ``REPOSITORY`` | | +-------------------------------------------------------+----------------+------------------------------------------------------------------------+ | | ``PATH`` | where to store the backup | +-------------------------------------------------------+----------------+------------------------------------------------------------------------+ | **optional arguments** | +-------------------------------------------------------+----------------+------------------------------------------------------------------------+ | | ``--paper`` | Create an export suitable for printing and later type-in | +-------------------------------------------------------+----------------+------------------------------------------------------------------------+ | | ``--qr-html`` | Create an html file suitable for printing and later type-in or qr scan | +-------------------------------------------------------+----------------+------------------------------------------------------------------------+ | .. class:: borg-common-opt-ref | | | | :ref:`common_options` | +-------------------------------------------------------+----------------+------------------------------------------------------------------------+ .. raw:: html .. only:: latex REPOSITORY PATH where to store the backup optional arguments --paper Create an export suitable for printing and later type-in --qr-html Create an html file suitable for printing and later type-in or qr scan :ref:`common_options` | Description ~~~~~~~~~~~ If repository encryption is used, the repository is inaccessible without the key. This command allows to backup this essential key. Note that the backup produced does not include the passphrase itself (i.e. the exported key stays encrypted). In order to regain access to a repository, one needs both the exported key and the original passphrase. There are two backup formats. The normal backup format is suitable for digital storage as a file. The ``--paper`` backup format is optimized for printing and typing in while importing, with per line checks to reduce problems with manual input. For repositories using keyfile encryption the key is saved locally on the system that is capable of doing backups. To guard against loss of this key, the key needs to be backed up independently of the main data backup. For repositories using the repokey encryption the key is saved in the repository in the config file. A backup is thus not strictly needed, but guards against the repository becoming inaccessible if the file is damaged for some reason.borgbackup-1.1.15/docs/usage/change-passphrase.rst.inc0000644000175000017500000000331413771325506022640 0ustar useruser00000000000000.. IMPORTANT: this file is auto-generated from borg's built-in help, do not edit! .. _borg_change-passphrase: borg change-passphrase ---------------------- .. code-block:: none borg [common options] change-passphrase [options] [REPOSITORY] .. only:: html .. 
class:: borg-options-table +-------------------------------------------------------+----------------+--+ | **positional arguments** | +-------------------------------------------------------+----------------+--+ | | ``REPOSITORY`` | | +-------------------------------------------------------+----------------+--+ | .. class:: borg-common-opt-ref | | | | :ref:`common_options` | +-------------------------------------------------------+----------------+--+ .. raw:: html .. only:: latex REPOSITORY :ref:`common_options` | Description ~~~~~~~~~~~ The key files used for repository encryption are optionally passphrase protected. This command can be used to change this passphrase. Please note that this command only changes the passphrase, but not any secret protected by it (like e.g. encryption/MAC keys or chunker seed). Thus, changing the passphrase after passphrase and borg key got compromised does not protect future (nor past) backups to the same repository.borgbackup-1.1.15/docs/usage/usage_general.rst.inc0000644000175000017500000000101313771325506022037 0ustar useruser00000000000000.. include:: general/positional-arguments.rst.inc .. include:: general/repository-urls.rst.inc .. include:: general/repository-locations.rst.inc .. include:: general/logging.rst.inc .. include:: general/return-codes.rst.inc .. _env_vars: .. include:: general/environment.rst.inc .. _file-systems: .. include:: general/file-systems.rst.inc .. include:: general/units.rst.inc .. include:: general/date-time.rst.inc .. include:: general/resources.rst.inc .. _platforms: .. include:: general/file-metadata.rst.inc borgbackup-1.1.15/docs/usage/key_migrate-to-repokey.rst.inc0000644000175000017500000000412413771325506023640 0ustar useruser00000000000000.. IMPORTANT: this file is auto-generated from borg's built-in help, do not edit! .. _borg_key_migrate-to-repokey: borg key migrate-to-repokey --------------------------- .. code-block:: none borg [common options] key migrate-to-repokey [options] [REPOSITORY] .. only:: html .. class:: borg-options-table +-------------------------------------------------------+----------------+--+ | **positional arguments** | +-------------------------------------------------------+----------------+--+ | | ``REPOSITORY`` | | +-------------------------------------------------------+----------------+--+ | .. class:: borg-common-opt-ref | | | | :ref:`common_options` | +-------------------------------------------------------+----------------+--+ .. raw:: html .. only:: latex REPOSITORY :ref:`common_options` | Description ~~~~~~~~~~~ This command migrates a repository from passphrase mode (removed in Borg 1.0) to repokey mode. You will be first asked for the repository passphrase (to open it in passphrase mode). This is the same passphrase as you used to use for this repo before 1.0. It will then derive the different secrets from this passphrase. Then you will be asked for a new passphrase (twice, for safety). This passphrase will be used to protect the repokey (which contains these same secrets in encrypted form). You may use the same passphrase as you used to use, but you may also use a different one. After migrating to repokey mode, you can change the passphrase at any time. But please note: the secrets will always stay the same and they could always be derived from your (old) passphrase-mode passphrase.borgbackup-1.1.15/docs/usage/config.rst0000644000175000017500000000075013771325506017742 0ustar useruser00000000000000.. include:: config.rst.inc .. 
note:: The repository & cache config files are some of the only directly manipulable parts of a repository that aren't versioned or backed up, so be careful when making changes\! Examples ~~~~~~~~ :: # find cache directory $ cd ~/.cache/borg/$(borg config /path/to/repo id) # reserve some space $ borg config /path/to/repo additional_free_space 2G # make a repo append-only $ borg config /path/to/repo append_only 1 borgbackup-1.1.15/docs/usage/init.rst0000644000175000017500000000121213771325506017432 0ustar useruser00000000000000.. include:: init.rst.inc Examples ~~~~~~~~ :: # Local repository, repokey encryption, BLAKE2b (often faster, since Borg 1.1) $ borg init --encryption=repokey-blake2 /path/to/repo # Local repository (no encryption) $ borg init --encryption=none /path/to/repo # Remote repository (accesses a remote borg via ssh) # repokey: stores the (encrypted) key into /config $ borg init --encryption=repokey-blake2 user@hostname:backup # Remote repository (accesses a remote borg via ssh) # keyfile: stores the (encrypted) key into ~/.config/borg/keys/ $ borg init --encryption=keyfile user@hostname:backup borgbackup-1.1.15/docs/usage/borgfs.rst.inc0000644000175000017500000003737513771325506020544 0ustar useruser00000000000000.. IMPORTANT: this file is auto-generated from borg's built-in help, do not edit! .. _borg_borgfs: borg borgfs ----------- .. code-block:: none borg [common options] borgfs [options] REPOSITORY_OR_ARCHIVE MOUNTPOINT [PATH...] .. only:: html .. class:: borg-options-table +-----------------------------------------------------------------------------+---------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------+ | **positional arguments** | +-----------------------------------------------------------------------------+---------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------+ | | ``REPOSITORY_OR_ARCHIVE`` | repository/archive to mount | +-----------------------------------------------------------------------------+---------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------+ | | ``MOUNTPOINT`` | where to mount filesystem | +-----------------------------------------------------------------------------+---------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------+ | | ``PATH`` | paths to extract; patterns are supported | +-----------------------------------------------------------------------------+---------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------+ | **optional arguments** | +-----------------------------------------------------------------------------+---------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------+ | | ``-V``, ``--version`` | show version number and exit | 
+-----------------------------------------------------------------------------+---------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------+ | | ``-f``, ``--foreground`` | stay in foreground, do not daemonize | +-----------------------------------------------------------------------------+---------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------+ | | ``-o`` | Extra mount options | +-----------------------------------------------------------------------------+---------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------+ | .. class:: borg-common-opt-ref | | | | :ref:`common_options` | +-----------------------------------------------------------------------------+---------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------+ | **Archive filters** — Archive filters can be applied to repository targets. | +-----------------------------------------------------------------------------+---------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------+ | | ``-P PREFIX``, ``--prefix PREFIX`` | only consider archive names starting with this prefix. | +-----------------------------------------------------------------------------+---------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------+ | | ``-a GLOB``, ``--glob-archives GLOB`` | only consider archive names matching the glob. sh: rules apply, see "borg help patterns". ``--prefix`` and ``--glob-archives`` are mutually exclusive. 
| +-----------------------------------------------------------------------------+---------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------+ | | ``--sort-by KEYS`` | Comma-separated list of sorting keys; valid keys are: timestamp, name, id; default is: timestamp | +-----------------------------------------------------------------------------+---------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------+ | | ``--first N`` | consider first N archives after other filters were applied | +-----------------------------------------------------------------------------+---------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------+ | | ``--last N`` | consider last N archives after other filters were applied | +-----------------------------------------------------------------------------+---------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------+ | **Exclusion options** | +-----------------------------------------------------------------------------+---------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------+ | | ``-e PATTERN``, ``--exclude PATTERN`` | exclude paths matching PATTERN | +-----------------------------------------------------------------------------+---------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------+ | | ``--exclude-from EXCLUDEFILE`` | read exclude patterns from EXCLUDEFILE, one per line | +-----------------------------------------------------------------------------+---------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------+ | | ``--pattern PATTERN`` | experimental: include/exclude paths matching PATTERN | +-----------------------------------------------------------------------------+---------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------+ | | ``--patterns-from PATTERNFILE`` | experimental: read include/exclude patterns from PATTERNFILE, one per line | +-----------------------------------------------------------------------------+---------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------+ | | ``--strip-components NUMBER`` | Remove the specified number of leading path elements. Paths with fewer elements will be silently skipped. 
| +-----------------------------------------------------------------------------+---------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------+ .. raw:: html .. only:: latex REPOSITORY_OR_ARCHIVE repository/archive to mount MOUNTPOINT where to mount filesystem PATH paths to extract; patterns are supported optional arguments -V, --version show version number and exit -f, --foreground stay in foreground, do not daemonize -o Extra mount options :ref:`common_options` | Archive filters -P PREFIX, --prefix PREFIX only consider archive names starting with this prefix. -a GLOB, --glob-archives GLOB only consider archive names matching the glob. sh: rules apply, see "borg help patterns". ``--prefix`` and ``--glob-archives`` are mutually exclusive. --sort-by KEYS Comma-separated list of sorting keys; valid keys are: timestamp, name, id; default is: timestamp --first N consider first N archives after other filters were applied --last N consider last N archives after other filters were applied Exclusion options -e PATTERN, --exclude PATTERN exclude paths matching PATTERN --exclude-from EXCLUDEFILE read exclude patterns from EXCLUDEFILE, one per line --pattern PATTERN experimental: include/exclude paths matching PATTERN --patterns-from PATTERNFILE experimental: read include/exclude patterns from PATTERNFILE, one per line --strip-components NUMBER Remove the specified number of leading path elements. Paths with fewer elements will be silently skipped. Description ~~~~~~~~~~~ This command mounts an archive as a FUSE filesystem. This can be useful for browsing an archive or restoring individual files. Unless the ``--foreground`` option is given the command will run in the background until the filesystem is ``umounted``. The command ``borgfs`` provides a wrapper for ``borg mount``. This can also be used in fstab entries: ``/path/to/repo /mnt/point fuse.borgfs defaults,noauto 0 0`` To allow a regular user to use fstab entries, add the ``user`` option: ``/path/to/repo /mnt/point fuse.borgfs defaults,noauto,user 0 0`` For mount options, see the fuse(8) manual page. Additional mount options supported by borg: - versions: when used with a repository mount, this gives a merged, versioned view of the files in the archives. EXPERIMENTAL, layout may change in future. - allow_damaged_files: by default damaged files (where missing chunks were replaced with runs of zeros by borg check ``--repair``) are not readable and return EIO (I/O error). Set this option to read such files. The BORG_MOUNT_DATA_CACHE_ENTRIES environment variable is meant for advanced users to tweak the performance. It sets the number of cached data chunks; additional memory usage can be up to ~8 MiB times this number. The default is the number of CPU cores. When the daemonized process receives a signal or crashes, it does not unmount. Unmounting in these cases could cause an active rsync or similar process to unintentionally delete data. When running in the foreground ^C/SIGINT unmounts cleanly, but other signals or crashes do not.borgbackup-1.1.15/docs/usage/key_change-passphrase.rst.inc0000644000175000017500000000333413771325506023512 0ustar useruser00000000000000.. IMPORTANT: this file is auto-generated from borg's built-in help, do not edit! .. _borg_key_change-passphrase: borg key change-passphrase -------------------------- .. code-block:: none borg [common options] key change-passphrase [options] [REPOSITORY] .. 
only:: html .. class:: borg-options-table +-------------------------------------------------------+----------------+--+ | **positional arguments** | +-------------------------------------------------------+----------------+--+ | | ``REPOSITORY`` | | +-------------------------------------------------------+----------------+--+ | .. class:: borg-common-opt-ref | | | | :ref:`common_options` | +-------------------------------------------------------+----------------+--+ .. raw:: html .. only:: latex REPOSITORY :ref:`common_options` | Description ~~~~~~~~~~~ The key files used for repository encryption are optionally passphrase protected. This command can be used to change this passphrase. Please note that this command only changes the passphrase, but not any secret protected by it (like e.g. encryption/MAC keys or chunker seed). Thus, changing the passphrase after passphrase and borg key got compromised does not protect future (nor past) backups to the same repository.borgbackup-1.1.15/docs/usage/debug.rst0000644000175000017500000000330213771325506017557 0ustar useruser00000000000000Debugging Facilities -------------------- There is a ``borg debug`` command that has some subcommands which are all **not intended for normal use** and **potentially very dangerous** if used incorrectly. For example, ``borg debug put-obj`` and ``borg debug delete-obj`` will only do what their name suggests: put objects into repo / delete objects from repo. Please note: - they will not update the chunks cache (chunks index) about the object - they will not update the manifest (so no automatic chunks index resync is triggered) - they will not check whether the object is in use (e.g. before delete-obj) - they will not update any metadata which may point to the object They exist to improve debugging capabilities without direct system access, e.g. in case you ever run into some severe malfunction. Use them only if you know what you are doing or if a trusted Borg developer tells you what to do. Borg has a ``--debug-topic TOPIC`` option to enable specific debugging messages. Topics are generally not documented. A ``--debug-profile FILE`` option exists which writes a profile of the main program's execution to a file. The format of these files is not directly compatible with the Python profiling tools, since these use the "marshal" format, which is not intended to be secure (quoting the Python docs: "Never unmarshal data received from an untrusted or unauthenticated source."). The ``borg debug profile-convert`` command can be used to take a Borg profile and convert it to a profile file that is compatible with the Python tools. Additionally, if the filename specified for ``--debug-profile`` ends with ".pyprof" a Python compatible profile is generated. This is only intended for local use by developers. borgbackup-1.1.15/docs/global.rst.inc0000644000175000017500000000315313771325506017401 0ustar useruser00000000000000.. highlight:: bash .. |project_name| replace:: Borg .. |package_dirname| replace:: borgbackup-|version| .. |package_filename| replace:: |package_dirname|.tar.gz .. |package_url| replace:: https://pypi.python.org/packages/source/b/borgbackup/|package_filename| .. |git_url| replace:: https://github.com/borgbackup/borg.git .. _github: https://github.com/borgbackup/borg .. _issue tracker: https://github.com/borgbackup/borg/issues .. _deduplication: https://en.wikipedia.org/wiki/Data_deduplication .. _AES: https://en.wikipedia.org/wiki/Advanced_Encryption_Standard .. _HMAC-SHA256: https://en.wikipedia.org/wiki/HMAC .. 
_SHA256: https://en.wikipedia.org/wiki/SHA-256 .. _PBKDF2: https://en.wikipedia.org/wiki/PBKDF2 .. _ACL: https://en.wikipedia.org/wiki/Access_control_list .. _libacl: https://savannah.nongnu.org/projects/acl/ .. _libattr: https://savannah.nongnu.org/projects/attr/ .. _liblz4: https://github.com/Cyan4973/lz4 .. _libzstd: https://github.com/facebook/zstd .. _libb2: https://github.com/BLAKE2/libb2 .. _OpenSSL: https://www.openssl.org/ .. _`Python 3`: https://www.python.org/ .. _Buzhash: https://en.wikipedia.org/wiki/Buzhash .. _msgpack: https://msgpack.org/ .. _`msgpack-python`: https://pypi.python.org/pypi/msgpack-python/ .. _llfuse: https://pypi.python.org/pypi/llfuse/ .. _homebrew: http://brew.sh/ .. _userspace filesystems: https://en.wikipedia.org/wiki/Filesystem_in_Userspace .. _Cython: http://cython.org/ .. _virtualenv: https://pypi.python.org/pypi/virtualenv/ .. _mailing list discussion about internals: http://librelist.com/browser/attic/2014/5/6/questions-and-suggestions-about-inner-working-of-attic> borgbackup-1.1.15/docs/changes.rst0000644000175000017500000044366113771325506017015 0ustar useruser00000000000000.. _important_notes: Important notes =============== This section provides information about security and corruption issues. .. _hashindex_set_bug: Pre-1.1.11 potential index corruption / data loss issue ------------------------------------------------------- A bug was discovered in our hashtable code, see issue #4829. The code is used for the client-side chunks cache and the server-side repo index. Although borg uses the hashtables very heavily, the index corruption did not happen too frequently, because it needed specific conditions to happen. Data loss required even more specific conditions, so it should be rare (and also detectable via borg check). You might be affected if borg crashed with / complained about: - AssertionError: Corrupted segment reference count - corrupted index or hints - ObjectNotFound: Object with key ... not found in repository ... - Index mismatch for key b'...'. (..., ...) != (-1, -1) - ValueError: stats_against: key contained in self but not in master_index. Advised procedure to fix any related issue in your indexes/caches: - install fixed borg code (on client AND server) - for all of your clients and repos remove the cache by: borg delete --cache-only YOURREPO (later, the cache will be re-built automatically) - for all your repos, rebuild the repo index by: borg check --repair YOURREPO This will also check all archives and detect if there is any data-loss issue. Affected branches / releases: - fd06497 introduced the bug into 1.1-maint branch - it affects all borg 1.1.x since 1.1.0b4. - fd06497 introduced the bug into master branch - it affects all borg 1.2.0 alpha releases. - c5cd882 introduced the bug into 1.0-maint branch - it affects all borg 1.0.x since 1.0.11rc1. The bug was fixed by: - 701159a fixes the bug in 1.1-maint branch - will be released with borg 1.1.11. - fa63150 fixes the bug in master branch - will be released with borg 1.2.0a8. - 7bb90b6 fixes the bug in 1.0-maint branch. Branch is EOL, no new release is planned as of now. .. _broken_validator: Pre-1.1.4 potential data corruption issue ----------------------------------------- A data corruption bug was discovered in borg check --repair, see issue #3444. This is a 1.1.x regression, releases < 1.1 (e.g. 1.0.x) are not affected. To avoid data loss, you must not run borg check --repair using an unfixed version of borg 1.1.x. The first official release that has the fix is 1.1.4. 
Package maintainers may have applied the fix to updated packages of 1.1.x (x<4) though, see the package maintainer's package changelog to make sure. If you never had missing item metadata chunks, the bug has not affected you even if you did run borg check --repair with an unfixed version. When borg check --repair tried to repair corrupt archives that miss item metadata chunks, the resync to valid metadata in still present item metadata chunks malfunctioned. This was due to a broken validator that considered all (even valid) item metadata as invalid. As they were considered invalid, borg discarded them. Practically, that means the affected files, directories or other fs objects were discarded from the archive. Due to the malfunction, the process was extremely slow, but if you let it complete, borg would have created a "repaired" archive that has lost a lot of items. If you interrupted borg check --repair because it was so strangely slow (killing borg somehow, e.g. Ctrl-C) the transaction was rolled back and no corruption occurred. The log message indicating the precondition for the bug triggering looks like: item metadata chunk missing [chunk: 001056_bdee87d...a3e50d] If you never had that in your borg check --repair runs, you're not affected. But if you're unsure or you actually have seen that, better check your archives. By just using "borg list repo::archive" you can see if all expected filesystem items are listed. .. _tam_vuln: Pre-1.0.9 manifest spoofing vulnerability (CVE-2016-10099) ---------------------------------------------------------- A flaw in the cryptographic authentication scheme in Borg allowed an attacker to spoof the manifest. The attack requires an attacker to be able to 1. insert files (with no additional headers) into backups 2. gain write access to the repository This vulnerability does not disclose plaintext to the attacker, nor does it affect the authenticity of existing archives. The vulnerability allows an attacker to create a spoofed manifest (the list of archives). Creating plausible fake archives may be feasible for small archives, but is unlikely for large archives. The fix adds a separate authentication tag to the manifest. For compatibility with prior versions this authentication tag is *not* required by default for existing repositories. Repositories created with 1.0.9 and later require it. Steps you should take: 1. Upgrade all clients to 1.0.9 or later. 2. Run ``borg upgrade --tam `` *on every client* for *each* repository. 3. This will list all archives, including archive IDs, for easy comparison with your logs. 4. Done. Prior versions can access and modify repositories with this measure enabled, however, to 1.0.9 or later their modifications are indiscernible from an attack and will raise an error until the below procedure is followed. We are aware that this can be be annoying in some circumstances, but don't see a way to fix the vulnerability otherwise. In case a version prior to 1.0.9 is used to modify a repository where above procedure was completed, and now you get an error message from other clients: 1. ``borg upgrade --tam --force `` once with *any* client suffices. This attack is mitigated by: - Noting/logging ``borg list``, ``borg info``, or ``borg create --stats``, which contain the archive IDs. We are not aware of others having discovered, disclosed or exploited this vulnerability. 
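To make the mitigation steps above concrete, here is an illustrative command
sequence (the repository path is only a placeholder)::

    # on every client, after all clients were upgraded to borg >= 1.0.9:
    borg upgrade --tam /path/to/repo

    # only needed if a pre-1.0.9 client modified the repository afterwards
    # and newer clients now raise an error about the manifest:
    borg upgrade --tam --force /path/to/repo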
Vulnerability time line: * 2016-11-14: Vulnerability and fix discovered during review of cryptography by Marian Beermann (@enkore) * 2016-11-20: First patch * 2016-12-20: Released fixed version 1.0.9 * 2017-01-02: CVE was assigned * 2017-01-15: Released fixed version 1.1.0b3 (fix was previously only available from source) .. _attic013_check_corruption: Pre-1.0.9 potential data loss ----------------------------- If you have archives in your repository that were made with attic <= 0.13 (and later migrated to borg), running borg check would report errors in these archives. See issue #1837. The reason for this is a invalid (and useless) metadata key that was always added due to a bug in these old attic versions. If you run borg check --repair, things escalate quickly: all archive items with invalid metadata will be killed. Due to that attic bug, that means all items in all archives made with these old attic versions. Pre-1.0.4 potential repo corruption ----------------------------------- Some external errors (like network or disk I/O errors) could lead to corruption of the backup repository due to issue #1138. A sign that this happened is if "E" status was reported for a file that can not be explained by problems with the source file. If you still have logs from "borg create -v --list", you can check for "E" status. Here is what could cause corruption and what you can do now: 1) I/O errors (e.g. repo disk errors) while writing data to repo. This could lead to corrupted segment files. Fix:: # check for corrupt chunks / segments: borg check -v --repository-only REPO # repair the repo: borg check -v --repository-only --repair REPO # make sure everything is fixed: borg check -v --repository-only REPO 2) Unreliable network / unreliable connection to the repo. This could lead to archive metadata corruption. Fix:: # check for corrupt archives: borg check -v --archives-only REPO # delete the corrupt archives: borg delete --force REPO::CORRUPT_ARCHIVE # make sure everything is fixed: borg check -v --archives-only REPO 3) In case you want to do more intensive checking. The best check that everything is ok is to run a dry-run extraction:: borg extract -v --dry-run REPO::ARCHIVE .. _changelog: Changelog ========= Version 1.1.15 (2020-12-25) --------------------------- Compatibility notes: - When upgrading from borg 1.0.x to 1.1.x, please note: - read all the compatibility notes for 1.1.0*, starting from 1.1.0b1. - borg upgrade: you do not need to and you also should not run it. - borg might ask some security-related questions once after upgrading. You can answer them either manually or via environment variable. One known case is if you use unencrypted repositories, then it will ask about a unknown unencrypted repository one time. - your first backup with 1.1.x might be significantly slower (it might completely read, chunk, hash a lot files) - this is due to the --files-cache mode change (and happens every time you change mode). You can avoid the one-time slowdown by using the pre-1.1.0rc4-compatible mode (but that is less safe for detecting changed files than the default). See the --files-cache docs for details. - 1.1.11 removes WSL autodetection (Windows 10 Subsystem for Linux). If WSL still has a problem with sync_file_range, you need to set BORG_WORKAROUNDS=basesyncfile in the borg process environment to work around the WSL issue. 
- 1.1.14 changes return codes due to a bug fix: In case you have scripts expecting rc == 2 for a signal exit, you need to update them to check for >= 128 (as documented since long). - 1.1.15 drops python 3.4 support, minimum requirement is 3.5 now. Fixes: - extract: - improve exception handling when setting xattrs, #5092. - emit a warning message giving the path, xattr key and error message. - continue trying to restore other xattrs and bsdflags of the same file after an exception with xattr-setting happened. - export-tar: - set tar format to GNU_FORMAT explicitly, #5274 - fix memory leak with ssh: remote repository, #5568 - fix potential memory leak with ssh: remote repository with partial extraction - create: fix --dry-run and --stats coexistence, #5415 - use --timestamp for {utcnow} and {now} if given, #5189 New features: - create: implement --stdin-mode, --stdin-user and --stdin-group, #5333 - allow appending the files cache filename with BORG_FILES_CACHE_SUFFIX env var Other changes: - drop python 3.4 support, minimum requirement is 3.5 now. - enable using libxxhash instead of bundled xxh64 code - update llfuse requirements (1.3.8) - set cython language_level in some files to fix warnings - allow EIO with warning when trying to hardlink - PropDict: fail early if internal_dict is not a dict - update shell completions - tests / CI - add a test for the hashindex corruption bug, #5531 #4829 - fix spurious failure in test_cache_files, #5438 - added a github ci workflow - reduce testing on travis, no macOS, no py3x-dev, #5467 - travis: use newer dists, native py on dist - vagrant: - remove jessie and trusty boxes, #5348 #5383 - pyinstaller 4.0, build on py379 - binary build on stretch64, #5348 - remove easy_install based pip installation - docs: - clarify '--one-file-system' for btrfs, #5391 - add example for excluding content using the --pattern cmd line arg - complement the documentation for pattern files and exclude files, #5524 - made ansible playbook more generic, use package instead of pacman. also change state from "latest" to "present". - complete documentation on append-only remote repos, #5497 - internals: rather talk about target size than statistics, #5336 - new compression algorithm policy, #1633 #5505 - faq: add a hint on sleeping computer, #5301 - note requirements for full disk access on macOS Catalina, #5303 - fix/improve description of borg upgrade hardlink usage, #5518 - modernize 1.1 code: - drop code/workarounds only needed to support Python 3.4 - remove workaround for pre-release py37 argparse bug - removed some outdated comments/docstrings - requirements: remove some restrictions, lock on current versions Version 1.1.14 (2020-10-07) --------------------------- Fixes: - check --repair: fix potential data loss when interrupting it, #5325 - exit with 128 + signal number (as documented) when borg is killed by a signal, #5161 - fix hardlinked CACHEDIR.TAG processing, #4911 - create --read-special: .part files also should be regular files, #5217 - llfuse dependency: choose least broken 1.3.6/1.3.7. 1.3.6 is broken on python 3.9, 1.3.7 is broken on FreeBSD. Other changes: - upgrade bundled xxhash to 0.7.4 - self test: if it fails, also point to OS and hardware, #5334 - pyinstaller: compute basepath from spec file location - prettier error message when archive gets too big, #5307 - check/recreate are not "experimental" any more (but still potentially dangerous): - recreate: remove extra confirmation - rephrase some warnings, update docs, #5164 - shell completions: - misc. 
updates / fixes - support repositories in fish tab completion, #5256 - complete $BORG_RECREATE_I_KNOW_WHAT_I_AM_DOING - rewrite zsh completion: - completion for almost all optional and positional arguments - completion for Borg environment variables (parameters) - use "allow/deny list" instead of "white/black list" wording - declare "allow_cache_wipe" marker in setup.cfg to avoid pytest warning - vagrant / tests: - misc. fixes / updates - use python 3.5.10 for binary build - build directory-based binaries additionally to the single file binaries - add libffi-dev, required to build python - use cryptography<3.0, more recent versions break the jessie box - test on python 3.9 - do brew update with /dev/null redirect to avoid "too much log output" on travis-ci - docs: - add ssh-agent pull backup method docs, #5288 - how to approach borg speed issues, #5371 - mention double --force in prune docs - update Homebrew install instructions, #5185 - better description of how cache and rebuilds of it work - point to borg create --list item flags in recreate usage, #5165 - add security faq explaining AES-CTR crypto issues, #5254 - add a note to create from stdin regarding files cache, #5180 - fix borg.1 manpage generation regression, #5211 - clarify how exclude options work in recreate, #5193 - add section for retired contributors - hint about not misusing private email addresses of contributors for borg support Version 1.1.13 (2020-06-06) --------------------------- Fixes: - rebuilt using a current Cython version, compatible with python 3.8, #5214 Version 1.1.12 (2020-06-06) --------------------------- Fixes: - fix preload-related memory leak, #5202. - mount / borgfs (FUSE filesystem): - fix FUSE low linear read speed on large files, #5067 - fix crash on old llfuse without birthtime attrs, #5064 - accidentally we required llfuse >= 1.3. Now also old llfuse works again. - set f_namemax in statfs result, #2684 - update precedence of env vars to set config and cache paths, #4894 - correctly calculate compression ratio, taking header size into account, too New features: - --bypass-lock option to bypass locking with read-only repositories Other changes: - upgrade bundled zstd to 1.4.5 - travis: adding comments and explanations to Travis config / install script, improve macOS builds. - tests: test_delete_force: avoid sporadic test setup issues, #5196 - misc. vagrant fixes - the binary for macOS is now built on macOS 10.12 - the binaries for Linux are now built on Debian 8 "Jessie", #3761 - docs: - PlaceholderError not printed as JSON, #4073 - "How important is Borg config?", #4941 - make Sphinx warnings break docs build, #4587 - some markup / warning fixes - add "updating borgbackup.org/releases" to release checklist, #4999 - add "rendering docs" to release checklist, #5000 - clarify borg init's encryption modes - add note about patterns and stored paths, #4160 - add upgrade of tools to pip installation how-to - document one cause of orphaned chunks in check command, #2295 - linked recommended restrictions to ssh public keys on borg servers in faq, #4946 Version 1.1.11 (2020-03-08) --------------------------- Fixes: - fixed potential index corruption / data loss issue due to bug in hashindex_set, #4829. Please read and follow the more detailled notes close to the top of this document. - upgrade bundled xxhash to 0.7.3, #4891. 0.7.2 is the minimum requirement for correct operations on ARMv6 in non-fixup mode, where unaligned memory accesses cause bus errors. 
0.7.3 adds some speedups and libxxhash 0.7.3 even has a pkg-config file now. - upgrade bundled lz4 to 1.9.2 - upgrade bundled zstd to 1.4.4 - fix crash when upgrading erroneous hints file, #4922 - extract: - fix KeyError for "partial" extraction, #4607 - fix "partial" extract for hardlinked contentless file types, #4725 - fix preloading for old (0.xx) remote servers, #4652 - fix confusing output of borg extract --list --strip-components, #4934 - delete: after double-force delete, warn about necessary repair, #4704 - create: give invalid repo error msg if repo config not found, #4411 - mount: fix FUSE mount missing st_birthtime, #4763 #4767 - check: do not stumble over invalid item key, #4845 - info: if the archive doesn't exist, print a pretty message, #4793 - SecurityManager.known(): check all files, #4614 - Repository.open: use stat() to check for repo dir, #4695 - Repository.check_can_create_repository: use stat() to check, #4695 - fix invalid archive error message - fix optional/non-optional location arg, #4541 - commit-time free space calc: ignore bad compact map entries, #4796 - ignore EACCES (errno 13) when hardlinking the old config, #4730 - --prefix / -P: fix processing, avoid argparse issue, #4769 New features: - enable placeholder usage in all extra archive arguments - new BORG_WORKAROUNDS mechanism, basesyncfile, #4710 - recreate: support --timestamp option, #4745 - support platforms without os.link (e.g. Android with Termux), #4901. if we don't have os.link, we just extract another copy instead of making a hardlink. - support linux platforms without sync_file_range (e.g. Android 7 with Termux), #4905 Other: - ignore --stats when given with --dry-run, but continue, #4373 - add some ProgressIndicator msgids to code / fix docs, #4935 - elaborate on "Calculating size" message - argparser: always use REPOSITORY in metavar, also use more consistent help phrasing. - check: improve error output for matching index size, see #4829 - docs: - changelog: add advisory about hashindex_set bug #4829 - better describe BORG_SECURITY_DIR, BORG_CACHE_DIR, #4919 - infos about cache security assumptions, #4900 - add FAQ describing difference between a local repo vs. repo on a server. 
- document how to test exclusion patterns without performing an actual backup - timestamps in the files cache are now usually ctime, #4583 - fix bad reference to borg compact (does not exist in 1.1), #4660 - create: borg 1.1 is not future any more - extract: document limitation "needs empty destination", #4598 - how to supply a passphrase, use crypto devices, #4549 - fix osxfuse github link in installation docs - add example of exclude-norecurse rule in help patterns - update macOS Brew link - add note about software for automating backups, #4581 - AUTHORS: mention copyright+license for bundled msgpack - fix various code blocks in the docs, #4708 - updated docs to cover use of temp directory on remote, #4545 - add restore docs, #4670 - add a pull backup / push restore how-to, #1552 - add FAQ how to retain original paths, #4532 - explain difference between --exclude and --pattern, #4118 - add FAQs for SSH connection issues, #3866 - improve password FAQ, #4591 - reiterate that 'file cache names are absolute' in FAQ - tests: - cope with ANY error when importing pytest into borg.testsuite, #4652 - fix broken test that relied on improper zlib assumptions - test_fuse: filter out selinux xattrs, #4574 - travis / vagrant: - misc python versions removed / changed (due to openssl 1.1 compatibility) or added (3.7 and 3.8, for better borg compatibility testing) - binary building is on python 3.5.9 now - vagrant: - add new boxes: ubuntu 18.04 and 20.04, debian 10 - update boxes: openindiana, darwin, netbsd - remove old boxes: centos 6 - darwin: updated osxfuse to 3.10.4 - use debian/ubuntu pip/virtualenv packages - rather use python 3.6.2 than 3.6.0, fixes coverage/sqlite3 issue - use requirements.d/development.lock.txt to avoid compat issues - travis: - darwin: backport some install code / order from master - remove deprecated keyword "sudo" from travis config - allow osx builds to fail, #4955 this is due to travis-ci frequently being so slow that the OS X builds just fail because they exceed 50 minutes and get killed by travis. Version 1.1.10 (2019-05-16) --------------------------- Fixes: - extract: hang on partial extraction with ssh: repo, when hardlink master is not matched/extracted and borg hangs on related slave hardlink, #4350 - lrucache: regularly remove old FDs, #4427 - avoid stale filehandle issues, #3265 - freebsd: make xattr platform code api compatible with linux, #3952 - use whitelist approach for borg serve, #4097 - borg command shall terminate with rc 2 for ImportErrors, #4424 - create: only run stat_simple_attrs() once, this increases backup with lots of unchanged files performance by ~ 5%. - prune: fix incorrect borg prune --stats output with --dry-run, #4373 - key export: emit user-friendly error if repo key is exported to a directory, #4348 New features: - bundle latest supported msgpack-python release (0.5.6), remove msgpack-python from setup.py install_requires - by default we use the bundled code now. optionally, we still support using an external msgpack (see hints in setup.py), but this requires solid requirements management within distributions and is not recommended. borgbackup will break if you upgrade msgpack to an unsupported version. - display msgpack version as part of sysinfo (e.g. 
in tracebacks) - timestamp for borg delete --info added, #4359 - enable placeholder usage in --comment and --glob-archives, #4559, #4495 Other: - serve: do not check python/libc for borg serve, #4483 - shell completions: borg diff second archive - release scripts: signing binaries with Qubes OS support - testing: - vagrant: upgrade openbsd box to 6.4 - travis-ci: lock test env to py 3.4 compatible versions, #4343 - get rid of confusing coverage warning, #2069 - rename test_mount_hardlinks to test_fuse_mount_hardlinks, so both can be excluded by "not test_fuse". - pure-py msgpack warning shall not make a lot of tests fail, #4558 - docs: - add "SSH Configuration" section to "borg serve", #3988, #636, #4485 - README: new URL for funding options - add a sample logging.conf in docs/misc, #4380 - elaborate on append-only mode docs, #3504 - installation: added Alpine Linux to distribution list, #4415 - usage.html: only modify window.location when redirecting, #4133 - add msgpack license to docs/3rd_party/msgpack - vagrant / binary builds: - use python 3.5.7 for builds - use osxfuse 3.8.3 Version 1.1.9 (2019-02-10) -------------------------- Fixes: - security fix: configure FUSE with "default_permissions", #3903 "default_permissions" is now enforced by borg by default to let the kernel check uid/gid/mode based permissions. "ignore_permissions" can be given to not enforce "default_permissions". - make "hostname" short, even on misconfigured systems, #4262 - fix free space calculation on macOS (and others?), #4289 - config: quit with error message when no key is provided, #4223 - recover_segment: handle too small segment files correctly, #4272 - correctly release memoryview, #4243 - avoid diaper pattern in configparser by opening files, #4263 - add "# cython: language_level=3" directive to .pyx files, #4214 - info: consider part files for "This archive" stats, #3522 - work around Microsoft WSL issue #645 (sync_file_range), #1961 New features: - add --rsh command line option to complement BORG_RSH env var, #1701 - init: --make-parent-dirs parent1/parent2/repo_dir, #4235 Other: - add archive name to check --repair output, #3447 - check for unsupported msgpack versions - shell completions: - new shell completions for borg 1.1.9 - more complete shell completions for borg mount -o - added shell completions for borg help - option arguments for zsh tab completion - docs: - add FAQ regarding free disk space check, #3905 - update BORG_PASSCOMMAND example and clarify variable expansion, #4249 - FAQ regarding change of compression settings, #4222 - add note about BSD flags to changelog, #4246 - improve logging in example automation script - add note about files changing during backup, #4081 - work around the backslash issue, #4280 - update release workflow using twine (docs, scripts), #4213 - add warnings on repository copies to avoid future problems, #4272 - tests: - fix the homebrew 1.9 issues on travis-ci, #4254 - fix duplicate test method name, #4311 - test_mount_hardlinks: get rid of fakeroot-caused test fails, #3389 Version 1.1.8 (2018-12-09) -------------------------- Fixes: - enforce storage quota if set by serve-command, #4093 - invalid locations: give err msg containing parsed location, #4179 - list repo: add placeholders for hostname and username, #4130 - on linux, symlinks can't have ACLs, so don't try to set any, #4044 New features: - create: added PATH::archive output on INFO log level - read a passphrase from a file descriptor specified in the BORG_PASSPHRASE_FD environment variable. 
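A minimal sketch of how BORG_PASSPHRASE_FD can be used from a shell (the
descriptor number and file path are illustrative only, any descriptor borg
can read from works)::

    # open file descriptor 3 on a file holding the passphrase, let borg
    # read the passphrase from that descriptor, then close it again:
    exec 3< /path/to/passphrase
    BORG_PASSPHRASE_FD=3 borg list /path/to/repo
    exec 3<&-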
Other: - docs: - option --format is required for some expensive-to-compute values for json borg list by default does not compute expensive values except when they are needed. whether they are needed is determined by the format, in standard mode as well as in --json mode. - tell that our binaries are x86/x64 amd/intel, bauerj has ARM - fixed wrong archive name pattern in CRUD benchmark help - fixed link to cachedir spec in docs, #4140 - tests: - stop using fakeroot on travis, avoids sporadic EISDIR errors, #2482 - xattr key names must start with "user." on linux - fix code so flake8 3.6 does not complain - explicitly convert environment variable to str, #4136 - fix DeprecationWarning: Flags not at the start of the expression, #4137 - support pytest4, #4172 - vagrant: - use python 3.5.6 for builds Version 1.1.7 (2018-08-11) -------------------------- Compatibility notes: - added support for Python 3.7 Fixes: - cache lock: use lock_wait everywhere to fix infinite wait, see #3968 - don't archive tagged dir when recursing an excluded dir, #3991 - py37 argparse: work around bad default in py 3.7.0a/b/rc, #3996 - py37 remove loggerDict.clear() from tearDown method, #3805 - some fixes for bugs which likely did not result in problems in practice: - fixed logic bug in platform module API version check - fixed xattr/acl function prototypes, added missing ones New features: - init: add warning to store both key and passphrase at safe place(s) - BORG_HOST_ID env var to work around all-zero MAC address issue, #3985 - borg debug dump-repo-objs --ghost (dump everything from segment files, including deleted or superceded objects or commit tags) - borg debug search-repo-objs (search in repo objects for hex bytes or strings) Other changes: - add Python 3.7 support - updated shell completions - call socket.gethostname only once - locking: better logging, add some asserts - borg debug dump-repo-objs: - filename layout improvements - use repository.scan() to get on-disk order - docs: - update installation instructions for macOS - added instructions to install fuse via homebrew - improve diff docs - added note that checkpoints inside files requires 1.1+ - add link to tempfile module - remove row/column-spanning from docs source, #4000 #3990 - tests: - fetch less data via os.urandom - add py37 env for tox - travis: add 3.7, remove 3.6-dev (we test with -dev in master) - vagrant / binary builds: - use osxfuse 3.8.2 - use own (uptodate) openindiana box Version 1.1.6 (2018-06-11) -------------------------- Compatibility notes: - 1.1.6 changes: - also allow msgpack-python 0.5.6. Fixes: - fix borg exception handling on ENOSPC error with xattrs, #3808 - prune: fix/improve overall progress display - borg config repo ... 
does not need cache/manifest/key, #3802 - debug dump-repo-objs should not depend on a manifest obj - pypi package: - include .coveragerc, needed by tox.ini - fix package long description, #3854 New features: - mount: add uid, gid, umask mount options - delete: - only commit once, #3823 - implement --dry-run, #3822 - check: - show progress while rebuilding missing manifest, #3787 - more --repair output - borg config --list , #3612 Other changes: - update msgpack requirement, #3753 - update bundled zstd to 1.3.4, #3745 - update bundled lz4 code to 1.8.2, #3870 - docs: - describe what BORG_LIBZSTD_PREFIX does - fix and deduplicate encryption quickstart docs, #3776 - vagrant: - FUSE for macOS: upgrade 3.7.1 to 3.8.0 - exclude macOS High Sierra upgrade on the darwin64 machine - remove borgbackup.egg-info dir in fs_init (after rsync) - use pyenv-based build/test on jessie32/62 - use local 32 and 64bit debian jessie boxes - use "vagrant" as username for new xenial box - travis OS X: use xcode 8.3 (not broken) Version 1.1.5 (2018-04-01) -------------------------- Compatibility notes: - 1.1.5 changes: - require msgpack-python >= 0.4.6 and < 0.5.0. 0.5.0+ dropped python 3.4 testing and also caused some other issues because the python package was renamed to msgpack and emitted some FutureWarning. Fixes: - create --list: fix that it was never showing M status, #3492 - create: fix timing for first checkpoint (read files cache early, init checkpoint timer after that), see #3394 - extract: set rc=1 when extracting damaged files with all-zero replacement chunks or with size inconsistencies, #3448 - diff: consider an empty file as different to a non-existing file, #3688 - files cache: improve exception handling, #3553 - ignore exceptions in scandir_inorder() caused by an implicit stat(), also remove unneeded sort, #3545 - fixed tab completion problem where a space is always added after path even when it shouldn't - build: do .h file content checks in binary mode, fixes build issue for non-ascii header files on pure-ascii locale platforms, #3544 #3639 - borgfs: fix patterns/paths processing, #3551 - config: add some validation, #3566 - repository config: add validation for max_segment_size, #3592 - set cache previous_location on load instead of save - remove platform.uname() call which caused library mismatch issues, #3732 - add exception handler around deprecated platform.linux_distribution() call - use same datetime object for {now} and {utcnow}, #3548 New features: - create: implement --stdin-name, #3533 - add chunker_params to borg archive info (--json) - BORG_SHOW_SYSINFO=no to hide system information from exceptions Other changes: - updated zsh completions for borg 1.1.4 - files cache related code cleanups - be more helpful when parsing invalid --pattern values, #3575 - be more clear in secure-erase warning message, #3591 - improve getpass user experience, #3689 - docs build: unicode problem fixed when using a py27-based sphinx - docs: - security: explicitly note what happens OUTSIDE the attack model - security: add note about combining compression and encryption - security: describe chunk size / proximity issue, #3687 - quickstart: add note about permissions, borg@localhost, #3452 - quickstart: add introduction to repositories & archives, #3620 - recreate --recompress: add missing metavar, clarify description, #3617 - improve logging docs, #3549 - add an example for --pattern usage, #3661 - clarify path semantics when matching, #3598 - link to offline documentation from README, #3502 - add docs on 
how to verify a signed release with GPG, #3634 - chunk seed is generated per repository (not: archive) - better formatting of CPU usage documentation, #3554 - extend append-only repo rollback docs, #3579 - tests: - fix erroneously skipped zstd compressor tests, #3606 - skip a test if argparse is broken, #3705 - vagrant: - xenial64 box now uses username 'vagrant', #3707 - move cleanup steps to fs_init, #3706 - the boxcutter wheezy boxes are 404, use local ones - update to Python 3.5.5 (for binary builds) Version 1.1.4 (2017-12-31) -------------------------- Compatibility notes: - When upgrading from borg 1.0.x to 1.1.x, please note: - read all the compatibility notes for 1.1.0*, starting from 1.1.0b1. - borg upgrade: you do not need to and you also should not run it. - borg might ask some security-related questions once after upgrading. You can answer them either manually or via environment variable. One known case is if you use unencrypted repositories, then it will ask about a unknown unencrypted repository one time. - your first backup with 1.1.x might be significantly slower (it might completely read, chunk, hash a lot files) - this is due to the --files-cache mode change (and happens every time you change mode). You can avoid the one-time slowdown by using the pre-1.1.0rc4-compatible mode (but that is less safe for detecting changed files than the default). See the --files-cache docs for details. - borg 1.1.4 changes: - zstd compression is new in borg 1.1.4, older borg can't handle it. - new minimum requirements for the compression libraries - if the required versions (header and lib) can't be found at build time, bundled code will be used: - added requirement: libzstd >= 1.3.0 (bundled: 1.3.2) - updated requirement: liblz4 >= 1.7.0 / r129 (bundled: 1.8.0) Fixes: - check: data corruption fix: fix for borg check --repair malfunction, #3444. See the more detailled notes close to the top of this document. - delete: also delete security dir when deleting a repo, #3427 - prune: fix building the "borg prune" man page, #3398 - init: use given --storage-quota for local repo, #3470 - init: properly quote repo path in output - fix startup delay with dns-only own fqdn resolving, #3471 New features: - added zstd compression. try it! - added placeholder {reverse-fqdn} for fqdn in reverse notation - added BORG_BASE_DIR environment variable, #3338 Other changes: - list help topics when invalid topic is requested - fix lz4 deprecation warning, requires lz4 >= 1.7.0 (r129) - add parens for C preprocessor macro argument usages (did not cause malfunction) - exclude broken pytest 3.3.0 release - updated fish/bash completions - init: more clear exception messages for borg create, #3465 - docs: - add auto-generated docs for borg config - don't generate HTML docs page for borgfs, #3404 - docs update for lz4 b2 zstd changes - add zstd to compression help, readme, docs - update requirements and install docs about bundled lz4 and zstd - refactored build of the compress and crypto.low_level extensions, #3415: - move some lib/build related code to setup_{zstd,lz4,b2}.py - bundle lz4 1.8.0 (requirement: >= 1.7.0 / r129) - bundle zstd 1.3.2 (requirement: >= 1.3.0) - blake2 was already bundled - rename BORG_LZ4_PREFIX env var to BORG_LIBLZ4_PREFIX for better consistency: we also have BORG_LIBB2_PREFIX and BORG_LIBZSTD_PREFIX now. 
- add prefer_system_lib* = True settings to setup.py - by default the build will prefer a shared library over the bundled code, if library and headers can be found and meet the minimum requirements. Version 1.1.3 (2017-11-27) -------------------------- Fixes: - Security Fix for CVE-2017-15914: Incorrect implementation of access controls allows remote users to override repository restrictions in Borg servers. A user able to access a remote Borg SSH server is able to circumvent access controls post-authentication. Affected releases: 1.1.0, 1.1.1, 1.1.2. Releases 1.0.x are NOT affected. - crc32: deal with unaligned buffer, add tests - this broke borg on older ARM CPUs that can not deal with unaligned 32bit memory accesses and raise a bus error in such cases. the fix might also improve performance on some CPUs as all 32bit memory accesses by the crc32 code are properly aligned now. #3317 - mount: fixed support of --consider-part-files and do not show .borg_part_N files by default in the mounted FUSE filesystem. #3347 - fixed cache/repo timestamp inconsistency message, highlight that information is obtained from security dir (deleting the cache will not bypass this error in case the user knows this is a legitimate repo). - borgfs: don't show sub-command in borgfs help, #3287 - create: show an error when --dry-run and --stats are used together, #3298 New features: - mount: added exclusion group options and paths, #2138 Reused some code to support similar options/paths as borg extract offers - making good use of these to only mount a smaller subset of dirs/files can speed up mounting a lot and also will consume way less memory. borg mount [options] repo_or_archive mountpoint path [paths...] paths: you can just give some "root paths" (like for borg extract) to only partially populate the FUSE filesystem. new options: --exclude[-from], --pattern[s-from], --strip-components - create/extract: support st_birthtime on platforms supporting it, #3272 - add "borg config" command for querying/setting/deleting config values, #3304 Other changes: - clean up and simplify packaging (only package committed files, do not install .c/.h/.pyx files) - docs: - point out tuning options for borg create, #3239 - add instructions for using ntfsclone, zerofree, #81 - move image backup-related FAQ entries to a new page - clarify key aliases for borg list --format, #3111 - mention break-lock in checkpointing FAQ entry, #3328 - document sshfs rename workaround, #3315 - add FAQ about removing files from existing archives - add FAQ about different prune policies - usage and man page for borgfs, #3216 - clarify create --stats duration vs. 
wall time, #3301 - clarify encrypted key format for borg key export, #3296 - update release checklist about security fixes - document good and problematic option placements, fix examples, #3356 - add note about using --nobsdflags to avoid speed penalty related to bsdflags, #3239 - move most of support section to www.borgbackup.org Version 1.1.2 (2017-11-05) -------------------------- Fixes: - fix KeyError crash when talking to borg server < 1.0.7, #3244 - extract: set bsdflags last (include immutable flag), #3263 - create: don't do stat() call on excluded-norecurse directory, fix exception handling for stat() call, #3209 - create --stats: do not count data volume twice when checkpointing, #3224 - recreate: move chunks_healthy when excluding hardlink master, #3228 - recreate: get rid of chunks_healthy when rechunking (does not match), #3218 - check: get rid of already existing not matching chunks_healthy metadata, #3218 - list: fix stdout broken pipe handling, #3245 - list/diff: remove tag-file options (not used), #3226 New features: - bash, zsh and fish shell auto-completions, see scripts/shell_completions/ - added BORG_CONFIG_DIR env var, #3083 Other changes: - docs: - clarify using a blank passphrase in keyfile mode - mention "!" (exclude-norecurse) type in "patterns" help - document to first heal before running borg recreate to re-chunk stuff, because that will have to get rid of chunks_healthy metadata. - more than 23 is not supported for CHUNK_MAX_EXP, #3115 - borg does not respect nodump flag by default any more - clarify same-filesystem requirement for borg upgrade, #2083 - update / rephrase cygwin / WSL status, #3174 - improve docs about --stats, #3260 - vagrant: openindiana new clang package Already contained in 1.1.1 (last minute fix): - arg parsing: fix fallback function, refactor, #3205. This is a fixup for #3155, which was broken on at least python <= 3.4.2. Version 1.1.1 (2017-10-22) -------------------------- Compatibility notes: - The deprecated --no-files-cache is not a global/common option any more, but only available for borg create (it is not needed for anything else). Use --files-cache=disabled instead of --no-files-cache. - The nodump flag ("do not backup this file") is not honoured any more by default because this functionality (esp. if it happened by error or unexpected) was rather confusing and unexplainable at first to users. If you want that "do not backup NODUMP-flagged files" behaviour, use: borg create --exclude-nodump ... - If you are on Linux and do not need bsdflags archived, consider using ``--nobsdflags`` with ``borg create`` to avoid additional syscalls and speed up backup creation. Fixes: - borg recreate: correctly compute part file sizes. fixes cosmetic, but annoying issue as borg check complains about size inconsistencies of part files in affected archives. you can solve that by running borg recreate on these archives, see also #3157. 
- bsdflags support: do not open BLK/CHR/LNK files, avoid crashes and slowness, #3130 - recreate: don't crash on attic archives w/o time_end, #3109 - don't crash on repository filesystems w/o hardlink support, #3107 - don't crash in first part of truncate_and_unlink, #3117 - fix server-side IndexError crash with clients < 1.0.7, #3192 - don't show traceback if only a global option is given, show help, #3142 - cache: use SaveFile for more safety, #3158 - init: fix wrong encryption choices in command line parser, fix missing "authenticated-blake2", #3103 - move --no-files-cache from common to borg create options, #3146 - fix detection of non-local path (failed on ..filename), #3108 - logging with fileConfig: set json attr on "borg" logger, #3114 - fix crash with relative BORG_KEY_FILE, #3197 - show excluded dir with "x" for tagged dirs / caches, #3189 New features: - create: --nobsdflags and --exclude-nodump options, #3160 - extract: --nobsdflags option, #3160 Other changes: - remove annoying hardlinked symlinks warning, #3175 - vagrant: use self-made FreeBSD 10.3 box, #3022 - travis: don't brew update, hopefully fixes #2532 - docs: - readme: -e option is required in borg 1.1 - add example showing --show-version --show-rc - use --format rather than --list-format (deprecated) in example - update docs about hardlinked symlinks limitation Version 1.1.0 (2017-10-07) -------------------------- Compatibility notes: - borg command line: do not put options in between positional arguments This sometimes works (e.g. it worked in borg 1.0.x), but can easily stop working if we make positional arguments optional (like it happened for borg create's "paths" argument in 1.1). There are also places in borg 1.0 where we do that, so it doesn't work there in general either. #3356 Good: borg create -v --stats repo::archive path Good: borg create repo::archive path -v --stats Bad: borg create repo::archive -v --stats path Fixes: - fix LD_LIBRARY_PATH restoration for subprocesses, #3077 - "auto" compression: make sure expensive compression is actually better, otherwise store lz4 compressed data we already computed. Other changes: - docs: - FAQ: we do not implement futile attempts of ETA / progress displays - manpage: fix typos, update homepage - implement simple "issue" role for manpage generation, #3075 Version 1.1.0rc4 (2017-10-01) ----------------------------- Compatibility notes: - A borg server >= 1.1.0rc4 does not support borg clients 1.1.0b3-b5. #3033 - The files cache is now controlled differently and has a new default mode: - the files cache now uses ctime by default for improved file change detection safety. You can still use mtime for more speed and less safety. - --ignore-inode is deprecated (use --files-cache=... without "inode") - --no-files-cache is deprecated (use --files-cache=disabled) New features: - --files-cache - implement files cache mode control, #911 You can now control the files cache mode using this option: --files-cache={ctime,mtime,size,inode,rechunk,disabled} (only some combinations are supported). See the docs for details. 
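Some illustrative invocations of the new option (repository, archive and
source paths are placeholders; see the --files-cache docs for the exact
default combination and the supported mode combinations)::

    # safer change detection based on ctime (the new default behaviour):
    borg create --files-cache=ctime,size,inode /path/to/repo::archive ~/data

    # pre-1.1.0rc4-compatible mtime-based mode: faster, but less safe:
    borg create --files-cache=mtime,size,inode /path/to/repo::archive ~/data

    # do not use the files cache at all (always read, chunk and hash files):
    borg create --files-cache=disabled /path/to/repo::archive ~/data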
Fixes: - remote progress/logging: deal with partial lines, #2637 - remote progress: flush json mode output - fix subprocess environments, #3050 (and more) Other changes: - remove client_supports_log_v3 flag, #3033 - exclude broken Cython 0.27(.0) in requirements, #3066 - vagrant: - upgrade to FUSE for macOS 3.7.1 - use Python 3.5.4 to build the binaries - docs: - security: change-passphrase only changes the passphrase, #2990 - fixed/improved borg create --compression examples, #3034 - add note about metadata dedup and --no[ac]time, #2518 - twitter account @borgbackup now, better visible, #2948 - simplified rate limiting wrapper in FAQ Version 1.1.0rc3 (2017-09-10) ----------------------------- New features: - delete: support naming multiple archives, #2958 Fixes: - repo cleanup/write: invalidate cached FDs, #2982 - fix datetime.isoformat() microseconds issues, #2994 - recover_segment: use mmap(), lower memory needs, #2987 Other changes: - with-lock: close segment file before invoking subprocess - keymanager: don't depend on optional readline module, #2976 - docs: - fix macOS keychain integration command - show/link new screencasts in README, #2936 - document utf-8 locale requirement for json mode, #2273 - vagrant: clean up shell profile init, user name, #2977 - test_detect_attic_repo: don't test mount, #2975 - add debug logging for repository cleanup Version 1.1.0rc2 (2017-08-28) ----------------------------- Compatibility notes: - list: corrected mix-up of "isomtime" and "mtime" formats. Previously, "isomtime" was the default but produced a verbose human format, while "mtime" produced a ISO-8601-like format. The behaviours have been swapped (so "mtime" is human, "isomtime" is ISO-like), and the default is now "mtime". "isomtime" is now a real ISO-8601 format ("T" between date and time, not a space). New features: - None. 
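To illustrate the swapped format keys described in the compatibility notes
above, a hedged example (repository and archive names are placeholders;
{mtime}, {isomtime}, {path} and {NL} are borg list format keys, {NL}
expanding to a newline)::

    # "mtime" now yields the human readable timestamp:
    borg list --format '{mtime} {path}{NL}' /path/to/repo::archive

    # "isomtime" now yields a real ISO 8601 timestamp ("T" separator):
    borg list --format '{isomtime} {path}{NL}' /path/to/repo::archive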
Fixes: - list: fix weird mixup of mtime/isomtime - create --timestamp: set start time, #2957 - ignore corrupt files cache, #2939 - migrate locks to child PID when daemonize is used - fix exitcode of borg serve, #2910 - only compare contents when chunker params match, #2899 - umount: try fusermount, then try umount, #2863 Other changes: - JSON: use a more standard ISO 8601 datetime format, #2376 - cache: write_archive_index: truncate_and_unlink on error, #2628 - detect non-upgraded Attic repositories, #1933 - delete various nogil and threading related lines - coala / pylint related improvements - docs: - renew asciinema/screencasts, #669 - create: document exclusion through nodump, #2949 - minor formatting fixes - tar: tarpipe example - improve "with-lock" and "info" docs, #2869 - detail how to use macOS/GNOME/KDE keyrings for repo passwords, #392 - travis: only short-circuit docs-only changes for pull requests - vagrant: - netbsd: bash is already installed - fix netbsd version in PKG_PATH - add exe location to PATH when we build an exe Version 1.1.0rc1 (2017-07-24) ----------------------------- Compatibility notes: - delete: removed short option for --cache-only New features: - support borg list repo --format {comment} {bcomment} {end}, #2081 - key import: allow reading from stdin, #2760 Fixes: - with-lock: avoid creating segment files that might be overwritten later, #1867 - prune: fix checkpoints processing with --glob-archives - FUSE: versions view: keep original file extension at end, #2769 - fix --last, --first: do not accept values <= 0, fix reversed archive ordering with --last - include testsuite data (attic.tar.gz) when installing the package - use limited unpacker for outer key, for manifest (both security precautions), #2174 #2175 - fix bashism in shell scripts, #2820, #2816 - cleanup endianness detection, create _endian.h, fixes build on alpine linux, #2809 - fix crash with --no-cache-sync (give known chunk size to chunk_incref), #2853 Other changes: - FUSE: versions view: linear numbering by archive time - split up interval parsing from filtering for --keep-within, #2610 - add a basic .editorconfig, #2734 - use archive creation time as mtime for FUSE mount, #2834 - upgrade FUSE for macOS (osxfuse) from 3.5.8 to 3.6.3, #2706 - hashindex: speed up by replacing modulo with "if" to check for wraparound - coala checker / pylint: fixed requirements and .coafile, more ignores - borg upgrade: name backup directories as 'before-upgrade', #2811 - add .mailmap - some minor changes suggested by lgtm.com - docs: - better explanation of the --ignore-inode option relevance, #2800 - fix openSUSE command and add openSUSE section - simplify ssh authorized_keys file using "restrict", add legacy note, #2121 - mount: show usage of archive filters - mount: add repository example, #2462 - info: update and add examples, #2765 - prune: include example - improved style / formatting - improved/fixed segments_per_dir docs - recreate: fix wrong "remove unwanted files" example - reference list of status chars in borg recreate --filter description - update source-install docs about doc build dependencies, #2795 - cleanup installation docs - file system requirements, update segs per dir - fix checkpoints/parts reference in FAQ, #2859 - code: - hashindex: don't pass side effect into macro - crypto low_level: don't mutate local bytes() - use dash_open function to open file or "-" for stdin/stdout - archiver: argparse cleanup / refactoring - shellpattern: add match_end arg - tests: added some additional unit 
tests, some fixes, #2700 #2710 - vagrant: fix setup of cygwin, add Debian 9 "stretch" - travis: don't perform full travis build on docs-only changes, #2531 Version 1.1.0b6 (2017-06-18) ---------------------------- Compatibility notes: - Running "borg init" via a "borg serve --append-only" server will *not* create an append-only repository anymore. Use "borg init --append-only" to initialize an append-only repository. - Repositories in the "repokey" and "repokey-blake2" modes with an empty passphrase are now treated as unencrypted repositories for security checks (e.g. BORG_UNKNOWN_UNENCRYPTED_REPO_ACCESS_IS_OK). Previously there would be no prompts nor messages if an unknown repository in one of these modes with an empty passphrase was encountered. This would allow an attacker to swap a repository, if one assumed that the lack of password prompts was due to a set BORG_PASSPHRASE. Since the "trick" does not work if BORG_PASSPHRASE is set, this does generally not affect scripts. - Repositories in the "authenticated" mode are now treated as the unencrypted repositories they are. - The client-side temporary repository cache now holds unencrypted data for better speed. - borg init: removed the short form of --append-only (-a). - borg upgrade: removed the short form of --inplace (-i). New features: - reimplemented the RepositoryCache, size-limited caching of decrypted repo contents, integrity checked via xxh64. #2515 - reduced space usage of chunks.archive.d. Existing caches are migrated during a cache sync. #235 #2638 - integrity checking using xxh64 for important files used by borg, #1101: - repository: index and hints files - cache: chunks and files caches, chunks.archive.d - improve cache sync speed, #1729 - create: new --no-cache-sync option - add repository mandatory feature flags infrastructure, #1806 - Verify most operations against SecurityManager. Location, manifest timestamp and key types are now checked for almost all non-debug commands. #2487 - implement storage quotas, #2517 - serve: add --restrict-to-repository, #2589 - BORG_PASSCOMMAND: use external tool providing the key passphrase, #2573 - borg export-tar, #2519 - list: --json-lines instead of --json for archive contents, #2439 - add --debug-profile option (and also "borg debug convert-profile"), #2473 - implement --glob-archives/-a, #2448 - normalize authenticated key modes for better naming consistency: - rename "authenticated" to "authenticated-blake2" (uses blake2b) - implement "authenticated" mode (uses hmac-sha256) Fixes: - hashindex: read/write indices >2 GiB on 32bit systems, better error reporting, #2496 - repository URLs: implement IPv6 address support and also more informative error message when parsing fails. 
- mount: check whether llfuse is installed before asking for passphrase, #2540 - mount: do pre-mount checks before opening repository, #2541 - FUSE: - fix crash if empty (None) xattr is read, #2534 - fix read(2) caching data in metadata cache - fix negative uid/gid crash (fix crash when mounting archives of external drives made on cygwin), #2674 - redo ItemCache, on top of object cache - use decrypted cache - remove unnecessary normpaths - serve: ignore --append-only when initializing a repository (borg init), #2501 - serve: fix incorrect type of exception_short for Errors, #2513 - fix --exclude and --exclude-from recursing into directories, #2469 - init: don't allow creating nested repositories, #2563 - --json: fix encryption[mode] not being the cmdline name - remote: propagate Error.traceback correctly - fix remote logging and progress, #2241 - implement --debug-topic for remote servers - remote: restore "Remote:" prefix (as used in 1.0.x) - rpc negotiate: enable v3 log protocol only for supported clients - fix --progress and logging in general for remote - fix parse_version, add tests, #2556 - repository: truncate segments (and also some other files) before unlinking, #2557 - recreate: keep timestamps as in original archive, #2384 - recreate: if single archive is not processed, exit 2 - patterns: don't recurse with ! / --exclude for pf:, #2509 - cache sync: fix n^2 behaviour in lookup_name - extract: don't write to disk with --stdout (affected non-regular-file items), #2645 - hashindex: implement KeyError, more tests Other changes: - remote: show path in PathNotAllowed - consider repokey w/o passphrase == unencrypted, #2169 - consider authenticated mode == unencrypted, #2503 - restrict key file names, #2560 - document follow_symlinks requirements, check libc, use stat and chown with follow_symlinks=False, #2507 - support common options on the main command, #2508 - support common options on mid-level commands (e.g. borg *key* export) - make --progress a common option - increase DEFAULT_SEGMENTS_PER_DIR to 1000 - chunker: fix invalid use of types (function only used by tests) - chunker: don't do uint32_t >> 32 - FUSE: - add instrumentation (--debug and SIGUSR1/SIGINFO) - reduced memory usage for repository mounts by lazily instantiating archives - improved archive load times - info: use CacheSynchronizer & HashIndex.stats_against (better performance) - docs: - init: document --encryption as required - security: OpenSSL usage - security: used implementations; note python libraries - security: security track record of OpenSSL and msgpack - patterns: document denial of service (regex, wildcards) - init: note possible denial of service with "none" mode - init: document SHA extension is supported in OpenSSL and thus SHA is faster on AMD Ryzen than blake2b. - book: use A4 format, new builder option format. - book: create appendices - data structures: explain repository compaction - data structures: add chunk layout diagram - data structures: integrity checking - data structures: demingle cache and repo index - Attic FAQ: separate section for attic stuff - FAQ: I get an IntegrityError or similar - what now? 
- FAQ: Can I use Borg on SMR hard drives?, #2252 - FAQ: specify "using inline shell scripts" - add systemd warning regarding placeholders, #2543 - xattr: document API - add docs/misc/borg-data-flow data flow chart - debugging facilities - README: how to help the project, #2550 - README: add bountysource badge, #2558 - fresh new theme + tweaking - logo: vectorized (PDF and SVG) versions - frontends: use headlines - you can link to them - mark --pattern, --patterns-from as experimental - highlight experimental features in online docs - remove regex based pattern examples, #2458 - nanorst for "borg help TOPIC" and --help - split deployment - deployment: hosting repositories - deployment: automated backups to a local hard drive - development: vagrant, windows10 requirements - development: update docs remarks - split usage docs, #2627 - usage: avoid bash highlight, [options] instead of - usage: add benchmark page - helpers: truncate_and_unlink doc - don't suggest to leak BORG_PASSPHRASE - internals: columnize rather long ToC [webkit fixup] internals: manifest & feature flags - internals: more HashIndex details - internals: fix ASCII art equations - internals: edited obj graph related sections a bit - internals: layers image + description - fix way too small figures in pdf - index: disable syntax highlight (bash) - improve options formatting, fix accidental block quotes - testing / checking: - add support for using coala, #1366 - testsuite: add ArchiverCorruptionTestCase - do not test logger name, #2504 - call setup_logging after destroying logging config - testsuite.archiver: normalise pytest.raises vs. assert_raises - add test for preserved intermediate folder permissions, #2477 - key: add round-trip test - remove attic dependency of the tests, #2505 - enable remote tests on cygwin - tests: suppress tar's future timestamp warning - cache sync: add more refcount tests - repository: add tests, including corruption tests - vagrant: - control VM cpus and pytest workers via env vars VMCPUS and XDISTN - update cleaning workdir - fix openbsd shell - add OpenIndiana - packaging: - binaries: don't bundle libssl - setup.py clean to remove compiled files - fail in borg package if version metadata is very broken (setuptools_scm) - repo / code structure: - create borg.algorithms and borg.crypto packages - algorithms: rename crc32 to checksums - move patterns to module, #2469 - gitignore: complete paths for src/ excludes - cache: extract CacheConfig class - implement IntegrityCheckedFile + Detached variant, #2502 #1688 - introduce popen_with_error_handling to handle common user errors Version 1.1.0b5 (2017-04-30) ---------------------------- Compatibility notes: - BORG_HOSTNAME_IS_UNIQUE is now on by default. 
- removed --compression-from feature - recreate: add --recompress flag, unify --always-recompress and --recompress Fixes: - catch exception for os.link when hardlinks are not supported, #2405 - borg rename / recreate: expand placeholders, #2386 - generic support for hardlinks (files, devices, FIFOs), #2324 - extract: also create parent dir for device files, if needed, #2358 - extract: if a hardlink master is not in the to-be-extracted subset, the "x" status was not displayed for it, #2351 - embrace y2038 issue to support 32bit platforms: clamp timestamps to int32, #2347 - verify_data: fix IntegrityError handling for defect chunks, #2442 - allow excluding parent and including child, #2314 Other changes: - refactor compression decision stuff - change global compression default to lz4 as well, to be consistent with --compression defaults. - placeholders: deny access to internals and other unspecified stuff - clearer error message for unrecognized placeholder - more clear exception if borg check does not help, #2427 - vagrant: upgrade FUSE for macOS to 3.5.8, #2346 - linux binary builds: get rid of glibc 2.13 dependency, #2430 - docs: - placeholders: document escaping - serve: env vars in original commands are ignored - tell what kind of hardlinks we support - more docs about compression - LICENSE: use canonical formulation ("copyright holders and contributors" instead of "author") - document borg init behaviour via append-only borg serve, #2440 - be clear about what buzhash is used for, #2390 - add hint about chunker params, #2421 - clarify borg upgrade docs, #2436 - FAQ to explain warning when running borg check --repair, #2341 - repository file system requirements, #2080 - pre-install considerations - misc. formatting / crossref fixes - tests: - enhance travis setuptools_scm situation - add extra test for the hashindex - fix invalid param issue in benchmarks These belong to 1.1.0b4 release, but did not make it into changelog by then: - vagrant: increase memory for parallel testing - lz4 compress: lower max. buffer size, exception handling - add docstring to do_benchmark_crud - patterns help: mention path full-match in intro Version 1.1.0b4 (2017-03-27) ---------------------------- Compatibility notes: - init: the --encryption argument is mandatory now (there are several choices) - moved "borg migrate-to-repokey" to "borg key migrate-to-repokey". - "borg change-passphrase" is deprecated, use "borg key change-passphrase" instead. - the --exclude-if-present option now supports tagging a folder with any filesystem object type (file, folder, etc), instead of expecting only files as tags, #1999 - the --keep-tag-files option has been deprecated in favor of the new --keep-exclude-tags, to account for the change mentioned above. - use lz4 compression by default, #2179 New features: - JSON API to make developing frontends and automation easier (see :ref:`json_output`) - add JSON output to commands: `borg create/list/info --json ...`. - add --log-json option for structured logging output. - add JSON progress information, JSON support for confirmations (yes()). 
- add two new options --pattern and --patterns-from as discussed in #1406 - new path full match pattern style (pf:) for very fast matching, #2334 - add 'debug dump-manifest' and 'debug dump-archive' commands - add 'borg benchmark crud' command, #1788 - new 'borg delete --force --force' to delete severely corrupted archives, #1975 - info: show utilization of maximum archive size, #1452 - list: add dsize and dcsize keys, #2164 - paperkey.html: Add interactive html template for printing key backups. - key export: add qr html export mode - securely erase config file (which might have old encryption key), #2257 - archived file items: add size to metadata, 'borg extract' and 'borg check' do check the file size for consistency, FUSE uses precomputed size from Item. Fixes: - fix remote speed regression introduced in 1.1.0b3, #2185 - fix regression handling timestamps beyond 2262 (revert bigint removal), introduced in 1.1.0b3, #2321 - clamp (nano)second values to unproblematic range, #2304 - hashindex: rebuild hashtable if we have too little empty buckets (performance fix), #2246 - Location regex: fix bad parsing of wrong syntax - ignore posix_fadvise errors in repository.py, #2095 - borg rpc: use limited msgpack.Unpacker (security precaution), #2139 - Manifest: Make sure manifest timestamp is strictly monotonically increasing. - create: handle BackupOSError on a per-path level in one spot - create: clarify -x option / meaning of "same filesystem" - create: don't create hard link refs to failed files - archive check: detect and fix missing all-zero replacement chunks, #2180 - files cache: update inode number when --ignore-inode is used, #2226 - fix decompression exceptions crashing ``check --verify-data`` and others instead of reporting integrity error, #2224 #2221 - extract: warning for unextracted big extended attributes, #2258, #2161 - mount: umount on SIGINT/^C when in foreground - mount: handle invalid hard link refs - mount: fix huge RAM consumption when mounting a repository (saves number of archives * 8 MiB), #2308 - hashindex: detect mingw byte order #2073 - hashindex: fix wrong skip_hint on hashindex_set when encountering tombstones, the regression was introduced in #1748 - fix ChunkIndex.__contains__ assertion for big-endian archs - fix borg key/debug/benchmark crashing without subcommand, #2240 - Location: accept //servername/share/path - correct/refactor calculation of unique/non-unique chunks - extract: fix missing call to ProgressIndicator.finish - prune: fix error msg, it is --keep-within, not --within - fix "auto" compression mode bug (not compressing), #2331 - fix symlink item fs size computation, #2344 Other changes: - remote repository: improved async exception processing, #2255 #2225 - with --compression auto,C, only use C if lz4 achieves at least 3% compression - PatternMatcher: only normalize path once, #2338 - hashindex: separate endian-dependent defs from endian detection - migrate-to-repokey: ask using canonical_path() as we do everywhere else. 
- SyncFile: fix use of fd object after close
- make LoggedIO.close_segment reentrant
- creating a new segment: use "xb" mode, #2099
- redo key_creator, key_factory, centralise key knowledge, #2272
- add return code functions, #2199
- list: only load cache if needed
- list: files->items, clarifications
- list: add "name" key for consistency with info cmd
- ArchiveFormatter: add "start" key for compatibility with "info"
- RemoteRepository: account rx/tx bytes
- setup.py build_usage/build_man/build_api fixes
- Manifest.in: simplify, exclude .so, .dll and .orig, #2066
- FUSE: get rid of chunk accounting, st_blocks = ceil(size / blocksize).
- tests:

  - help python development by testing 3.6-dev
  - test for borg delete --force
- vagrant:

  - freebsd: some fixes, #2067
  - darwin64: use osxfuse 3.5.4 for tests / to build binaries
  - darwin64: improve VM settings
  - use python 3.5.3 to build binaries, #2078
  - upgrade pyinstaller from 3.1.1+ to 3.2.1
  - pyinstaller: use fixed AND freshly compiled bootloader, #2002
  - pyinstaller: automatically builds bootloader if missing
- docs:

  - create really nice man pages
  - faq: mention --remote-ratelimit in bandwidth limit question
  - fix caskroom link, #2299
  - docs/security: reiterate that RPC in Borg does no networking
  - docs/security: counter tracking, #2266
  - docs/development: update merge remarks
  - address SSH batch mode in docs, #2202 #2270
  - add warning about running build_usage on Python >3.4, #2123
  - one link per distro in the installation page
  - improve --exclude-if-present and --keep-exclude-tags, #2268
  - improve automated backup script in doc, #2214
  - improve remote-path description
  - update docs for create -C default change (lz4)
  - document relative path usage, #1868
  - document snapshot usage, #2178
  - corrected some stuff in internals+security
  - internals: move toctree to after the introduction text
  - clarify metadata kind, manifest ops
  - key enc: correct / clarify some stuff, link to internals/security
  - datas: enc: 1.1.x has different MACs
  - datas: enc: correct factual error -- no nonce involved there.
  - make internals.rst an index page and edit it a bit
  - add "Cryptography in Borg" and "Remote RPC protocol security" sections
  - document BORG_HOSTNAME_IS_UNIQUE, #2087
  - FAQ by categories as proposed by @anarcat in #1802
  - FAQ: update Which file types, attributes, etc. are *not* preserved?
  - development: new branching model for git repository
  - development: define "ours" merge strategy for auto-generated files
  - create: move --exclude note to main doc
  - create: move item flags to main doc
  - fix examples using borg init without -e/--encryption
  - list: don't print key listings in fat (html + man)
  - remove Python API docs (were very incomplete, build problems on RTFD)
  - added FAQ section about backing up root partition

Version 1.0.10 (2017-02-13)
---------------------------

Bug fixes:

- Manifest timestamps are now monotonically increasing, this fixes issues
  when the system clock jumps backwards or is set inconsistently across
  computers accessing the same repository, #2115
- Fixed testing regression in 1.0.10rc1 that led to a hard dependency on
  py.test >= 3.0, #2112

New features:

- "key export" can now generate a printable HTML page with both a QR code
  and a human-readable "paperkey" representation (and custom text) through
  the ``--qr-html`` option.

  The same functionality is also available through `paperkey.html `_, which
  is the same HTML page generated by ``--qr-html``. It works with existing
  "key export" files and key files.
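
  A minimal sketch of creating such a printable key backup (repository path
  and output filenames are just examples)::

      $ borg key export --qr-html /path/to/repo key-backup.html
      $ borg key export /path/to/repo key-backup.txt   # plain key export, for comparison

  Keep the resulting files somewhere separate from the repository itself.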

Other changes:

- docs:

  - language clarification - "borg create --one-file-system" option does not
    respect mount points, but considers different file systems instead, #2141
- setup.py: build_api: sort file list for determinism

Version 1.1.0b3 (2017-01-15)
----------------------------

Compatibility notes:

- borg init: removed the default of "--encryption/-e", #1979
  This was done so users make an informed decision about -e mode.

Bug fixes:

- borg recreate: don't rechunkify unless explicitly told so
- borg info: fixed bug when called without arguments, #1914
- borg init: fix free space check crashing if disk is full, #1821
- borg debug delete/get obj: fix wrong reference to exception
- fix processing of remote ~/ and ~user/ paths (regressed since 1.1.0b1), #1759
- posix platform module: only build / import on non-win32 platforms, #2041

New features:

- new CRC32 implementations that are much faster than the zlib one used
  previously, #1970
- add blake2b key modes (use blake2b as MAC). This links against system
  libb2, if possible, otherwise uses bundled code
- automatically remove stale locks - set BORG_HOSTNAME_IS_UNIQUE env var to
  enable stale lock killing. If set, stale locks in both cache and repository
  are deleted. #562 #1253
- borg info : print general repo information, #1680
- borg check --first / --last / --sort / --prefix, #1663
- borg mount --first / --last / --sort / --prefix, #1542
- implement "health" item formatter key, #1749
- BORG_SECURITY_DIR to remember security related infos outside the cache.
  Key type, location and manifest timestamp checks now survive cache
  deletion. This also means that you can now delete your cache and avoid
  previous warnings, since Borg can still tell it's safe.
- implement BORG_NEW_PASSPHRASE, #1768

Other changes:

- borg recreate:

  - remove special-cased --dry-run
  - update --help
  - remove bloat: interruption blah, autocommit blah, resuming blah
  - re-use existing checkpoint functionality
- archiver tests: add check_cache tool - lints refcounts
- fixed cache sync performance regression from 1.1.0b1 onwards, #1940
- syncing the cache without chunks.archive.d (see
  :ref:`disable_archive_chunks`) now avoids any merges and is thus faster,
  #1940
- borg check --verify-data: faster due to linear on-disk-order scan
- borg debug-xxx commands removed, we use "debug xxx" subcommands now, #1627
- improve metadata handling speed
- shortcut hashindex_set by having hashindex_lookup hint about address
- improve / add progress displays, #1721
- check for index vs. segment files object count mismatch
- make RPC protocol more extensible: use named parameters.
- RemoteRepository: misc. code cleanups / refactors
- clarify cache/repository README file
- docs:

  - quickstart: add a comment about other (remote) filesystems
  - quickstart: only give one possible ssh url syntax, all others are
    documented in usage chapter.
  - mention file://
  - document repo URLs / archive location
  - clarify borg diff help, #980
  - deployment: synthesize alternative --restrict-to-path example
  - improve cache / index docs, esp. files cache docs, #1825
  - document using "git merge 1.0-maint -s recursive -X rename-threshold=20%"
    for avoiding troubles when merging the 1.0-maint branch into master.
- tests:

  - FUSE tests: catch ENOTSUP on freebsd
  - FUSE tests: test troublesome xattrs last
  - fix byte range error in test, #1740
  - use monkeypatch to set env vars, but only on pytest based tests.
- point XDG_*_HOME to temp dirs for tests, #1714
- remove all BORG_* env vars from the outer environment

Version 1.0.10rc1 (2017-01-29)
------------------------------

Bug fixes:

- borg serve: fix transmission data loss of pipe writes, #1268
  This affects only the cygwin platform (not Linux, BSD, OS X).
- Avoid triggering an ObjectiveFS bug in xattr retrieval, #1992
- When running out of buffer memory when reading xattrs, only skip the
  current file, #1993
- Fixed "borg upgrade --tam" crashing with unencrypted repositories. Since
  :ref:`the issue ` is not relevant for unencrypted repositories, it now
  does nothing and prints an error, #1981.
- Fixed change-passphrase crashing with unencrypted repositories, #1978
- Fixed "borg check repo::archive" indicating success if "archive" does not
  exist, #1997
- borg check: print non-exit-code warning if --last or --prefix aren't
  fulfilled
- fix bad parsing of wrong repo location syntax
- create: don't create hard link refs to failed files,
  mount: handle invalid hard link refs, #2092
- detect mingw byte order, #2073
- creating a new segment: use "xb" mode, #2099
- mount: umount on SIGINT/^C when in foreground, #2082

Other changes:

- binary: use fixed AND freshly compiled pyinstaller bootloader, #2002
- xattr: ignore empty names returned by llistxattr(2) et al
- Enable the fault handler: install handlers for the SIGSEGV, SIGFPE,
  SIGABRT, SIGBUS and SIGILL signals to dump the Python traceback.
- Also print a traceback on SIGUSR2.
- borg change-passphrase: print key location (simplify making a backup of it)
- officially support Python 3.6 (setup.py: add Python 3.6 qualifier)
- tests:

  - vagrant / travis / tox: add Python 3.6 based testing
  - vagrant: fix openbsd repo, #2042
  - vagrant: fix the freebsd64 machine, #2037 #2067
  - vagrant: use python 3.5.3 to build binaries, #2078
  - vagrant: use osxfuse 3.5.4 for tests / to build binaries
  - vagrant: improve darwin64 VM settings
  - travis: fix osxfuse install (fixes OS X testing on Travis CI)
  - travis: require succeeding OS X tests, #2028
  - travis: use latest pythons for OS X based testing
  - use pytest-xdist to parallelize testing
  - fix xattr test race condition, #2047
  - setup.cfg: fix pytest deprecation warning, #2050
- docs:

  - language clarification
  - VM backup FAQ
  - borg create: document how to backup stdin, #2013
  - borg upgrade: fix incorrect title levels
  - add CVE numbers for issues fixed in 1.0.9, #2106
  - fix typos (taken from Debian package patch)
- remote: include data hexdump in "unexpected RPC data" error message
- remote: log SSH command line at debug level
- API_VERSION: use numberspaces, #2023
- remove .github from pypi package, #2051
- add pip and setuptools to requirements file, #2030
- SyncFile: fix use of fd object after close (cosmetic)
- Manifest.in: simplify, exclude \*.{so,dll,orig}, #2066
- ignore posix_fadvise errors in repository.py, #2095
  (works around issues with docker on ARM)
- make LoggedIO.close_segment reentrant, avoid reentrance

Version 1.0.9 (2016-12-20)
--------------------------

Security fixes:

- A flaw in the cryptographic authentication scheme in Borg allowed an
  attacker to spoof the manifest. See :ref:`tam_vuln` above for the steps
  you should take. CVE-2016-10099 was assigned to this vulnerability.
- borg check: When rebuilding the manifest (which should only be needed very
  rarely) duplicate archive names would be handled on a "first come first
  serve" basis, allowing an attacker to apparently replace archives.
  CVE-2016-10100 was assigned to this vulnerability.
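
After upgrading, the manifest of an existing encrypted repository can be
switched over to the new tertiary authentication mechanism (TAM). A minimal
sketch, assuming a local repository path; see :ref:`tam_vuln` for the
authoritative procedure::

    $ borg upgrade --tam /path/to/repo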
Bug fixes: - borg check: - rebuild manifest if it's corrupted - skip corrupted chunks during manifest rebuild - fix TypeError in integrity error handler, #1903, #1894 - fix location parser for archives with @ char (regression introduced in 1.0.8), #1930 - fix wrong duration/timestamps if system clock jumped during a create - fix progress display not updating if system clock jumps backwards - fix checkpoint interval being incorrect if system clock jumps Other changes: - docs: - add python3-devel as a dependency for cygwin-based installation - clarify extract is relative to current directory - FAQ: fix link to changelog - markup fixes - tests: - test_get\_(cache|keys)_dir: clean env state, #1897 - get back pytest's pretty assertion failures, #1938 - setup.py build_usage: - fixed build_usage not processing all commands - fixed build_usage not generating includes for debug commands Version 1.0.9rc1 (2016-11-27) ----------------------------- Bug fixes: - files cache: fix determination of newest mtime in backup set (which is used in cache cleanup and led to wrong "A" [added] status for unchanged files in next backup), #1860. - borg check: - fix incorrectly reporting attic 0.13 and earlier archives as corrupt - handle repo w/o objects gracefully and also bail out early if repo is *completely* empty, #1815. - fix tox/pybuild in 1.0-maint - at xattr module import time, loggers are not initialized yet New features: - borg umount exposed already existing umount code via the CLI api, so users can use it, which is more consistent than using borg to mount and fusermount -u (or umount) to un-mount, #1855. - implement borg create --noatime --noctime, fixes #1853 Other changes: - docs: - display README correctly on PyPI - improve cache / index docs, esp. files cache docs, fixes #1825 - different pattern matching for --exclude, #1779 - datetime formatting examples for {now} placeholder, #1822 - clarify passphrase mode attic repo upgrade, #1854 - clarify --umask usage, #1859 - clarify how to choose PR target branch - clarify prune behavior for different archive contents, #1824 - fix PDF issues, add logo, fix authors, headings, TOC - move security verification to support section - fix links in standalone README (:ref: tags) - add link to security contact in README - add FAQ about security - move fork differences to FAQ - add more details about resource usage - tests: skip remote tests on cygwin, #1268 - travis: - allow OS X failures until the brew cask osxfuse issue is fixed - caskroom osxfuse-beta gone, it's osxfuse now (3.5.3) - vagrant: - upgrade OSXfuse / FUSE for macOS to 3.5.3 - remove llfuse from tox.ini at a central place - do not try to install llfuse on centos6 - fix FUSE test for darwin, #1546 - add windows virtual machine with cygwin - Vagrantfile cleanup / code deduplication Version 1.1.0b2 (2016-10-01) ---------------------------- Bug fixes: - fix incorrect preservation of delete tags, leading to "object count mismatch" on borg check, #1598. This only occurred with 1.1.0b1 (not with 1.0.x) and is normally fixed by running another borg create/delete/prune. - fix broken --progress for double-cell paths (e.g. 
CJK), #1624 - borg recreate: also catch SIGHUP - FUSE: - fix hardlinks in versions view, #1599 - add parameter check to ItemCache.get to make potential failures more clear New features: - Archiver, RemoteRepository: add --remote-ratelimit (send data) - borg help compression, #1582 - borg check: delete chunks with integrity errors, #1575, so they can be "repaired" immediately and maybe healed later. - archives filters concept (refactoring/unifying older code) - covers --first/--last/--prefix/--sort-by options - currently used for borg list/info/delete Other changes: - borg check --verify-data slightly tuned (use get_many()) - change {utcnow} and {now} to ISO-8601 format ("T" date/time separator) - repo check: log transaction IDs, improve object count mismatch diagnostic - Vagrantfile: use TW's fresh-bootloader pyinstaller branch - fix module names in api.rst - hashindex: bump api_version Version 1.1.0b1 (2016-08-28) ---------------------------- New features: - new commands: - borg recreate: re-create existing archives, #787 #686 #630 #70, also see #757, #770. - selectively remove files/dirs from old archives - re-compress data - re-chunkify data, e.g. to have upgraded Attic / Borg 0.xx archives deduplicate with Borg 1.x archives or to experiment with chunker-params. - borg diff: show differences between archives - borg with-lock: execute a command with the repository locked, #990 - borg create: - Flexible compression with pattern matching on path/filename, and LZ4 heuristic for deciding compressibility, #810, #1007 - visit files in inode order (better speed, esp. for large directories and rotating disks) - in-file checkpoints, #1217 - increased default checkpoint interval to 30 minutes (was 5 minutes), #896 - added uuid archive format tag, #1151 - save mountpoint directories with --one-file-system, makes system restore easier, #1033 - Linux: added support for some BSD flags, #1050 - add 'x' status for excluded paths, #814 - also means files excluded via UF_NODUMP, #1080 - borg check: - will not produce the "Checking segments" output unless new --progress option is passed, #824. - --verify-data to verify data cryptographically on the client, #975 - borg list, #751, #1179 - removed {formatkeys}, see "borg list --help" - --list-format is deprecated, use --format instead - --format now also applies to listing archives, not only archive contents, #1179 - now supports the usual [PATH [PATHS…]] syntax and excludes - new keys: csize, num_chunks, unique_chunks, NUL - supports guaranteed_available hashlib hashes (to avoid varying functionality depending on environment), which includes the SHA1 and SHA2 family as well as MD5 - borg prune: - to better visualize the "thinning out", we now list all archives in reverse time order. rephrase and reorder help text. - implement --keep-last N via --keep-secondly N, also --keep-minutely. 
assuming that there is not more than 1 backup archive made in 1s, --keep-last N and --keep-secondly N are equivalent, #537 - cleanup checkpoints except the latest, #1008 - borg extract: - added --progress, #1449 - Linux: limited support for BSD flags, #1050 - borg info: - output is now more similar to borg create --stats, #977 - borg mount: - provide "borgfs" wrapper for borg mount, enables usage via fstab, #743 - "versions" mount option - when used with a repository mount, this gives a merged, versioned view of the files in all archives, #729 - repository: - added progress information to commit/compaction phase (often takes some time when deleting/pruning), #1519 - automatic recovery for some forms of repository inconsistency, #858 - check free space before going forward with a commit, #1336 - improved write performance (esp. for rotating media), #985 - new IO code for Linux - raised default segment size to approx 512 MiB - improved compaction performance, #1041 - reduced client CPU load and improved performance for remote repositories, #940 - options that imply output (--show-rc, --show-version, --list, --stats, --progress) don't need -v/--info to have that output displayed, #865 - add archive comments (via borg (re)create --comment), #842 - borg list/prune/delete: also output archive id, #731 - --show-version: shows/logs the borg version, #725 - added --debug-topic for granular debug logging, #1447 - use atomic file writing/updating for configuration and key files, #1377 - BORG_KEY_FILE environment variable, #1001 - self-testing module, #970 Bug fixes: - list: fixed default output being produced if --format is given with empty parameter, #1489 - create: fixed overflowing progress line with CJK and similar characters, #1051 - prune: fixed crash if --prefix resulted in no matches, #1029 - init: clean up partial repo if passphrase input is aborted, #850 - info: quote cmdline arguments that have spaces in them - fix hardlinks failing in some cases for extracting subtrees, #761 Other changes: - replace stdlib hmac with OpenSSL, zero-copy decrypt (10-15% increase in performance of hash-lists and extract). - improved chunker performance, #1021 - open repository segment files in exclusive mode (fail-safe), #1134 - improved error logging, #1440 - Source: - pass meta-data around, #765 - move some constants to new constants module - better readability and fewer errors with namedtuples, #823 - moved source tree into src/ subdirectory, #1016 - made borg.platform a package, #1113 - removed dead crypto code, #1032 - improved and ported parts of the test suite to py.test, #912 - created data classes instead of passing dictionaries around, #981, #1158, #1161 - cleaned up imports, #1112 - Docs: - better help texts and sphinx reproduction of usage help: - Group options - Nicer list of options in Sphinx - Deduplicate 'Common options' (including --help) - chunker: added some insights by "Voltara", #903 - clarify what "deduplicated size" means - fix / update / add package list entries - added a SaltStack usage example, #956 - expanded FAQ - new contributors in AUTHORS! - Tests: - vagrant: add ubuntu/xenial 64bit - this box has still some issues - ChunkBuffer: add test for leaving partial chunk in buffer, fixes #945 Version 1.0.8 (2016-10-29) -------------------------- Bug fixes: - RemoteRepository: Fix busy wait in call_many, #940 New features: - implement borgmajor/borgminor/borgpatch placeholders, #1694 {borgversion} was already there (full version string). 
  With the new placeholders you can now also get e.g. 1 or 1.0 or 1.0.8.

Other changes:

- avoid previous_location mismatch, #1741

  due to the changed canonicalization for relative paths in PR #1711 / #1655
  (implement /./ relpath hack), there would be a changed repo location
  warning and the user would be asked if this is ok. this would break
  automation and require manual intervention, which is unwanted.
  thus, we automatically fix the previous_location config entry, if it only
  changed in the expected way, but still means the same location.
- docs:

  - deployment.rst: do not use bare variables in ansible snippet
  - add clarification about append-only mode, #1689
  - setup.py: add comment about requiring llfuse, #1726
  - update usage.rst / api.rst - repo url / archive location docs + typo fix
  - quickstart: add a comment about other (remote) filesystems
- vagrant / tests:

  - no chown when rsyncing (fixes boxes w/o vagrant group)
  - fix FUSE permission issues on linux/freebsd, #1544
  - skip FUSE test for borg binary + fakeroot
  - ignore security.selinux xattrs, fixes tests on centos, #1735

Version 1.0.8rc1 (2016-10-17)
-----------------------------

Bug fixes:

- fix signal handling (SIGINT, SIGTERM, SIGHUP), #1620 #1593
  Fixes e.g. leftover lock files for quickly repeated signals (e.g. Ctrl-C
  Ctrl-C) or lost connections or systemd sending SIGHUP.
- progress display: adapt formatting to narrow screens, do not crash, #1628
- borg create --read-special - fix crash on broken symlink, #1584.
  also correctly processes broken symlinks. before this regressed to a crash
  (5b45385) a broken symlink would've been skipped.
- process_symlink: fix missing backup_io()
  Fixes a chmod/chown/chgrp/unlink/rename/... crash race between getting
  dirents and dispatching to process_symlink.
- yes(): abort on wrong answers, saying so, #1622
- fixed exception borg serve raised when connection was closed before
  repository was opened. Add an error message for this.
- fix read-from-closed-FD issue, #1551
  (this seems not to get triggered in 1.0.x, but was discovered in master)
- hashindex: fix iterators (always raise StopIteration when exhausted)
  (this seems not to get triggered in 1.0.x, but was discovered in master)
- enable relative paths in ssh:// repo URLs, via /./relpath hack, #1655
- allow repo paths with colons, #1705
- update changed repo location immediately after acceptance, #1524
- fix debug get-obj / delete-obj crash if object not found and remote repo,
  #1684
- pyinstaller: use a spec file to build borg.exe binary, exclude osxfuse
  dylib on Mac OS X (avoids mismatch lib <-> driver), #1619

New features:

- add "borg key export" / "borg key import" commands, #1555, so users are
  able to backup / restore their encryption keys more easily.
  Supported formats are the keyfile format used by borg internally and a
  special "paper" format with per-line checksums for printed backups.
  For the paper format, the import is an interactive process which checks
  each line as soon as it is input.
- add "borg debug-refcount-obj" to determine a repo object's referrer
  counts, #1352

Other changes:

- add "borg debug ..." subcommands
  (borg debug-* still works, but will be removed in borg 1.1)
- setup.py: Add subcommand support to build_usage.
- remote: change exception message for unexpected RPC data format to
  indicate dataflow direction.
- improved messages / error reporting: - IntegrityError: add placeholder for message, so that the message we give appears not only in the traceback, but also in the (short) error message, #1572 - borg.key: include chunk id in exception msgs, #1571 - better messages for cache newer than repo, #1700 - vagrant (testing/build VMs): - upgrade OSXfuse / FUSE for macOS to 3.5.2 - update Debian Wheezy boxes, #1686 - openbsd / netbsd: use own boxes, fixes misc rsync installation and FUSE/llfuse related testing issues, #1695 #1696 #1670 #1671 #1728 - docs: - add docs for "key export" and "key import" commands, #1641 - fix inconsistency in FAQ (pv-wrapper). - fix second block in "Easy to use" section not showing on GitHub, #1576 - add bestpractices badge - link reference docs and faq about BORG_FILES_CACHE_TTL, #1561 - improve borg info --help, explain size infos, #1532 - add release signing key / security contact to README, #1560 - add contribution guidelines for developers - development.rst: add sphinx_rtd_theme to the sphinx install command - adjust border color in borg.css - add debug-info usage help file - internals.rst: fix typos - setup.py: fix build_usage to always process all commands - added docs explaining multiple --restrict-to-path flags, #1602 - add more specific warning about write-access debug commands, #1587 - clarify FAQ regarding backup of virtual machines, #1672 - tests: - work around FUSE xattr test issue with recent fakeroot - simplify repo/hashindex tests - travis: test FUSE-enabled borg, use trusty to have a recent FUSE - re-enable FUSE tests for RemoteArchiver (no deadlocks any more) - clean env for pytest based tests, #1714 - fuse_mount contextmanager: accept any options Version 1.0.7 (2016-08-19) -------------------------- Security fixes: - borg serve: fix security issue with remote repository access, #1428 If you used e.g. --restrict-to-path /path/client1/ (with or without trailing slash does not make a difference), it acted like a path prefix match using /path/client1 (note the missing trailing slash) - the code then also allowed working in e.g. /path/client13 or /path/client1000. As this could accidentally lead to major security/privacy issues depending on the paths you use, the behaviour was changed to be a strict directory match. That means --restrict-to-path /path/client1 (with or without trailing slash does not make a difference) now uses /path/client1/ internally (note the trailing slash here!) for matching and allows precisely that path AND any path below it. So, /path/client1 is allowed, /path/client1/repo1 is allowed, but not /path/client13 or /path/client1000. If you willingly used the undocumented (dangerous) previous behaviour, you may need to rearrange your --restrict-to-path paths now. We are sorry if that causes work for you, but we did not want a potentially dangerous behaviour in the software (not even using a for-backwards-compat option). Bug fixes: - fixed repeated LockTimeout exceptions when borg serve tried to write into a already write-locked repo (e.g. by a borg mount), #502 part b) This was solved by the fix for #1220 in 1.0.7rc1 already. 
- fix cosmetics + file leftover for "not a valid borg repository", #1490 - Cache: release lock if cache is invalid, #1501 - borg extract --strip-components: fix leak of preloaded chunk contents - Repository, when a InvalidRepository exception happens: - fix spurious, empty lock.roster - fix repo not closed cleanly New features: - implement borg debug-info, fixes #1122 (just calls already existing code via cli, same output as below tracebacks) Other changes: - skip the O_NOATIME test on GNU Hurd, fixes #1315 (this is a very minor issue and the GNU Hurd project knows the bug) - document using a clean repo to test / build the release Version 1.0.7rc2 (2016-08-13) ----------------------------- Bug fixes: - do not write objects to repository that are bigger than the allowed size, borg will reject reading them, #1451. Important: if you created archives with many millions of files or directories, please verify if you can open them successfully, e.g. try a "borg list REPO::ARCHIVE". - lz4 compression: dynamically enlarge the (de)compression buffer, the static buffer was not big enough for archives with extremely many items, #1453 - larger item metadata stream chunks, raise archive item limit by 8x, #1452 - fix untracked segments made by moved DELETEs, #1442 Impact: Previously (metadata) segments could become untracked when deleting data, these would never be cleaned up. - extended attributes (xattrs) related fixes: - fixed a race condition in xattrs querying that led to the entire file not being backed up (while logging the error, exit code = 1), #1469 - fixed a race condition in xattrs querying that led to a crash, #1462 - raise OSError including the error message derived from errno, deal with path being a integer FD Other changes: - print active env var override by default, #1467 - xattr module: refactor code, deduplicate, clean up - repository: split object size check into too small and too big - add a transaction_id assertion, so borg init on a broken (inconsistent) filesystem does not look like a coding error in borg, but points to the real problem. - explain confusing TypeError caused by compat support for old servers, #1456 - add forgotten usage help file from build_usage - refactor/unify buffer code into helpers.Buffer class, add tests - docs: - document archive limitation, #1452 - improve prune examples Version 1.0.7rc1 (2016-08-05) ----------------------------- Bug fixes: - fix repo lock deadlocks (related to lock upgrade), #1220 - catch unpacker exceptions, resync, #1351 - fix borg break-lock ignoring BORG_REPO env var, #1324 - files cache performance fixes (fixes unnecessary re-reading/chunking/ hashing of unmodified files for some use cases): - fix unintended file cache eviction, #1430 - implement BORG_FILES_CACHE_TTL, update FAQ, raise default TTL from 10 to 20, #1338 - FUSE: - cache partially read data chunks (performance), #965, #966 - always create a root dir, #1125 - use an OrderedDict for helptext, making the build reproducible, #1346 - RemoteRepository init: always call close on exceptions, #1370 (cosmetic) - ignore stdout/stderr broken pipe errors (cosmetic), #1116 New features: - better borg versions management support (useful esp. 
for borg servers wanting to offer multiple borg versions and for clients wanting to choose a specific server borg version), #1392: - add BORG_VERSION environment variable before executing "borg serve" via ssh - add new placeholder {borgversion} - substitute placeholders in --remote-path - borg init --append-only option (makes using the more secure append-only mode more convenient. when used remotely, this requires 1.0.7+ also on the borg server), #1291. Other changes: - Vagrantfile: - darwin64: upgrade to FUSE for macOS 3.4.1 (aka osxfuse), #1378 - xenial64: use user "ubuntu", not "vagrant" (as usual), #1331 - tests: - fix FUSE tests on OS X, #1433 - docs: - FAQ: add backup using stable filesystem names recommendation - FAQ about glibc compatibility added, #491, glibc-check improved - FAQ: 'A' unchanged file; remove ambiguous entry age sentence. - OS X: install pkg-config to build with FUSE support, fixes #1400 - add notes about shell/sudo pitfalls with env. vars, #1380 - added platform feature matrix - implement borg debug-dump-repo-objs Version 1.0.6 (2016-07-12) -------------------------- Bug fixes: - Linux: handle multiple LD_PRELOAD entries correctly, #1314, #1111 - Fix crash with unclear message if the libc is not found, #1314, #1111 Other changes: - tests: - Fixed O_NOATIME tests for Solaris and GNU Hurd, #1315 - Fixed sparse file tests for (file) systems not supporting it, #1310 - docs: - Fixed syntax highlighting, #1313 - misc docs: added data processing overview picture Version 1.0.6rc1 (2016-07-10) ----------------------------- New features: - borg check --repair: heal damaged files if missing chunks re-appear (e.g. if the previously missing chunk was added again in a later backup archive), #148. (*) Also improved logging. Bug fixes: - sync_dir: silence fsync() failing with EINVAL, #1287 Some network filesystems (like smbfs) don't support this and we use this in repository code. - borg mount (FUSE): - fix directories being shadowed when contained paths were also specified, #1295 - raise I/O Error (EIO) on damaged files (unless -o allow_damaged_files is used), #1302. (*) - borg extract: warn if a damaged file is extracted, #1299. (*) - Added some missing return code checks (ChunkIndex._add, hashindex_resize). - borg check: fix/optimize initial hash table size, avoids resize of the table. Other changes: - tests: - add more FUSE tests, #1284 - deduplicate FUSE (u)mount code - fix borg binary test issues, #862 - docs: - changelog: added release dates to older borg releases - fix some sphinx (docs generator) warnings, #881 Notes: (*) Some features depend on information (chunks_healthy list) added to item metadata when a file with missing chunks was "repaired" using all-zero replacement chunks. The chunks_healthy list is generated since borg 1.0.4, thus borg can't recognize such "repaired" (but content-damaged) files if the repair was done with an older borg version. Version 1.0.5 (2016-07-07) -------------------------- Bug fixes: - borg mount: fix FUSE crash in xattr code on Linux introduced in 1.0.4, #1282 Other changes: - backport some FAQ entries from master branch - add release helper scripts - Vagrantfile: - centos6: no FUSE, don't build binary - add xz for redhat-like dists Version 1.0.4 (2016-07-07) -------------------------- New features: - borg serve --append-only, #1168 This was included because it was a simple change (append-only functionality was already present via repository config file) and makes better security now practically usable. 
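
  A minimal sketch of both ways to get append-only behaviour (paths and the
  key line are just examples): either force ``borg serve --append-only`` for
  a client key in ``authorized_keys`` on the server, or set ``append_only``
  in the ``[repository]`` section of the repository's config file::

      # on the backup server, in ~/.ssh/authorized_keys (one line)
      command="borg serve --append-only --restrict-to-path /path/to/repo" ssh-ed25519 AAAA... client@example

      # or directly in the repository's config file
      [repository]
      append_only = 1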
- BORG_REMOTE_PATH environment variable, #1258 This was included because it was a simple change (--remote-path cli option was already present) and makes borg much easier to use if you need it. - Repository: cleanup incomplete transaction on "no space left" condition. In many cases, this can avoid a 100% full repo filesystem (which is very problematic as borg always needs free space - even to delete archives). Bug fixes: - Fix wrong handling and reporting of OSErrors in borg create, #1138. This was a serious issue: in the context of "borg create", errors like repository I/O errors (e.g. disk I/O errors, ssh repo connection errors) were handled badly and did not lead to a crash (which would be good for this case, because the repo transaction would be incomplete and trigger a transaction rollback to clean up). Now, error handling for source files is cleanly separated from every other error handling, so only problematic input files are logged and skipped. - Implement fail-safe error handling for borg extract. Note that this isn't nearly as critical as the borg create error handling bug, since nothing is written to the repo. So this was "merely" misleading error reporting. - Add missing error handler in directory attr restore loop. - repo: make sure write data hits disk before the commit tag (#1236) and also sync the containing directory. - FUSE: getxattr fail must use errno.ENOATTR, #1126 (fixes Mac OS X Finder malfunction: "zero bytes" file length, access denied) - borg check --repair: do not lose information about the good/original chunks. If we do not lose the original chunk IDs list when "repairing" a file (replacing missing chunks with all-zero chunks), we have a chance to "heal" the file back into its original state later, in case the chunks re-appear (e.g. in a fresh backup). Healing is not implemented yet, see #148. - fixes for --read-special mode: - ignore known files cache, #1241 - fake regular file mode, #1214 - improve symlinks handling, #1215 - remove passphrase from subprocess environment, #1105 - Ignore empty index file (will trigger index rebuild), #1195 - add missing placeholder support for --prefix, #1027 - improve exception handling for placeholder replacement - catch and format exceptions in arg parsing - helpers: fix "undefined name 'e'" in exception handler - better error handling for missing repo manifest, #1043 - borg delete: - make it possible to delete a repo without manifest - borg delete --forced allows to delete corrupted archives, #1139 - borg check: - make borg check work for empty repo - fix resync and msgpacked item qualifier, #1135 - rebuild_manifest: fix crash if 'name' or 'time' key were missing. - better validation of item metadata dicts, #1130 - better validation of archive metadata dicts - close the repo on exit - even if rollback did not work, #1197. This is rather cosmetic, it avoids repo closing in the destructor. 
- tests: - fix sparse file test, #1170 - flake8: ignore new F405, #1185 - catch "invalid argument" on cygwin, #257 - fix sparseness assertion in test prep, #1264 Other changes: - make borg build/work on OpenSSL 1.0 and 1.1, #1187 - docs / help: - fix / clarify prune help, #1143 - fix "patterns" help formatting - add missing docs / help about placeholders - resources: rename atticmatic to borgmatic - document sshd settings, #545 - more details about checkpoints, add split trick, #1171 - support docs: add freenode web chat link, #1175 - add prune visualization / example, #723 - add note that Fnmatch is default, #1247 - make clear that lzma levels > 6 are a waste of cpu cycles - add a "do not edit" note to auto-generated files, #1250 - update cygwin installation docs - repository interoperability with borg master (1.1dev) branch: - borg check: read item metadata keys from manifest, #1147 - read v2 hints files, #1235 - fix hints file "unknown version" error handling bug - tests: add tests for format_line - llfuse: update version requirement for freebsd - Vagrantfile: - use openbsd 5.9, #716 - do not install llfuse on netbsd (broken) - update OSXfuse to version 3.3.3 - use Python 3.5.2 to build the binaries - glibc compatibility checker: scripts/glibc_check.py - add .eggs to .gitignore Version 1.0.3 (2016-05-20) -------------------------- Bug fixes: - prune: avoid that checkpoints are kept and completed archives are deleted in a prune run), #997 - prune: fix commandline argument validation - some valid command lines were considered invalid (annoying, but harmless), #942 - fix capabilities extraction on Linux (set xattrs last, after chown()), #1069 - repository: fix commit tags being seen in data - when probing key files, do binary reads. avoids crash when non-borg binary files are located in borg's key files directory. - handle SIGTERM and make a clean exit - avoids orphan lock files. - repository cache: don't cache large objects (avoid using lots of temp. disk space), #1063 Other changes: - Vagrantfile: OS X: update osxfuse / install lzma package, #933 - setup.py: add check for platform_darwin.c - setup.py: on freebsd, use a llfuse release that builds ok - docs / help: - update readthedocs URLs, #991 - add missing docs for "borg break-lock", #992 - borg create help: add some words to about the archive name - borg create help: document format tags, #894 Version 1.0.2 (2016-04-16) -------------------------- Bug fixes: - fix malfunction and potential corruption on (nowadays rather rare) big-endian architectures or bi-endian archs in (rare) BE mode. #886, #889 cache resync / index merge was malfunctioning due to this, potentially leading to data loss. borg info had cosmetic issues (displayed wrong values). note: all (widespread) little-endian archs (like x86/x64) or bi-endian archs in (widespread) LE mode (like ARMEL, MIPSEL, ...) were NOT affected. - add overflow and range checks for 1st (special) uint32 of the hashindex values, switch from int32 to uint32. - fix so that refcount will never overflow, but just stick to max. value after a overflow would have occurred. - borg delete: fix --cache-only for broken caches, #874 Makes --cache-only idempotent: it won't fail if the cache is already deleted. - fixed borg create --one-file-system erroneously traversing into other filesystems (if starting fs device number was 0), #873 - workround a bug in Linux fadvise FADV_DONTNEED, #907 Other changes: - better test coverage for hashindex, incl. 
overflow testing, checking correct computations so endianness issues would be discovered. - reproducible doc for ProgressIndicator*, make the build reproducible. - use latest llfuse for vagrant machines - docs: - use /path/to/repo in examples, fixes #901 - fix confusing usage of "repo" as archive name (use "arch") Version 1.0.1 (2016-04-08) -------------------------- New features: Usually there are no new features in a bugfix release, but these were added due to their high impact on security/safety/speed or because they are fixes also: - append-only mode for repositories, #809, #36 (see docs) - borg create: add --ignore-inode option to make borg detect unmodified files even if your filesystem does not have stable inode numbers (like sshfs and possibly CIFS). - add options --warning, --error, --critical for missing log levels, #826. it's not recommended to suppress warnings or errors, but the user may decide this on his own. note: --warning is not given to borg serve so a <= 1.0.0 borg will still work as server (it is not needed as it is the default). do not use --error or --critical when using a <= 1.0.0 borg server. Bug fixes: - fix silently skipping EIO, #748 - add context manager for Repository (avoid orphan repository locks), #285 - do not sleep for >60s while waiting for lock, #773 - unpack file stats before passing to FUSE - fix build on illumos - don't try to backup doors or event ports (Solaris and derivates) - remove useless/misleading libc version display, #738 - test suite: reset exit code of persistent archiver, #844 - RemoteRepository: clean up pipe if remote open() fails - Remote: don't print tracebacks for Error exceptions handled downstream, #792 - if BORG_PASSPHRASE is present but wrong, don't prompt for password, but fail instead, #791 - ArchiveChecker: move "orphaned objects check skipped" to INFO log level, #826 - fix capitalization, add ellipses, change log level to debug for 2 messages, #798 Other changes: - update llfuse requirement, llfuse 1.0 works - update OS / dist packages on build machines, #717 - prefer showing --info over -v in usage help, #859 - docs: - fix cygwin requirements (gcc-g++) - document how to debug / file filesystem issues, #664 - fix reproducible build of api docs - RTD theme: CSS !important overwrite, #727 - Document logo font. Recreate logo png. Remove GIMP logo file. Version 1.0.0 (2016-03-05) -------------------------- The major release number change (0.x -> 1.x) indicates bigger incompatible changes, please read the compatibility notes, adapt / test your scripts and check your backup logs. Compatibility notes: - drop support for python 3.2 and 3.3, require 3.4 or 3.5, #221 #65 #490 note: we provide binaries that include python 3.5.1 and everything else needed. they are an option in case you are stuck with < 3.4 otherwise. 
- change encryption to be on by default (using "repokey" mode)
- moved keyfile keys from ~/.borg/keys to ~/.config/borg/keys,
  you can either move them manually or run "borg upgrade "
- remove support for --encryption=passphrase,
  use borg migrate-to-repokey to switch to repokey mode, #97
- remove deprecated --compression , use --compression zlib, instead
  in case of 0, you could also use --compression none
- remove deprecated --hourly/daily/weekly/monthly/yearly,
  use --keep-hourly/daily/weekly/monthly/yearly instead
- remove deprecated --do-not-cross-mountpoints, use --one-file-system instead
- disambiguate -p option, #563:

  - -p now is same as --progress
  - -P now is same as --prefix
- remove deprecated "borg verify", use "borg extract --dry-run" instead
- cleanup environment variable semantics, #355
  the environment variables used to be "yes sayers" when set, this was
  conceptually generalized to "automatic answerers" and they just give their
  value as answer (as if you typed in that value when being asked).
  See the "usage" / "Environment Variables" section of the docs for details.
- change the builtin default for --chunker-params, create 2MiB chunks, #343
  --chunker-params new default: 19,23,21,4095 - old default: 10,23,16,4095
  (see the worked example after these compatibility notes)

  one of the biggest issues with borg < 1.0 (and also attic) was that it had
  a default target chunk size of 64kiB, thus it created a lot of chunks and
  thus also a huge chunk management overhead (high RAM and disk usage).

  please note that the new default won't change the chunks that you already
  have in your repository. the new big chunks do not deduplicate with the
  old small chunks, so expect your repo to grow at least by the size of
  every changed file and in the worst case (e.g. if your files cache was
  lost / is not used) by the size of every file (minus any compression you
  might use).

  in case you want to immediately see a much lower resource usage (RAM /
  disk) for chunks management, it might be better to start with a new repo
  than continuing in the existing repo (with an existing repo, you'd have to
  wait until all archives with small chunks got pruned to see a lower
  resource usage).

  if you used the old --chunker-params default value (or if you did not use
  --chunker-params option at all) and you'd like to continue using small
  chunks (and you accept the huge resource usage that comes with that), just
  explicitly use borg create --chunker-params=10,23,16,4095.
- archive timestamps: the 'time' timestamp now refers to archive creation
  start time (was: end time), the new 'time_end' timestamp refers to archive
  creation end time. This might affect prune if your backups take rather
  long. if you give a timestamp via cli this is stored into 'time', therefore
  it now needs to mean archive creation start time.
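
A worked reading of the new --chunker-params default (the first three values
are powers-of-two exponents for the minimum, maximum and target chunk sizes,
the last one is the rolling-hash window size); a sketch for illustration
only, it does not change any behaviour::

    $ borg create --chunker-params 19,23,21,4095 /path/to/repo::archive ~/data
    # 19   -> minimum chunk size 2^19 bytes = 512 kiB
    # 23   -> maximum chunk size 2^23 bytes = 8 MiB
    # 21   -> target (average) chunk size about 2^21 bytes = 2 MiB
    # 4095 -> window size of the rolling hash, in bytes
    # old default 10,23,16,4095: target about 2^16 bytes = 64 kiB per chunk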
New features: - implement password roundtrip, #695 Bug fixes: - remote end does not need cache nor keys directories, do not create them, #701 - added retry counter for passwords, #703 Other changes: - fix compiler warnings, #697 - docs: - update README.rst to new changelog location in docs/changes.rst - add Teemu to AUTHORS - changes.rst: fix old chunker params, #698 - FAQ: how to limit bandwidth Version 1.0.0rc2 (2016-02-28) ----------------------------- New features: - format options for location: user, pid, fqdn, hostname, now, utcnow, user - borg list --list-format - borg prune -v --list enables the keep/prune list output, #658 Bug fixes: - fix _open_rb noatime handling, #657 - add a simple archivename validator, #680 - borg create --stats: show timestamps in localtime, use same labels/formatting as borg info, #651 - llfuse compatibility fixes (now compatible with: 0.40, 0.41, 0.42) Other changes: - it is now possible to use "pip install borgbackup[fuse]" to automatically install the llfuse dependency using the correct version requirement for it. you still need to care about having installed the FUSE / build related OS package first, though, so that building llfuse can succeed. - Vagrant: drop Ubuntu Precise (12.04) - does not have Python >= 3.4 - Vagrant: use pyinstaller v3.1.1 to build binaries - docs: - borg upgrade: add to docs that only LOCAL repos are supported - borg upgrade also handles borg 0.xx -> 1.0 - use pip extras or requirements file to install llfuse - fix order in release process - updated usage docs and other minor / cosmetic fixes - verified borg examples in docs, #644 - freebsd dependency installation and FUSE configuration, #649 - add example how to restore a raw device, #671 - add a hint about the dev headers needed when installing from source - add examples for delete (and handle delete after list, before prune), #656 - update example for borg create -v --stats (use iso datetime format), #663 - added example to BORG_RSH docs - "connection closed by remote": add FAQ entry and point to issue #636 Version 1.0.0rc1 (2016-02-07) ----------------------------- New features: - borg migrate-to-repokey ("passphrase" -> "repokey" encryption key mode) - implement --short for borg list REPO, #611 - implement --list for borg extract (consistency with borg create) - borg serve: overwrite client's --restrict-to-path with ssh forced command's option value (but keep everything else from the client commandline), #544 - use $XDG_CONFIG_HOME/keys for keyfile keys (~/.config/borg/keys), #515 - "borg upgrade" moves the keyfile keys to the new location - display both archive creation start and end time in "borg info", #627 Bug fixes: - normalize trailing slashes for the repository path, #606 - Cache: fix exception handling in __init__, release lock, #610 Other changes: - suppress unneeded exception context (PEP 409), simpler tracebacks - removed special code needed to deal with imperfections / incompatibilities / missing stuff in py 3.2/3.3, simplify code that can be done simpler in 3.4 - removed some version requirements that were kept on old versions because newer did not support py 3.2 any more - use some py 3.4+ stdlib code instead of own/openssl/pypi code: - use os.urandom instead of own cython openssl RAND_bytes wrapper, #493 - use hashlib.pbkdf2_hmac from py stdlib instead of own openssl wrapper - use hmac.compare_digest instead of == operator (constant time comparison) - use stat.filemode instead of homegrown code - use "mock" library from stdlib, #145 - remove borg.support 
(with non-broken argparse copy), it is ok in 3.4+, #358 - Vagrant: copy CHANGES.rst as symlink, #592 - cosmetic code cleanups, add flake8 to tox/travis, #4 - docs / help: - make "borg -h" output prettier, #591 - slightly rephrase prune help - add missing example for --list option of borg create - quote exclude line that includes an asterisk to prevent shell expansion - fix dead link to license - delete Ubuntu Vivid, it is not supported anymore (EOL) - OS X binary does not work for older OS X releases, #629 - borg serve's special support for forced/original ssh commands, #544 - misc. updates and fixes Version 0.30.0 (2016-01-23) --------------------------- Compatibility notes: - you may need to use -v (or --info) more often to actually see output emitted at INFO log level (because it is suppressed at the default WARNING log level). See the "general" section in the usage docs. - for borg create, you need --list (additionally to -v) to see the long file list (was needed so you can have e.g. --stats alone without the long list) - see below about BORG_DELETE_I_KNOW_WHAT_I_AM_DOING (was: BORG_CHECK_I_KNOW_WHAT_I_AM_DOING) Bug fixes: - fix crash when using borg create --dry-run --keep-tag-files, #570 - make sure teardown with cleanup happens for Cache and RepositoryCache, avoiding leftover locks and TEMP dir contents, #285 (partially), #548 - fix locking KeyError, partial fix for #502 - log stats consistently, #526 - add abbreviated weekday to timestamp format, fixes #496 - strip whitespace when loading exclusions from file - unset LD_LIBRARY_PATH before invoking ssh, fixes strange OpenSSL library version warning when using the borg binary, #514 - add some error handling/fallback for C library loading, #494 - added BORG_DELETE_I_KNOW_WHAT_I_AM_DOING for check in "borg delete", #503 - remove unused "repair" rpc method name New features: - borg create: implement exclusions using regular expression patterns. - borg create: implement inclusions using patterns. - borg extract: support patterns, #361 - support different styles for patterns: - fnmatch (`fm:` prefix, default when omitted), like borg <= 0.29. - shell (`sh:` prefix) with `*` not matching directory separators and `**/` matching 0..n directories - path prefix (`pp:` prefix, for unifying borg create pp1 pp2 into the patterns system), semantics like in borg <= 0.29 - regular expression (`re:`), new! - --progress option for borg upgrade (#291) and borg delete - update progress indication more often (e.g. for borg create within big files or for borg check repo), #500 - finer chunker granularity for items metadata stream, #547, #487 - borg create --list now used (additionally to -v) to enable the verbose file list output - display borg version below tracebacks, #532 Other changes: - hashtable size (and thus: RAM and disk consumption) follows a growth policy: grows fast while small, grows slower when getting bigger, #527 - Vagrantfile: use pyinstaller 3.1 to build binaries, freebsd sqlite3 fix, fixes #569 - no separate binaries for centos6 any more because the generic linux binaries also work on centos6 (or in general: on systems with a slightly older glibc than debian7 - dev environment: require virtualenv<14.0 so we get a py32 compatible pip - docs: - add space-saving chunks.archive.d trick to FAQ - important: clarify -v and log levels in usage -> general, please read! - sphinx configuration: create a simple man page from usage docs - add a repo server setup example - disable unneeded SSH features in authorized_keys examples for security. 
- borg prune only knows "--keep-within" and not "--within" - add gource video to resources docs, #507 - add netbsd install instructions - authors: make it more clear what refers to borg and what to attic - document standalone binary requirements, #499 - rephrase the mailing list section - development docs: run build_api and build_usage before tagging release - internals docs: hash table max. load factor is 0.75 now - markup, typo, grammar, phrasing, clarifications and other fixes. - add gcc gcc-c++ to redhat/fedora/corora install docs, fixes #583 Version 0.29.0 (2015-12-13) --------------------------- Compatibility notes: - when upgrading to 0.29.0 you need to upgrade client as well as server installations due to the locking and commandline interface changes otherwise you'll get an error msg about a RPC protocol mismatch or a wrong commandline option. if you run a server that needs to support both old and new clients, it is suggested that you have a "borg-0.28.2" and a "borg-0.29.0" command. clients then can choose via e.g. "borg --remote-path=borg-0.29.0 ...". - the default waiting time for a lock changed from infinity to 1 second for a better interactive user experience. if the repo you want to access is currently locked, borg will now terminate after 1s with an error message. if you have scripts that shall wait for the lock for a longer time, use --lock-wait N (with N being the maximum wait time in seconds). Bug fixes: - hash table tuning (better chosen hashtable load factor 0.75 and prime initial size of 1031 gave ~1000x speedup in some scenarios) - avoid creation of an orphan lock for one case, #285 - --keep-tag-files: fix file mode and multiple tag files in one directory, #432 - fixes for "borg upgrade" (attic repo converter), #466 - remove --progress isatty magic (and also --no-progress option) again, #476 - borg init: display proper repo URL - fix format of umask in help pages, #463 New features: - implement --lock-wait, support timeout for UpgradableLock, #210 - implement borg break-lock command, #157 - include system info below traceback, #324 - sane remote logging, remote stderr, #461: - remote log output: intercept it and log it via local logging system, with "Remote: " prefixed to message. log remote tracebacks. - remote stderr: output it to local stderr with "Remote: " prefixed. - add --debug and --info (same as --verbose) to set the log level of the builtin logging configuration (which otherwise defaults to warning), #426 note: there are few messages emitted at DEBUG level currently. - optionally configure logging via env var BORG_LOGGING_CONF - add --filter option for status characters: e.g. to show only the added or modified files (and also errors), use "borg create -v --filter=AME ...". 
- more progress indicators, #394 - use ISO-8601 date and time format, #375 - "borg check --prefix" to restrict archive checking to that name prefix, #206 Other changes: - hashindex_add C implementation (speed up cache re-sync for new archives) - increase FUSE read_size to 1024 (speed up metadata operations) - check/delete/prune --save-space: free unused segments quickly, #239 - increase rpc protocol version to 2 (see also Compatibility notes), #458 - silence borg by default (via default log level WARNING) - get rid of C compiler warnings, #391 - upgrade OS X FUSE to 3.0.9 on the OS X binary build system - use python 3.5.1 to build binaries - docs: - new mailing list borgbackup@python.org, #468 - readthedocs: color and logo improvements - load coverage icons over SSL (avoids mixed content) - more precise binary installation steps - update release procedure docs about OS X FUSE - FAQ entry about unexpected 'A' status for unchanged file(s), #403 - add docs about 'E' file status - add "borg upgrade" docs, #464 - add developer docs about output and logging - clarify encryption, add note about client-side encryption - add resources section, with videos, talks, presentations, #149 - Borg moved to Arch Linux [community] - fix wrong installation instructions for archlinux Version 0.28.2 (2015-11-15) --------------------------- New features: - borg create --exclude-if-present TAGFILE - exclude directories that have the given file from the backup. You can additionally give --keep-tag-files to preserve just the directory roots and the tag-files (but not backup other directory contents), #395, attic #128, attic #142 Other changes: - do not create docs sources at build time (just have them in the repo), completely remove have_cython() hack, do not use the "mock" library at build time, #384 - avoid hidden import, make it easier for PyInstaller, easier fix for #218 - docs: - add description of item flags / status output, fixes #402 - explain how to regenerate usage and API files (build_api or build_usage) and when to commit usage files directly into git, #384 - minor install docs improvements Version 0.28.1 (2015-11-08) --------------------------- Bug fixes: - do not try to build api / usage docs for production install, fixes unexpected "mock" build dependency, #384 Other changes: - avoid using msgpack.packb at import time - fix formatting issue in changes.rst - fix build on readthedocs Version 0.28.0 (2015-11-08) --------------------------- Compatibility notes: - changed return codes (exit codes), see docs. in short: old: 0 = ok, 1 = error. now: 0 = ok, 1 = warning, 2 = error New features: - refactor return codes (exit codes), fixes #61 - add --show-rc option enable "terminating with X status, rc N" output, fixes 58, #351 - borg create backups atime and ctime additionally to mtime, fixes #317 - extract: support atime additionally to mtime - FUSE: support ctime and atime additionally to mtime - support borg --version - emit a warning if we have a slow msgpack installed - borg list --prefix=thishostname- REPO, fixes #205 - Debug commands (do not use except if you know what you do: debug-get-obj, debug-put-obj, debug-delete-obj, debug-dump-archive-items. 
Bug fixes: - setup.py: fix bug related to BORG_LZ4_PREFIX processing - fix "check" for repos that have incomplete chunks, fixes #364 - borg mount: fix unlocking of repository at umount time, fixes #331 - fix reading files without touching their atime, #334 - non-ascii ACL fixes for Linux, FreeBSD and OS X, #277 - fix acl_use_local_uid_gid() and add a test for it, attic #359 - borg upgrade: do not upgrade repositories in place by default, #299 - fix cascading failure with the index conversion code, #269 - borg check: implement 'cmdline' archive metadata value decoding, #311 - fix RobustUnpacker, it missed some metadata keys (new atime and ctime keys were missing, but also bsdflags). add check for unknown metadata keys. - create from stdin: also save atime, ctime (cosmetic) - use default_notty=False for confirmations, fixes #345 - vagrant: fix msgpack installation on centos, fixes #342 - deal with unicode errors for symlinks in same way as for regular files and have a helpful warning message about how to fix wrong locale setup, fixes #382 - add ACL keys the RobustUnpacker must know about Other changes: - improve file size displays, more flexible size formatters - explicitly commit to the units standard, #289 - archiver: add E status (means that an error occurred when processing this (single) item - do binary releases via "github releases", closes #214 - create: use -x and --one-file-system (was: --do-not-cross-mountpoints), #296 - a lot of changes related to using "logging" module and screen output, #233 - show progress display if on a tty, output more progress information, #303 - factor out status output so it is consistent, fix surrogates removal, maybe fixes #309 - move away from RawConfigParser to ConfigParser - archive checker: better error logging, give chunk_id and sequence numbers (can be used together with borg debug-dump-archive-items). 
- do not mention the deprecated passphrase mode - emit a deprecation warning for --compression N (giving just a number) - misc .coveragerc fixes (and coverage measurement improvements), fixes #319 - refactor confirmation code, reduce code duplication, add tests - prettier error messages, fixes #307, #57 - tests: - add a test to find disk-full issues, #327 - travis: also run tests on Python 3.5 - travis: use tox -r so it rebuilds the tox environments - test the generated pyinstaller-based binary by archiver unit tests, #215 - vagrant: tests: announce whether fakeroot is used or not - vagrant: add vagrant user to fuse group for debianoid systems also - vagrant: llfuse install on darwin needs pkgconfig installed - vagrant: use pyinstaller from develop branch, fixes #336 - benchmarks: test create, extract, list, delete, info, check, help, fixes #146 - benchmarks: test with both the binary and the python code - archiver tests: test with both the binary and the python code, fixes #215 - make basic test more robust - docs: - moved docs to borgbackup.readthedocs.org, #155 - a lot of fixes and improvements, use mobile-friendly RTD standard theme - use zlib,6 compression in some examples, fixes #275 - add missing rename usage to docs, closes #279 - include the help offered by borg help in the usage docs, fixes #293 - include a list of major changes compared to attic into README, fixes #224 - add OS X install instructions, #197 - more details about the release process, #260 - fix linux glibc requirement (binaries built on debian7 now) - build: move usage and API generation to setup.py - update docs about return codes, #61 - remove api docs (too much breakage on rtd) - borgbackup install + basics presentation (asciinema) - describe the current style guide in documentation - add section about debug commands - warn about not running out of space - add example for rename - improve chunker params docs, fixes #362 - minor development docs update Version 0.27.0 (2015-10-07) --------------------------- New features: - "borg upgrade" command - attic -> borg one time converter / migration, #21 - temporary hack to avoid using lots of disk space for chunks.archive.d, #235: To use it: rm -rf chunks.archive.d ; touch chunks.archive.d - respect XDG_CACHE_HOME, attic #181 - add support for arbitrary SSH commands, attic #99 - borg delete --cache-only REPO (only delete cache, not REPO), attic #123 Bug fixes: - use Debian 7 (wheezy) to build pyinstaller borgbackup binaries, fixes slow down observed when running the Centos6-built binary on Ubuntu, #222 - do not crash on empty lock.roster, fixes #232 - fix multiple issues with the cache config version check, #234 - fix segment entry header size check, attic #352 plus other error handling improvements / code deduplication there. - always give segment and offset in repo IntegrityErrors Other changes: - stop producing binary wheels, remove docs about it, #147 - docs: - add warning about prune - generate usage include files only as needed - development docs: add Vagrant section - update / improve / reformat FAQ - hint to single-file pyinstaller binaries from README Version 0.26.1 (2015-09-28) --------------------------- This is a minor update, just docs and new pyinstaller binaries. - docs update about python and binary requirements - better docs for --read-special, fix #220 - re-built the binaries, fix #218 and #213 (glibc version issue) - update web site about single-file pyinstaller binaries Note: if you did a python-based installation, there is no need to upgrade. 
Version 0.26.0 (2015-09-19) --------------------------- New features: - Faster cache sync (do all in one pass, remove tar/compression stuff), #163 - BORG_REPO env var to specify the default repo, #168 - read special files as if they were regular files, #79 - implement borg create --dry-run, attic issue #267 - Normalize paths before pattern matching on OS X, #143 - support OpenBSD and NetBSD (except xattrs/ACLs) - support / run tests on Python 3.5 Bug fixes: - borg mount repo: use absolute path, attic #200, attic #137 - chunker: use off_t to get 64bit on 32bit platform, #178 - initialize chunker fd to -1, so it's not equal to STDIN_FILENO (0) - fix reaction to "no" answer at delete repo prompt, #182 - setup.py: detect lz4.h header file location - to support python < 3.2.4, add less buggy argparse lib from 3.2.6 (#194) - fix for obtaining ``char *`` from temporary Python value (old code causes a compile error on Mint 17.2) - llfuse 0.41 install troubles on some platforms, require < 0.41 (UnicodeDecodeError exception due to non-ascii llfuse setup.py) - cython code: add some int types to get rid of unspecific python add / subtract operations (avoid ``undefined symbol FPE_``... error on some platforms) - fix verbose mode display of stdin backup - extract: warn if a include pattern never matched, fixes #209, implement counters for Include/ExcludePatterns - archive names with slashes are invalid, attic issue #180 - chunker: add a check whether the POSIX_FADV_DONTNEED constant is defined - fixes building on OpenBSD. Other changes: - detect inconsistency / corruption / hash collision, #170 - replace versioneer with setuptools_scm, #106 - docs: - pkg-config is needed for llfuse installation - be more clear about pruning, attic issue #132 - unit tests: - xattr: ignore security.selinux attribute showing up - ext3 seems to need a bit more space for a sparse file - do not test lzma level 9 compression (avoid MemoryError) - work around strange mtime granularity issue on netbsd, fixes #204 - ignore st_rdev if file is not a block/char device, fixes #203 - stay away from the setgid and sticky mode bits - use Vagrant to do easy cross-platform testing (#196), currently: - Debian 7 "wheezy" 32bit, Debian 8 "jessie" 64bit - Ubuntu 12.04 32bit, Ubuntu 14.04 64bit - Centos 7 64bit - FreeBSD 10.2 64bit - OpenBSD 5.7 64bit - NetBSD 6.1.5 64bit - Darwin (OS X Yosemite) Version 0.25.0 (2015-08-29) --------------------------- Compatibility notes: - lz4 compression library (liblz4) is a new requirement (#156) - the new compression code is very compatible: as long as you stay with zlib compression, older borg releases will still be able to read data from a repo/archive made with the new code (note: this is not the case for the default "none" compression, use "zlib,0" if you want a "no compression" mode that can be read by older borg). Also the new code is able to read repos and archives made with older borg versions (for all zlib levels 0..9). Deprecations: - --compression N (with N being a number, as in 0.24) is deprecated. We keep the --compression 0..9 for now to not break scripts, but it is deprecated and will be removed later, so better fix your scripts now: --compression 0 (as in 0.24) is the same as --compression zlib,0 (now). BUT: if you do not want compression, you rather want --compression none (which is the default). 
--compression 1 (in 0.24) is the same as --compression zlib,1 (now) --compression 9 (in 0.24) is the same as --compression zlib,9 (now) New features: - create --compression none (default, means: do not compress, just pass through data "as is". this is more efficient than zlib level 0 as used in borg 0.24) - create --compression lz4 (super-fast, but not very high compression) - create --compression zlib,N (slower, higher compression, default for N is 6) - create --compression lzma,N (slowest, highest compression, default N is 6) - honor the nodump flag (UF_NODUMP) and do not backup such items - list --short just outputs a simple list of the files/directories in an archive Bug fixes: - fixed --chunker-params parameter order confusion / malfunction, fixes #154 - close fds of segments we delete (during compaction) - close files which fell out the lrucache - fadvise DONTNEED now is only called for the byte range actually read, not for the whole file, fixes #158. - fix issue with negative "all archives" size, fixes #165 - restore_xattrs: ignore if setxattr fails with EACCES, fixes #162 Other changes: - remove fakeroot requirement for tests, tests run faster without fakeroot (test setup does not fail any more without fakeroot, so you can run with or without fakeroot), fixes #151 and #91. - more tests for archiver - recover_segment(): don't assume we have an fd for segment - lrucache refactoring / cleanup, add dispose function, py.test tests - generalize hashindex code for any key length (less hardcoding) - lock roster: catch file not found in remove() method and ignore it - travis CI: use requirements file - improved docs: - replace hack for llfuse with proper solution (install libfuse-dev) - update docs about compression - update development docs about fakeroot - internals: add some words about lock files / locking system - support: mention BountySource and for what it can be used - theme: use a lighter green - add pypi, wheel, dist package based install docs - split install docs into system-specific preparations and generic instructions Version 0.24.0 (2015-08-09) --------------------------- Incompatible changes (compared to 0.23): - borg now always issues --umask NNN option when invoking another borg via ssh on the repository server. By that, it's making sure it uses the same umask for remote repos as for local ones. Because of this, you must upgrade both server and client(s) to 0.24. - the default umask is 077 now (if you do not specify via --umask) which might be a different one as you used previously. The default umask avoids that you accidentally give access permissions for group and/or others to files created by borg (e.g. the repository). Deprecations: - "--encryption passphrase" mode is deprecated, see #85 and #97. See the new "--encryption repokey" mode for a replacement. New features: - borg create --chunker-params ... to configure the chunker, fixes #16 (attic #302, attic #300, and somehow also #41). This can be used to reduce memory usage caused by chunk management overhead, so borg does not create a huge chunks index/repo index and eats all your RAM if you back up lots of data in huge files (like VM disk images). See docs/misc/create_chunker-params.txt for more information. - borg info now reports chunk counts in the chunk index. - borg create --compression 0..9 to select zlib compression level, fixes #66 (attic #295). 
- borg init --encryption repokey (to store the encryption key into the repo), fixes #85 - improve at-end error logging, always log exceptions and set exit_code=1 - LoggedIO: better error checks / exceptions / exception handling - implement --remote-path to allow non-default-path borg locations, #125 - implement --umask M and use 077 as default umask for better security, #117 - borg check: give a named single archive to it, fixes #139 - cache sync: show progress indication - cache sync: reimplement the chunk index merging in C Bug fixes: - fix segfault that happened for unreadable files (chunker: n needs to be a signed size_t), #116 - fix the repair mode, #144 - repo delete: add destroy to allowed rpc methods, fixes issue #114 - more compatible repository locking code (based on mkdir), maybe fixes #92 (attic #317, attic #201). - better Exception msg if no Borg is installed on the remote repo server, #56 - create a RepositoryCache implementation that can cope with >2GiB, fixes attic #326. - fix Traceback when running check --repair, attic #232 - clarify help text, fixes #73. - add help string for --no-files-cache, fixes #140 Other changes: - improved docs: - added docs/misc directory for misc. writeups that won't be included "as is" into the html docs. - document environment variables and return codes (attic #324, attic #52) - web site: add related projects, fix web site url, IRC #borgbackup - Fedora/Fedora-based install instructions added to docs - Cygwin-based install instructions added to docs - updated AUTHORS - add FAQ entries about redundancy / integrity - clarify that borg extract uses the cwd as extraction target - update internals doc about chunker params, memory usage and compression - added docs about development - add some words about resource usage in general - document how to backup a raw disk - add note about how to run borg from virtual env - add solutions for (ll)fuse installation problems - document what borg check does, fixes #138 - reorganize borgbackup.github.io sidebar, prev/next at top - deduplicate and refactor the docs / README.rst - use borg-tmp as prefix for temporary files / directories - short prune options without "keep-" are deprecated, do not suggest them - improved tox configuration - remove usage of unittest.mock, always use mock from pypi - use entrypoints instead of scripts, for better use of the wheel format and modern installs - add requirements.d/development.txt and modify tox.ini - use travis-ci for testing based on Linux and (new) OS X - use coverage.py, pytest-cov and codecov.io for test coverage support I forgot to list some stuff already implemented in 0.23.0, here they are: New features: - efficient archive list from manifest, meaning a big speedup for slow repo connections and "list ", "delete ", "prune" (attic #242, attic #167) - big speedup for chunks cache sync (esp. 
for slow repo connections), fixes #18 - hashindex: improve error messages Other changes: - explicitly specify binary mode to open binary files - some easy micro optimizations Version 0.23.0 (2015-06-11) --------------------------- Incompatible changes (compared to attic, fork related): - changed sw name and cli command to "borg", updated docs - package name (and name in urls) uses "borgbackup" to have fewer collisions - changed repo / cache internal magic strings from ATTIC* to BORG*, changed cache location to .cache/borg/ - this means that it currently won't accept attic repos (see issue #21 about improving that) Bug fixes: - avoid defect python-msgpack releases, fixes attic #171, fixes attic #185 - fix traceback when trying to do unsupported passphrase change, fixes attic #189 - datetime does not like the year 10.000, fixes attic #139 - fix "info" all archives stats, fixes attic #183 - fix parsing with missing microseconds, fixes attic #282 - fix misleading hint the fuse ImportError handler gave, fixes attic #237 - check unpacked data from RPC for tuple type and correct length, fixes attic #127 - fix Repository._active_txn state when lock upgrade fails - give specific path to xattr.is_enabled(), disable symlink setattr call that always fails - fix test setup for 32bit platforms, partial fix for attic #196 - upgraded versioneer, PEP440 compliance, fixes attic #257 New features: - less memory usage: add global option --no-cache-files - check --last N (only check the last N archives) - check: sort archives in reverse time order - rename repo::oldname newname (rename repository) - create -v output more informative - create --progress (backup progress indicator) - create --timestamp (utc string or reference file/dir) - create: if "-" is given as path, read binary from stdin - extract: if --stdout is given, write all extracted binary data to stdout - extract --sparse (simple sparse file support) - extra debug information for 'fread failed' - delete (deletes whole repo + local cache) - FUSE: reflect deduplication in allocated blocks - only allow whitelisted RPC calls in server mode - normalize source/exclude paths before matching - use posix_fadvise to not spoil the OS cache, fixes attic #252 - toplevel error handler: show tracebacks for better error analysis - sigusr1 / sigint handler to print current file infos - attic PR #286 - RPCError: include the exception args we get from remote Other changes: - source: misc. cleanups, pep8, style - docs and faq improvements, fixes, updates - cleanup crypto.pyx, make it easier to adapt to other AES modes - do os.fsync like recommended in the python docs - source: Let chunker optionally work with os-level file descriptor. 
- source: Linux: remove duplicate os.fsencode calls - source: refactor _open_rb code a bit, so it is more consistent / regular - source: refactor indicator (status) and item processing - source: use py.test for better testing, flake8 for code style checks - source: fix tox >=2.0 compatibility (test runner) - pypi package: add python version classifiers, add FreeBSD to platforms Attic Changelog --------------- Here you can see the full list of changes between each Attic release until Borg forked from Attic: Version 0.17 ~~~~~~~~~~~~ (bugfix release, released on X) - Fix hashindex ARM memory alignment issue (#309) - Improve hashindex error messages (#298) Version 0.16 ~~~~~~~~~~~~ (bugfix release, released on May 16, 2015) - Fix typo preventing the security confirmation prompt from working (#303) - Improve handling of systems with improperly configured file system encoding (#289) - Fix "All archives" output for attic info. (#183) - More user friendly error message when repository key file is not found (#236) - Fix parsing of iso 8601 timestamps with zero microseconds (#282) Version 0.15 ~~~~~~~~~~~~ (bugfix release, released on Apr 15, 2015) - xattr: Be less strict about unknown/unsupported platforms (#239) - Reduce repository listing memory usage (#163). - Fix BrokenPipeError for remote repositories (#233) - Fix incorrect behavior with two character directory names (#265, #268) - Require approval before accessing relocated/moved repository (#271) - Require approval before accessing previously unknown unencrypted repositories (#271) - Fix issue with hash index files larger than 2GB. - Fix Python 3.2 compatibility issue with noatime open() (#164) - Include missing pyx files in dist files (#168) Version 0.14 ~~~~~~~~~~~~ (feature release, released on Dec 17, 2014) - Added support for stripping leading path segments (#95) "attic extract --strip-segments X" - Add workaround for old Linux systems without acl_extended_file_no_follow (#96) - Add MacPorts' path to the default openssl search path (#101) - HashIndex improvements, eliminates unnecessary IO on low memory systems. - Fix "Number of files" output for attic info. (#124) - limit create file permissions so files aren't read while restoring - Fix issue with empty xattr values (#106) Version 0.13 ~~~~~~~~~~~~ (feature release, released on Jun 29, 2014) - Fix sporadic "Resource temporarily unavailable" when using remote repositories - Reduce file cache memory usage (#90) - Faster AES encryption (utilizing AES-NI when available) - Experimental Linux, OS X and FreeBSD ACL support (#66) - Added support for backup and restore of BSDFlags (OSX, FreeBSD) (#56) - Fix bug where xattrs on symlinks were not correctly restored - Added cachedir support. CACHEDIR.TAG compatible cache directories can now be excluded using ``--exclude-caches`` (#74) - Fix crash on extreme mtime timestamps (year 2400+) (#81) - Fix Python 3.2 specific lockf issue (EDEADLK) Version 0.12 ~~~~~~~~~~~~ (feature release, released on April 7, 2014) - Python 3.4 support (#62) - Various documentation improvements a new style - ``attic mount`` now supports mounting an entire repository not only individual archives (#59) - Added option to restrict remote repository access to specific path(s): ``attic serve --restrict-to-path X`` (#51) - Include "all archives" size information in "--stats" output. (#54) - Added ``--stats`` option to ``attic delete`` and ``attic prune`` - Fixed bug where ``attic prune`` used UTC instead of the local time zone when determining which archives to keep. 
- Switch to SI units (Power of 1000 instead 1024) when printing file sizes Version 0.11 ~~~~~~~~~~~~ (feature release, released on March 7, 2014) - New "check" command for repository consistency checking (#24) - Documentation improvements - Fix exception during "attic create" with repeated files (#39) - New "--exclude-from" option for attic create/extract/verify. - Improved archive metadata deduplication. - "attic verify" has been deprecated. Use "attic extract --dry-run" instead. - "attic prune --hourly|daily|..." has been deprecated. Use "attic prune --keep-hourly|daily|..." instead. - Ignore xattr errors during "extract" if not supported by the filesystem. (#46) Version 0.10 ~~~~~~~~~~~~ (bugfix release, released on Jan 30, 2014) - Fix deadlock when extracting 0 sized files from remote repositories - "--exclude" wildcard patterns are now properly applied to the full path not just the file name part (#5). - Make source code endianness agnostic (#1) Version 0.9 ~~~~~~~~~~~ (feature release, released on Jan 23, 2014) - Remote repository speed and reliability improvements. - Fix sorting of segment names to ignore NFS left over files. (#17) - Fix incorrect display of time (#13) - Improved error handling / reporting. (#12) - Use fcntl() instead of flock() when locking repository/cache. (#15) - Let ssh figure out port/user if not specified so we don't override .ssh/config (#9) - Improved libcrypto path detection (#23). Version 0.8.1 ~~~~~~~~~~~~~ (bugfix release, released on Oct 4, 2013) - Fix segmentation fault issue. Version 0.8 ~~~~~~~~~~~ (feature release, released on Oct 3, 2013) - Fix xattr issue when backing up sshfs filesystems (#4) - Fix issue with excessive index file size (#6) - Support access of read only repositories. - New syntax to enable repository encryption: attic init --encryption="none|passphrase|keyfile". - Detect and abort if repository is older than the cache. Version 0.7 ~~~~~~~~~~~ (feature release, released on Aug 5, 2013) - Ported to FreeBSD - Improved documentation - Experimental: Archives mountable as FUSE filesystems. - The "user." prefix is no longer stripped from xattrs on Linux Version 0.6.1 ~~~~~~~~~~~~~ (bugfix release, released on July 19, 2013) - Fixed an issue where mtime was not always correctly restored. Version 0.6 ~~~~~~~~~~~ First public release on July 9, 2013 borgbackup-1.1.15/docs/index.rst0000644000175000017500000000053613771325506016502 0ustar useruser00000000000000.. include:: global.rst.inc .. highlight:: none Borg Documentation ================== .. include:: ../README.rst .. when you add an element here, do not forget to add it to book.rst .. toctree:: :maxdepth: 2 installation quickstart usage deployment faq support resources changes internals development authors borgbackup-1.1.15/docs/resources.rst0000644000175000017500000000305313771325506017402 0ustar useruser00000000000000.. include:: global.rst.inc .. _resources: Resources ========= This is a collection of additional resources that are somehow related to borgbackup. Videos, Talks, Presentations ---------------------------- Some of them refer to attic, but you can do the same stuff (and more) with borgbackup. 
- `BorgBackup Installation and Basic Usage `_ (english screencast) - `TW's slides for borgbackup talks / lightning talks `_ (just grab the latest ones) - `Attic / Borg Backup talk from GPN 2015 (media.ccc.de) `_ - `Attic / Borg Backup talk from GPN 2015 (youtube) `_ - `Attic talk from Easterhegg 2015 (media.ccc.de) `_ - `Attic talk from Easterhegg 2015 (youtube) `_ - `Attic Backup: Mount your encrypted backups over ssh (youtube) `_ - `Evolution of Borg (youtube) `_ .. _software: Software -------- - `BorgWeb - a very simple web UI for BorgBackup `_ - some other stuff found at the `BorgBackup Github organisation `_ - `borgmatic `_ - simple wrapper script for BorgBackup that creates and prunes backups borgbackup-1.1.15/docs/book.rst0000644000175000017500000000063713771325506016327 0ustar useruser00000000000000:orphan: .. include:: global.rst.inc Borg documentation ================== .. when you add an element here, do not forget to add it to index.rst .. Note: Some things are in appendices (see latex_appendices in conf.py) .. toctree:: :maxdepth: 2 introduction installation quickstart usage deployment faq support resources changes internals development authors borgbackup-1.1.15/docs/usage.rst0000644000175000017500000000301413771325506016471 0ustar useruser00000000000000.. include:: global.rst.inc .. highlight:: none .. _detailed_usage: Usage ===== .. raw:: html Redirecting... .. toctree:: usage/general usage/init usage/create usage/extract usage/check usage/rename usage/list usage/diff usage/delete usage/prune usage/info usage/mount usage/key usage/upgrade usage/recreate usage/tar usage/serve usage/config usage/lock usage/benchmark usage/help usage/debug usage/notes borgbackup-1.1.15/docs/3rd_party/0000755000175000017500000000000013771325773016552 5ustar useruser00000000000000borgbackup-1.1.15/docs/3rd_party/blake2/0000755000175000017500000000000013771325773017712 5ustar useruser00000000000000borgbackup-1.1.15/docs/3rd_party/blake2/COPYING0000644000175000017500000001561113771325506020743 0ustar useruser00000000000000Creative Commons Legal Code CC0 1.0 Universal CREATIVE COMMONS CORPORATION IS NOT A LAW FIRM AND DOES NOT PROVIDE LEGAL SERVICES. DISTRIBUTION OF THIS DOCUMENT DOES NOT CREATE AN ATTORNEY-CLIENT RELATIONSHIP. CREATIVE COMMONS PROVIDES THIS INFORMATION ON AN "AS-IS" BASIS. CREATIVE COMMONS MAKES NO WARRANTIES REGARDING THE USE OF THIS DOCUMENT OR THE INFORMATION OR WORKS PROVIDED HEREUNDER, AND DISCLAIMS LIABILITY FOR DAMAGES RESULTING FROM THE USE OF THIS DOCUMENT OR THE INFORMATION OR WORKS PROVIDED HEREUNDER. Statement of Purpose The laws of most jurisdictions throughout the world automatically confer exclusive Copyright and Related Rights (defined below) upon the creator and subsequent owner(s) (each and all, an "owner") of an original work of authorship and/or a database (each, a "Work"). Certain owners wish to permanently relinquish those rights to a Work for the purpose of contributing to a commons of creative, cultural and scientific works ("Commons") that the public can reliably and without fear of later claims of infringement build upon, modify, incorporate in other works, reuse and redistribute as freely as possible in any form whatsoever and for any purposes, including without limitation commercial purposes. These owners may contribute to the Commons to promote the ideal of a free culture and the further production of creative, cultural and scientific works, or to gain reputation or greater distribution for their Work in part through the use and efforts of others. 
For these and/or other purposes and motivations, and without any expectation of additional consideration or compensation, the person associating CC0 with a Work (the "Affirmer"), to the extent that he or she is an owner of Copyright and Related Rights in the Work, voluntarily elects to apply CC0 to the Work and publicly distribute the Work under its terms, with knowledge of his or her Copyright and Related Rights in the Work and the meaning and intended legal effect of CC0 on those rights. 1. Copyright and Related Rights. A Work made available under CC0 may be protected by copyright and related or neighboring rights ("Copyright and Related Rights"). Copyright and Related Rights include, but are not limited to, the following: i. the right to reproduce, adapt, distribute, perform, display, communicate, and translate a Work; ii. moral rights retained by the original author(s) and/or performer(s); iii. publicity and privacy rights pertaining to a person's image or likeness depicted in a Work; iv. rights protecting against unfair competition in regards to a Work, subject to the limitations in paragraph 4(a), below; v. rights protecting the extraction, dissemination, use and reuse of data in a Work; vi. database rights (such as those arising under Directive 96/9/EC of the European Parliament and of the Council of 11 March 1996 on the legal protection of databases, and under any national implementation thereof, including any amended or successor version of such directive); and vii. other similar, equivalent or corresponding rights throughout the world based on applicable law or treaty, and any national implementations thereof. 2. Waiver. To the greatest extent permitted by, but not in contravention of, applicable law, Affirmer hereby overtly, fully, permanently, irrevocably and unconditionally waives, abandons, and surrenders all of Affirmer's Copyright and Related Rights and associated claims and causes of action, whether now known or unknown (including existing as well as future claims and causes of action), in the Work (i) in all territories worldwide, (ii) for the maximum duration provided by applicable law or treaty (including future time extensions), (iii) in any current or future medium and for any number of copies, and (iv) for any purpose whatsoever, including without limitation commercial, advertising or promotional purposes (the "Waiver"). Affirmer makes the Waiver for the benefit of each member of the public at large and to the detriment of Affirmer's heirs and successors, fully intending that such Waiver shall not be subject to revocation, rescission, cancellation, termination, or any other legal or equitable action to disrupt the quiet enjoyment of the Work by the public as contemplated by Affirmer's express Statement of Purpose. 3. Public License Fallback. Should any part of the Waiver for any reason be judged legally invalid or ineffective under applicable law, then the Waiver shall be preserved to the maximum extent permitted taking into account Affirmer's express Statement of Purpose. 
In addition, to the extent the Waiver is so judged Affirmer hereby grants to each affected person a royalty-free, non transferable, non sublicensable, non exclusive, irrevocable and unconditional license to exercise Affirmer's Copyright and Related Rights in the Work (i) in all territories worldwide, (ii) for the maximum duration provided by applicable law or treaty (including future time extensions), (iii) in any current or future medium and for any number of copies, and (iv) for any purpose whatsoever, including without limitation commercial, advertising or promotional purposes (the "License"). The License shall be deemed effective as of the date CC0 was applied by Affirmer to the Work. Should any part of the License for any reason be judged legally invalid or ineffective under applicable law, such partial invalidity or ineffectiveness shall not invalidate the remainder of the License, and in such case Affirmer hereby affirms that he or she will not (i) exercise any of his or her remaining Copyright and Related Rights in the Work or (ii) assert any associated claims and causes of action with respect to the Work, in either case contrary to Affirmer's express Statement of Purpose. 4. Limitations and Disclaimers. a. No trademark or patent rights held by Affirmer are waived, abandoned, surrendered, licensed or otherwise affected by this document. b. Affirmer offers the Work as-is and makes no representations or warranties of any kind concerning the Work, express, implied, statutory or otherwise, including without limitation warranties of title, merchantability, fitness for a particular purpose, non infringement, or the absence of latent or other defects, accuracy, or the present or absence of errors, whether or not discoverable, all to the greatest extent permissible under applicable law. c. Affirmer disclaims responsibility for clearing rights of other persons that may apply to the Work or any use thereof, including without limitation any person's Copyright and Related Rights in the Work. Further, Affirmer disclaims responsibility for obtaining any necessary consents, permissions or other rights required for any use of the Work. d. Affirmer understands and acknowledges that Creative Commons is not a party to this document and has no duty or obligation with respect to this CC0 or use of the Work. borgbackup-1.1.15/docs/3rd_party/blake2/README.md0000644000175000017500000000071413771325506021165 0ustar useruser00000000000000# BLAKE2 This is the reference source code package of BLAKE2. All code is triple-licensed under the [CC0](http://creativecommons.org/publicdomain/zero/1.0), the [OpenSSL Licence](https://www.openssl.org/source/license.html), or the [Apache Public License 2.0](https://www.apache.org/licenses/LICENSE-2.0), at your choosing. More: [https://blake2.net](https://blake2.net). [GitHub repository](https://github.com/BLAKE2/BLAKE2). Contact: contact@blake2.net borgbackup-1.1.15/docs/3rd_party/README0000644000175000017500000000032713771325506017426 0ustar useruser00000000000000Here we store 3rd party documentation, licenses, etc. Please note that all files inside the "borg" package directory (except the stuff excluded in setup.py) will be INSTALLED, so don't keep docs or licenses there. 
borgbackup-1.1.15/docs/3rd_party/zstd/0000755000175000017500000000000013771325773017536 5ustar useruser00000000000000borgbackup-1.1.15/docs/3rd_party/zstd/LICENSE0000644000175000017500000000277213771325506020545 0ustar useruser00000000000000BSD License For Zstandard software Copyright (c) 2016-present, Facebook, Inc. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name Facebook nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. borgbackup-1.1.15/docs/3rd_party/lz4/0000755000175000017500000000000013771325773017263 5ustar useruser00000000000000borgbackup-1.1.15/docs/3rd_party/lz4/LICENSE0000644000175000017500000000243713771325506020270 0ustar useruser00000000000000LZ4 Library Copyright (c) 2011-2016, Yann Collet All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
borgbackup-1.1.15/docs/3rd_party/msgpack/0000755000175000017500000000000013771325773020177 5ustar useruser00000000000000borgbackup-1.1.15/docs/3rd_party/msgpack/COPYING0000644000175000017500000000114613771325506021226 0ustar useruser00000000000000Copyright (C) 2008-2011 INADA Naoki Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. borgbackup-1.1.15/docs/authors.rst0000644000175000017500000000021313771325506017050 0ustar useruser00000000000000.. include:: global.rst.inc Authors ======= .. include:: ../AUTHORS License ======= .. _license: .. include:: ../LICENSE :literal: borgbackup-1.1.15/docs/deployment.rst0000644000175000017500000000051113771325506017544 0ustar useruser00000000000000.. include:: global.rst.inc .. highlight:: none Deployment ========== This chapter details deployment strategies for the following scenarios. .. toctree:: :titlesonly: deployment/central-backup-server deployment/hosting-repositories deployment/automated-local deployment/image-backup deployment/pull-backup borgbackup-1.1.15/docs/conf.py0000644000175000017500000002023513771325506016136 0ustar useruser00000000000000# -*- coding: utf-8 -*- # # documentation build configuration file, created by # sphinx-quickstart on Sat Sep 10 18:18:25 2011. # # This file is execfile()d with the current directory set to its containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. import sys, os sys.path.insert(0, os.path.abspath('../src')) from borg import __version__ as sw_version # -- General configuration ----------------------------------------------------- # If your documentation needs a minimal Sphinx version, state it here. #needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be extensions # coming with Sphinx (named 'sphinx.ext.*') or your custom ones. extensions = [] # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix of source filenames. source_suffix = '.rst' # The encoding of source files. #source_encoding = 'utf-8-sig' # The master toctree document. master_doc = 'index' # General information about the project. project = 'Borg - Deduplicating Archiver' copyright = u'2010-2014 Jonas Borgström, 2015-2020 The Borg Collective (see AUTHORS file)' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. split_char = '+' if '+' in sw_version else '-' version = sw_version.split(split_char)[0] # The full version, including alpha/beta/rc tags. 
release = version suppress_warnings = ['image.nonlocal_uri'] # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. #language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: #today = '' # Else, today_fmt is used as the format for a strftime call. today_fmt = '%Y-%m-%d' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. exclude_patterns = ['_build'] # The reST default role (used for this markup: `text`) to use for all documents. #default_role = None # The Borg docs contain no or very little Python docs. # Thus, the primary domain is rst. primary_domain = 'rst' # If true, '()' will be appended to :func: etc. cross-reference text. #add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). #add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. #show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # A list of ignored prefixes for module index sorting. #modindex_common_prefix = [] # -- Options for HTML output --------------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. import guzzle_sphinx_theme html_theme_path = guzzle_sphinx_theme.html_theme_path() html_theme = 'guzzle_sphinx_theme' def set_rst_settings(app): app.env.settings.update({ 'field_name_limit': 0, 'option_limit': 0, }) def setup(app): app.add_stylesheet('css/borg.css') app.connect('builder-inited', set_rst_settings) # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. html_theme_options = { 'project_nav_name': 'Borg %s' % version, } # Add any paths that contain custom themes here, relative to this directory. #html_theme_path = ['_themes'] # The name for this set of Sphinx documents. If None, it defaults to # " v documentation". #html_title = None # A shorter title for the navigation bar. Default is the same as html_title. #html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. html_logo = '_static/logo.svg' # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. html_favicon = '_static/favicon.ico' # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['borg_theme'] html_extra_path = ['../src/borg/paperkey.html'] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. html_last_updated_fmt = '%Y-%m-%d' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. html_use_smartypants = True # Custom sidebar templates, maps document names to template names. html_sidebars = { '**': ['logo-text.html', 'searchbox.html', 'globaltoc.html'], } # Additional templates that should be rendered to pages, maps page names to # template names. 
#html_additional_pages = {} # If false, no module index is generated. #html_domain_indices = True # If false, no index is generated. html_use_index = False # If true, the index is split into individual pages for each letter. #html_split_index = False # If true, links to the reST sources are added to the pages. html_show_sourcelink = False # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. html_show_sphinx = False # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. html_show_copyright = False # If true, an OpenSearch description file will be output, and all pages will # contain a tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. #html_use_opensearch = '' # This is the file name suffix for HTML files (e.g. ".xhtml"). #html_file_suffix = None # Output file base name for HTML help builder. htmlhelp_basename = 'borgdoc' # -- Options for LaTeX output -------------------------------------------------- # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, author, documentclass [howto/manual]). latex_documents = [ ('book', 'Borg.tex', 'Borg Documentation', 'The Borg Collective', 'manual'), ] # The name of an image file (relative to this directory) to place at the top of # the title page. latex_logo = '_static/logo.pdf' latex_elements = { 'papersize': 'a4paper', 'pointsize': '10pt', 'figure_align': 'H', } # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. #latex_use_parts = False # If true, show page references after internal links. #latex_show_pagerefs = False # If true, show URL addresses after external links. latex_show_urls = 'footnote' # Additional stuff for the LaTeX preamble. #latex_preamble = '' # Documents to append as an appendix to all manuals. latex_appendices = [ 'support', 'resources', 'changes', 'authors', ] # If false, no module index is generated. #latex_domain_indices = True # -- Options for manual page output -------------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [ ('usage', 'borg', 'BorgBackup is a deduplicating backup program with optional compression and authenticated encryption.', ['The Borg Collective (see AUTHORS file)'], 1), ] extensions = [ 'sphinx.ext.extlinks', 'sphinx.ext.autodoc', 'sphinx.ext.todo', 'sphinx.ext.coverage', 'sphinx.ext.viewcode', ] extlinks = { 'issue': ('https://github.com/borgbackup/borg/issues/%s', '#'), 'targz_url': ('https://pypi.python.org/packages/source/b/borgbackup/%%s-%s.tar.gz' % version, None), } borgbackup-1.1.15/docs/quickstart.rst0000644000175000017500000005251113771325506017565 0ustar useruser00000000000000.. include:: global.rst.inc .. highlight:: bash .. _quickstart: Quick Start =========== This chapter will get you started with |project_name| and covers various use cases. A step by step example ---------------------- .. include:: quickstart_example.rst.inc Archives and repositories ------------------------- A *Borg archive* is the result of a single backup (``borg create``). An archive stores a snapshot of the data of the files "inside" it. One can later extract or mount an archive to restore from a backup. *Repositories* are filesystem directories acting as self-contained stores of archives. Repositories can be accessed locally via path or remotely via ssh. 
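For example, the same commands work no matter where the repository lives; only the
repository location changes. The host, user, paths and archive name below are
placeholders, not defaults, and mounting requires the optional FUSE support::

    # list the archives of a repository on a local path
    $ borg list /path/to/repo

    # same, but with the repository on a remote server, accessed via ssh
    $ borg list ssh://borg@backup.example.org/path/to/repo

    # mount an archive to examine or restore files interactively, then unmount it
    $ borg mount /path/to/repo::my-archive /mnt/borg
    $ borg umount /mnt/borg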
Under the hood, repositories contain data blocks and a manifest tracking which
blocks are in each archive. If some data hasn't changed from one backup to
another, Borg can simply reference an already uploaded data chunk
(deduplication).

Important note about free space
-------------------------------

Before you start creating backups, please make sure that there is *always*
a good amount of free space on the filesystem that has your backup repository
(and also on ~/.cache). A few GB should suffice for most hard-drive sized
repositories. See also :ref:`cache-memory-usage`.

Borg doesn't use space reserved for root on repository disks (even when run
as root). On file systems which do not support this mechanism (e.g. XFS), we
recommend reserving some space in Borg itself just to be safe, by adjusting
the ``additional_free_space`` setting (a good starting point is ``2G``)::

    borg config /path/to/repo additional_free_space 2G

If |project_name| runs out of disk space, it tries to free as much space as it
can while aborting the current operation safely, which allows you to free more
space by deleting/pruning archives. This mechanism is not bullet-proof in some
circumstances [1]_.

If you *really* run out of disk space, it can be hard or impossible to free
space, because |project_name| needs free space to operate - even to delete
backup archives.

You can use some monitoring process or just include the free space information
in your backup log files (you check them regularly anyway, right?).

Also helpful:

- create a big file as a "space reserve", that you can delete to free space
- if you use LVM: use a LV + a filesystem that you can resize later and have
  some unallocated PEs you can add to the LV.
- consider using quotas
- use `prune` regularly

.. [1] This failsafe can fail in these circumstances:

    - The underlying file system doesn't support statvfs(2), or returns incorrect
      data, or the repository doesn't reside on a single file system
    - Other tasks fill the disk simultaneously
    - Hard quotas (which may not be reflected in statvfs(2))

Important note about permissions
--------------------------------

Using root will likely be required if you want to back up files of other users
or the operating system. If you only back up your own files, you neither need
nor want to use root.

Avoid creating a mixup of users and permissions in your repository (or cache).
This can easily happen if you run borg using different user accounts (e.g. your
non-privileged user and root) while accessing the same repo.

Of course, a non-root user will have no permission to work with the files
created by root (or another user) and borg operations will just fail with
`Permission denied`.

The easy way to avoid this is to always access the repo as the same user:

For a local repository, just always invoke borg as the same user.

For a remote repository: always use e.g. borg@remote_host. You can use this
from different local users; the remote user accessing the repo will always be
borg.

If you need to access a local repository from different users, you can use the
same method by using ssh to borg@localhost.

Important note about files changing during the backup process
--------------------------------------------------------------

Borg does not do anything about the internal consistency of the data
it backs up. It just reads and backs up each file in whatever state that
file is when Borg gets to it.
On an active system, this can lead to two kinds of inconsistency: - By the time Borg backs up a file, it might have changed since the backup process was initiated - A file could change while Borg is backing it up, making the file internally inconsistent If you have a set of files and want to ensure that they are backed up in a specific or consistent state, you must take steps to prevent changes to those files during the backup process. There are a few common techniques to achieve this. - Avoid running any programs that might change the files. - Snapshot files, filesystems, container storage volumes, or logical volumes. LVM or ZFS might be useful here. - Dump databases or stop the database servers. - Shut down virtual machines before backing up their images. - Shut down containers before backing up their storage volumes. For some systems Borg might work well enough without these precautions. If you are simply backing up the files on a system that isn't very active (e.g. in a typical home directory), Borg usually works well enough without further care for consistency. Log files and caches might not be in a perfect state, but this is rarely a problem. For databases, virtual machines, and containers, there are specific techniques for backing them up that do not simply use Borg to backup the underlying filesystem. For databases, check your database documentation for techniques that will save the database state between transactions. For virtual machines, consider running the backup on the VM itself or mounting the filesystem while the VM is shut down. For Docker containers, perhaps docker's "save" command can help. Automating backups ------------------ The following example script is meant to be run daily by the ``root`` user on different local machines. It backs up a machine's important files (but not the complete operating system) to a repository ``~/backup/main`` on a remote server. Some files which aren't necessarily needed in this backup are excluded. See :ref:`borg_patterns` on how to add more exclude options. After the backup this script also uses the :ref:`borg_prune` subcommand to keep only a certain number of old archives and deletes the others in order to preserve disk space. Before running, make sure that the repository is initialized as documented in :ref:`remote_repos` and that the script has the correct permissions to be executable by the root user, but not executable or readable by anyone else, i.e. root:root 0700. You can use this script as a starting point and modify it where it's necessary to fit your setup. Do not forget to test your created backups to make sure everything you need is being backed up and that the ``prune`` command is keeping and deleting the correct backups. .. note:: Please see the :ref:`software` section for related tooling for automating backups. :: #!/bin/sh # Setting this, so the repo does not need to be given on the commandline: export BORG_REPO=ssh://username@example.com:2022/~/backup/main # See the section "Passphrase notes" for more infos. 
    export BORG_PASSPHRASE='XYZl0ngandsecurepa_55_phrasea&&123'

    # some helpers and error handling:
    info() { printf "\n%s %s\n\n" "$( date )" "$*" >&2; }
    trap 'echo $( date ) Backup interrupted >&2; exit 2' INT TERM

    info "Starting backup"

    # Back up the most important directories into an archive named after
    # the machine this script is currently running on:

    borg create                         \
        --verbose                       \
        --filter AME                    \
        --list                          \
        --stats                         \
        --show-rc                       \
        --compression lz4               \
        --exclude-caches                \
        --exclude '/home/*/.cache/*'    \
        --exclude '/var/cache/*'        \
        --exclude '/var/tmp/*'          \
                                        \
        ::'{hostname}-{now}'            \
        /etc                            \
        /home                           \
        /root                           \
        /var                            \

    backup_exit=$?

    info "Pruning repository"

    # Use the `prune` subcommand to maintain 7 daily, 4 weekly and 6 monthly
    # archives of THIS machine. The '{hostname}-' prefix is very important to
    # limit prune's operation to this machine's archives and not apply to
    # other machines' archives also:

    borg prune                          \
        --list                          \
        --prefix '{hostname}-'          \
        --show-rc                       \
        --keep-daily    7               \
        --keep-weekly   4               \
        --keep-monthly  6               \

    prune_exit=$?

    # use highest exit code as global exit code
    global_exit=$(( backup_exit > prune_exit ? backup_exit : prune_exit ))

    if [ ${global_exit} -eq 0 ]; then
        info "Backup and Prune finished successfully"
    elif [ ${global_exit} -eq 1 ]; then
        info "Backup and/or Prune finished with warnings"
    else
        info "Backup and/or Prune finished with errors"
    fi

    exit ${global_exit}

Pitfalls with shell variables and environment variables
--------------------------------------------------------

This applies to all environment variables you want borg to see, not just
``BORG_PASSPHRASE``. The short explanation is: always ``export`` your
variable, and use single quotes if you're unsure of the details of your
shell's expansion behavior. E.g.::

    export BORG_PASSPHRASE='complicated & long'

This is because ``export`` exposes variables to subprocesses, which borg may
be one of. More on ``export`` can be found in the "ENVIRONMENT" section of
the bash(1) man page.

Beware of how ``sudo`` interacts with environment variables. For example,
you may be surprised that the following ``export`` has no effect on your
command::

    export BORG_PASSPHRASE='complicated & long'
    sudo ./yourborgwrapper.sh  # still prompts for password

For more information, refer to the sudo(8) man page and ``env_keep`` in
the sudoers(5) man page.

.. Tip::
    To debug what your borg process is actually seeing, find its PID
    (``ps aux|grep borg``) and then look into ``/proc/<pid>/environ``.

.. _passphrase_notes:

Passphrase notes
----------------

If you use encryption (or authentication), Borg will interactively ask you
for a passphrase to encrypt/decrypt the keyfile / repokey.

A passphrase should be a single line of text; a trailing linefeed will be
stripped.

For your own safety, you may want to avoid empty passphrases as well as
extremely long passphrases (much more than 256 bits of entropy). Also avoid
passphrases containing non-ASCII characters. Borg is technically able to
process all unicode text, but you might get into trouble reproducing the
same encoded utf-8 bytes or with keyboard layouts, so better just avoid
non-ASCII stuff.

If you want to automate, you can alternatively supply the passphrase
directly or indirectly using some environment variables.
You can directly give a passphrase:: # use this passphrase (use safe permissions on the script!): export BORG_PASSPHRASE='my super secret passphrase' Or ask an external program to supply the passphrase:: # use the "pass" password manager to get the passphrase: export BORG_PASSCOMMAND='pass show backup' # use GPG to get the passphrase contained in a gpg-encrypted file: export BORG_PASSCOMMAND='gpg --decrypt borg-passphrase.gpg' Or read the passphrase from an open file descriptor:: export BORG_PASSPHRASE_FD=42 Using hardware crypto devices (like Nitrokey, Yubikey and others) is not directly supported by borg, but you can use these indirectly. E.g. if your crypto device supports GPG and borg calls ``gpg`` via ``BORG_PASSCOMMAND``, it should just work. .. backup_compression: Backup compression ------------------ The default is lz4 (very fast, but low compression ratio), but other methods are supported for different situations. You can use zstd for a wide range from high speed (and relatively low compression) using N=1 to high compression (and lower speed) using N=22. zstd is a modern compression algorithm and might be preferable over zlib and lzma, except if you need compatibility to older borg versions (< 1.1.4) that did not yet offer zstd.:: $ borg create --compression zstd,N /path/to/repo::arch ~ Other options are: If you have a fast repo storage and you want minimum CPU usage, no compression:: $ borg create --compression none /path/to/repo::arch ~ If you have a less fast repo storage and you want a bit more compression (N=0..9, 0 means no compression, 9 means high compression): :: $ borg create --compression zlib,N /path/to/repo::arch ~ If you have a very slow repo storage and you want high compression (N=0..9, 0 means low compression, 9 means high compression): :: $ borg create --compression lzma,N /path/to/repo::arch ~ You'll need to experiment a bit to find the best compression for your use case. Keep an eye on CPU load and throughput. .. _encrypted_repos: Repository encryption --------------------- You can choose the repository encryption mode at repository creation time:: $ borg init --encryption=MODE PATH For a list of available encryption MODEs and their descriptions, please refer to :ref:`borg_init`. If you use encryption, all data is encrypted on the client before being written to the repository. This means that an attacker who manages to compromise the host containing an encrypted repository will not be able to access any of the data, even while the backup is being made. Key material is stored in encrypted form and can be only decrypted by providing the correct passphrase. For automated backups the passphrase can be specified using the `BORG_PASSPHRASE` environment variable. .. note:: Be careful about how you set that environment, see :ref:`this note about password environments ` for more information. .. warning:: The repository data is totally inaccessible without the key and the key passphrase. Make a backup copy of the key file (``keyfile`` mode) or repo config file (``repokey`` mode) and keep it at a safe place, so you still have the key in case it gets corrupted or lost. Also keep your passphrase at a safe place. You can make backups using :ref:`borg_key_export` subcommand. If you want to print a backup of your key to paper use the ``--paper`` option of this command and print the result, or this print `template`_ if you need a version with QR-Code. A backup inside of the backup that is encrypted with that key/passphrase won't help you with that, of course. .. 
_template: paperkey.html .. _remote_repos: Remote repositories ------------------- |project_name| can initialize and access repositories on remote hosts if the host is accessible using SSH. This is fastest and easiest when |project_name| is installed on the remote host, in which case the following syntax is used:: $ borg init user@hostname:/path/to/repo Note: please see the usage chapter for a full documentation of repo URLs. Remote operations over SSH can be automated with SSH keys. You can restrict the use of the SSH keypair by prepending a forced command to the SSH public key in the remote server's `authorized_keys` file. This example will start |project_name| in server mode and limit it to a specific filesystem path:: command="borg serve --restrict-to-path /path/to/repo",restrict ssh-rsa AAAAB3[...] If it is not possible to install |project_name| on the remote host, it is still possible to use the remote host to store a repository by mounting the remote filesystem, for example, using sshfs:: $ sshfs user@hostname:/path/to /path/to $ borg init /path/to/repo $ fusermount -u /path/to You can also use other remote filesystems in a similar way. Just be careful, not all filesystems out there are really stable and working good enough to be acceptable for backup usage. Restoring a backup ------------------ Please note that we are only describing the most basic commands and options here - please refer to the command reference to see more. For restoring, you usually want to work **on the same machine as the same user** that was also used to create the backups of the wanted files. Doing it like that avoids quite some issues: - no confusion relating to paths - same mapping of user/group names to user/group IDs - no permission issues - you likely already have a working borg setup there, - maybe including a environment variable for the key passphrase (for encrypted repos), - maybe including a keyfile for the repo (not needed for repokey mode), - maybe including a ssh key for the repo server (not needed for locally mounted repos), - maybe including a valid borg cache for that repo (quicker than cache rebuild). The **user** might be: - root (if full backups, backups including system stuff or multiple users' files were made) - some specific user using sudo to execute borg as root - some specific user (if backups of that user's files were made) A borg **backup repository** can be either: - in a local directory (like e.g. a locally mounted USB disk) - on a remote backup server machine that is reachable via ssh (client/server) If the repository is encrypted, you will also need the **key** and the **passphrase** (which is protecting the key). The **key** can be located: - in the repository (**repokey** mode). Easy, this will usually "just work". - in the home directory of the user who did the backup (**keyfile** mode). This may cause a bit more effort: - if you have just lost that home directory and you first need to restore the borg key (e.g. from the separate backup you have made of it or from another user or machine accessing the same repository). - if you first must find out the correct machine / user / home directory (where the borg client was run to make the backups). The **passphrase** for the key has been either: - entered interactively at backup time (not practical if backup is automated / unattended). - acquired via some environment variable driven mechanism in the backup script (look there for BORG_PASSPHRASE, BORG_PASSCOMMAND, etc. and just do it like that). 
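
Putting the above together, preparing for a restore on a (possibly freshly
installed) machine might look roughly like this - a sketch only, where the
repository path, key file location and passphrase mechanism are placeholders
you need to adapt to your own setup::

    # keyfile mode only: put the borg key back in place first, e.g. from the
    # separate backup you made of it (not needed for repokey mode):
    borg key import /path/to/repo /path/to/backup-of-borg.key

    # supply the passphrase the same way your backup script did, e.g.:
    export BORG_PASSCOMMAND='pass show backup'   # or: export BORG_PASSPHRASE='...'

    # verify that the repo is accessible and see which archives it contains:
    borg list /path/to/repo
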
There are **2 ways to restore** files from a borg backup repository: - **borg mount** - use this if: - you don't precisely know what files you want to restore - you don't know which archive contains the files (in the state) you want - you need to look into files / directories before deciding what you want - you need a relatively low volume of data restored - you don't care for restoring stuff that the FUSE mount is not implementing yet (like special fs flags, ACLs) - you have a client with good resources (RAM, CPU, temp. disk space) - you want to rather use some filemanager to restore (copy) files than borg extract shell commands - **borg extract** - use this if: - you precisely know what you want (repo, archive, path) - you need a high volume of files restored (best speed) - you want a as-complete-as-it-gets reproduction of file metadata (like special fs flags, ACLs) - you have a client with low resources (RAM, CPU, temp. disk space) Example with **borg mount**: :: # open a new, separate terminal (this terminal will be blocked until umount) # now we find out the archive names we have in the repo: borg list /mnt/backup/borg_repo # mount one archive from a borg repo: borg mount /mnt/backup/borg_repo::myserver-system-2019-08-11 /mnt/borg # alternatively, mount all archives from a borg repo (slower): borg mount /mnt/backup/borg_repo /mnt/borg # it may take a while until you will see stuff in /mnt/borg. # now use another terminal or file browser and look into /mnt/borg. # when finished, umount to unlock the repo and unblock the terminal: borg umount /mnt/borg Example with **borg extract**: :: # borg extract always extracts into current directory and that directory # should be empty (borg does not support transforming a non-empty dir to # the state as present in your backup archive). mkdir borg_restore cd borg_restore # now we find out the archive names we have in the repo: borg list /mnt/backup/borg_repo # we could find out the archive contents, esp. the path layout: borg list /mnt/backup/borg_repo::myserver-system-2019-08-11 # we extract only some specific path (note: no leading / !): borg extract /mnt/backup/borg_repo::myserver-system-2019-08-11 path/to/extract # alternatively, we could fully extract the archive: borg extract /mnt/backup/borg_repo::myserver-system-2019-08-11 # now move the files to the correct place... Difference when using a **remote borg backup server**: It is basically all the same as with the local repository, but you need to refer to the repo using a ``ssh://`` URL. In the given example, ``borg`` is the user name used to log into the machine ``backup.example.org`` which runs ssh on port ``2222`` and has the borg repo in ``/path/to/repo``. Instead of giving a FQDN or a hostname, you can also give an IP address. As usual, you either need a password to log in or the backup server might have authentication set up via ssh ``authorized_keys`` (which is likely the case if unattended, automated backups were done). :: borg mount ssh://borg@backup.example.org:2222/path/to/repo /mnt/borg # or borg extract ssh://borg@backup.example.org:2222/path/to/repo borgbackup-1.1.15/docs/faq.rst0000644000175000017500000014515013771325506016144 0ustar useruser00000000000000.. include:: global.rst.inc .. highlight:: none .. _faq: Frequently asked questions ========================== Usage & Limitations ################### What is the difference between a repo on an external hard drive vs. repo on a server? 
------------------------------------------------------------------------------------- If Borg is running in client/server mode, the client uses SSH as a transport to talk to the remote agent, which is another Borg process (Borg is installed on the server, too). The Borg server is doing storage-related low-level repo operations (get, put, commit, check, compact), while the Borg client does the high-level stuff: deduplication, encryption, compression, dealing with archives, backups, restores, etc., which reduces the amount of data that goes over the network. When Borg is writing to a repo on a locally mounted remote file system, e.g. SSHFS, the Borg client only can do file system operations and has no agent running on the remote side, so *every* operation needs to go over the network, which is slower. Can I backup from multiple servers into a single repository? ------------------------------------------------------------ Yes, this is *possible* from the technical standpoint, but it is *not recommended* from the security perspective. |project_name| is built upon a defined :ref:`attack_model` that cannot provide its guarantees for multiple clients using the same repository. See :ref:`borg_security_critique` for a detailed explanation. Also, in order for the deduplication used by |project_name| to work, it needs to keep a local cache containing checksums of all file chunks already stored in the repository. This cache is stored in ``~/.cache/borg/``. If |project_name| detects that a repository has been modified since the local cache was updated it will need to rebuild the cache. This rebuild can be quite time consuming. So, yes it's possible. But it will be most efficient if a single repository is only modified from one place. Also keep in mind that |project_name| will keep an exclusive lock on the repository while creating or deleting archives, which may make *simultaneous* backups fail. Can I copy or synchronize my repo to another location? ------------------------------------------------------ If you want to have redundant backup repositories (preferably at separate locations), the recommended way to do that is like this: - ``borg init repo1`` - ``borg init repo2`` - client machine ---borg create---> repo1 - client machine ---borg create---> repo2 This will create distinct repositories (separate repo ID, separate keys) and nothing bad happening in repo1 will influence repo2. Some people decide against above recommendation and create identical copies of a repo (using some copy / sync / clone tool). While this might be better than having no redundancy at all, you have to be very careful about how you do that and what you may / must not do with the result (if you decide against our recommendation). What you would get with this is: - client machine ---borg create---> repo - repo ---copy/sync---> copy-of-repo There is no special borg command to do the copying, you could just use any reliable tool that creates an identical copy (cp, rsync, rclone might be options). But think about whether that is really what you want. If something goes wrong in repo, you will have the same issue in copy-of-repo. Make sure you do the copy/sync while no backup is running, see :ref:`borg_with-lock` about how to do that. Also, you must not run borg against multiple instances of the same repo (like repo and copy-of-repo) as that would create severe issues: - Data loss: they have the same repository ID, so the borg client will think they are identical and e.g. 
use the same local cache for them (which is an issue if they happen to be not the same). See :issue:`4272` for an example. - Encryption security issues if you would update repo and copy-of-repo independently, due to AES counter reuse. There is also a similar encryption security issue for the disaster case: If you lose repo and the borg client-side config/cache and you restore the repo from an older copy-of-repo, you also run into AES counter reuse. Which file types, attributes, etc. are *not* preserved? ------------------------------------------------------- * UNIX domain sockets (because it does not make sense - they are meaningless without the running process that created them and the process needs to recreate them in any case). So, don't panic if your backup misses a UDS! * The precise on-disk (or rather: not-on-disk) representation of the holes in a sparse file. Archive creation has no special support for sparse files, holes are backed up as (deduplicated and compressed) runs of zero bytes. Archive extraction has optional support to extract all-zero chunks as holes in a sparse file. * Some filesystem specific attributes, like btrfs NOCOW, see :ref:`platforms`. * For hardlinked symlinks, the hardlinking can not be archived (and thus, the hardlinking will not be done at extraction time). The symlinks will be archived and extracted as non-hardlinked symlinks, see :issue:`2379`. Are there other known limitations? ---------------------------------- - A single archive can only reference a limited volume of file/dir metadata, usually corresponding to tens or hundreds of millions of files/dirs. When trying to go beyond that limit, you will get a fatal IntegrityError exception telling that the (archive) object is too big. An easy workaround is to create multiple archives with fewer items each. See also the :ref:`archive_limitation` and :issue:`1452`. :ref:`borg_info` shows how large (relative to the maximum size) existing archives are. - borg extract only supports restoring into an empty destination. After that, the destination will exactly have the contents of the extracted archive. If you extract into a non-empty destination, borg will (for example) not remove files which are in the destination, but not in the archive. See :issue:`4598` for a workaround and more details. .. _checkpoints_parts: If a backup stops mid-way, does the already-backed-up data stay there? ---------------------------------------------------------------------- Yes, |project_name| supports resuming backups. During a backup a special checkpoint archive named ``.checkpoint`` is saved every checkpoint interval (the default value for this is 30 minutes) containing all the data backed-up until that point. This checkpoint archive is a valid archive, but it is only a partial backup (not all files that you wanted to backup are contained in it). Having it in the repo until a successful, full backup is completed is useful because it references all the transmitted chunks up to the checkpoint. This means that in case of an interruption, you only need to retransfer the data since the last checkpoint. If a backup was interrupted, you normally do not need to do anything special, just invoke ``borg create`` as you always do. If the repository is still locked, you may need to run ``borg break-lock`` before the next backup. You may use the same archive name as in previous attempt or a different one (e.g. if you always include the current datetime), it does not matter. 
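
As a small illustration (repository path and archive names are made up), the
sequence after an interruption could look like this::

    # the interrupted run left e.g. "myhost-2020-12-25.checkpoint" behind,
    # so just run the same create command again:
    borg create /path/to/repo::myhost-2020-12-25 ~
    # after a backup completed successfully, checkpoints can be removed
    # manually (borg prune would also clean them up):
    borg delete /path/to/repo::myhost-2020-12-25.checkpoint
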
|project_name| always does full single-pass backups, so it will start again from the beginning - but it will be much faster, because some of the data was already stored into the repo (and is still referenced by the checkpoint archive), so it does not need to get transmitted and stored again. Once your backup has finished successfully, you can delete all ``.checkpoint`` archives. If you run ``borg prune``, it will also care for deleting unneeded checkpoints. Note: the checkpointing mechanism creates hidden, partial files in an archive, so that checkpoints even work while a big file is being processed. They are named ``.borg_part_`` and all operations usually ignore these files, but you can make them considered by giving the option ``--consider-part-files``. You usually only need that option if you are really desperate (e.g. if you have no completed backup of that file and you'ld rather get a partial file extracted than nothing). You do **not** want to give that option under any normal circumstances. Note that checkpoints inside files are created only since version 1.1, make sure you have an up-to-date version of borgbackup if you want to continue instead of retransferring a huge file. In some cases, there is only an outdated version shipped with your distribution (e.g. Debian). See :ref:`installation`. How can I backup huge file(s) over a unstable connection? --------------------------------------------------------- This is not a problem anymore. For more details, see :ref:`checkpoints_parts`. How can I switch append-only mode on and off? ----------------------------------------------------------------------------------------------------------------------------------- You could do that (via borg config REPO append_only 0/1), but using different ssh keys and different entries in ``authorized_keys`` is much easier and also maybe has less potential of things going wrong somehow. My machine goes to sleep causing `Broken pipe` ---------------------------------------------- When backing up your data over the network, your machine should not go to sleep. On macOS you can use `caffeinate` to avoid that. How can I restore huge file(s) over an unstable connection? ----------------------------------------------------------- If you cannot manage to extract the whole big file in one go, you can extract all the part files and manually concatenate them together. For more details, see :ref:`checkpoints_parts`. Can |project_name| add redundancy to the backup data to deal with hardware malfunction? --------------------------------------------------------------------------------------- No, it can't. While that at first sounds like a good idea to defend against some defect HDD sectors or SSD flash blocks, dealing with this in a reliable way needs a lot of low-level storage layout information and control which we do not have (and also can't get, even if we wanted). So, if you need that, consider RAID or a filesystem that offers redundant storage or just make backups to different locations / different hardware. See also :issue:`225`. Can |project_name| verify data integrity of a backup archive? ------------------------------------------------------------- Yes, if you want to detect accidental data damage (like bit rot), use the ``check`` operation. It will notice corruption using CRCs and hashes. If you want to be able to detect malicious tampering also, use an encrypted repo. It will then be able to check using CRCs and HMACs. Can I use Borg on SMR hard drives? 
---------------------------------- SMR (shingled magnetic recording) hard drives are very different from regular hard drives. Applications have to behave in certain ways or performance will be heavily degraded. Borg 1.1 ships with default settings suitable for SMR drives, and has been successfully tested on *Seagate Archive v2* drives using the ext4 file system. Some Linux kernel versions between 3.19 and 4.5 had various bugs handling device-managed SMR drives, leading to IO errors, unresponsive drives and unreliable operation in general. For more details, refer to :issue:`2252`. .. _faq-integrityerror: I get an IntegrityError or similar - what now? ---------------------------------------------- A single error does not necessarily indicate bad hardware or a Borg bug. All hardware exhibits a bit error rate (BER). Hard drives are typically specified as exhibiting fewer than one error every 12 to 120 TB (one bit error in 10e14 to 10e15 bits). The specification is often called *unrecoverable read error rate* (URE rate). Apart from these very rare errors there are two main causes of errors: (i) Defective hardware: described below. (ii) Bugs in software (Borg, operating system, libraries): Ensure software is up to date. Check whether the issue is caused by any fixed bugs described in :ref:`important_notes`. .. rubric:: Finding defective hardware .. note:: Hardware diagnostics are operating system dependent and do not apply universally. The commands shown apply for popular Unix-like systems. Refer to your operating system's manual. Checking hard drives Find the drive containing the repository and use *findmnt*, *mount* or *lsblk* to learn the device path (typically */dev/...*) of the drive. Then, smartmontools can retrieve self-diagnostics of the drive in question:: # smartctl -a /dev/sdSomething The *Offline_Uncorrectable*, *Current_Pending_Sector* and *Reported_Uncorrect* attributes indicate data corruption. A high *UDMA_CRC_Error_Count* usually indicates a bad cable. I/O errors logged by the system (refer to the system journal or dmesg) can point to issues as well. I/O errors only affecting the file system easily go unnoticed, since they are not reported to applications (e.g. Borg), while these errors can still corrupt data. Drives can corrupt some sectors in one event, while remaining reliable otherwise. Conversely, drives can fail completely with no advance warning. If in doubt, copy all data from the drive in question to another drive -- just in case it fails completely. If any of these are suspicious, a self-test is recommended:: # smartctl -t long /dev/sdSomething Running ``fsck`` if not done already might yield further insights. Checking memory Intermittent issues, such as ``borg check`` finding errors inconsistently between runs, are frequently caused by bad memory. Run memtest86+ (or an equivalent memory tester) to verify that the memory subsystem is operating correctly. Checking processors Processors rarely cause errors. If they do, they are usually overclocked or otherwise operated outside their specifications. We do not recommend to operate hardware outside its specifications for productive use. Tools to verify correct processor operation include Prime95 (mprime), linpack, and the `Intel Processor Diagnostic Tool `_ (applies only to Intel processors). .. rubric:: Repairing a damaged repository With any defective hardware found and replaced, the damage done to the repository needs to be ascertained and fixed. 
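
Sketched as commands (the repository path is a placeholder), the sequence
recommended below is: first assess, then repair::

    # first pass: read-only, only reports problems
    borg check -v /path/to/repo
    # only if the reported issues and proposed repairs look right:
    borg check -v --repair /path/to/repo
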
:ref:`borg_check` provides diagnostics and ``--repair`` options for repositories with issues. We recommend to first run without ``--repair`` to assess the situation. If the found issues and proposed repairs seem right, re-run "check" with ``--repair`` enabled. Why is the time elapsed in the archive stats different from wall clock time? ---------------------------------------------------------------------------- Borg needs to write the time elapsed into the archive metadata before finalizing the archive, compacting the segments, and committing the repo & cache. This means when Borg is run with e.g. the ``time`` command, the duration shown in the archive stats may be shorter than the full time the command runs for. How do I configure different prune policies for different directories? ---------------------------------------------------------------------- Say you want to prune ``/var/log`` faster than the rest of ``/``. How do we implement that? The answer is to backup to different archive *names* and then implement different prune policies for different prefixes. For example, you could have a script that does:: borg create --exclude /var/log $REPOSITORY:main-$(date +%Y-%m-%d) / borg create $REPOSITORY:logs-$(date +%Y-%m-%d) /var/log Then you would have two different prune calls with different policies:: borg prune --verbose --list -d 30 --prefix main- "$REPOSITORY" borg prune --verbose --list -d 7 --prefix logs- "$REPOSITORY" This will keep 7 days of logs and 30 days of everything else. Borg 1.1 also supports the ``--glob-archives`` parameter. How do I remove files from an existing backup? ---------------------------------------------- Say you now want to remove old logfiles because you changed your backup policy as described above. The only way to do this is to use the :ref:`borg_recreate` command to rewrite all archives with a different ``--exclude`` pattern. See the examples in the :ref:`borg_recreate` manpage for more information. Can I safely change the compression level or algorithm? -------------------------------------------------------- The compression level and algorithm don't affect deduplication. Chunk ID hashes are calculated *before* compression. New compression settings will only be applied to new chunks, not existing chunks. So it's safe to change them. Security ######## .. _borg_security_critique: Isn't BorgBackup's AES-CTR crypto broken? ----------------------------------------- If a nonce (counter) value is reused, AES-CTR mode crypto is broken. To exploit the AES counter management issue, an attacker would need to have access to the borg repository. By tampering with the repo, the attacker could bring the repo into a state so that it reports a lower "highest used counter value" than the one that actually was used. The client would usually notice that, because it rather trusts the clientside stored "highest used counter value" than trusting the server. But there are situations, where this is simply not possible: - If clients A and B used the repo, the client A can only know its own highest CTR value, but not the one produced by B. That is only known to (B and) the server (the repo) and thus the client A needs to trust the server about the value produced by B in that situation. You can't do much about this except not having multiple clients per repo. - Even if there is only one client, if client-side information is completely lost (e.g. due to disk defect), the client also needs to trust the value from server side. 
You can avoid this by not continuing to write to the repository after you have lost clientside borg information. .. _home_config_borg: How important is the $HOME/.config/borg directory? -------------------------------------------------- The Borg config directory has content that you should take care of: ``security`` subdirectory Each directory here represents one Borg repository by its ID and contains the last known status. If a repository's status is different from this information at the beginning of BorgBackup operation, Borg outputs warning messages and asks for confirmation, so make sure you do not lose or manipulate these files. However, apart from those warnings, a loss of these files can be recovered. ``keys`` subdirectory In this directory all your repository keyfiles are stored. You MUST make sure to have an independent backup of these keyfiles, otherwise you cannot access your backups anymore if you lose them. You also MUST keep these files secret; everyone who gains access to your repository and has the corresponding keyfile (and the key passphrase) can extract it. Make sure that only you have access to the Borg config directory. .. _cache_security: Do I need to take security precautions regarding the cache? ----------------------------------------------------------- The cache contains a lot of metadata information about the files in your repositories and it is not encrypted. However, the assumption is that the cache is being stored on the very same system which also contains the original files which are being backed up. So someone with access to the cache files would also have access the the original files anyway. The Internals section contains more details about :ref:`cache`. If you ever need to move the cache to a different location, this can be achieved by using the appropriate :ref:`env_vars`. How can I specify the encryption passphrase programmatically? ------------------------------------------------------------- There are several ways to specify a passphrase without human intervention: Setting ``BORG_PASSPHRASE`` The passphrase can be specified using the ``BORG_PASSPHRASE`` enviroment variable. This is often the simplest option, but can be insecure if the script that sets it is world-readable. .. _password_env: .. note:: Be careful how you set the environment; using the ``env`` command, a ``system()`` call or using inline shell scripts (e.g. ``BORG_PASSPHRASE=hunter2 borg ...``) might expose the credentials in the process list directly and they will be readable to all users on a system. Using ``export`` in a shell script file should be safe, however, as the environment of a process is `accessible only to that user `_. Using ``BORG_PASSCOMMAND`` with a properly permissioned file Another option is to create a file with a password in it in your home directory and use permissions to keep anyone else from reading it. For example, first create a key:: head -c 32 /dev/urandom | base64 -w 0 > ~/.borg-passphrase chmod 400 ~/.borg-passphrase Then in an automated script one can put:: export BORG_PASSCOMMAND="cat $HOME/.borg-passphrase" and Borg will automatically use that passphrase. Using keyfile-based encryption with a blank passphrase It is possible to encrypt your repository in ``keyfile`` mode instead of the default ``repokey`` mode and use a blank passphrase for the key file (simply press Enter twice when ``borg init`` asks for the password). See :ref:`encrypted_repos` for more details. 
Using ``BORG_PASSCOMMAND`` with macOS Keychain macOS has a native manager for secrets (such as passphrases) which is safer than just using a file as it is encrypted at rest and unlocked manually (fortunately, the login keyring automatically unlocks when you login). With the built-in ``security`` command, you can access it from the command line, making it useful for ``BORG_PASSCOMMAND``. First generate a passphrase and use ``security`` to save it to your login (default) keychain:: security add-generic-password -D secret -U -a $USER -s borg-passphrase -w $(head -c 32 /dev/urandom | base64 -w 0) In your backup script retrieve it in the ``BORG_PASSCOMMAND``:: export BORG_PASSCOMMAND="security find-generic-password -a $USER -s borg-passphrase -w" Using ``BORG_PASSCOMMAND`` with GNOME Keyring GNOME also has a keyring daemon that can be used to store a Borg passphrase. First ensure ``libsecret-tools``, ``gnome-keyring`` and ``libpam-gnome-keyring`` are installed. If ``libpam-gnome-keyring`` wasn't already installed, ensure it runs on login:: sudo sh -c "echo session optional pam_gnome_keyring.so auto_start >> /etc/pam.d/login" sudo sh -c "echo password optional pam_gnome_keyring.so >> /etc/pam.d/passwd" # you may need to relogin afterwards to activate the login keyring Then add a secret to the login keyring:: head -c 32 /dev/urandom | base64 -w 0 | secret-tool store borg-repository repo-name --label="Borg Passphrase" If a dialog box pops up prompting you to pick a password for a new keychain, use your login password. If there is a checkbox for automatically unlocking on login, check it to allow backups without any user intervention whatsoever. Once the secret is saved, retrieve it in a backup script using ``BORG_PASSCOMMAND``:: export BORG_PASSCOMMAND="secret-tool lookup borg-repository repo-name" .. note:: For this to automatically unlock the keychain it must be run in the ``dbus`` session of an unlocked terminal; for example, running a backup script as a ``cron`` job might not work unless you also ``export DISPLAY=:0`` so ``secret-tool`` can pick up your open session. `It gets even more complicated`__ when you are running the tool as a different user (e.g. running a backup as root with the password stored in the user keyring). __ https://github.com/borgbackup/borg/pull/2837#discussion_r127641330 Using ``BORG_PASSCOMMAND`` with KWallet KDE also has a keychain feature in the form of KWallet. The command-line tool ``kwalletcli`` can be used to store and retrieve secrets. Ensure ``kwalletcli`` is installed, generate a passphrase, and store it in your "wallet":: head -c 32 /dev/urandom | base64 -w 0 | kwalletcli -Pe borg-passphrase -f Passwords Once the secret is saved, retrieve it in a backup script using ``BORG_PASSCOMMAND``:: export BORG_PASSCOMMAND="kwalletcli -e borg-passphrase -f Passwords" When backing up to remote encrypted repos, is encryption done locally? ---------------------------------------------------------------------- Yes, file and directory metadata and data is locally encrypted, before leaving the local machine. We do not mean the transport layer encryption by that, but the data/metadata itself. Transport layer encryption (e.g. when ssh is used as a transport) applies additionally. When backing up to remote servers, do I have to trust the remote server? ------------------------------------------------------------------------ Yes and No. 
No, as far as data confidentiality is concerned - if you use encryption, all your files/dirs data and metadata are stored in their encrypted form into the repository. Yes, as an attacker with access to the remote server could delete (or otherwise make unavailable) all your backups. How can I protect against a hacked backup client? ------------------------------------------------- Assume you backup your backup client machine C to the backup server S and C gets hacked. In a simple push setup, the attacker could then use borg on C to delete all backups residing on S. These are your options to protect against that: - Do not allow to permanently delete data from the repo, see :ref:`append_only_mode`. - Use a pull-mode setup using ``ssh -R``, see :issue:`900`. - Mount C's filesystem on another machine and then create a backup of it. - Do not give C filesystem-level access to S. See :ref:`hosting_repositories` for a detailed protection guide. How can I protect against a hacked backup server? ------------------------------------------------- Just in case you got the impression that pull-mode backups are way more safe than push-mode, you also need to consider the case that your backup server S gets hacked. In case S has access to a lot of clients C, that might bring you into even bigger trouble than a hacked backup client in the previous FAQ entry. These are your options to protect against that: - Use the standard push-mode setup (see also previous FAQ entry). - Mount (the repo part of) S's filesystem on C. - Do not give S file-system level access to C. - Have your backup server at a well protected place (maybe not reachable from the internet), configure it safely, apply security updates, monitor it, ... How can I protect against theft, sabotage, lightning, fire, ...? ---------------------------------------------------------------- In general: if your only backup medium is nearby the backupped machine and always connected, you can easily get into trouble: they likely share the same fate if something goes really wrong. Thus: - have multiple backup media - have media disconnected from network, power, computer - have media at another place - have a relatively recent backup on your media How do I report a security issue with Borg? ------------------------------------------- Send a private email to the :ref:`security contact ` if you think you have discovered a security issue. Please disclose security issues responsibly. Common issues ############# Why does Borg extract hang after some time? ------------------------------------------- When I do a ``borg extract``, after a while all activity stops, no cpu usage, no downloads. This may happen when the SSH connection is stuck on server side. You can configure SSH on client side to prevent this by sending keep-alive requests, for example in ~/.ssh/config: :: Host borg.example.com # Client kills connection after 3*30 seconds without server response: ServerAliveInterval 30 ServerAliveCountMax 3 You can also do the opposite and configure SSH on server side in /etc/ssh/sshd_config, to make the server send keep-alive requests to the client: :: # Server kills connection after 3*30 seconds without client response: ClientAliveInterval 30 ClientAliveCountMax 3 How can I deal with my very unstable SSH connection? 
---------------------------------------------------- If you have issues with lost connections during long-running borg commands, you could try to work around: - Make partial extracts like ``borg extract REPO PATTERN`` to do multiple smaller extraction runs that complete before your connection has issues. - Try using ``borg mount REPO MOUNTPOINT`` and ``rsync -avH`` from ``MOUNTPOINT`` to your desired extraction directory. If the connection breaks down, just repeat that over and over again until rsync does not find anything to do any more. Due to the way borg mount works, this might be less efficient than borg extract for bigger volumes of data. Why do I get "connection closed by remote" after a while? --------------------------------------------------------- When doing a backup to a remote server (using a ssh: repo URL), it sometimes stops after a while (some minutes, hours, ... - not immediately) with "connection closed by remote" error message. Why? That's a good question and we are trying to find a good answer in :issue:`636`. Why am I seeing idle borg serve processes on the repo server? ------------------------------------------------------------- Maybe the ssh connection between client and server broke down and that was not yet noticed on the server. Try these settings: :: # /etc/ssh/sshd_config on borg repo server - kill connection to client # after ClientAliveCountMax * ClientAliveInterval seconds with no response ClientAliveInterval 20 ClientAliveCountMax 3 If you have multiple borg create ... ; borg create ... commands in a already serialized way in a single script, you need to give them ``--lock-wait N`` (with N being a bit more than the time the server needs to terminate broken down connections and release the lock). .. _disable_archive_chunks: The borg cache eats way too much disk space, what can I do? ----------------------------------------------------------- This may especially happen if borg needs to rebuild the local "chunks" index - either because it was removed, or because it was not coherent with the repository state any more (e.g. because another borg instance changed the repository). To optimize this rebuild process, borg caches per-archive information in the ``chunks.archive.d/`` directory. It won't help the first time it happens, but it will make the subsequent rebuilds faster (because it needs to transfer less data from the repository). While being faster, the cache needs quite some disk space, which might be unwanted. There is a temporary (but maybe long lived) hack to avoid using lots of disk space for chunks.archive.d (see :issue:`235` for details): :: # this assumes you are working with the same user as the backup. cd ~/.cache/borg/$(borg config /path/to/repo id) rm -rf chunks.archive.d ; touch chunks.archive.d This deletes all the cached archive chunk indexes and replaces the directory that kept them with a file, so borg won't be able to store anything "in" there in future. This has some pros and cons, though: - much less disk space needs for ~/.cache/borg. - chunk cache resyncs will be slower as it will have to transfer chunk usage metadata for all archives from the repository (which might be slow if your repo connection is slow) and it will also have to build the hashtables from that data. chunk cache resyncs happen e.g. if your repo was written to by another machine (if you share same backup repo between multiple machines) or if your local chunks cache was lost somehow. The long term plan to improve this is called "borgception", see :issue:`474`. 
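
Before applying the hack above, it can be worth checking how much space the
per-archive cache actually uses (a quick sketch, the repository path is a
placeholder)::

    # overall cache usage, one subdirectory per repository ID:
    du -sh ~/.cache/borg/*
    # the chunks.archive.d part of the cache belonging to one specific repo:
    du -sh ~/.cache/borg/$(borg config /path/to/repo id)/chunks.archive.d
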
Can I backup my root partition (/) with Borg? --------------------------------------------- Backing up your entire root partition works just fine, but remember to exclude directories that make no sense to backup, such as /dev, /proc, /sys, /tmp and /run, and to use ``--one-file-system`` if you only want to backup the root partition (and not any mounted devices e.g.). If it crashes with a UnicodeError, what can I do? ------------------------------------------------- Check if your encoding is set correctly. For most POSIX-like systems, try:: export LANG=en_US.UTF-8 # or similar, important is correct charset I can't extract non-ascii filenames by giving them on the commandline!? ----------------------------------------------------------------------- This might be due to different ways to represent some characters in unicode or due to other non-ascii encoding issues. If you run into that, try this: - avoid the non-ascii characters on the commandline by e.g. extracting the parent directory (or even everything) - mount the repo using FUSE and use some file manager .. _expected_performance: What's the expected backup performance? --------------------------------------- A first backup will usually be somehow "slow" because there is a lot of data to process. Performance here depends on a lot of factors, so it is hard to give specific numbers. Subsequent backups are usually very fast if most files are unchanged and only a few are new or modified. The high performance on unchanged files primarily depends only on a few factors (like fs recursion + metadata reading performance and the files cache working as expected) and much less on other factors. E.g., for this setup: - server grade machine (4C/8T 2013 Xeon, 64GB RAM, 2x good 7200RPM disks) - local zfs filesystem (mirrored) containing the backup source data - repository is remote (does not matter much for unchanged files) - backup job runs while machine is otherwise idle The observed performance is that |project_name| can process about **1 million unchanged files (and a few small changed ones) in 4 minutes!** If you are seeing much less than that in similar circumstances, read the next few FAQ entries below. .. _slow_backup: Why is backup slow for me? -------------------------- So, if you feel your |project_name| backup is too slow somehow, you should find out why. The usual way to approach this is to add ``--list --filter=AME --stats`` to your ``borg create`` call to produce more log output, including a file list (with file status characters) and also some statistics at the end of the backup. Then you do the backup and look at the log output: - stats: Do you really have little changes or are there more changes than you thought? In the stats you can see the overall volume of changed data, which needed to be added to the repo. If that is a lot, that can be the reason why it is slow. - ``A`` status ("added") in the file list: If you see that often, you have a lot of new files (files that |project_name| did not find in the files cache). If you think there is something wrong with that (the file was there already in the previous backup), please read the FAQ entries below. - ``M`` status ("modified") in the file list: If you see that often, |project_name| thinks that a lot of your files might be modified (|project_name| found them in the files cache, but the metadata read from the filesystem did not match the metadata stored in the files cache). 
In such a case, |project_name| will need to process the files' contents completely, which is much slower than processing unmodified files (|project_name| does not read their contents!). The metadata values used in this comparison are determined by the ``--files-cache`` option and could be e.g. size, ctime and inode number (see the ``borg create`` docs for more details and potential issues). You can use the ``stat`` command on files to manually look at fs metadata to debug if there is any unexpected change triggering the ``M`` status. See also the next few FAQ entries for more details. .. _a_status_oddity: I am seeing 'A' (added) status for an unchanged file!? ------------------------------------------------------ The files cache is used to determine whether |project_name| already "knows" / has backed up a file and if so, to skip the file from chunking. It does intentionally *not* contain files that have a timestamp same as the newest timestamp in the created archive. So, if you see an 'A' status for unchanged file(s), they are likely the files with the most recent timestamp in that archive. This is expected: it is to avoid data loss with files that are backed up from a snapshot and that are immediately changed after the snapshot (but within timestamp granularity time, so the timestamp would not change). Without the code that removes these files from the files cache, the change that happened right after the snapshot would not be contained in the next backup as |project_name| would think the file is unchanged. This does not affect deduplication, the file will be chunked, but as the chunks will often be the same and already stored in the repo (except in the above mentioned rare condition), it will just re-use them as usual and not store new data chunks. If you want to avoid unnecessary chunking, just create or touch a small or empty file in your backup source file set (so that one has the latest timestamp, not your 50GB VM disk image) and, if you do snapshots, do the snapshot after that. Since only the files cache is used in the display of files status, those files are reported as being added when, really, chunks are already used. By default, ctime (change time) is used for the timestamps to have a rather safe change detection (see also the --files-cache option). Furthermore, pathnames recorded in files cache are always absolute, even if you specify source directories with relative pathname. If relative pathnames are stable, but absolute are not (for example if you mount a filesystem without stable mount points for each backup or if you are running the backup from a filesystem snapshot whose name is not stable), borg will assume that files are different and will report them as 'added', even though no new chunks will be actually recorded for them. To avoid this, you could bind mount your source directory in a directory with the stable path. .. _always_chunking: It always chunks all my files, even unchanged ones! --------------------------------------------------- |project_name| maintains a files cache where it remembers the timestamp, size and inode of files. When |project_name| does a new backup and starts processing a file, it first looks whether the file has changed (compared to the values stored in the files cache). If the values are the same, the file is assumed unchanged and thus its contents won't get chunked (again). 
|project_name| can't keep an infinite history of files of course, thus entries in the files cache have a "maximum time to live" which is set via the environment variable BORG_FILES_CACHE_TTL (and defaults to 20). Every time you do a backup (on the same machine, using the same user), the cache entries' ttl values of files that were not "seen" are incremented by 1 and if they reach BORG_FILES_CACHE_TTL, the entry is removed from the cache. So, for example, if you do daily backups of 26 different data sets A, B, C, ..., Z on one machine (using the default TTL), the files from A will be already forgotten when you repeat the same backups on the next day and it will be slow because it would chunk all the files each time. If you set BORG_FILES_CACHE_TTL to at least 26 (or maybe even a small multiple of that), it would be much faster. Another possible reason is that files don't always have the same path, for example if you mount a filesystem without stable mount points for each backup or if you are running the backup from a filesystem snapshot whose name is not stable. If the directory where you mount a filesystem is different every time, |project_name| assumes they are different files. This is true even if you backup these files with relative pathnames - borg uses full pathnames in files cache regardless. Is there a way to limit bandwidth with |project_name|? ------------------------------------------------------ To limit upload (i.e. :ref:`borg_create`) bandwidth, use the ``--remote-ratelimit`` option. There is no built-in way to limit *download* (i.e. :ref:`borg_extract`) bandwidth, but limiting download bandwidth can be accomplished with pipeviewer_: Create a wrapper script: /usr/local/bin/pv-wrapper :: #!/bin/sh ## -q, --quiet do not output any transfer information at all ## -L, --rate-limit RATE limit transfer to RATE bytes per second RATE=307200 pv -q -L $RATE | "$@" Add BORG_RSH environment variable to use pipeviewer wrapper script with ssh. :: export BORG_RSH='/usr/local/bin/pv-wrapper ssh' Now |project_name| will be bandwidth limited. Nice thing about pv is that you can change rate-limit on the fly: :: pv -R $(pidof pv) -L 102400 .. _pipeviewer: http://www.ivarch.com/programs/pv.shtml How can I avoid unwanted base directories getting stored into archives? ----------------------------------------------------------------------- Possible use cases: - Another file system is mounted and you want to backup it with original paths. - You have created a BTRFS snapshot in a ``/.snapshots`` directory for backup. To achieve this, run ``borg create`` within the mountpoint/snapshot directory: :: # Example: Some file system mounted in /mnt/rootfs. cd /mnt/rootfs borg create /path/to/repo::rootfs_backup . I am having troubles with some network/FUSE/special filesystem, why? -------------------------------------------------------------------- |project_name| is doing nothing special in the filesystem, it only uses very common and compatible operations (even the locking is just "mkdir"). So, if you are encountering issues like slowness, corruption or malfunction when using a specific filesystem, please try if you can reproduce the issues with a local (non-network) and proven filesystem (like ext4 on Linux). If you can't reproduce the issue then, you maybe have found an issue within the filesystem code you used (not with |project_name|). For this case, it is recommended that you talk to the developers / support of the network fs and maybe open an issue in their issue tracker. 
Do not file an issue in the |project_name| issue tracker. If you can reproduce the issue with the proven filesystem, please file an issue in the |project_name| issue tracker about that. Why does running 'borg check --repair' warn about data loss? ------------------------------------------------------------ Repair usually works for recovering data in a corrupted archive. However, it's impossible to predict all modes of corruption. In some very rare instances, such as malfunctioning storage hardware, additional repo corruption may occur. If you can't afford to lose the repo, it's strongly recommended that you perform repair on a copy of the repo. In other words, the warning is there to emphasize that |project_name|: - Will perform automated routines that modify your backup repository - Might not actually fix the problem you are experiencing - Might, in very rare cases, further corrupt your repository In the case of malfunctioning hardware, such as a drive or USB hub corrupting data when read or written, it's best to diagnose and fix the cause of the initial corruption before attempting to repair the repo. If the corruption is caused by a one time event such as a power outage, running `borg check --repair` will fix most problems. Why isn't there more progress / ETA information displayed? ---------------------------------------------------------- Some borg runs take quite a bit, so it would be nice to see a progress display, maybe even including a ETA (expected time of "arrival" [here rather "completion"]). For some functionality, this can be done: if the total amount of work is more or less known, we can display progress. So check if there is a ``--progress`` option. But sometimes, the total amount is unknown (e.g. for ``borg create`` we just do a single pass over the filesystem, so we do not know the total file count or data volume before reaching the end). Adding another pass just to determine that would take additional time and could be incorrect, if the filesystem is changing. Even if the fs does not change and we knew count and size of all files, we still could not compute the ``borg create`` ETA as we do not know the amount of changed chunks, how the bandwidth of source and destination or system performance might fluctuate. You see, trying to display ETA would be futile. The borg developers prefer to rather not implement progress / ETA display than doing futile attempts. See also: https://xkcd.com/612/ Why am I getting 'Operation not permitted' errors when backing up on sshfs? --------------------------------------------------------------------------- By default, ``sshfs`` is not entirely POSIX-compliant when renaming files due to a technicality in the SFTP protocol. Fortunately, it also provides a workaround_ to make it behave correctly:: sshfs -o workaround=rename user@host:dir /mnt/dir .. _workaround: https://unix.stackexchange.com/a/123236 Can I disable checking for free disk space? ------------------------------------------- In some cases, the free disk space of the target volume is reported incorrectly. This can happen for CIFS- or FUSE shares. If you are sure that your target volume will always have enough disk space, you can use the following workaround to disable checking for free disk space:: borg config -- $REPO_LOCATION additional_free_space -2T Miscellaneous ############# Requirements for the borg single-file binary, esp. (g)libc? 
Why am I getting 'Operation not permitted' errors when backing up on sshfs?
----------------------------------------------------------------------------

By default, ``sshfs`` is not entirely POSIX-compliant when renaming files due
to a technicality in the SFTP protocol. Fortunately, it also provides a
workaround_ to make it behave correctly::

    sshfs -o workaround=rename user@host:dir /mnt/dir

.. _workaround: https://unix.stackexchange.com/a/123236


Can I disable checking for free disk space?
-------------------------------------------

In some cases, the free disk space of the target volume is reported
incorrectly. This can happen for CIFS- or FUSE shares. If you are sure that
your target volume will always have enough disk space, you can use the
following workaround to disable checking for free disk space::

    borg config -- $REPO_LOCATION additional_free_space -2T


Miscellaneous
#############

Requirements for the borg single-file binary, esp. (g)libc?
------------------------------------------------------------

We try to build the binary on old, but still supported systems - to keep the
minimum requirement for the (g)libc low. The (g)libc can't be bundled into the
binary as it needs to fit your kernel and OS, but Python and all other
required libraries will be bundled into the binary.

If your system fulfills the minimum (g)libc requirement (see the README that
is released with the binary), there should be no problem. If you are slightly
below the required version, maybe just try. Due to the dynamic loading (or not
loading) of some shared libraries, it might still work depending on what
libraries are actually loaded and used.

In the borg git repository, there is ``scripts/glibc_check.py``, which can
determine (based on the versions of the symbols they want to link against)
whether a set of given (Linux) binaries works with a given glibc version.


Why was Borg forked from Attic?
-------------------------------

Borg was created in May 2015 in response to the difficulty of getting new code
or larger changes incorporated into Attic and establishing a bigger developer
community / more open development.

More details can be found in `ticket 217 `_ that led to the fork.

Borg intends to be:

* simple:

  * as simple as possible, but no simpler
  * do the right thing by default, but offer options

* open:

  * welcome feature requests
  * accept pull requests of good quality and coding style
  * give feedback on PRs that can't be accepted "as is"
  * discuss openly, don't work in the dark

* changing:

  * Borg is not compatible with Attic
  * do not break compatibility accidentally, without a good reason or without
    warning; allow compatibility breaking for other cases
  * if major version number changes, it may have incompatible changes


Migrating from Attic
####################

What are the differences between Attic and Borg?
-------------------------------------------------

Borg is a fork of `Attic`_ and maintained by "`The Borg collective`_".

.. _Attic: https://github.com/jborg/attic
.. _The Borg collective: https://borgbackup.readthedocs.org/en/latest/authors.html

Here's an (incomplete) list of some major changes:

* lots of attic issues fixed (see `issue #5 `_), including critical data
  corruption bugs and security issues.
* more open, faster paced development (see `issue #1 `_)
* less chunk management overhead (less memory and disk usage for chunks index)
* faster remote cache resync (useful when backing up multiple machines into
  the same repo)
* compression: no, lz4, zstd, zlib or lzma compression, adjustable compression
  levels
* repokey replaces problematic passphrase mode (you can't change the
  passphrase nor the pbkdf2 iteration count in "passphrase" mode)
* simple sparse file support, great for virtual machine disk files
* can read special files (e.g. block devices) or from stdin, write to stdout
* mkdir-based locking is more compatible than attic's posix locking
* uses fadvise to not spoil / blow up the fs cache
* better error messages / exception handling
* better logging, screen output, progress indication
* tested on misc. Linux systems, 32 and 64bit, FreeBSD, OpenBSD, NetBSD, macOS

Please read the :ref:`changelog` (or ``docs/changes.rst`` in the source
distribution) for more information.

Borg is not compatible with original Attic (but there is a one-way conversion).


How do I migrate from Attic to Borg?
------------------------------------

Use :ref:`borg_upgrade`. This is a one-way process that cannot be reversed.
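A minimal sketch of such a conversion (the repository path is just a
placeholder; consider working on a copy of the repository and see
``borg upgrade --help`` for the available safety options before running it)::

    # converts the attic repository in place to the borg format
    borg upgrade -v /path/to/attic-repo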
There are some caveats:

- The upgrade can only be performed on local repositories. It cannot be
  performed on remote repositories.

- If the repository is in "keyfile" encryption mode, the keyfile must exist
  locally or it must be manually moved after performing the upgrade:

  1. Get the repository ID with ``borg config /path/to/repo id``.
  2. Locate the attic key file at ``~/.attic/keys/``. The correct key for the
     repository starts with the line ``ATTIC_KEY ``.
  3. Copy the attic key file to ``~/.config/borg/keys/``
  4. Change the first line from ``ATTIC_KEY ...`` to ``BORG_KEY ...``.
  5. Verify that the repository is now accessible (e.g. ``borg list ``).

- Attic and Borg use different :ref:`"chunker params" `. This means that data
  added by Borg won't deduplicate with the existing data stored by Attic. The
  effect is lessened if the files cache is used with Borg.

- Repositories in "passphrase" mode *must* be migrated to "repokey" mode using
  :ref:`borg_key_migrate-to-repokey`. Borg does not support the "passphrase"
  mode in any other way.


Why is my backup bigger than with attic?
----------------------------------------

Attic was rather inflexible when it comes to compression: it always compressed
using zlib level 6 (there was no way to switch compression off or adjust the
level or algorithm).

The default in Borg is lz4, which is fast enough to not use significant CPU
time in most cases, but can only achieve modest compression. It still
compresses easily compressed data fairly well.

Borg also offers zstd, zlib and lzma compression, choose wisely. Which choice
is best depends on a number of factors, like bandwidth to the repository, how
well the data compresses, available CPU power and so on.
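If smaller backups matter more than CPU time, a stronger compressor can be
selected per ``borg create`` invocation; for example (repository path, archive
name and compression level are placeholders, not recommendations)::

    borg create --compression zstd,10 /path/to/repo::Monday1 ~/Documents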
borgbackup-1.1.15/docs/introduction.rst0000644000175000017500000000035013771325506020106 0ustar useruser00000000000000Introduction
============

.. this shim is here to fix the structure in the PDF rendering. without this stub,
   the elements in the toctree of index.rst show up a level below the README file
   included

.. include:: ../README.rst
borgbackup-1.1.15/docs/borg_theme/0000755000175000017500000000000013771325773016756 5ustar useruser00000000000000borgbackup-1.1.15/docs/borg_theme/css/0000755000175000017500000000000013771325773017546 5ustar useruser00000000000000borgbackup-1.1.15/docs/borg_theme/css/borg.css0000644000175000017500000001046413771325506021210 0ustar useruser00000000000000@import url("theme.css");

/* The Return of the Borg.
 *
 * Have a bit green and grey and darkness (and if only in the upper left corner). */

.wy-side-nav-search {
    background-color: #000000 !important;
}

.wy-side-nav-search input[type="text"] {
    border-color: #000000;
}

.wy-side-nav-search > a {
    color: rgba(255, 255, 255, 0.5);
}

.wy-side-nav-search > div.version {
    color: rgba(255, 255, 255, 0.5);
}

dt code {
    font-weight: normal;
}

#internals .toctree-wrapper > ul {
    column-count: 3;
    -webkit-column-count: 3;
}

#internals .toctree-wrapper > ul > li {
    display: inline-block;
    font-weight: bold;
}

#internals .toctree-wrapper > ul > li > ul {
    font-weight: normal;
}

/* bootstrap has a .container class which clashes with docutils' container class. */
.docutils.container {
    width: auto;
    margin: 0;
    padding: 0;
}

/* the default (38px) produces a jumpy baseline in Firefox on Linux. */
h1 {
    font-size: 36px;
}

.text-logo {
    background-color: #000200;
    color: #00dd00;
}

.text-logo:hover,
.text-logo:active,
.text-logo:focus {
    color: #5afe57;
}

/* by default the top and bottom margins are unequal which looks a bit unbalanced. */
.sidebar-block {
    padding: 0;
    margin: 14px 0 24px 0;
}

#borg-documentation h1 + p .external img {
    width: 100%;
}

.container.experimental,
#debugging-facilities,
#borg-recreate {
    /* don't change text dimensions */
    margin: 0 -30px;  /* padding below + border width */
    padding: 0 10px;  /* 10 px visual margin between edge of text and the border */
    /* fallback for browsers that don't have repeating-linear-gradient: thick, red lines */
    border-left: 20px solid red;
    border-right: 20px solid red;
    /* fancy red stripes */
    border-image: repeating-linear-gradient(
        -45deg,rgba(255,0,0,0.1) 0,rgba(255,0,0,0.75) 10px,rgba(0,0,0,0) 10px,rgba(0,0,0,0) 20px,rgba(255,0,0,0.75) 20px) 0 20 repeat;
}

.topic {
    margin: 0 1em;
    padding: 0 1em;
    /* #4e4a4a = background of the ToC sidebar */
    border-left: 2px solid #4e4a4a;
    border-right: 2px solid #4e4a4a;
}

table.docutils:not(.footnote) td,
table.docutils:not(.footnote) th {
    padding: .2em;
}

table.docutils:not(.footnote) {
    border-collapse: collapse;
    border: none;
}

table.docutils:not(.footnote) td,
table.docutils:not(.footnote) th {
    border: 1px solid #ddd;
}

table.docutils:not(.footnote) tr:first-child th,
table.docutils:not(.footnote) tr:first-child td {
    border-top: 0;
}

table.docutils:not(.footnote) tr:last-child td {
    border-bottom: 0;
}

table.docutils:not(.footnote) tr td:first-child,
table.docutils:not(.footnote) tr th:first-child {
    border-left: 0;
}

table.docutils:not(.footnote) tr td:last-child,
table.docutils:not(.footnote) tr th:last-child,
table.docutils.borg-options-table tr td {
    border-right: 0;
}

table.docutils.option-list tr td,
table.docutils.borg-options-table tr td {
    border-left: 0;
    border-right: 0;
}

table.docutils.borg-options-table tr td:first-child:not([colspan="3"]) {
    border-top: 0;
    border-bottom: 0;
}

.borg-options-table td[colspan="3"] p {
    margin: 0;
}

.borg-options-table {
    width: 100%;
}

kbd, /* used in usage pages for options */
code,
.rst-content tt.literal,
.rst-content tt.literal,
.rst-content code.literal,
.rst-content tt,
.rst-content code,
p .literal,
p .literal span {
    border: none;
    padding: 0;
    color: black;  /* slight contrast with #404040 of regular text */
    background: none;
}

kbd {
    box-shadow: none;
    line-height: 23px;
    word-wrap: normal;
    font-size: 15px;
    font-family: Consolas, monospace;
}

.borg-options-table tr td:nth-child(2) .pre {
    white-space: nowrap;
}

.borg-options-table tr td:first-child {
    width: 2em;
}

cite {
    white-space: nowrap;
    color: black;  /* slight contrast with #404040 of regular text */
    font-family: Consolas, "Andale Mono WT", "Andale Mono", "Lucida Console", "Lucida Sans Typewriter", "DejaVu Sans Mono", "Bitstream Vera Sans Mono", "Liberation Mono", "Nimbus Mono L", Monaco, "Courier New", Courier, monospace;
    font-style: normal;
    text-decoration: underline;
}

.borg-common-opt-ref {
    font-weight: bold;
}

.sidebar-toc ul li.toctree-l2 a,
.sidebar-toc ul li.toctree-l3 a {
    padding-right: 25px;
}

#common-options .option {
    white-space: nowrap;
}
borgbackup-1.1.15/docs/_static/0000755000175000017500000000000013771325773016271 5ustar useruser00000000000000borgbackup-1.1.15/docs/_static/logo_font.txt0000644000175000017500000000015713771325506021015 0ustar useruser00000000000000Black Ops One
James Grieshaber
SIL Open Font License, 1.1
https://www.google.com/fonts/specimen/Black+Ops+One
borgbackup-1.1.15/docs/_static/favicon.ico0000644000175000017500000011401613771325506020407 0ustar useruser00000000000000
borgbackup-1.1.15/docs/_static/logo.svg0000644000175000017500000000355113771325506017750 0ustar useruser00000000000000
borgbackup-1.1.15/docs/_static/logo.pdf0000644000175000017500000000225713771325506017724 0ustar useruser00000000000000
borgbackup-1.1.15/docs/_static/logo.png0000644000175000017500000000314513771325506017734 0ustar useruser00000000000000

(this allows the rolling hash some freedom to make its cut at a place
determined by the windows contents rather than the min/max. chunk size).

Default: 21 (statistically, chunks will be about 2^21 == 2MiB in size)

HASH_WINDOW_SIZE: the size of the window used for the rolling hash computation.
Default: 4095B

Trying it out
=============

I backed up a VM directory to demonstrate how different chunker parameters
influence repo size, index size / chunk count, compression, deduplication.

repo-sm: ~64kiB chunks (16 bits chunk mask), min chunk size 1kiB (2^10B)
         (these are attic / borg 0.23 internal defaults)

repo-lg: ~1MiB chunks (20 bits chunk mask), min chunk size 64kiB (2^16B)

repo-xl: 8MiB chunks (2^23B max chunk size), min chunk size 64kiB (2^16B).
         The chunk mask bits were set to 31, so it (almost) never triggers.
         This degrades the rolling hash based dedup to a fixed-offset dedup,
         as the cutting point is now (almost) always the end of the buffer
         (at 2^23B == 8MiB).

The repo index size is an indicator for the RAM needs of Borg. In this special
case, the total RAM needs are about 2.1x the repo index size. You can see that
the index size of repo-sm is 16x larger than that of repo-lg, which corresponds
to the ratio of the different target chunk sizes.

Note: RAM needs were not a problem in this specific case (37GB data size). But
just imagine you have 37TB of such data and much less than 42GB RAM - then
you'd definitely want the "lg" chunker params, so you only need 2.6GB RAM
(see the rough worked estimate after the listings below). Or even bigger
chunks than shown for "lg" (see "xl").

You also see compression works better for larger chunks, as expected.
Deduplication works worse for larger chunks, also as expected.

small chunks
============

$ borg info /extra/repo-sm::1
Command line: /home/tw/w/borg-env/bin/borg create --chunker-params 10,23,16,4095 /extra/repo-sm::1 /home/tw/win
Number of files: 3

                    Original size      Compressed size    Deduplicated size
This archive:            37.12 GB             14.81 GB             12.18 GB
All archives:            37.12 GB             14.81 GB             12.18 GB

                    Unique chunks         Total chunks
Chunk index:               378374               487316

$ ls -l /extra/repo-sm/index*
-rw-rw-r-- 1 tw tw 20971538 Jun 20 23:39 index.2308

$ du -sk /extra/repo-sm
11930840        /extra/repo-sm

large chunks
============

$ borg info /extra/repo-lg::1
Command line: /home/tw/w/borg-env/bin/borg create --chunker-params 16,23,20,4095 /extra/repo-lg::1 /home/tw/win
Number of files: 3

                    Original size      Compressed size    Deduplicated size
This archive:            37.10 GB             14.60 GB             13.38 GB
All archives:            37.10 GB             14.60 GB             13.38 GB

                    Unique chunks         Total chunks
Chunk index:                25889                29349

$ ls -l /extra/repo-lg/index*
-rw-rw-r-- 1 tw tw 1310738 Jun 20 23:10 index.2264

$ du -sk /extra/repo-lg
13073928        /extra/repo-lg

xl chunks
=========

(borg-env)tw@tux:~/w/borg$ borg info /extra/repo-xl::1
Command line: /home/tw/w/borg-env/bin/borg create --chunker-params 16,23,31,4095 /extra/repo-xl::1 /home/tw/win
Number of files: 3

                    Original size      Compressed size    Deduplicated size
This archive:            37.10 GB             14.59 GB             14.59 GB
All archives:            37.10 GB             14.59 GB             14.59 GB

                    Unique chunks         Total chunks
Chunk index:                 4319                 4434

$ ls -l /extra/repo-xl/index*
-rw-rw-r-- 1 tw tw 327698 Jun 21 00:52 index.2011

$ du -sk /extra/repo-xl/
14253464        /extra/repo-xl/
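Roughly, assuming the index grows linearly with the amount of data and using
the 2.1x RAM factor quoted above, the RAM estimates work out like this
(numbers rounded):

    repo-sm: ~20 MiB index for 37 GB  -> x1000 for 37 TB -> ~20 GiB * 2.1  ~= 42 GB RAM
    repo-lg: ~1.25 MiB index for 37 GB -> x1000 for 37 TB -> ~1.25 GiB * 2.1 ~= 2.6 GB RAM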
borgbackup-1.1.15/docs/misc/borg-data-flow.png0000644000175000017500000030354713771325506021113 0ustar useruser00000000000000
ہ˭}{(XpqvǠdyѼǏw}PaQ䣏>:.Bm 7P]r%$XQQr-_wmPaڢ3Ө\~էMF_,s# @)GgTs.[[?bØvEFsFÇX ¤J]?8"?S رcN$Xmzku /0Ksz.Z-[Tvb'|rꫯ @g-@ qnp0]&0O_?V6?xQۮ۶mU5+*l׏+WnݺU[?vY}#]vYu饗B02)W^x㍕裏Vgu~`w߽wDַN58p *?j׏΃P__״K%*K"x 6<@ k@ ]+qm v(kEt;0:9r5 @h׏+Xv}m*9餓Ӂ5~[ϺÇBo6m"`?aF zc=u=Տ~*Y*PB8g8wTVn|׎n]ۿ?c(xƅ^Xi<ԅu}Rw~wFۿەI*0@ʵGe]]mG;wdtWW5y{; ) yP#0La L$@6:@5* @ee?Q~$ a6qT٫X@%Ԯ 5_F PìP;>y*j\Gv,h?S~믿Ό hEWAc0UFغU]93Zר?<O P\SSv,{VK/z #<,m2ըP*T`Z>< PYv;vT۶mlh `mgyaU*@)@|T6]?NovmKhn2|MmO4 3A`<>M0͹HxySSxio5g޴I 9崭ws`Զnw}†xL~ T@ @eBʥv 濭o04@`)C|)(+obUY]۞~D8tí9TG^;Xлv1lWxZ(4[lS~sg=2]t(ӮXmxp v onc^we4E@inw6oO1ީR3.붡k]EMmzS)U㿳 T@J-Bo}[._be˖l'@̓2*?Nsp85jܮiIJfM5mC^y?G[ڏenr_7=@5* @RسgϨ]nr1 m{V5@ͫ B/|́?oh>O9Fg1(@SO?hק۶mՎ;hgYwgzja]ai^M6wn*a,Q{tGm^{m*j^=xjb> >j?!* @e?j׏^ 1X!SOi+T~}^xσ4W^w@W^y.3_F>z Pc8N͟ɟ,|;a]}ծ]e*9m3-D\Պ}WkT꒷Gp/}i]S6`AHT"@ew 0s07ImMm/w]S R=@׿~BH_D@4>3#\M]V5I' P,]߾}r4QyZV`qjWL#H]5Mb?LOT꒶Gks8G~^y)鶬u:tm~Wg]}咖g$*u:Qkz1ii@UTc38C[?G] R͟ӣ]-ߥ@.>S.>k9g P糾iYܨ5J딮n'";2]?Z9Z5IiUIgQqI "-K7ꪂp/mAf>]|*wiV>A5@KǙ30p/s9 R%@_y>?74}ʗ?]5TH¼ Me9Ш`;ALܦ[|S05G]wN+ HiJ-G\MaoXeB[?^|Y1n:$H]5?yM/Ϩ|^z_u5}Z~>` rC ׶r4mCmuziWa** @޽{Gz뷅t}˪պVՓm1mj>)[ ַ2̧(()X͟/oL>m]3,C~H1-7緝Am0{]tj u&޷u]G{uɟ/3 9i-Mܶk|_q __ P*;v>яnH~b$o PPԺ^]H9IgeźVśoOf惱AlSUᛷiA]{l4Hev"T 6m4-H] P/tԝeMwy #lnr3;庭JާNV}+k},zȽyzzȗG @%Gm6M;@m3i.׺隿t|?Ե;ߕU]!h}n`6 uԼ.Hm]csz|]]Ug4;6:@}4:BY 矽wV?}T8 '-c6Xs6Zm5 zyuK9l iNզi^^;3S`bU Pn\g.jِ22e@uqU5?O-y:jԛßn0#ڎ8BhGuHf^RΓ6IpKqt(6i{ZvקWy P}O  NSJ 9K>oCmkx %@B Gzȩn/*HD%Ngo5ɅzC2 Pc鮿MVyPZo]M7ׯk?+_JuW_}uUWUg}+g7?=y7 ep*@(?f柁C>K%@$ Bs~Vk77+PnT l?Fꆸ+Nt=oߧ n=<{ۗ+M_qמ+M>iQj}Q=qT[n~5Bܠ7pcϡYy:Y+Ժϝt~R̾W.'>k$m]*Cn2@:/"*@4*Tn6uJ;ڴiӨE׿⢽m@(>Bݲ` [&>so} r#<2ѠǦ0k@oAi۾']ol NK1-[oB8Sf P?k /1Btί,+񙺈$_}& PJӦ'~!]$ƍE90~=l^]鎳~~7oSD"` 2 b1e`o[,7_X4Y`1X8U/)mʍ}Qޥπ2_rWVԔ\ԮWT:@?Gyp j7*nsl2?7}1 !/Z-@ͫ~~?aU 0 F`Zw>KL.h;'],Fm6?½/gA<_|CֺϘXVۗM".ofǿR3Ӷƶڶ?s]5oG6ym) อCŔqC˛I+.`ᬃUP%难j : ھ˃˰>74@ #+uhO_}׻WuUz} CTi:~ږVwPWBs8 haVUOR>,(ؼy), N=oX1DU5M#Sj)} R3. 2 PӾP~׾-DݽfNjAn8w[F`cԼkj1EELU]6N#8]le[YF>xI1{ōd_[ @KV矽m!< Xj_!w_&i>U01Ev}XGp* @X9QKE+}TmY[gY?mlW\D9R9@ծg˖-j|P&T TrѪՇ>ӟt_r/4^Clonu޷-M0zu N`vT*PiJtqyu׍8sȑ#՗c 0}w^T Pf'*N*'~yD s|Ali?sc< @Sze9P.YOqg??ծP*΅z/QR мN_wqκGSIDK$1E4P&@y[/~s)PO}?#m̴]?K.D> @3~\ T?~:tw߭?* @@7^}ZR_~[[T |.ѮP*;nRҷ׿څ?^]pG}](@u.rTy~w.*o޼0RuOg* @cqӨFRǏ^zQt0]SzݻwOܮ!kέ`FC8i}ՓO>b;SڴiDj`}PAʔ;v섶̢ѣ]s듴>* @o4]^z饣 [ouabܸ* @T#Gyi_vϵT@ @?s'GEbT&F }* @mY[T$FebT(qUk6$@urT*PYT'xE|vh@ THmMb1G%45p{T*P!sѷ}n9բv~ܸ* @:fRm6lOj 0gnQpaik@ T;mQGP{nmsҮgWwv}@ PQK.?'wyv}@ P PT,cp]@ P @XojӦMՕW^Y>|x1}/]Xy P @eeӅoܹ:3~ SVv횸]* @To1:M?яFm{n_€aBַ&n׏4x@ Pߠa_/v}@ Pyi.~Q[3xٮڼysuwOTuz1pѕG @*oݺ'>7Q_]XhvZӟԹX묉/TBW_8vq|7joi׿ Ka:5qј#_ @* h?3Ggǎ pWԧTB_￿Ї>T}򓟬7 YW??jݻ+b@]vU^z@ T_`FA5\zH>[}ق#`$n(GT*Pʶ/xp)M]0is=]XrJv'M65^ĔFΙTS#p\׾av^z"0.t%$Xm1G|s&W^9<.!i]?nㆧGѮ,{:sHb:;S:'Lwl1Ek٨jZJ_o2Y)1}Qίk:TǏ:vQ"vծBU cXgϞ랣G/OB~{u9t^ĜΕTm֕. 
.1mXQv}*y8gm?p @*̪?Jwtݨ4c]Xvw݉[K..련Tu(E_>j馛uP`*,8o%Xr7;׵*Pazf: xgO~G5j⼷uֹ͗>߷o_鋜 @;sT-chG˿ip%Ba,{w۝k&֫k1V>6J _]` >\m߾}ji`ż۶m}ϻ&ֻȑ#'[]wծ0 _/O⦞1/|$n\ @*lЀ<7o橭?!).cMxGٻ/;_9ț[0F]0 3b&*$3ʨd$b*41a((~sfש׋k]*zקދ?|i`T|*۷tQG*co^}lْ֭%KթqDq}w34W\qE_L.[ti ؞R8߽ Bo&g>>ָ~\'s5ScE[GL_|$AoV}WԂz_s?>D=C-8MTq"*"/| z`r "KS_|Ŵ@osp3{+~q,bH<PA@>pފGʸ#t~?ާۉ5@_4PA@>K/M9cǎO8A= BHM%>gϞ͎9"FA~ 8q}}4~a{ @@4Ι3';}tvA=*VHpV00ҊST_g39~J MT8[=wI*0T@@E@PPTT@@PA@@@PA@TPA@TP[lٲ;Te˖,ݻ7:#~TˀhѢӟtC]vYV+**0pD'W+OPPA@ gZbƪX}ZHTTPa`j~;TTPA@PPA@B6l޽{.]u{T@@ .l@kZz CPryT4ƟvC@@ThѢW\Yy#zT@@ gҲUⲱڴ}a* 0czՀ+N,*iq8Ȕ@@T*`N5ƪdzf_Q *t5N>PӃGՋiPT@@kA * O@{-TTP-Z4qZ( W+W8T CP۽]T@@ *kYT@@ !hqRq]gq6 *h@ ]vY6F ZѴSZ +ݛEt-Ŀ* * * *  * * *  *T@@PA@@@TPP=/ *  * * *  * ** * ** * * *  * * *  'U@@@TPPTT@@U@TPA@TP[Aщm *̈5kd/6o3gdټyݻw *_~D ]|yj@FFFYfXy@@jjECDV">|8kPٓd]bEdɒ좋.z>@@vj>6}^OPA@YiV C ST@@*TOPP>@@TUV VZ} 'P>@@TBPP>@@BP ST@@VZ} *Xj)* bV * STP*TOP_j)*I*TOP_j)* bj*TOPjBPbէ 5VZ} P ST@@;wfsV^ @_oݺ5[jUbŊl޼yhߜ9sKgw}@@Ç6oޜ@馛={a#ccc{Y f˗/h6>>=:l߾}W|[|_Lc|_zTPa~Oci|8H!5V4Ň_pE4_n ;ߛߗ]tQlٲ,PA@@>G8q_`r?oDȂ^!X*0ʋqΟ?СCLbbԧOn}y/TP]6ߨ'`ӦM(V/;&HAsLx_PA@>׏###m(?OpXu*@`_8/Y/TP#F<}W;X?@ޫ}f ={ ұ~Mh}i"_`@/G"zPA@{|%tC;wn3Ф/Iwm$T8H~+c h`͚5!|IG*0SW>qԞ_xJ0,d׮]B *Խ{" *tp\?Vqj^7֭[ H`j 5"cl B;Μ9318CC:R:bdd${$r>`v=5P/~ c@x se-PBPA@Ҹ~sF$ܸq"(ELw`b뮻 ;v6 *ĸ~0E]=K/_ :|ĸUHS_v@g>$5i7TPqUV2jXΝ;0T`m 2\1~~}Xe###{ٶPA@e8/^!K}|e~@@%9rv *-/f>lvYѣ=c@T@V@=tT֭[/ٽޛ8qi+Tm 2xK,,X@0b?Vvwfc?A~@m3*ո~:}}0#>O7|nl2qT@@PA@"|X),N!& * ұqKQθ~G^fM&~$?+^ * JS"EokOgڱ4\r"MTP@@T<-q?)_A+m? q@j]ve qP@@CcX.>7C7֟5f .k+Kd-v ׊0,u~ *kf_?я%KԵnݺHz7V_6m.Gr@E,aÆl*5{nMZ|݊TT@@>ן?~}/kb,"je>O m@- a MEJ@-v6ʧ/ZhRD^  ҇###yD۾}{_G?_sjyꫯO~2_PC_VjzU*;* cFGGq_W}ҪU^߰X֭[aA/mPk]J+PPA@e޽;Pos:(cjZPPTTz|\ٲeq\SZyL@ۊw*~:_jU6BV/53PU@PA@G#? .S/׊ӋpĪT0fsF5==~A@T@@gϞqggΜ(i@m2i@u}7nyZ*֭tڠ=_ϾX33p vec L Jό/_<_um۶/׿3롇 tr J@mfkqG 뫱fG}FLj6RG! /\0[r\^,IZ"bŊ+]]6l8_=k]&=0Tfn; PTTYl/PmO>PV%Vǿ9h˔cڵk/8Ox{hO#^ V}  *  2|qqPꉃUyޛG{yvɈRvN> 0A@PA@ۗht݆*me>OּLq}/⤯[~ N}޽'a:!uj"߿\3cǎyjE<ύ7~y\PT@@q+VG8mp`@ ZQS׼.~X? /!ujC!ZUig~KOϳ G8cǎخ!>Ƈ_M_&VZR\_l4˗i\&{]jR=ϋ4ϑ|!~.4[qAsСaYhfE*PX8{챑j80bdɐ:5}ZZkjiACω*Pt\?Fcٸ>4+#.rh&v"z@m7 P㵪ObJ4" 2^{m>l\a]U̝;X?LC+*v/APk3U (Ǻf|݈~8O~kW|.TPZ1#؅~O9RiܦrZjUԫKPi(q oo'F-h* R3V4=pU=}USP,zkk5xqaW* *u͈CX?̤KP P#W$:TULdĵt<|t%c9U"E+n3n<<>P״ZWl(^}X@@@u(,Y}k_3Si螱<0z!ujyf!}*:Tsv+PxV+ej4}^(S]tE>gR!VLCjgPH(*堙^3.]ډC@m岵buթ^a8x}a\VTPA@{4VZ)4a i l@Mh ջVV+,,-s'LP_Oa(k~7َTPJqDG u}tWRG/^fj4jtbVPӐ8J'_b?Pշ&#PA@*q 8 Ny]y꧛oYYf|gj8}Si}v*֋qS )<ٕo `Rx/y&:48bOGq jӑ_twy'?Kv5 n@R9Հ/qzZ`G=Z/?SޟaQ4VզBjO@u(obL׿[g[l+>|8;w *Ps遫f2*tӻGg/[_5#jf*Ceݺue({٩S``ۋ]4}ꩧt{W^8o`r@uw˖C:vˠO="jS P(6R1|5dccc"C)\k\Y{ 4Kna'׬YS<ꅱ"e3 rL-GFN5 *@bڟUF۾ई*:b8O ,WXA$~Zq$XyhuRrKSٳgm$@"v;C@-"%\R"?\+RWa4i8j@Hm>?>:QdN+hӛ i)̪]<ױMSq$ ]}T\J@uh~8F?ċQ~8.>$,-P.j}8iˊ?hu\|0'O8]tjw/h%cⱸza/mֺVj_YVYkPR7U._~*tRpjߨ+{2PA@؇c#1/9mPԪXhHzʜn/~]U\uU؅Ti||6 ` 9TtXVn&jjc|m645sZySwf@mj4ISW *@E/_!PA@m~PP;ӕ3SqEE¸>O/袮a ilx.%Kh+x/^,~6i~#5x>˯Mf._<ߵ#cכ_*ycW ذ|/D@"=uo+N/V\[oy8G|+b_w217Ŀ*NӀ:?%ݣ+GTP{?g? bUchſ׬YSP#KYu[5ʷU/ OoFM/Su$4 WZN 2ՀZ\Oܯf?O׏]Xׇ[bŴSPG^PA@J0|_n"E=W^Y4Z5Beru`͈d9+96YuٸLoնX\+:Kܧq}m5Qկ҉q8F]X@Plk~,Jc/nmzD4,jgѱ8}*% qj^lzʁ(}Ǒj"֊iDmPܝէqto|Pڟ HbG_v|C7b+t5gQQ,fN*Z/[gk+Fk]1f4V7N{+ѶcA@T{c*Lu;ȾկvtHX/6訙ZG8{* ̐o} O:4sutVtv@Uڧd@MOyEhz@ td6]ԺPqPfPA@U'N`?V ]j8=Bj`O i(|7j cnz=T@@=~T:{Mlk-ܒر#ڪ8z蚽?2i@M^{r"vWfPӯ[h8Zu}}iX~j~ضmD[{@[K@P:ͻTPX*V6tZw;+LEN"GMCN}Mos*bPzWՁTPǏņiv,"nZvj7XP((V\FLm9-G\>^p([qlmG?} F %KukT:9 7dvjjc7?h"2ƟqZy̽LlVzЦncXYWx qr`%}NӸ(M;uٳ׏pj\P`z agΜ4,VkSciLbիWw$VVq0PoS +8-V6ܤZZ<4kqP#ǸEθsT@@PA@#{oRx≦(Ut?Wk俈ΪӋ9zӫ㴪"Vݯz1"d-"iG(?7wE.zke1<Ɵ1^>8U9rĸ>0#u~PTPX+? 
Xя~t"#Ɵcj=8p> * *cAbb1+y>VNTX"zwb\?^7PTP!1>>~XjшW^ye[o\ *`?&Uln߾=_+x zO<ӈS `#T@@PA@$XHL#-_|e\nnf * 2bEbLX=cGׯ_o\P@@܀J/JX~o b1dzٳgg+V>9:q}om % 2cņod\sMAϖ-[;c\P@@TӹsU-[o}[_qq]*  GK#X ^m=zԸ> * Xc&6_|K_R/~쪫ʞ{V9sP@@zb/PTPX~xUj\X_CT{_ *XbCOӡm6{g^ G@ *Ь;饗þ}}s\v!\l]zQնM& *8156CΞ=qǏn){g$rۢz?m*ɓ''?Cypj׿;USq}]6-v#Ή_{PA@)Gxocz6A5 "W\q1~b15_{& *?l i׏UajժlٲeB {]_ 8q₱+2ߏq}̙>*mTPͱ{ob^Br۶mi3-ҥK԰f͚|Z/g4߿?;TXUW] N8p>0P.\`Hlذ![`AS7'TP"4Fp,6cZ1_jZ?Dĸ~\q}`ϟ޽[\s|2m'OPA@|zoo{\?VM1~4@wmSB۷ooj > *ty$4_~}{ʇ+W͛7g7x OڱMvm"TƱwygbc{Co~󛎍_}wm\ap=ciST8A:75֟oܸѸ>@^=S>۷ST聱N1_7;;}tVBXEl^hQKS:qP9*Cc)gϞľL~=~o}fޛomlH*cB"^ꫯ׿Σi|Xdq}p]<@d8~7cl7g|kI1 t9I*ЋcAFGG9sdgr\rI[q'N `N>x?_x]|??Y ӳ´)>g?_>?S^KfTq!uӦM4q}vԩIildd$?@Mw饗N|~pIS@@hѣ 16q([0> *^ziո>@E|vS:*Яn:j\`?~<;p}&o@@,^qz3!'.T:gΜ,]q} ޿X=b߾}3~ @@8+zwUScǎJ *t_|s߇ޓTPa** * ** * * `ԏY?uxmPA@^{ayuwʕymPA@{WFm4VymPA@ͮBݰaէ fV^veV P˫P-[4k\Gf-u4s˗ݵk* tjtO#S׻e˖_hQ *H@-B]re~էABDbiUϪqzmt+TPA@j@ZZDFOch9biqH˧;-w+ 0tڵѳfΗ^wyjq8@@TɀzOciF,*@yEi =Pӕͬ>MǛZZ^iZB,* Œ*zOCzOOkTPf413gm&bVZ@@TԾ͜Y4.x PE&Bg#gl*7* B_X-ZDݺ/* B_P췴 *]qbj\vÆ \n˖-ٵ^{AUq;qvЍխ "l\OY"ty˥QP'j\.bh9~a4"kX}ZLqX 3]{bdnrvt9? 0pTPA@TP@@TPA@TPA@TP@@T?hPTT@@PA@@@ * *  * * *  * *   * * * * *  * * *TPA@@@TPPTTTP@@TPA@TPA@TP@@TPA@TPa@f̙3Mh'>|8[jUsN?PA@Q.f͚-[,۽{w:66-]4?y`@@Gr6oޜ۷/k5F}g%Kq6=T9*wPYp]w5r5bkK]է @BOPA@]Ox~p+Vv VZ} 0bN~f͚, PA@P>FۄX**`է$P0wlϞ=T S+K/_(FKv@@B`mݺ5[tmE@@B*|Acǎ믯\jPA@]jE{;Kw@@B! @4i*>Ys ЬBpI5V^uU/Ξ=kPA@_j)*cǎemߟ=lC槝:uʶ$ S9sLvWOچ[&V * *TOhڵk'mG^|5VTPa WZ} @3b_m۷ODw PA@Zj)(oO>s8u ۗ * *TOhŬY&mO>쳓j?6& SZ5gΜI۔7oPӧmg*P>݀u|ߧi@}wmg*߫P>S{TUVЩz ϝ;g{PA@\j) r@=smN@@sէt:ؾ  B߯Bn **TOPP 6FFF %/[jUg P{3<-]45kVo֮]o I~[|y6w,>Ċ;w^T@@ci,Yfޘ۷/[fM6즛nv{PPcL?ŋz+;<6mڔ,~a{ * v+1W_}U>~<:P * vPlʵ"cǎ+c * )NNc#0522bPP* ;5˖.]jPP"`p-_<[j @@T@@mE4 _xݺu6.Pqp?7* j qШ9uTCb׮]UW]?~F * +OW^-(w={gCPPۗ͝;SU]w]gϞwߵ ŋg$bsOjmp* jPr[} Cnǎ*ɓ'mt* 3|F *P#P4۷F ** VTAT@@U@:ٳ6DPA@AP/>}چ ** V9q @@TPT@@T@@T@@P~{O}*=z4Kk t@CPTul޼y~ N" Ah#~}c_?   ~|^"@kqZVT;H* **j[b|*Vԟ']_9کzWw9կ~֏cU@P@@uPjD4ƿӱ{"FpvwPؿ/£*  J@}ULM?V~}Yd}it?zA{(Q#*  "3TC@PPgj˴M*]n]bQ ֏~kq+]k;w6}9ߗ#wzk={|إAF.n+[z{q;(|y<v_fjq;q]7ʯQ܏z+* 4XV0ZXuVV6 h*A+a뮧jmmX㾴mTuiz^nʸצ?o :$5]AʺvjZ2 ʿ fjX4Ubeb[-VJfnB2to1<S}mi<|zzqC"U@T! wTjl@wfV網ձFN</ۮ_.Z *  j42ubl_jڬF1ـ FcߡSm;N</~/^:  F7!o'R5]smtt@}k6T@@PA@/aƍR;וzKX q::[4M4{=+V4[n<95z;}_Rm\|_?ت?M@T@@ij tR>W3G 8=W6*  i@ 3PwXas4VV./* @@Mʸ:=#xPGy&~בV./* q@M~PӀZo%fv|Wew bLZܖ/>T@@T@@퓀VF+Y5YXP(hTChzѲޘ{t:j'~aP7PTP(QQD0~#.Zq+q:|p˷QuXuk4cLNLs/zcqUT@@PA@ЀZBFG3OXqYu7c5:> qǖ> !7u7Dkww (n#W~xʫ`;|_K/;޾ cz뭺LzmNܗFMC}駟mcMPP{ ,fD""E`wQ˗+E"jEz#.sܧ8Q z[Mo|-NT /ZСCGkߺq_P'__1 :=* *  * *   ** *   **  * **  * *  **   ** * N%:u `  * *   *v%\}cwme' >x`}WtqxmSO=  j+|<}"@O_wW  j+"Ջk֬XXK:xPPU@mRx{"5ҫ+=s# :4>6Ӏ珨FX*^@U :ɓUj[ox/By~L'gPx^uhxھ} PPJXw޷PH)?O]* * MƹXO!^-n/W ck|؅@35+^#Λ^˗o7Vzj簙zrT@@T@@PfhV+*zEꩺ:1Ժ9Rr@H[:fw  0L@MS+ v"V, i-V7#V#VƿoˆtEj۫ V6TPTOgiNOof_ZҀAժ3k5@}CDVwPu`rE-/o[u fT?4PA@P:6niˤOG@Mbrܬ)=HSimegzzSY'J% *Pӫ̀Z݃,պ|me_fGTPP'N4;_Im۶-_yUWM Z[1,X`6W^u69)Ի|y?1Bus*To *v%FFF&nԈcۉl9Z[}# * :rt@MP4CjՊTU@TTujd@MS|mX;P Yc+** :QTF@5L=f3݀fLeQ5VTPTuh5:_z2UFaX (v262Pu;Z * *PK`FtjfDqzzPYof7jPPU@#:QUNE8,ZuJGr#cȗN-ߏT74tc(|w\w<4p^[A@PPA@P^L\#±V< W[D_`S.[VOaq;˗e>Ճ>P  ǏWG:O)-ܒیe6 z˗ջ/.ş[qҶr.)_=5/~u #PPJ~P# o b7 b** `hZB_mh** *R[ߞd|_@T@@TU\A$?,* * M6݃\@T@@Tu bolloυ  * * T8PPT@@T@@TPPuJf͚mٲE4&|s֯_/* Hzj~7{GT@@T@@]bEv}Fn T@@T@@ .l* * n޼97!" eW;v {׳ٳg :F3o^m6~ӟfw}ẁ *C8x;T8y @@TPΝ;믿^G{W]'`j~é]LәkLiojlB0ct(ǔ-E=(-8aPjRJC MPHyIr#gf?Ow?"t?kwߵ0 n?իWwnO[={A *AG?b ]vW_}uөW_u *͛oYd! 
xm۶4:u *6nX|ӟ6t֭[[v *_`HPA@;wW\qESO P0fff Fi{PTطo_^>OT(ŕW^Yr-ݻ[,Ξ=PT/B+ g˖-e]V^`^( ۷KpY)ɁLT@@v;v<K/vw=`5^#/6Qxyd=83:}G ):u * vȑ# >1n+G-G_wu: v]x]@lY5g_(y   zUa޶圀s`5OP"DxTӾ}^{8{ @@T@@],fѨ*СCӧX* RQcD8%ҟ[w ̙3űcPFPY>{)>\?~: o"R8rHITPY3H?w~* *t OPq *  ** *uTTTPPTPA@@@PA@ **   * * ** *   * *   ** ** *  * * U@@@T@@PTT@@TPPU@TTTPPTPA@@@PA@U@TPA@@@PTP@@TPA@PA@U@@@T@@PTT@@TPPU@TTTPPTPA@@@PA@U@TPA@@@PTP@@TPA@PA@U@@@T@@PTT@@TPPU@TTTPPTPA@@@PA@U@TPA@@@PTP@@TPA@PA@  ** *   TPPTPA@@@PA@@@TT *  * * * * 2dNJ[;^v ²xmb?-=쩓MwԈ{LPPTny{Cw^__߳8<1,^ڕ ݃c@@TPaFQ4՗54iDz5bi|Gn+~O)6PA@>@@TPaFѧ *j)* F} QF * bB5PA@P>@@T@@(TOPP1 ST@@TXQF *`ѧ j)* *t= ST@@B5PPn)* *t3 ST@@j)* *> ST@@TB5PZj)* *j)* *j)* *j)* *j)* *j)* bF]ʃ{ظqcjժs_xǼ"~glܞ={ * [n-֭[W,ڵkחq'K MʟkK.˟;'% b[D]tQݻwT@@ԥ@Nc[:OR+W,a? * .G"?-Ξ= z wy@@T@@k?`@+=Slذ0H5.{zallq<* v%\"X .;^s }4+V&''߀ 5k֔Wq b@M"Tڒ[D#`Aر(T`(jkF8PA@Զ HF@~ I{:6&q T@@mI\,h,-[7Nt]IcԔP;+W,~򓟈FTz!@x| * v,j#E¾}3?'=/9PTZa dzzza4jS_  <8A i4Ą jg T1* _݁ ** ֺ   ."X@/T@@P:;;[SäQvSSS͛  *0uffhՀ}TPT`j$M4hBi$j5kPA@PU@614㤏 &'' SN,Pai4iLͯVkTPT@@P Gg* * j* * #P;@PA@U@TڀE+5_75FzA@U@TڀW#jJTPT@@Py,Ma4?2,Nj6gPA@PU@>V#j3TPG4 @"P9Os *|i{OA@TPPU@T@@T@@PPk ڍ8dyVMOO;&TPT`ěgmjj1 *P#QtŊ%#PA@ԡ _җ-ݻw6֕W^Y~/moPKKS8DA@Ԧɓ=#7p9~9rƋ<öm[o-_~mug]mmܸ}|?ַ^%PSPujĦ; m__Zy{''q^V*}/GPT`8j> KPS#X|,TG>Gb4ezxIZ WW\q*j5O߀ P(ϵчRP; PxyL*jU\ *bR7Puj[ M1JW  cU@Ԫمi1{* .J@M)PG+֊ݬ* Ž * .^@j`4uZ@Z5k8pfhZmK5uiugCfׯ w~OWW "Gʹj]Pk;/~ o%"PKMTNK54S+k6yLjۅ4}"ϧF_-^XR]R!FY5zf#:2vޥ>F6 HC?*0*#P;a* KPcdiSU+B6lVmXk-kN_z#mwTbn֑ͷiPyNuɆZQ>7Z~B@F=* .j@̓b_d'W+5vRV7E@M_OѥvP]ʀ{׋Z9A  P\jP5^vA Ջmw.OX~\K@T@@ T<) IKPc}ϸM*&g7Se z:v1@NǏ4[U@T \5pNk@X.kU@]KqZO-.V@ ydM]k=^P;7;;[LMM\G *B@5OgN)9#P +_r@ 1+/V@cj]ǃoO@X<6x'"zA@U@m*DFZSS+u)9f@Ŵ3vVh7j{]^kF* vPcT@@m+m׿~Nٿۏ/~q1~ᚷƯ竡*\ݶw׽Mlslݿצ֮]L+3,{c{Z}m{T`Xd166V|eDu9@@U@m)pHfV"ӦM:\s|mK/T7B5U׿q7ZOV~ ~o-e@{ʶ :5Fk֊tAV-ng>EY@Coɸm<_lK;Q-._"zz_ *PSPȬSkyj8B$VG ]ZͶ5zx= }m1W/呮s/f'qNF.U@ktFX@މuQBRmmG멦uU7oTPBj|daf1,*GTOֺPzfa,nW}O%!zHU?YzU_6Zѐ\)mk6;굗Vk~ܾ|C޻pgM" Z[\=(Ąc$PA@PQ"PciwRſ7TPT@@P c_G-PA@z *0*5;550rrradi>5?nS+גF:VTPCPcdis14j@VoFF=** qP#tVih#U}PA@P` juKJ׊1_@TPT`(|K#PuMiJ *05_(:%?"i6m6PA@P`jZ4D0K4J4,l{:m6PA@P`j>ҴLpZw5zA@u(I#5OCDuR@߼ysQ/(U? LX4?k<OA@U@Tɀ ** *  * jKbrMVMOO;&TPT`ěgmjj1 *P#QtŊ%#PA@Ԛرc T`jNQPU@T@@sss #OSPU@T@@Pc T@@PP3iS7  ZQq1)** ffggǔ~** ffffDTPU@T@@/bQ[5==TPG-=z` {@MMTPT`$FvTPA@PU@:* |gqAXp/  ׯ/~FoT@@T@@;7F}c֭[T@@T@@ݴiS/~Q4\uUŶmT`j\).411ŅVX"R :j{3h,^@>NMMywAQfff|cm1 :Bpq;T@106* .z׻h"O=Tzt޽o@P :Z֭[W<#Plذ뮻@=~@@^ hmxP-vQ7>hxΜ9 :??_NxL5 _<V#0hPuQիi oM74 *jD]v M0fggɦ랆89{@@TPEXYjUSO N0Dfff5kִO>T@@ =z?^|;`|-R8 0PT/B#qps7_~Y>P}-S'7 چ-[^ziy8ܹ'?Y\}?V<5uPPxRwuWR/۷o/^}U{8N0?p~.+PMgΜ)D|ː.2DT/xPF'|\Dq}v8 ) * v~2Drj~EyNJ|?--e]z;Q^Ѧw( ڭ7x -B̶mۊ;vtrXmYauiLT@@Qj1}XP|. ĉEk;F9SSS-{A@F6!U@e0ą^uԙbŊo^ЖkPA@c6jնw^J@CǏ;F:n޼01* Ә&MHUœ~S^-*Ѧ1R&@@-.GNLLc j_8uTqı:'? 
8/IM *”}NA@TPPk@=PA@@@Hk8PA@@@MOO/B=z_PTDQ   #P4?!7ov * z]ހi'"zA@T CP;% *v4 u@T@@PTT@@PA@@@Tԥ233S׼`Xނ U@`jlG <%l&n7778TPA@PZiiFQA@TSSS.-R * 05,m5:)ZTPA@`hB 7є  iZӴi *v4* *:?jlKPA@T!]D*FrG>R  q!t!Yc}PClKuiUTPTvsM:VGEr1+*֭;'yPPA@`Qmܸ񜀺rJPA@Ν; a׮]  iPcӱr=wڪy ąP~s'sŊo]PyD5* @bt%\rN@}׻Uرc!;vlj *,͛77 K/-ʀzȑb#gL?c[:qQ |E /}9TPA@`^F~7PA@^DM֮][Z~}k׮%9MS۹O}TPhH 64"Rѱ>TTPk[ 6mˀDH~ *^eݻ2NNN  @ *v4 u@A4Bj+mNq * z]΀#NhؖvkPA@voGwԳN|^{!ƚLO * ~ 7iqo_{u@=wWwTuX`j\lJ@T/s_?{gqӀ݃)l~x\9PCZtzzh> * (zv""އ>_5Ŏs;ʏ_}ʠZq>`jZ4͛nDy{TQS1.4-?8IҘF*0bPe)PC<6}T0 S>rMF!5>Sc F} $)4 *F} T@@TB5P{ѧ *j)PՏud͎A@B5O/x/>ޮTP(TO~S* QF05knnѧ QF0Pdѧ *j)* *F^i)* *t3 S5XX ӎA@B5 ssso](# *j)=u||9#PA@B5 hP>@@T@@:P>@@T@@P>`Tj\|*~ PsѧJ@m(VXQ$},>?;;TP(TO:33s^4m$BTPaG} (͛77tll<* *;w7M~x_NOEظqcq7k׮-V^}Nz/~† ʟ8fh8jE'''mju #xo߾Xn]qwﶳXf{E]TF8[n-K/.'38u~~~!NMMqQQBT6m*V\Yjժ;۫ 5!֯__>zGsVCӴF#O"bTPaȧG .9R={<'O, swoL>p@@Q1`vUTPA@!t^x8y7`iaAVZ>gP@MYt||~PA@a CutROӁ[?(pqPME@@t~Tz[eZ?@ڵU3sϕ?-o@ O[Q_@jrrq0 °L׏h4{yR`Z?@oeSεQ{)1s1=?F֒im} €+ƁE=1ȧ:4@{#D8VAZ5ISiTiq2/v~W\Y]?FwF|gD! 7`Z?شiSqmI0b˖-e<<} *,4FwɂiF˗^zi@@TPGcgoQkƲ+"Z<8:>>^u$.j*'t9,#/Ҵ]v١#c啸$m=Pq/DÇ dŊo ^Б)DZ -6]iV*zoZ?0 袋_]@/ J4#P7oTPa9_{EFa￿aSټھ}{9mW<š5k)$1&&&? *,tOHXpզCiÆ =(}_-㎡:J5Fڷ *,Ȥ8М)N8Q9sf$=E u?~uPCa԰|8nTX{)Ǵ}9MJ?m۶N<)2;NTX7n,#a@;8_eaw_ 禦LTXNt}Qt PzP+* "9xt}:ֿyf;A`f@U^APiOCL巏T[^$t _(ܹ0: 3n%Fͤ G%o =Ͽկi 6`DbV3;;[0T`0:(F[s*Ћ֭+E$,~^ϕDovtQ81 ,O@HFmTP,ÇMoPC0Pa@^@%TTR/N 3 5Bi7u~~B+Z`0h񶷽m_\|_PԞW,*`7\F9kZM6P ?oVj,w/"r}}vU@ZzTPi"?p4cd˪UFbZ54%,ݍ_IQ@m/WMkv=~ǩfm7 y g#0Z .H`z{޳?z3=b?c^~6F˟sK3/=Wͷ#>qv}[ ܧQDOz,DU@]ѧ,9cPA@e?.FTDZkgXU.1LzbP ͞G^Dx<֚PowoIrZRK'k{FۛOYoL^F!5>辵q'&exzTٔNU1Y6 HٺukUOj>2i)֟1.  2"1.<ҥY:#>,H|Cu|c]5.m_1_KXo{}di4h5֊r [}N:Q~T⩀_0F%V׮ ca.ZɿEkixWkOk~# \R@Pc Yk^TPA@eG;v7걓Qy7->VĠQt7m0FLip:UW5Hz#cs6j4G){Ű$ nFTԺOIFƪ`jf=J־$k_}Jkfh͍8fk&hU/ݿ&ܧQFC? j/KM>o q%Tr~İo}[=X+SD)iQkcjMu'^ѫNvrj~Έf5Vixu`iwqGrʁf7:OF|4~h[CdoiKUG}c~ͷz͖I>f}:J@mA@P)&1T1RD6f؀ *C+NBt{k|EsO1bwϧzy@~.űqs>ۑz۝Im:zj}-Xi;[yNޓV?Z_W>z^|ki\ĩzRP>ylǯGeZP;ٿ<Ѿ>6~_^k>%Z_[moGmS[}|}fu]FnӊZ55:~W4&Yڌx * t;Su"* OZ_[5vO!6}І~Pc Z+5|FX'05`;nw|/RFN@ͷ'AQK&gⱫ>FCmVDmI@P{%J3Q!F*^c^Xl$gW^lvwO%6{Įww};Zv>iuj~_<}#Yڋ 95Dhp}Zq\( jŶ`* tkgkN_Z=臀^59jRvuZ:og t[ˆiQ05R>݀^PګUG6I}r=\Z5^$ * RCl(xZkKP9iZZkZ`Xjuꚤjjuq: Xr TF5 ^P<70aYDPiV*ong }̉ w1.[6vPU_;5_?Ja}ZZ/Sjډ5Q,Pf^[_u$N!FlgZbNcr|Znݺ/{L{3^),CYnZϳQDmA;hj«NΨ"*.|-\u ԸT) 20'&&z'hF.1M@Ojit^_"#]J_j2;;[6ɽg>ųKR));coW1: =\̪Q^|h9jڦx!ZvjO>_>3:{$j5@ۚNמ?~zׯffکtjHogy@@%S#J},9RoU??posN"G?QMv؆V_Fۗ<{XH]k܍Ni^ڕeD][A56c[~Ecq>Lj}Tu>ε^؞-qMĿO׬^W?6H@]1W;EUko *={cjt{1dĦՓ8U@Oڪ'+tT5?iti@wҔ>Nڵ˴~#羷P"c@M-~G8XGtjtM2bvXvi{&=j<}^V*Tu~jtXc8ީ>~*.4CԸm|4~PA@IA\'\8QZ4Q NX笞8_/6 a۝NrZj=WiTOlCkvsr lZ?􁸐TjD]:Sm1bzku$h'Mg;_G+QW@P[q4Mٯ7ʴQ@؟ *}3]?t~''Kzi0l~?P#B@v: uj* !8qa |Z,Kg;^TU@T@@M׏1y)C?i1:]aX_k)" *j\D*.:e_ *,t81]{Z,UrJa Am'vRT q!ZI8^Tv~La15,X´~X|_EvC:ŽQA@:;;D#F h"nb ʒKc ib atevB PC$Ma*X *=QLa9]VB P͛F6PcD' ׯ__\s5CbW_3~gQHPj>jj TC;>& K6]?F?fpZ[c8*jT@@e#NÁ0C`Ż>w *׭[WNC6u:?G? R0 ~qVa ? @ C%냀 *.jժQFB7Y\zŃ>X}?űbmއ>xoE @ DbŊRQX5?T@@0qƕŭ =ˊ 6O:WܯYT@ N1~TPK˵Q_x_CLā5k;v= tЁnlQiPnrUZoi֨}' ,~#nrZjFY ~_^lݺa8gϞumx?\p*jF6Z4>cj^TT~u]w!u˖-ڵkŦM#p ~x饗F%-g;8o~b4-К3'^[p*jԙsFs|jj? *,ӎ:p_}WWΝ;\/qPs)+(򖷜7M*>={ST3I8MEq* G@M4FOFHVQPA@eB_8z)kZu45mKx={<{p'#'Nppm= 5SSS?g7EXQPA@eYڍG-,K/-xCP#_;25EzTw_㝆Y)t/\T@]fZ\р5ܧ뛇ع9_ y7Ç/XM7TNq/74XFn߾ꪫ[nf~,wtk,I8PgЊ~ڗ X;'R??>/~8APx]wսm\ĸ_ܿǍ_ۥxidܯ{WZϟ ǯjo|v5]?0]zc-o_pAgPu0h=U@TPQS~;`uMC;v,6_P֕z꩚qjj ۑ?_5zvN"OÆ ݻ;E ]q-Y8PTU@TPYi19tTSrD0boD|hnnD2?QqUOw|T/>*#ݯ\狏}cŚ5kʯKubd% jؗTgoIGfbLۓqV{1M_S>#6iX#?.HK|?  
* ,FH*);<2ƉT~U=L'6)) !gm5^mՓضAio/.b֭ގq?0<i4K>3&i?.%ymkcV.% PP@."% *3^;gZL~_N|fFT'>I`}`[o;ZI[ܶ_NL#q]M׏iA@eqj2 UZ;՚mQ Q<4Tg_4Mڎyu;KTGsmTPA@e8p`@lnn:O}^x4 ꈕF3YѸQZqeds=W\s5?q8=x`q);xPY[6f䑴ߋH̐~6Ԭ$O#B5Pa 7o.GR< 2p?^Ĥ-[W>׾V^NkIRkKNl'換Z#TrͶ)0][o-?]M׏3 *fz<,_q Mommgeu-jPa 4Soi@me`j୊ xqRi8ȑ#ٳgA@`ͧW/8Q4}:zcfTNϯ~V g_CVTU@T@@b1:D8iqe8Ѫ!'a'7ϧ'|rctB<|FI,'UW]UtM嚻L?s挝9t1zUVJ@XX v2j+vt>ejE]U@PPG@Ӆ~^ȟMo_rdyZ5x\Xkjx]p닫ضm[4?#hv< kmq茀**մ*O+XrNbzVdXO'=NӀ]1wXNl"(>tG*j[>W@PT@@PA@#O>gZ\1}rr???dNҕ{tT=| X>rk+VO aQ5KiebXnML׏ }1ClwfYTU@]cccŊ+JSSS@@wvu7Ϋ1cѣn-] 8AJІ2h>)!CATR\! FS*`A@&E.䞐7}=3\sy}~|3TPW@P3E8OXqF{ t]C!sZZR&0󪗍4^7].j ҥ ѸnYcn[Y+PchD>h8Ç Ԁ:5ƦꇐcNP<m_*Pso.+dTR [ 7P\}s=h' ]NeXa2g\HE4cFDcO8`w}@@Z Mq2VV3h@Dۥ-=|ukRZG@PM<)w T3ic.N.~[?K#"y {)N8apE C&鄌 P=G"2s!j<=w~!uS[uj]H?@@߭?v7bt?nLqd0b4ѣG ʀգ !S>XP^3>uR5uuY") »b}ۭn*>O* -V1Qq ]T>P$VOVѰ>bim$mcd  (w7 Eb{'"bw+b /0p8} +0 * +Fa?O?OM];]81l=j@T@@PPYl[/Otd0￿ wuPvT@@PPYjG)]HE7a>D+[ou5?yAPU@T@@evnq£.n=-R\qO<1Ԫcǎ<ms=W@PTX6yr}s L&拓oj{wpO>m<&8 v%\" *ԉ6m*7W^mwŪq_~yxo>9ƲeˊzJ8wtU@TPA G}t&'2+N80͊+G0wb8j PA@>w$-_Gdv{)ON$[ÄÇ f͚u(><PT@@:EL=XczΝS;yxGUqRX;h<=p >'mF@PA@X!?TMbumXm4U8#mqif׮]* *9up ŕW^Y CUc09r`L+W$qq88YfvݻTTt|'K.#D8m;Wpew}`m޼HPTPL]wUu}M$WUznjwLI^a70{]@PA@>Ŋ}mlƱn2LF Bo/r0N$[~PIw6mjTTPldFPw@rNLo]{8vVB&#JvxmUK~ ,K/x{~< s* c[[o5oq_rQ'Zb֭CoCk֬)-*l;;H3V *0w=!+b/~ed 7&qR|pp: Ǎ7X<L 7ihZ  X:|ponذܝ~T+b+*={4?~qN,VPE&NWPwVM@TPaw돍|UV]tQy|҅l۶+>dn־q4m. *Ln1^zi~6?Gޚ$PA@vyLct ssPΧ7̍_e܃o_?>ϔO||xfcT@@>uv7bhmP/첹Exf+qi1J@eγv|xPA@tӿ+gWw_]WZ} 0߉'86**4>Y|PTܹԊk.J@ea=嬭8PTـ:jV4n/kOt^ɭE|{ć^!V* q7!p?hPTPA@ 5bilH4.bkU| o/7z词/* jPQ>O2M+H/_>/EXe[n5  zP&VWVjэ7СCEsq؀l]ܭ: * .+AchVѺ^zuQwXT@@Eՠ]VFwlrvrUJSTPPaljR˪|7ؕJmƊz TPTP$P{> l mePۊێz * qx{'f ް5_?* *,l*#H@1ih׀|F4yPTPaW[2-&hOdͼKzwo+WkSfvjXcfkx~je޸^\..WΩ=OPTPaVէX:ZV" "B](V`#k>E@PA@ ^j)V>o) *Xj),*TOPTPaWZ} ]j)* *L*TO`P>E@PA@ ^j)vէ*0P>ѮB@@&xէ0UV" „BF STTUVhWZ}  S*TOPTPaWZ} ]j)* *L*TO`P>E@PA@ ^j)vէ*0P>ѮB@@&xէ0UV" „BF ST  S*TOP^ *L*TO`P>E@PTUVhWZ}**0P>ѮBU@TPaWZ} ]j)  „BF STPA@ ^j)vէ* :[n kז.yPvbBVmܸq]۷?񏍳//۞ݼyswcc  ҿ;wׯ/-[VCu`4.VZ5wwg+V(6mT:tLKW^]D`Z"oq۶mT@@v%6 c)6"RXz1͕+WsNquי(k-GMCK ,}Xǫ\l! ۼys/|(8vX^x8y 89O>fL/_8p@! ꄊ /x7l ]p ĉ66`lR|3*V?޽{˕  NN`bWNjD&_~3 5ٷzK@PIбfC=TtMfݎ (5\|X9aeU@5>*b۶mml㲝{Ŏ;ʉʹbiժU<fw]"ѣG @@u]ܘQ 8j.byW /p^Hc cS/nypo^=y晹qv߾}* 8۹sggcB .7]==u]S U@TPΖ-[ũSJ1'8Ӹ{!,+V>[獳Ӵ U@TPΦM/6f9] m-[V?O˗/+T@@ujw+1 $I]vOƄ{+8#8zo{l|]@PA@C_|q)Y X8`#{ x!' :AÇ *&uNAiC@TPMꀙӰk!kqk' ɓskэ{PTPMIq&qkьvPTPMIԽ&;`Fa jRԁ0 +T:-R}+ Ns=jR'0 I_/韖&r6kl2T@ۈfR'v|[ߚCcJci|Soo>mV@T@@5{^{\ ^zIkiYGYߏ)^tc jR7RL-ƪڴ%Vԁ{0x.ɿW7}ϘlPPԝ8qE裏M>;PbŊ/~1~`'_4Ƹm6cZPTPM&YLҪ;vEʘ ԁ31. 
r}cZPդn(V 3)U61) ~je2iIkaƤ3V6]zٮZ>} Ou>X+*&u,igr;q,i*֘ u qiЀ!g-\jxM-ִ&&u`!oX4& zii; Atx9fC { qLZPA@5cP:')4 Dzi M+UউkAVbRɖ0@I8Z}.QCjtIAVZ#?DTc  Ա(W'yX5)'2*^uP&Ր9H'|v5yčǗOx`1 AǤ|ϐ+^AqB淕O4q\o+pո* jRG.x&yl\5MIо}Zw[cqmAwĤ:6Rz都roqBCPc{]A0 I#=FX?'hۅ??knxDu5ZY?vC5c5v~b m إL{aV6|6kT@@u&'u䓎~'?|ev*~:qdry&{0T0֎-׆b5wͮU>]rL1X+* --ʎ^XQ~L"lH&8XuOSǏIkkЮcRאXA>OUgqY@5 &uc/~1qiNzE~&Pyԁv6|wꇕAjWM+mTc *&u)_ҏX*ZjRZWV 3Z=BX:jPU@դn'tvς[ B5?85jRZ=\Nݸo@]cg{uPV@T@@5|ʣm5F,c5\]4n:NlbR 9ȇzbfdu~bcJqP@5 *&uSy~_?t].:E*A&J&yn:V #;;y_]V:zFZٻwo=XKI]xL>'6-kTT}kˣkjbӴ{~4D2#L&Չm uUZj?p _*O׿i8ki1>}xcFC3&UjЌw!qcajPU@uDǏ3dmkO~r^D?Xʯկ~u2O=s͋ko}'n<3I]NߔZjǬC1%^uLqigfUkh~6qtݛoq sbiZPTPM^+}dM~\m1&Di#1Lx 0mzu)=߭ɞՅӰ?qFwkicD1)"j|4Չ>E.q  jRdb*uӸ"'&ZIA;Bo*&qUǖ#:)&k7k)ƣQrLq5w}i|t{vA5k5Bl I`R1&wmq0 *&ulL꺆O7;6kc jRLp Dkc jRLnpw6l0Z@@դI049V6kc jRLϤn!ir6kc jRL_zvibX kTT:`'uNW NöK?O[U@TPT`&u 0閳cxTPA@PiԽoNaz.1***0 :OacU@TPT`&u OͿw'Wz/***0RO,m0Z@@դIN~6|iBAc-`PTP~Rw19]&u Rnc-`PTPM R<Ɇ ZX+ 1}n9㎩}n R;m`* I}{߻=);m>QO m`* I]O~ehqY `ԓ!5q7k]>l9.;* LRKԚ& 2R𮧞z* LM&y~q)TU@I][H÷ٸX zWV:MZ@@դn8' xNBj6p0BoU^c HG< r<-czg^׽3ksOcRW _cc-1X\  -j@vgKLm+.tuεt1Q.n3].n'n\f1ZWz~swנ.twy_e璘u]>8lİ{lٲ۬{,k=NŜԥ?vJXY҇H>J0\z~?L.3L@C?Cϥ9 * Ij?'ʵm@VbEk{P{|Dt~=b1(U_I^ m˿mܶ4Vo{=Ƕ.B1N4F(VNj>t{l{ϭ_Mwn~hKRhL}6Ocv0\kzlbm* ԘF}.~{Rzi5K7u)M1'Oz,FH>x1LIi+x.vdi9zz^un  x~9ߍ^֤P>lW߫=1>kTë0=m4W%on<[a }181}8g<*{C@PTPMniU$~B zmcRug6N^+M ^so7.Ik%x?@6~WJ~.m47痉1{}H0Zao]kVڽc'Lq:Ӈ:<,s;ZP. # y īi2+.<k@bԶ4y@*6̃I=ه BԮj][\媿ߗ\r@U\dq?V?fc] ]s{.iA:> vc5x0u!?Ouw*V\W^}qYV@TPMFxq<^1a ckRTP:vQ޹KЌhZ}O'6z@>C4  MA@9DF@/V')a&&u`g{n\˪ʸ\<~]D])j.{>ݾPTT1mMz@'Tm/ǜ煈uqT .E@[]]kq n,^ewzmez܃G@PTPMFPL~FJs5.~c:A]Ȁ%X+|*a^QLLX;5;kӱK1Oz=դ](jRm9H@w׏(~^'U@PA@5a@Xyf(j>atUȰP*ϻ8-U@]o0c$ԮwW/PIĠAW8>L@um!8<?N * jR7TkV U|hem['6S=VC7k8aT@@ul&u^R }{;vLMytMO'&Dm=o>o~Cx{1٩~^Gz]/W~&mOU_\~;Mv.=ζ4qy,b"K%ύՎ]Ƨ{|u_'mϩmu-קQ<ƶ6uf2TPA@5KشiS9Jl~H56o5Ly~93!⶛izϷߟS1NU_?~sL8{2s5c,DAxoOMq>{x?]Dzxm^U.in  jRԁxmőVqX+* &u`P%* I`Ru& Z@@դ0c-TzIc jRԁej`x.**&uI`* :CPX kT@@T:@@c-`PPM:0ZPu G]vِ|.^~eT@@PPW\Y<6d9rKq #JU@TP'Ě5kkژ|s+6m$9_ P~W\x  NwqY`E]TlݺU@ڠgz⪫PT@@uRl޼m #r7>Ϻ馛TPA@۶m+Ş={}CS;jժ"TPA@f۹s ZOZ\s58;cCҗ=(]}c=& $Yre-Pr-Ž8;r ʏ|# o30b¹[رJ@@`Ť^h Ǐ!g`Ţ㟾* 8ٟwyVկ~U\qpI"07~qhaÆ ޽{Tqg~Gl Zu]:@E?Ja\и~ؿ :xL]t̨ӶI]Ox};)V^=|P(.' }߶ 3/|I4VK}2 A4DEPA@ w]8pK.iЅ#G!_\fcC>}Z@PA@5I|`|x'zNfeˊ_ރ`F<5\i8  Όw ^"fN#,W]uU}vC0mVc_9=zT@PA@$]w]i _^wyg.>|,8W^Y}y?qgy8qqvPԙrСru] a:*nS<է2qe˖kQaJi"*T@@uv1?! S$q뭷vЅcǎټysYӷ~x:MOT@@ufOvq7 _ swuWx'^ӟ>` ?/֭[;rSPA@yk֬)WȘdq|O*vkq.'Jbi{?Pԙ>Q/'ť^Zp Nb1+b`2o\pAǛo} oQm?{xc;* 45V]x{^hz:B0>^zbݺu駟kBDr`1Y]_@TPY#d~ ;wPYI\8tLf59rطo_nOXc]'r4PTjQoFA* *ɷ+T@@TرcGQ q.PA@qyەsv  l/]rJPA@6o|[lPTPmCO?yt׮]* *gŊg>ݴiӼm<'NTPA@`WZxzUW۞|X8TPA@`W9r\eN  * z]PTT@@PA@@@ * *  * * *  * *   * * * * *  * * *^TPA@@@TPPTTTP@@TPA@TPA@TP@@TPA@TPA@PPiiӦb˖-ao]mxPA@}2^|ƍ{ PwQY8KR DE/ KK@ybժU˯_ *L*&jtٲeeT<{sNv2e>۱TU`)*0Pe)f֭b\Kxq~ VZ} 8*|OIǷ˗!Km' UV0V^]vm=mܸ^/cWoZ0+EXqz VZ} bl)NV7.a2<ǒ/ **TO1bb>nHM9c5խP@@&dէC@m S? 
*0P>`jW4?0a2Kn\j_?r_=@?3]]!=V-_dVxqJTPa*VZ} t㐶tGø^+?b61P#xhm>vo\mB  S1RԮ)v]u5_}Zzvm7)6W^ *0P>`j7u ]g{$Q)*q_TA*0UP>`wᯋv|Ef:qT qvm>zȏc<֭Rr@@&zէk@]̓HU^K\ߕP{cUV6LTUV &N\tq yW"M~>mW{TPaWZ} z.܆ XSM_>=mn@@vէ1'Ur©|Wzt,C]@@nէ1 ڎ?f:J+KcM @@rէ1阡yČظ5_5<#*c6: ԯByubUf8*nܸqvǙ?*8 @:u5VmVܹӶ 0*TOZ1A#Ir/T@MLUXr * S&!8aѠm]F$DDSmoǙ?zUq=t^RT@@]4m͚5ŲeJsNށO|[늵k6m((,c v`z!'Vb|;)~ߔe˖2/>h@vpRHPui/ ;vc$j|UVq7 1N!:j8fT_ӧ1!5V[.RT@@,?䓢LǏ˿XA= s~!@@Ջ*VFcD1 &מ={ؼy f^QHPA@e~e?VO0=Q:_}Y 7 *̋q(tk׮QZ~UHPA@=+V({1 Xٸq fҖ-[B* ꌊivۇpu/l\0s~,X@RTPgȦM ?a fK+60`Lm۶d7RTP޽{O|If%/Rq%ǎc(N&H Xzuq0c/>`q!0fPB X&&yU/xc)*K(I`jQ>l$R* vosէ0~_'x Ֆ-[SPټysL@Ô_~y.he:6l&P=`Io:)SG[0XYVs Iم?}<<#h^:$@vzREǏkO=7<|,o}{vUkV>0I?9NPY ]VRVauEfǐ|z̃5g;P\HsʶM!@@u#lXM2*ͥӺգMSs:A~B}i= :UUǦ2~{#*oZ;5`~T@@]䀚κ%5Oud*t QvuMXɛn;?, NE@We"K@rXugu ~PC@f=Vh_gZj~] ?kz~ZJKԉ  rBni 3U@P#&Ns@XmIzV6Lԉ2Xq9$  P{ sƀ|i*tKn#_:k*:5sw5kNOHcG淑4Np5}-ǿاM+py =6fs5Ա yp^籧RlZYw"|1P ]۞ CcmU Wxf3Zs膴Z.揷g?۬L;7ǰxHvu sՀY~ML+?yQnƍ#5mI m4N*SPS([Kc`Դڮ1[5J7kЀVKDea.I^ÀmuUt!gו qaNx%!?fW]"jR2&*̺uB^OR@ͿkhXe<PuW鲻|וjjxWcrFPߓJ ռ]f3uѶBR}uPI)Yzq>b: kN ϡ^j[: :\ B*  JѦ]Od5ϹԴuj{ {tT-Ԫ.PY߅~#V>Fq;]ϪOn#̟߯o:uSeϷPj{c?` * $vY)G֦vjH'~MJ@;P(j~xa&]BԈN5+pɜ!1oT/iu-z&:x@M}gXO@V"h9.d@XT8ӍW9# * .H@Oӏ-t@m:T? -d@w 5?j7.[ mij~bXZP}مmOk jm@=vXgG煪6˖-!^׏6]y >.9=w1cu}~׻]޽{T?=5 * P_}yO46y PThwtB* J,g:52Mq*?e]zcc@WnFLԀZ}Vuvym!K(Su+::c6M6]nz󿗶?^_]* }_]M$"P[4V ӊ<\źicz٣i"t>* km; B)4$͖{-q[5rz1[5_ߺXnikϴi{{?UkӲ5.0 ^}́ *Ϊ>B?q0PɸalI="Ym7AcX,F@*vwVeTߋO?C B*  L/{zRTPX@=r o}P!TP*`(w * !0m) * @ * T* NT@@P,^-**E,)**) * * -:t` **   *   **  ** $ԃPPT@@T@@PP/~0`s K.-V\)3nb* ˋ{W4f|/֮]+* YE#`_\lܸQ@T@@]~}q׋Fs=ضm dŁ[]tQe<Pԩ`bP<wQP? :]-[VP\wuE.9r@@ulڴꪫ#rnqԞ Tlz*^9s(~Gzb{$u9 /D ?^tM4:u?|jϡzK/4|d9؎ֳ :gZXtS//֭[WOcӧ}"pf4[Y** ˝7oY0znh}cBψP)Ӄgk, )>hd1 ݻ;An}C0pKQ/"i̒p/qR$ zOy  j??txW:.'M4;v̇`98^?u85G@@@!Cc&Zmm +sx}v?4gN  *ccu]'.owƍ>}|x&Ōaq6"`.f~#-VᄏzLui/y,+8 i{ũ9~Mm$[ޞ~PTPgͪUKL0Gڵ>V|}ق)EF@?GX+̺ևPҠR/ɷWfPeźE0oqt{,<6\mc2D✨07lڴKk׶f]ӹ8{ Iycn]P]ṛ {9P\5;<|x}mm9v N*zΉFx`rŠ9Ox@م<՝3YP.y(bY|]f}Tֹ)$sO P=Ӷt>*c?yq%t ψQ09^yLkغukx{>x}g'zHl19:uIft5XNӅʡ|<$oKϟ*֦= * ܱcNJ袋~ů~b߾}z7|3C۞4Nh PۮK Ì\+_NyqSHN9_74? 
@l_PA@+*mVu]yיojɵaÆbѢE[oeL2~qeu~ٱzirq8>5?%iMgV=_ݲs5 }TP'JwYmwqG'D&<ꄘ?~q9t,^6qnGy^)a6[pbu0䁮|%Pt噩UsZ kf.* Lrt`A5MRRMaxovmZDԵktq*v2|V.480)}SZa@@Y5oyYqS`TPό).t6jCaM-_$*36@@PA@['N(v)@ L={HQpe,QK~Af]Pl6MlͣtYZӈU Dҿc1J@e!w Z̬|G;/aDPx̳Py o:|k߯Pi9=.* ӧO(o.N eǑ#G|fMRxx.4rѲ|J9bpyjȃ4gF/n|ۄuHqQ-T@@;1mf I0٦ 8톟QيylR5qX5~)gv;WlN M3@GPٰۙyT~Q:^"DPxkʈ{k߾}Z:~Ccu8rK,BaUTKA04&_)ʇW=zLŴQKuHqju}ICwM3O랯)O+6}-KBHPٰau޼yv4JL?pkٯWZCHPṮ[f *%LUHPT* *; *T|B* *0u޺UHPA@PT`z=T!@@T FUHPA@նCo0p@RTPA@` :3I*'#_~eC B* *%~w!@@Tf//HԘg *;_hTzvYq1TPA@`TNQrŵ/my)  TCSPA@yNT@@ŽNT@@JhNTPA@-KB8PT^)  SPA@ * PuOp *e7NT@@/)  k)  T9@@TPPTTTP@@TPA@TPA@TP@@TPA@TF@@PPTT@@PA@@@PA@TPA@TP@@TPA@TPA@T8{iG1s@~Up) ,(֬YS?~N߀tٲe OT\uђE.ٶX{+yx֘T+?Xxq3^jG}[fw047|䮙WhmjDo}W3s߫iA@eZgQ4k;F#F8ZP#m>`t"E>O,r /_ޙ9^D q['Ă4TB)O,Q1TBi<5ٞ:Lf #j): SPaP>`g} *t٧L,TOA@B5Ij)0Yf0P>F: S&y٧ Hg} B5T,TOYf #j): SPaP>`g} *t٧L,TOA@B5Ij)0Yf0P>F: S&y٧ Hg} B5T,TOYf #j): SPaP>`g} *t٧L,TOA@B5Ij)0Yf0P>F: S&y٧ Hg} B5ԉe˖bÆ 3WmL;Y ϻҶ1mm۶ sWf͚[nokXhv[jU=O}77.Xxq}wq=">7}w>Wck;{+cѓ=`NWGX cy… _ެ0G~sgMr$% oF=i&cZus4uQDPhD꫋8}̥sWfAv4p7x sBL)gbG5PgO^{͛ˋ{xph˖-+VXs>L'xg?[:uʘV@;yokϞ=ϵ|.q4u뜢N@f{׽` \XdI'N`,ZXv0t7pCgL3Q@5,͛7wv8f0Ilҙ=LZ=XgLk&: ,(z-o80 ƹd|S`ŕW^Ӛ$ݶmۊl0vU\p3;{0bم^X߿zr Yib:T>`qz… {n&8p~ⓟ@/~793ݷo1:Lb7bŊbҥ3;ogϯ|_|kӺP:={t7{!΅|%C,W1Q<g֭[N@ 6t|X s9gTPW,?|Gu֝1gڷ5k+6nh*co˖-Xs<|c=vƘ6.(gڷt)o0 ϸTؾ} c']@x )_HʑPN~ hm]8~ _u]5=t1ڟ  ̈HU0V>3őC\HM/qo.EΝ;l0&v\0~&i`L LĉƵ 0ڀq`L Lѣƴj;'Oh .$1-0ZPlTq# v6Hw4q)cZ`ǵ~v v6Hgi`L  v6 ƴL/şv;q- `G1\yb~0-'L6S~c^q- `G1휎)%]w]_3]@ŸV@vrE+..DTz_pPcvo:E#<{׸V@͉'f`L;y^|řWW3җr|-z狠81^kzݱMGuv55Pl;0ms1;R@PkPl;id@CO?Šg+ wkT;ig=n۶ YK!|x@ 5ź]u{ǧ&ֽˎ(?&5}\ v6 Ӷ٘c}Lж5TPf)9kܺ窊6*?"fS@ Ga3M?¸V@hE@pbRlt65ِ1s2器4y,$issſ|}i B<>ݖʪoߴi1<7nאk 0Pl;0myn6MQ/-ѵpwޮi{Ԉ娚cq,Ѫ9^Ws>qqjg %3.&Uun/^÷sM3VB_ݹKa$5q6eFXn'fu/4̀s v6 -\>4\иO?Ԉ@=w1׀*z #\ƵƵ `G3|3JU3S55OO@#bܿuf@M,wÜ:s v6 -\"fv; ;֝ֈ|&oݩua>G6+0˸ָV@i-~P7@~Gmvm {xF@eHP{mPܖooݭOu%NIDvucb}[69詧L7m&źe0˸ָV@i^C2u?p7ssY:)+fVjG׶UQT0Pot3WMiܤ8U-#=ioAf v4?w6FaT0P!3 P@Phhh+Ww *`\+ C<|* tS'o˵9Na,3Fq;; {bOZAB  j/3P}ϟttZ "_|\dhL6!U@kTubVb ж!9P٢Mh:yiN)ngjf=|?:|_/sjVݧϗ"t-κrL/fuwfZ\ʷ]|n<&k y85i9U[@`t># hŸ2DU7iWu'q*WwǾn3VcG3s^Ӷ { m]mM2"jv4 2qOs<dTո/iΰȜmiRT/1Pԁ!olwM?|c'Y~xLXWu>6M3/XAΏ֧~mLҶHiq,j'ֵ|zKٵk x_6p: c MP"v#J'q|O<ZU@hhòGܖ߷,-kz]^_ q`sM4jAl+ =叉/Vu(yx=m_CkH)sξƵ `L;ҀYxlfˋ iaSE@c)-ُԦfV-;-_um Mѵ*3iYU_תy@M<~? 
v6 ƴC c-}uHكM65?mP PS m_.oYmρ-泅+ݶS]ο&ܸƵ  Ma-gZ^SKWE6,Pˇݯ*P~ܣf,E:۩i[, v6jG1n%jypק)XyaԺm^5N>$~6z~|Ƶ>|` ƴ|F1ΙYv5,#_<*޴y@=<|mu|bV} Qz]n_m!an3ZP@@9#U^䑫%ZEu* `L;5X%vj%6gGH, v6 ƴC y$}7 nmҥK'&vu@-&)~P֣: *`G1Ԉuga\zC UYu:Aj~o@m:]BۯS]xt.nPPTcڱC_y啞X[ 6[@mZNmjSDHW] m_GȟMD\r<7nQ\@E@P;i2}b6h~ѡ8:6~?W,<5}w;S7ⶸOzuI֬Yyn=s㛾u#Ӵ=6i5ړ\v*Gi>öjYZP40m-fFt.wd1K2"\)6&kcz:s4-'Ba{kzU1.֡z.n)ox-M۲j+}}~C86q{:Nͺ v6 ƴ `f3b\`Lq- `G1-*Ƶv6 ƴ v6 ƴLv@bZ`\+v4`L;ĕc٧ 9x ;iZT;cZ05P|سg70+njG?Xl߾x`Fts=W@PgѢE~o0`^:ҸV@3wnvL~cZ?骅h@?jƍ;cǎ0Vlҹ7Xs>Lox衇i=jL+P'|қ@CCLI.(>}?pqwtO6P{… ;z34-\~^xkz:{ 9jD-[xS7pC͎;tv0lڴK[ӾƴhDX+p`yZS&;G196!(.qVcxw8'p`XxqdɒV;{nO虨8o<sgBPaΝf É+ŕ ~moX@?O7:x9ַU\wu*N1] AƴzNJ.4x㍝{v4`.я~9gbDCEZzuOcZ gE:߸_HOqK._~`+qG1m!`?G1={N@={N:9'bk"cV\ 5TعĹ?Ow^q 7=P;w^;Ġ+7No=ά<1cڅ vޣm/~۵k gɓ';ߌs͝P;sNe]MA70<_|+>n6gtL+)Rt|NϟoL #{,;{7tS/@T݅cuXp[ظqc@8_`fƬ/Rg\k0eF2T@0\0 i`$#G;v 7?~܎uر3NSLQѥ,P'f6jNH:z NI)NQ8fm߾ݛH'[>|Nq٘6sQ?PTLP;x~عĹc veJ({K.%N'NNM#39ydZF * * *  * * ** * *  * * ** ЏxVt#X:vIENDB`borgbackup-1.1.15/docs/misc/borg-data-flow.vsd0000644000175000017500000033100013771325506021110 0ustar useruser00000000000000ࡱ>  Root EntryRoot EntryF`?np@VisioDocument SummaryInformation( DocumentSummaryInformation8  !"#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\]^_`abcdefghijklmnopqrstuvwxyz{|}~VisioInformation" ՜.+,D՜.+, `ht  D Zeichenblatt-1SProzessEntscheidung1SDynamischer VerbindermmDesigneffekte.1 Umgekehrte geschweifte Klammer ZeichenbltterMaster-Shapes0|_PID_LINKBASE_VPID_ALTERNATENAMES _TemplateIDATC010497201031Oh+'0 X`lx  Microsoft Visio@pgpVisio (TM) Drawing R`RP>w PP>u P>d le !K]oei (fT׀'50"fZOlO~C.ߐE#LQc:U  APn,34%5&6e&9Eaaj6ru8X7?!3mnv?? ?8t= =>URgL-Bak4quBtiQHe9DbT!c#1-r$9Ah\`(*'ut+A4)%%u+H>(>5ah34LQov9V0 H(MmcPr?`8`?=H9D?0 AEN`r$7U3/E/ 4?O@ Q`__J;a/_//\% a$G[A a i%&?8?J?\?n???O?????[!%_w} o-9aD/*jo|ooiAs2qR?QcWOF_cDU/@}_o/^#5=DAt5?=+jt28oFz7@o'9 O!6K]sH{O(Od"<6VHqS]ňƖ`!2PŷyϖO_a re'F^#Tfx0T"ɘP6?5ᰱ !5f| &-!|˰q.бܿ՟HN贁Nk?kGzHIGVAl~J\?u@ ϫ弅.- -L-e(hzyա{}|V̽t?ńxr>a#KH&_y+I@kQӄ\nπϱvAI"0 7AU_)_;_M__]Pp_G/L kk[xKKtV /(A&f,'Gр߳߬iI 0lcF]{3`'a1`Con@ect@rW!@ig&`tz`7P1O*O``H^3D}8AGOAlTBeM,@lMd3Dla`OUP@tR@e;n7Fa9ABO_hO@QT4 Cp5A"_URH@uJ@dn1@8E;ABq__C_H`] _rERSzN@8DJ2OrBN@gReqG` oSe4!`@o&g]1U E=AhouT;aJ@sp@UrN@nP@y7FCAB@ooklML25X%&qvqvqvqvqvqvqv  r1r~M/V+@A`B@cw g;UNb!wIWLVw b !w//+T@yQ#5Gą1UBUUUt4Y,b'@~/@  8C-9  AUBUUUt4Y,b'@~/@ c 8C-9 *7 A"#$t4Y,b'@~/@ lDG A-9 : ;UBUUU'(t4Y,b'@~/@ GX8C-v*7 AU)*U+,-.U/012U3456U789:U;<=>U?@ABUCDEFUGHIJUKLMNOt4(Y,b'@~/@ GC~-lkGn AU+,U-./0U1234U5678U9:;CDEUFGHIUJKLMUNORSUTUVWUXYZ[U\]^_t4/Y,b'@~/@ dC-袅T/AZU^+,U-./0U1689U<=>HU`abcUdefgt4Y,b'@~/@ TFNC>-nG7"AUZ+,U-./0U158HbUcdefghit4Y,b'@~/@ DCKC>-nG7"AUn+,U-./0U1678U;<=>UBGITUZjklmopt4Y,b'@~/@ XC-1/AZU]^rst4Y,b'@~/@ C-v*7 Az+,OtuvwxyT{|}U~UUUUUUUUUUUUUUUUUUt4^Y,b'@~/@ j5C-v( A;XUvUUUUUUt4Y,b'@~/@ bC-9 *7 AUFUUU^t4Y,b'@~/@ >;C-vy*7 At4Y,b'@~/@ DGA-w7 ;"W  t4Y,b'@~/@ 6C-$U*7 A+6t4Y,b'@~/@ ĞC>-tkG7"A+6./t4Y,b'@~/@ 䖍 C>-\kG"7"A+645t4Y,b'@~/@ mC-7AZ+689t4Y,b'@~/@ $C-ܥ7AZ+6:;t4Y,b'@~/@ D=C-wR*7 A+6>?t4Y,b'@~/@ dC-4w*7 AU_,-@At4Y,b'@~/@ C-Dw3*7 AUn+,U-./0U1678U;<=>UBGITUZlmo pY>Yt4Y,b'@~/@ XC-h 7AZU+U,OvwUxyz{U|}~UUUUUUUUU}ijwt47Y,b'@~/@ 7C-9 *7 AR@ PR@-G NR@t.G MR@tGNR@pHR@~ZPR@< IR@*IR@(7PR@TNR@̝2RR@!GNR@.GNR@-GMR@*G_NR@$GIR@#G$IR@TGMR@TGMR@,\NR@NR@ =NR@|NR@#GNRH<(H<(H<(H<(H<(H<(H<(H<(H<(H<(H<(H<(H<(H<(H<(H<(H<(H<(H<(H<(H<(H<(H<(H<(RE$-GMRE.G[REGiREGwRE~RE REGRE GRE)RERE<.GRET!GRE\-GREL*GRE,$GRE GREG-REG;REGIREdWREԔeREDsRERETRUFDfP h(TPYYBUe@xTVmt(1B~ٟh> @b(P9 Q# BAUF~? ?F\.u?P6 #A >AA mm u` ? 
5AAA/ i#F/8&ur#rI$>-yU5 HLa##԰# `0"$Z'1/45 13Vis_PRXY.chm!#5?902q00- `5G)#6367?9 (###&A6/&7 a"# !=$/M=z@C71SE@7366 2$#@C2NoNk?\C*GB@CX_ @w-ab rTw"R24RI'f_BIq$i@A159 MX@G4"]'2qN`6!41fJmoGɊ_T E"d"d2!!2 t2$t3 (^c*5*5 `y8"`K}oS0ten1&2`Bx36Aq%jN`zp%^s Prpzp]sS0nue0mpru6261`_w~cs}!pBAip~5T74SewFnkpipn" atP`SwQ0ml n`VwISEAWIN0TQX]` A7AnPgnE/ISCW0SI1v5dD`zݏPq USpatԐap|3CyEpd.=uM%`zsvsZ1sO;Yia2t0bpgBn ;Kn0*Tg9A)gb1lrs5ZrKa0k]ql=]tWuf0EQ0 Ta) 7qՑȫW2/]rҲ!qoiiA+cqqA@?CR$U!2%c]yW42?()3E%&]b1avy8y22p}NQTWOY0K0NsE0X1OR]IQQ&ȣCΌ4 pC3&ГdvrWߧfl+F"vBd%~Sr3x3%2"w"c % 65HZl~7V3Gm ]ex%䏢rJ`Umr_0RTHKg#%7*H$ 'qIa{a"qaid 1/UF0VFF5^Sw"S%RJV&%MS]T:%'Vs|HSH5Z .V F"cS"g,tYj5m8c11"!س41g!!2X(1T,@2y<%d`V3y8$dפQRRq" HSVuE5<%@UL/^&q-\ -5/y82b'R_3nCuLPPR#.Wя2P"`@@lcDQoaURZiQ7E5/gCA0'(<#Uj6j6bLTްrTcS)Fa# bDF"x4[ O|Z3H^E%nNUO.IQ^EnE  M*MI2O5yN3 jR-pShYgUHU# !v͗)uBGF #w"MB ]Ba\T@oTPUEU5"#ij+UFDfP h(TPYYBUe@xTR9NR[h> @b(P9 Q# BAUF~? ?F\.u?P6 A >AAA AmmG u` ?5AAAAIi yF/8&ur#r$>Fs(??FůbX%%̖@w-:1*5w)4"I#a Iq$񉄰-#5 HL3=l4)Z# AD5 1`Vis_PRXY.chm!#590o23 2 `UA<:%G )C0`EGuI#CG?9 EY2#6J6uO&7 a21A!/=#AECFF B$B##2NNk?! >Wq@a% F#VVVE?5Aa%R*dovh534w"g5koj3g23f5Md& ,5JemnW_"T TU"%2A22AA"LBY3 2(^ E E YH"`K}o3@tenUA&B`RXC_Fv·5j%^ Przǀ]s3@nuE@mǀrUF_B܏6_A`!B]@L'} V6lX-1u. Bj2u2Z2j2#u9r#ULH/MZ1+B#AD5 60`Vis_SE.cTm!#20AD%`>Copy<0wigTt ?DU1f@M)@c<0o+@'ofdBUArX@Aad@iV@n3@ WAl@ef@R@6AtBvBb@hB@lBAJ@=## AoG?9/#n"E&444 7I0#AHB59_lj#"SV6U8*l>(Uhn E /J$&9"(Ln eg5Le#pheo'T ll Js~o/|R12'"U7A%O3W_7xr5(-6 `F2 xqu\J@Q:0b)&`AAs)@E T@xd@Di e i=M]UTP;7͑4U@ !@ү&P lA4u7` u(Jݩ c?EDe347 D    4ũ2qL lvF BP(?7L04 `LineTray sp r{ ncmyz۶&4W` v%P t e n&$4` v%W{ igTt&a` v%Rouy dw"g&Al'2A{Gz?6>5Q?An"/`Fw lo0~/+n"j?|?+5/z!44`STa ow?<*+` 9?& `U 9S5@y0eK} $KXOf@Qs{ 'B $KYO 64`9M*@gy i@i04Ai '.4 `9Dw AcNl:>k$Uhh@2Q2QE,T/JH> 04[_q_[_o!otgU__o_oTEooo,o17>Te\Lsc!2TtM$t7tt ItEtURtUttU  U -3":GqTlan{TARɄքeㄐqJ"4T c 14.m"?7Rpsf0M8@H8 MEJUU8!r5GrK&HZ&H͟@G+G3"?GERml&Jlmyome>mGG-GAUV>6HԂyϿHႻFSP6HiKte (=MQ_'0Uhv$on!3` LDnFxB'3` A@iBݞ"4` C m@bAxB}lewL9WIFE/HU= _6p9yEW49dFT( s>#d9>?B T]@ekVo+$M X]:aGl)+ ]UFDfP h VTB UYuU??Fxha T,aaUQJ\UF BP(?@?F~?$-?P nLel]V^ ] $g]#Q"U"Y*"]>"~#}&&&Or&"u` ?PYu"bl""& M&u"u&u"u"u"u" ,u" ,u"4,u"H,66t#\"u"///$u)a}r 1'u `Fx3N3^3n3~3333333333Vf4bVqfH0φЬ u"Z1qٝܦaٟ333'373Gc ƢEt&EAxÎ;ܢܗ"3@"3."c Ӣpp V/3,S/e p>{/ RfSIA62abZHaLxV3Ǫ *:Jz׊ך@'OgEa@GraĠAQJ`FWO@rF ЀiC}F@`I@)@AEBsZeȓ3U,puӐe(p 4zTcNV:fcfR:bWKBpY!UbSi@(h#FtϿF?@ ЯqRfa+_i&7=akVQhaRkQclQohaR2brlˡF@#I~urW{ 3~@J}O qO Xy`I@Ё`sRUڀfK@ڀbooOlw wKuK`1s8zK@JqyozI@ eoo /u& s!x7SlP;walPe,0kq @Jq.-8r cJqM-Nt 'E1 *dbt+r sQJQLŌH{M$0tb;{O,=CƁS=ԯןƋM2'Kx~{׵am(zD1-?F|!ǓnJ\˟ݟITjU˫ˬ7F:-˫˴YkIՎ_TF BP(?UQ!@>1 Iz{7 Fgcfb@&7 kRka},1ff,1~,1ff,1WVHf312E1kaB12lQ12a712 ,1HfWV3UYkaO} mlQYBaYOB ,1WVHf3ka lQ"(;"FHfWV3Y"]#kaS#" q#lQ]#"]#S#"QocjQWV3,1HfFWVCaHf``xd䰈qGA8rL5k08QL5 Hk0L5k5L5|0V!CTBL5 = |5QQ5&t? : =|5Q-5S47 PQ zqE4Hd7&qn24Dp)TU+0F2o$0jjSRaR9Q#HeM1sB=yIZR1s^lZLªV_4pQL׌$H.5Oa 5. 
^fapnfỀ3p` T]`avelShiftH/$xa_d@a}3p`O`gfs`m!*_a@@ VV%Sr V:67sGr:2!7s[r 27.i$3s 1T$y@aP@dvkQXU.q.'2rq⺏y!,0}Mv`n$@qqt7v-ɏxI8cʁl@byf%!r*򊭄!DŽkqԄ .A\.A"rkQ.Xa Xa.IᒟcFpBT}/b.񜗔*r.񞱔1?A?A1˔(Aؔz ..񯑯[Rx a;`"[TBf"< A`B`sc`rdu g`f&+o]`m`t`l`g`l*?`0qH/.:jx +\T`x`bdvq_v!L5~*ϫ16DPᦄ/);\R`Ht%k ` cTfx8wQ02xQᦎuݿ)/&\BanAM`_qx2Ϛ0dP*(O`a`*4FxgyQ2?Wʷշhۯ-K0&4Fb@[mD{ǿD!As 1ϱPGϠYϋ6ѲĢϩ5hA`#^ &AsEy)3UĻɯxAv2`OQnwzT9ߠ)ഉĽBTfAvH`U ta)Yp!3EWLnew)ncbĿ Av9H`R#t6Hl~)99:'@ϟo$tm-t!BT)ங'7İZE9Bs&1)/I4+r/o/Av(* t/% *M{Sa#/?4Vr4F:?^?mAiCOOOO6O=nO Z\9?K8/ ` GAAtPT&pa=rb+ rt*P@?#o54trp8tb T mkqőԁw`3PtquːtcF~ϿF@g п%ukp`?u %Vrq t@V4`AszSPsT3 5jːqi!stgq xʬŕhtbrQ?l !3'󿂱/ cː[яJakr}`Ѡ ː *ː}<9ᙓ @ː^rlA$Q2U lQQ؉uhU+24rH1yɒ^rx@7.?@hzi?wԐH}߈7!c6z#o/|U`a1H_\"p1&WrBdeKTv7.Cԟ@V@)`aGo ,o>oPobo1qyo>TooODow QS+F搞R6);=[v+rrQvq>LUF+q StFUevS߅x$?~&qޗ݅ԯ1).UaO|㱩ǢKT*ϰd4)'ߕr|$<b/WkNȏ%rQ|evʭVlߟU[#Իg.gaxweۿoo1τCϟSCS/e/w/S~eB]v KcVO/GXidU:@ڄE-,/wb/cƻ͟E/?'?*&)BOZlcƄdUO7c*&tG);AEOb1ҿ)M_b.ϛ]!_R8Vb=x]@('_ ݂?z-sT_Na2s/.g(O]Sa5 _;;L7pwrra2wcCB|WV.gWi gU1gtO|{럘p*7 7D g,l;;.o@o*aa Oo0jo2kJΆb`b~Mk 2(s.®R%nd\Sl$Z`@K ay!Tt1K쐴^VQ2CƒUdUpς1aan9Oa7Pp7߈y!X16BòÀJaa WE$1߂!UަQbЀŦ72Ce$6N@¸ջVcIpRbL(aO[Ne4 (ga@# f8 \ 2@dT!bu-(;M_qgֿ`hT4@?F\A`0ΉjM09 Q)(3b8ye/˸?FƣE?Fp5Ys?F5&!Q@Ή( 0̿Bԃa2U0*s3?+0QԂ2 k+ݓ0]'%Qn?Fѡ?FW^bd%?FԊu-Q/c,DJ}?5^I,QI,$~//'% ?F*^Y?F[E?F?|{T?c,|гY,vݥ,-C6*~??b'%OZ`?Fa)P?Fů$_?FPv'͚?c,տ h"lxp_Qҏ,mV}b!)?/L FAOSO+'%`0DLJ?FNqVUO:Oc,~jt7A`,׿p= ,?7d__Kh'% QuU?F`4p?F?FLADX@t]_c,io~JY8F~Z4 qۄV$N@a__'%X/~%l3!?FeY?FJ~ӗ oc,z6>LZ4?kw#awi?vQooj'%'-q?FmUa`2#+?F(%oc,Yo }j GfZ4 ץ,ClL^!UFV{WE?FcJ(c  )H},""3\ \ -Y@qjǻ )1̶ Ւ Ѡn}δS m|ݠaQaCS@Gh4cfUpHi5,)֝F3L&DAPiЧ Z3*#q@8|`rӗze2ZՀ #5.?\zi{B Aȧ#ޯ&&JJ\M @㖿+Ģz`rl%ǨΩ)1π8"4߀ߒƿ./1*Tύrˤ]!c-=( =5-52Z{#.,"~T Ȓ@@ 7ӄ`Ofrsetx%@p6'<g ( "^p7'(] #5GYk}Favorit_en.s] 2DVhz D BASFLO_M/.VS] 2DVhzD XFUNC_M.oVS] 2DVhzD BLOCK_M.VS] 2DVhzD CONE_M.oVS] 2DVhz BASIC_M.VS( I*pnC! qU/Dן_=Q/ן=OSO PrPSRS/)#) ~  !"#$%&+,Otuvwxyz{|}TUUUUUUUUUUUUUUUUUUUUUUUU+ - - - - !- D- E- F- G- H- I- J- K*- L- M- N- t4&d26@~/@ sC-Dvu A T !U"#$%U&UU"#$%&'()*+012*367t4GY,b'@~/@ *uC-Tvʯv A"  !T#$%U&UUJ !#U()*+U0123UBCbcUdefgt4H&d26@~/@ D+$wC-dvx A "#$)+,Ovwxyz{|}~JUUUUU $%QRSTUfgijx}~+t4dY,b'@~/@ ڟixHC-@GyA;X@LFuPR@\FvPR@<'GxPR@*GyQRH<(H<(H<(H<(EFzRE]FzRE*GzREԛzRUC`8\Uu?Ƹ1n@qc7@ LFDTyB uhn0CT w0UFY,b'@F~/@Fx%@@p6@??0!s#?2?/M8"mu ` 4p2lBrBu|IgO7+ P D=2!!" Tp2g(T}DB @=2'Q%7VoBSw2_="@`_{0B'Qp53[CWB(P,~zFF?6Kmo $BU fs#3aGQS3aP` Verbind]bPQTb _` Coe`n]`ct`lbe=5n-o?oP%Uc F_lus`dc`a@r`m`noaaow`h`r`h%oo0!X` B]`srrc`f`ue`Ai}cUC`l ruweAaq Q!Q!"Qa(D/1:!5 <>g2Hu&%rA/r!f=5x %xp5x5xxxu%xtqttqQtUxQQt!!tQt!tt&!&!ttTTttrArAt``xt0!Q taat&"t)At, t$t:!:!"t3,t60tD!4t2gxq2xQAZ"!Pi)jDq ST> , 0!cQ|~Yf!!ex}&!fgrAR~`$% 0!1:!%D!22EHr!$}L.E nBrq1SI[}F_< AL,@P(W@@hl6@@2?@ZV™?SJ[rq$'u}@`FF?uՁ _0b 0pg@VcB}244 417l7r.);M_p=2l1$|<=23{3) `p22tRX`:!BB)y9CQB`)lBQ{Q"GR $    {GR$ RU&TGSQ3U&4ؘ /o.ƒ̀!BpA{Gz?%Q;?{ٱ` Srd ra q&lazیQ"//` Fc`qPB`t`^aa#SRu`q'3 >` XP? "߃1݃1Q-c A`c]`n`s?4*B` )-?Q<`U%5Tbn`pr.1cy;?O<#|!`)-OTK" +S`y`q#2 ;Xfkps]`qH ;YxOP `)Dc`oAaib%`)Mbniri`1[yx2qP?@@@}FP(?QBr#"0Lcbe?"xb $u`,?u' X֑@aV FV '!yDF壮$` URrnbrs" 3 `UaO?K` U ?S.>pesô2;Ta.BbvHzqecka{v%px8)ٱ=TF vw6QQWAAx pU{`"|mH>~_¢㢱!F!!gўrivT,,@*!09~p**4!CP)A_aA; p&s$*܊R t^? 
@дF@ؕ@*"ev57|!)*4s{ D"4g%5EE1د%'s%wUMew\$8wdZ&RrVQ !RRl+@r˓Ф=WQ0@\Zep@᜹ @\4U>@uBdaq-QBVPiPգnroMu钡$'>t<82<p#ue5+mFvգG/Y"//,/>/{*F`0(p////?o;1[/??+?=?O?P"u???`--wF&cOO*O>N-8Ϧ&8.q(:Lgyӯh䄆{@ƆHQEatt: @@ `@@p8@@q#Y0' grAU޲s@wѤq axMSU &41qldcx*pef|34T޲HDDDUD D D D D%L޲4k )bwrdq0 Auaq(Ѝ`-̀A{Gz?Q?t`K F@l!2*3BG+6`9Ml?@@`uR%L@eD%Wih\Jr` %RAg* `%qxqcdҡa+"bGvсaGāzZsxe~??;CAmEÓ{?O;ȅ_TcbDbD`q`q(aa"qqcsu5O~XO+mCѴ qC(x9mCZUZYOGQS_e_w_ۅRl4K?M?t-1Lc\MRmC뱬bww d(qq"ٴTt,@aau71bp@F5qfu`ogqaEjE␂rZaW+=ruL pu p+ @FbI3l@@2[@XfNgB@0a%dcos¿_=沎@`{񣰁c72Mї%ff@ $֏o@,hŏq3ҘMɇcLyИ בk DLИq'))asυϗϩϻ'9K]o߁ߓ߽R}kf)m5i'@jZV?@Nq:<#@@?@hQ F??,AaIIhI¦IpH;K}_m}‹•Ox?s~PbtʓţUBBPK"`BtBO?S7g*H<@TDWX=A m幅hdhge%1ikakg8qEq D1jZV?@-Wu"@@L&d2?@@i_9]ֿgƿPh"ϱp8o"@tufSv=@`uA `zpArUguq`} ?A% |Ah"Qs0UB ׁ[g ?Eq@0sNaNgkm? tNLC;zsA "Q#x?C&BB?w#TS'&&&U& & & & &&"""d_2 &3& 4\(26"222??22??|Adgw~Oà9AsA{Gz۔?EEQk?@J al`C`l`r2!J" sOE&` S2a`owOCOO` EP@tbrURC`ET@aUPsp@r*`n`y[)_;_:C4a`Ic_TE[B KS`ybڇE*"` IXO`f`e`Z [Y_}1 `EID`Qai`W6a`IM*@gtPibcRQkO5tO(C2q`?\000F B_P(??pUsa* ` Lz`nJ`Net9Mssٜ>* oo @mr{acV31a{An(rTMaAWJ`iv`h&g7ryOaYgu3a _q9r@gu5a$R|@uPd&rt;rsE gua=ba_uZCr t[Q}O8O@A%EjEso?@~@rb?@I5?הߕ? iwwOL$cO9(!TsJ,J/Rş0|o(?5)\.k?֤1@%µA>2Paam ejRB@`mf9SQvo~"_=]@`{i@­,(`%'0cs&\!hSQ+ 2ETJ`x&`OBbcnFRb'gB/3` AIiE.B4` {AmKQx0qEA1kCU@Fy!)9%޷UujCPʰ8rյ}Q^ L{^D>+鍖+m3,@r)8A15 CN~G@@gl6@@8?@U??P(-DT!s!׵%y/ y %my-/TL!7'a9YӬdl?@@лh|mrb[sE%EkE@į@ X `ABu[E_c3B.B9OKO]K!`OqOIOOO(_a_C_U_xy_______ oo-o?oQocouo oFd#bCV>1A2ֲ"A2 6.2_232237 6 6 6 6 6 6 6H6ֲL2L2 R6p3R62!6$}qL߃U`߂‘qłRL%@AaErO OINOO6WgaN _OD_qaOO_O.@Rdvд$3ݿ%9K]oρϓϥǼqu˼6Ep졥hTaq" D" dq+ N 6qs6ᠡᤅ rF Y`yч'M6sFh4@@:@@JRT?@ vHfMnpus`u 9Aeu V`:aj pu:N @@}o=!@@~@@il,-_@)p V`@z3e p .1<]I\n6r5ⲨDΧ%9ؓGa/uw"tNzM@, 2;)2  ݢ83\6L36ܣ.2T`3\\\ \U \ \ \ 523 5#32d gq|w uH4` P"t r(dJ\߾//*/&(é4*[Z,FtBѦѦѦѦUѦ Ѧ Ѧ Ѧ Ѧe٣]F ] a">q#uq7aF8I[ewYp y,@߂dvߚK*q>qxbr EE2#I4F,@L0 Ap[IOTFL6u`/f:a1:&?#AuWI?h@=br[uLo0uo0{@Ffbu d@2 -?/OAIUOgOyOV,!5S ,! o'o9oRoٰOmӾGuNJ_O*zq @d,?wQ@y"uM0;Bxze_̴_!A2D?ux`*0#۟ XocQuk#?^o.h$uu`Ms&­kYtRDdlrڳH<TDX0ZA0QT5hT Yp8gki*[Oq䆱HMjZV@@2}!@@D*JR?@L&d2?:j$r%?PQu`u[`f@@mQs&#DJ"@@_z^g![!@@}o=]g )p9Q%$ 7Op@z312 ??"pE>1_6K?^?p>$D4U$7WQrs`BsOos+!G$29۰r Cagh"g tN9Qzs&Q B*C&A>CB*B!BB)B4BFFF F F F F0q>C^R4^R!B^R F3F4ZV۲O~g0pr~̀Q;A{Gz۔?UQ?z۰$3 FiWlC`lb2}1" oe&"` Sad`w1oCcJo\oW` *ePu`tbrnf`*eT`a`spu`r`ncykooc4PriokDb pkS`y`ef*B` riX@fps`tg{ v{Y A `riD`8qcҥ`i`g6`riMu`g`ipi`a{K޻S2q@?@@@F BP(?ꙠFva䊀c*0` L`n`~nUx0۱x0ѓy{faj1vEanqaW`5i hw7Ə"ޅlqo9k}?@"ޅpq5vR`u`d g;}1 ޅq=bv*jCmkȍ:(EJ@6?@uT"?@U ob4ɓK*wAwAA^B1T۱QQ۱/ A(>\.5?Ŀ,A @&ATu {0@wAN1$B@޲Ro_=/@`{Q%i6H"kES`'0@ l$u"eT`x `~o"bxnqw"b3` AiDb4` am dqxހa{U ю,wA$ՉX@U"?U@?& 1BH$մ@l%B8{7{L@V 5O&s, lkd`ve4 QQSQ&A"E0ղU5 0+=Oa/28}t߆߿+# r.) Xo))A-Y"?@eo" @@Y!?@fl6?Bˍ+3s}ORJBQUBJCQTCLB&CL&K"B2 1B2 1:Ct&1n#5n?02^AGS)kCMɅCO@CUV*#S64C.6+"S.6.6.6;"S.6.6.6 .6 .6 .6 .6^S6+"hS6 AA")XCj@?@?Fh$E/C4&"K20#U{ _V___ ooCo{ogo_ooiyv{^_oooo_ _BTfxH,#?lGk[EEERu0LWRu_V `Ruo[S%CTbCec1R e 5P?m/6R5f_&!&!!""!!4HZZn"Q!!-U4 oI+!!0$փ/p!R&R㏎?K]oUn8`~b7~?<DmR11!"11"))0!0!4n`"2,@"0"%TF# u`춻(&"bG4aKG2r3]uF `u*`@?6vb'!d@벞@.lϾ%>RTݍ%vl%RNـ->dnq!ߑc&!4 zS>?á׀›H M&8wdnңL/2"P-ՓϦt/S-B&!PtOfB!TDڰ !!pE%Su<`A>`)[!@!j!~u#L&d2jȧ9]ֿ͹ƿA%u6j `u `sAIγP( B!@տ< @ufDZ*`uN``zp.a1`mp5@g. @z3]gy&"K"!!&#%E/ QrmH; >2K/Z3t!#p!/$a 3tN*zs.a "#323&P3&&3&n3&&& & & & &&3&*"3& "3&42:2W+&fD2?02=hE gyOFOOOOOY_9_%_OI_[____ __o<_'o9oKo]oooooooooot%3't/uP(G?Cp^KroaPK-`QAnUr#aTWANىPbfbQ{"e1a9o|@7a,idrez`IOa!i2`BeA5aR*V@u@d`n)sB !&b*QU ea=aG m_ZCCr[mѿэQ֨ED_Ep8?@z`s;@rb?@b4ɓK?yF? 
mCe 6-]!CAAAA5/gC?=%)Rk?6a=8=P]^+ɓ?Qciܥ8(`'0c&u9A)Q+/eET@xPOeB` LXdnʐ)aWeB3` Ait͍R4` QAm퀧Qx@sΊьq=ACUᑠՊщU<pPѓ9ߚ(ڔWAJ\tԚ^$bACϓչ캂d4W-@Rv$8?bϖϐϺL1+hz+.LW\gy-?`ϳq[!a@&!%1(`#@JRT @@ # @@?@hQ GF?˜`&" [j/+, 1#:#j`//%#҄X$٩*# Xb2&" a" a36ާTadWE@qoکo+38C>ä`eBdOQkJ?*?ڼ޼cHEMATDyOCY rEO!ӷ h9T8\OQHHL@90lZV@@.T" @@<*JR?@L&d2?0T8U?Ppơu0`uL`6W@^9VtS[O$@@eo-Q!Q@@ #7 @uf[;`u``zpʁ)_`)Wpg@@z3VonBZg0$p|E;`ڷ1#Ӥ^0YpӵzR tNkLzsvV :r1CsOӓdvӒbӒ@ctu[wwӒdvUdvdv dv dv dv dvvdȐrbr vF3Bv4r@rJ.rdlg۠0q̀A{Gz?Qڻz?$c FiWlCl2Uq'a" &{R` S@adw'.@W` PYtqrw`uTawspYncy(43Vڟ$( TyP*5r` VXWOfkstK ZYiP K`VD뀮cii62`VUMYgiiHȫ=Ė񑟃2q$?@@@F BP(?G^ a'`KEA`Bn£b^'`b.'a{LVA1Z_Wf5ih7ͿdRL᠎&``꿄9p7I @{R\LP5ZgURludn۵aχϟ{Q LUK`C9KߨXEO Fh4?@0?@r$?@b_4ɓKSZ#$ q qWD*\OTq !(/`X߂u(CAI5\.?俄OF ;=FG# q15bc@(XDB@"l%1|o2_=@@`{!UioD?OY,Y(Oe&"'0$:y2562G;6Tfxjb` Lnʡ/3` Aiҳ .(4` mHxQ-ArAA!UT-rArA qd${.U@"?,7U@?,>1B@2UByhAQ wKα BI٠e0 5Nl LBAYLO#tCA#|e -#?QcuAE#!R:yhBpCE Ys@j Q?@~i+,?@hQ F?yD@CS[`bBcBBCdCCC(BBaCa BF&SBBBB(CFRapYpmpbBbcc !acVRW# dVRxa?o`ba3aCs#_rEsjf3jf4E%VFR|sffRsfUff f f f f%csPvRv @R!%1NyOJTR@S:͏p(`L:pNZ؟̟ڏ &8J\nȯگ@q5~df0 @I2LE*4l+(pZLes8† `:@q5H. ύ2Fx_fդqѹ 0 EJ%Cgt``ntoU hgk UBgzYsY)~ @rHSOD RX!PqPqpZqZq!!QQRR:_0LBlPr},@ bRu0eTF c~u`Sև᾵ꀾRwlRW=rdsuLuԐ@=F^b%I@P@8 B@Kah2LEoB2_=K@`{h1 $!! ͑!LE4O8ZUtup~uqD$l!!l!%hFG$!G!Q!/q^Bd?uTh52 (nu`/))z{}W._/̆'{!_PrR]Tq~q /i(/t"&"$&_ 3?T^\WzO?Ua $z" jFX?$&"o+'u5`??M%6QòO0L5"J.VOhOMP#,4ޑO0L5NCOOM =_0Ly9?m;_M_Nsލw_0L_____.JBFP<HB$ u(Hx}aTD[̱=̿cidqԶ Ū xK 'w"A 8T퐑xB88&nʗ83` Ai}*4` qmtx~ h &Wx ZjZV?@PۗA@n>=?PfAib7^ @2 `@?% B@@a/r!$So C{r_=@`_{/q21l(!;U7 !mlj7a0($n/cU@6EͶOaNֵG N:_O7 q2aXq7 aeOe5 e71X_ɱ`_{A`ckd?fbtcQ/u[#/^ o2h7u7`@oRi7zno73gfa#"$Ua۱oh`1bObt6saQ\Dz!xE6d;>E>>K>U>> > > > >DP;[ "ms!DӬ31KAja@@Fh~6=@3[y7`qP~xj?̀;`A{Gzm?QE?3 -$ F"lH2ذ1` ]S adBw8*` P!r*e1` @ njB`T%sFae&cy¥ o`@  9 > S yH| XOfsܰ DŽ Y}0 `@ D"0c 6e1`@ Mg&iiP1Xu2q8 l?@@@6QTиLeK*b%Wܰi h.c8b% `b%" 1DAE ~DCbu&k I  "cbe?Pm?sCTQ?VW80&AN44XttB4!RѲLR)Q)QB4BqqNzLQLQG?W:Dr9I+3!Q/DXqERBID!RH93ilE!dlE?#YA!UeOwOOW;Qh8S4W9~?74g@/L5oz0 S?a0ow?'3.?@?` 5P Y0tPePA3UW`5TPaPsPaRQy;??3!`PV9?;(2 T;SPyHe&*` V9XOfk@s*PtKK ZKYiOB.R `V9D QcPi 76%`V9MY0gPiB@iP1KJ4џ#2q$P?JdRF BP(?p|rakA݁G+`W` APc2ta8y5%d9ASqAᡓ]_o__TR{%aNS@1ZQb1n@C!Wj0i@hG7R_o_UI~`*o?Q9pRwooKo@UpUQQ`oR* u@d n@eA;RooA%e UUQq`( O:CRy;]JEk p?@|01@r$R?@I'?%?d$^>#U !1OEi#s3U Ja[[*JEOasSLT8    KV#j#K~#4a[e  /////// ??V?B?I[x?????O/,O>OPObOtOOOOOOOO__(_:_L_____~???__-oo$o$OZolooooObOoo-? _DVhC_)/_U@S.$6H`Zl~4l?pM8YJrObT2!L-ReeLWO@qG U_ `_" _83 ̔})C{g Y //A/S/e/w//////P//??,87׀@lqA`j IbbKU U   )P-- 33;^^7A4rg5\k@2?.Jst?V2` AOׁ+:L^'a=VHSEڲhڲ!j/"/4)IH.a/s/'j/5///jeZ/?~/to /ASew 5@b`T29c)bCVh28ΟvM_煒Ju'1+OM+ODbCa_,_>_P_bVbżDbOO_Db_!o3oEoWob_t___ٚ%4X(G'\SAE5TeQSh=se, ,થapfwpoB  b8gpce  ,!zzmlj܈zPj=@cF6GL7@rs7IՔXj!A0.IM4bh'Ǣf(!3DGFaijBk>32lgkۏ폀h Cȡڱį$ڱE18GP # 80s64^ځ 4Erq$#o0@@X2L&?@d2+?u0`u`[W@)D:\؈Nn6f ʳڱ!Jg2ׄI'aEHƾ;'1%~1t J~1hTǻj9 ) tNp0~1zs< x1~1PƗU }h3d7ť$ ~13VֵV֥2bU?̀A{Gz-?տQk?yC` }g2N{`"0/` Pq9rv`չQs}ѣ> DSHy@rOu*s` XOpfeLxD JYYP..zA `FDdp cbrIqOu6`JFMugsc`t-A4-A2bq?@W F BP(?G-A^ba0K`Rn R!A0±wbrSq{zB<z1JOWpiphLwA7}o *,<|3Jtx9'9@A<pE5J*WRWuddr1챓wzA <E=JC)/;/ -AHEޤFRiѯtI%?@"J3}b4ɓK%#s}qetA+e7>B?ƜAA?jE=6@!AXyAyA!Ab/ C/?M(nUm1bU@[UC@@3@U@kqD[Uex )r)8:\r631y1UY@vFίx<@+=OanvjubPO8!?fІa4?E@0_ߋ@`Bjc⹐g ^b'ȿڿé (ω1W?Qcuq#AATAAAAA A A AF2æFӢæ d3u@’142ߔߦ߸J6߿Zl~}/Se 2DVhzў JdR 17>aG%! 
Acf YRD9Prq .1L,p//$/o/Z/0B//.Rgk)/?1l3OLG'?9?K?]?o??9n???B??AyO)O;OMO_OqOOOOOOO(rO!@DGO%'YҀQ+?_Q]+b_OXh___[s"8#U%&a>_P_Pc?u4OXHr????6_ o_Oo0oBoTofoxooooo&p,VWQWQM?)HWSPж#TRO[_TPz[ ooiI6mjjtj!d06x/-ŕHFo!32bm7n6h!I0Jw2/-v>d#s"7ktooBLHS#H$|aqTH!`0!1uWՕAn8!'$FnYYP|a`qf!HqXT @@x<1@WPkP?Tj ̣u;`u`p`$܃a[Qt:Nj6f K0rH/>TqF@wBPJhuI0Vg 4U 9<U#R0W ,05* Ⱑ0/D9UDDDDQ D D D0JʢWʢ9ʢʢ 7H9RW9R3>Pbtοÿ(:Lyp7IϦ$6HZl~ߐߢߴ =!m߆eڇ` On=VKo#nGY%~ b4ɓK ODI3;$6HZl~/&/_J/\/n///////"dvL(9{e?O"Oy795ROdOOOxOIOOOO __0_B_T_f_x__ "HIi8!YhCkkjq#]t|(A0As-K(\!3E/// ?80?B?T?f?x???????׀S#WQZ@X3I!s@JRT\M@@?@hQ GF?d ps GqARAS;r:s:s!:sA:sb;rC:sؑ ,r'A7 iN?h(?ZqytA%!YA;U/gCQ|qqX!??̄f!\Y$?upoNEU>uOHu`OI^E0NO@\WAG@)>_H[_nBB]V`rR__USPC\4?zЙ_75߁Dr&!/(99>)@S p5UGRuodw n;>h =b`C> /+/_t +˜1Ee M 2'0ƔAn᎛(F FTxM` LnFxm3` A=qih"]4` Q mn x^+ n*a,Ua e1E%L|1('a@"3@Fe2?o6o1BiEe@k=u ,쵘Bw+bQ)Q4zXj|ğ֘T @@Dv}@B?@ܪo'ӿ /<'6%up`u4Qq2vD΂ɏۏ#4ơ!Q<ѳmRdvQߖ|̳ߖAQܿ$F;EJQZË|nBRËU U GrólӽGr"1CUgyߋߝ-?h/czόϙ);M_qBTf7=Om/P3K]{ /V/A/Oaw//.S?@9D'I)/8`?'?9?K?]?o?0;a?9??9QOO)O;OMO_OqOOOOOOOO__%_7_I_[_m_________fa"bu_k=oWoiol;NoM2-t"?[!Cߑ+ PqÁqPDi0_{djZV?@HP&piȧ'9]8SJ\nVk@W QIf1oo!IfooVڒÁq#wϝÁ㓕v2rvvÁ#5GYQj!Á A2rAÃuArգAAA A A A Aixdu9i sBWsfxҿ.Ϫ,ΟPbtϝ0Bt(:L^p߂ߔߦ߸zd@@@ 1^ K.mQ`hMqb P'9zoϓ HZ5G}ߚ~E&f?b4ɓKv&8J\n[us7Yya/+/=/O/a/s////////??'?9?K?]?o?????????O#O@5OGOYOkO}OCPOĨO ,TOD6ňzB?NouDUYؠsPQ,T_$_/C[ouY_k_}____]_vX^3_o#o5oGoQE>CQdh?i¨vgNqu!BoO.OcsoF[OnO r!'yG=Oas}!s2VBs2Vt!1&5!EVBYV!m!wVORVUVV V V V Vf"fBf" f.@RdvGk*N̯ޯ&8J\n4ͬ!Sm ֳӹBS` Apc@9;@-Q!'9xϊo-ϥϲϩ#5xߊk%ߡ߳߭wC3s?s@4lIO|OÎ 4!9Ke_q)!#5GYk} 1CUgy //D/V+|4J?mFdӣ!q/Wq///??(?:?L?^??@ / }y8>:?????1Lmf7HX?GJEOiC]#LC%&/$/O-RqIv'OOO_"]q6S&"JS&YTqd_v___ZWXAY%S6u"cr@&c62(c66U6 6 6 6 66c6u"c6c6oooo%7IO!___!3_Wi{ÏՏ /ASewʟܟFXX=OυدTf(X9Ko޿_ɿXj#5GYk}ϡϳ 0B߄Ql~ߐߢߴ 2DVhz .@Rdv@ߣ/ =1FnD!YYAY A=1  %xCDV]2}It]o4]ϯπ p8?2.Ŏ}++^"ǧ!)]]䈡M0BT~@8q!8w!%M ġ1ј%{ȡjZV?@\M@@?@hQ F? #$AA7#,8" "9&#e#BQ"¾!}/28"23A3L"P3&"u# a#&a#)A%b}IY@q(ҤԨo&ȡf3CT'(T1/%BT36364C%p&B&\"2."26"2666 6 6 6 6$3EB8"EB\"EB."EB 9%#wB2TFH2ԡ"A"A֑ D .O"H\"#cg1̀WQ0A{Gz?6cUQ{?.k.__/` Fpl2f$z/>$/bj2āAAô,νŕϸUy6/?!3K2a+kG12~T86VFPνP>3É`J$Vk?//ϼӼXHbP=ATD^C> ?ENOj=VhkµqϹ|rיX=|A5$ i.jZV?@ҵ}@@L&d2?@@C{Կ:T UĿPeCkV<@u@`u5`CSVMUr,@Gv@ufPр`uN``zpa_ p)eUj4R 3tN<zs74RQ rrraP@5v_*bP@eGsPHwO5vU5v5v 5v 5v 5v 5v]vEarOar*bar gvEP3gv4`rZE`rr5dg0X̀nA{Gz?Qϻx$Af` FiWlClr+z۴Q" υ]&JR` Sa'ow` ߅P*t✲r\у`߅Tʀa\sp*rnyb(qу43'T %SyдPЄ*r` 'XOfH*?@t~<@r?@b4ɓKk?'/?%YPH5WYs@!!LDQbTpAAp/ x(DguQ\.?IQOP1 Da=#!1%#$  6B@2%S1om2_=B@`{/aB\Ui{3XU_\` 2%Q3(l?@C@@5WuLe27f5TFW3ȑ u`o'")WDѤWJGrCuL)u)p@Fb @ q.'V%KT@baT%&W%hE@{EפK((Uhq#0+h !}K2hiA4":?u+l@/u`=O݅F}ìQA$QOA*Lѳ?B\dm!A՟13<^ sލ?LQ61!Pz6aL P{PHF]1PTD#4 1$"+]1Piؤs;kK1єdYQ3't6Jќ1@aK2݄#|T7pxH;p|psnx@qw|3` Acqi_-tR4` u!mex.8H2DQ3A+/$-7/I"R(d/v///.݅$#/#/(aS3f4U VVR0{R0Qfb0ffUf f f f fqctrRtrRtr{Rtr USrb҃vxbӜD䂽1Z-yD_V_H©R)ՑBÙ8A{Gz?QH?#{B`s S@ow:OzCЏ` ]FlPBtrԒC*w` pg{` TB@[As@aB@e.@cyKZl:C4w`扔Tv{B S@y" 0XO f(V@" 0Y# ^vQ `D@֑cBA%6w`M@g@i cD+E ` ί2J)d4l?@@@F BP(?~L*Bee@g@,p N>P `Nȟv+EN֯7 NR<@u@dBg%Fxa 6"ze+@Z5l:qD >a9l`!G28|/A/dp?uz`%/(nduP`/)Pe%4&>.?PU7J1'zeh 0?P88?oB"zV oz1Ozb!%)@m?ea$<EI2 sލ?N4"p#*a%5P ]~OC?'.md!`! >a@o-ё9#P'+R>_)Te_)T_L@jQAqKŕtfo!36bTJ2a2dfOp>RyB2Ok^__wHL@k\!qTDsl *u95щ'\!;Oh@衑!)!\! 
$`!TAOM,1jZV?@*"@@L&d2?@C{ԿtuĿEArkvk@ѷup`uT`.A }Z)@w 1@0ufo`uN`zp1 )kp_!ćwT@z3ď֎)kb)AXwHqJ$„Yx;tz5CQkA7(Awmm3A%' tNsfQ _1k@ %k@!2o3 U   HrLLL Rro3R4z5Br5Bzu 5%g wl0A{Goz?xQSqQ FlPзݲ" &5` SkpaKdwѿ^` ʵPtrҼ$`ʵTaspncyM\nϼ4cɖx ˼yh*` _XOf'st Y%ߠx_ `DJciL6b`MgKiBQiyBAQzA[2q?@@@F BP(?^3^aKL@-8L@`nK~T΁(ُ@b>搀b{j2lq1UWih/׃7Iw;Y `@D9@5幀U5#RuKEdnЗjqC[ 倌_`xʣCwPA xwftnBB霬tAE4s?@ @r?@I??gdh!!/|t#k7AX2TSqqS/Q{1!A]C(xtRY\.?< qaQ]U@S!aUB2C3L@(QDB@Se!m5>au&o#Xb_=r@`+{Sli'toDo\X m.B'0Ӽbj1bZe>fgµTxxY²` LanPZ²3`ݠsE=4` mJx>AdqQU!u )()tS%$NUt;T^U@PHfR }\wHuA8D:?~GiqA $A*_N`rs"uLKx[TxS!3EWpވ4sil,-goAfڏoAg0BTfxAavAʟܟAA$vb8vALAVvjrjvvv v *v v v!Bģ!bأ+B+avBۏ.HZl~ƿؿϯӟ2DVCz5Gy .@dv߈߬߾*feArial UncodeMiS6?/?`4 R$fSymbol$67fWingds*7 fECalibr@  ?$fSwimunt(S$fPMingLU(w$fMS PGothicj@ $fDotum"|i0@ S$fESylaen  $fEstrangeloU dsaC 9$fEVrind1a8 Q$fEShrutqi| Q$fEM_angl$fETunga<"@ Q$fGSendya*/ (   R$fERavi"o Q$fGDhenu"*/ (  R$fELath#/ R$fEGautmqi | Q$fECordia NewE R$fEArial"*Cx /@~4 R$fMalgun Gothic |w L$fETimes NwRoan*Ax /@4$fDConsla@ `  $,F>EB4F!?.BFO?%BFt?=B$?3B?5B@GBt`@:B䷌@7BT@ABĸA0B4BA1BsA0BA0BA;BB.BdNG3,XN"G3lMzNE3$MNG3NG3N*G3TN$G3 OG3+OG3IO)G3rO'G3iLOG3iLOG3jLOG3jLOG34jLOG3LjLPG3djL)PG3jLAPG3jLYPG3<qPG3XPG3PG3PG3PG3آPG3 QG3 'QG3<AQG3X[QG3uQG3Q&G3QG3ԏQ G3DMQG3dMRG3M-RG3MJRG3fRG3,RG3TR&G3,aGRE3MRG34aGRE3SE3aGBSE3aGFSE3aGJSE3aGNSE3bGRSE3 bGVSE3bGZSE3bG^SE3$bGbSE3,bGfSE34bGjSE3TE3TˆBTE3`FTE3`JTE3`NTE3RT"G3ԱtT G3dT,G3T*G3T.G3>U8G3PU/G34U+G3U.G3kU?G3?V8G34OV5G3D?V9G3lV4G3V0G3܉!W2G3?SW;G3?W9G3W8G3dW/G3D.X:G3hX2G3X/G3LX2G3X2G3-Y=G3jY7G3Y&G3`YE30YE30YE3`YE3`YE30YE30YE30YE31YE3 1YE31YE31YE3$1YE3,1YE341YE3<1ZE3D1ZE3` ZE3L1ZE3`ZE3`ZE3ZG3ؤ6ZG3`QZE3`UZE3$<YZE3<]ZE3<aZE3 <eZE3<iZE3;mZE3;qZE3;uZE3T1yZE3\1}ZE3;ZE3T7ZE3<ZE3䈽ZE3Z6G3Z/G3܈ZE3;ZE3;ZE38[E37[E37 [E3d1[E3l1[E3t1[E3|1[E37[E37"[E3|7&[E3*[E31.[E312[E3Ԉ6[E3̈:[E31>[E31B[E31F[E31J[E31N[E34<R[E3D<V[E3L<Z[E3T<^[E3ˆb[E3Ĉf[E3t7j[E3l7n[E3d7r[E34`Gv[E3`Gz[E3`G~[E3`G[E3`G[E3`G[E3`G[E3`G[E3`G[E3`G[E3`G[E3[G3D[6G3[G3\G3,)\/G3< GX\2G3l<\A3\<\A3Dˆ\E3Lˆ\E3L>\E3T>\E3d>\E3l>\E3t>\E3?\E3?\E3`G\E3aG\E3 aG\E3aG\E3D\6G3\/G3?+]E3k/];G3j]!G3?]E3\]E3d]E3l]E3t]E3|]E3t<]A3|<]A3<]A3<]A3]E3]E3]9G3Jw](G3<^A3< ^A3<$^A3<(^A30,^G3,RK^(G3$s^G3DI^G3TS^(G3A^G3䛍^G3_G3<0_E3<4_E3<8_E3<<_E3<@_E3<D_E3<H_E3<L_E3<P_E3dT_G3<q_E3=u_E3 =y_E3=}_E3=_E3$=_E3,=_E34=_E3<=_E3D=_E3L=_E3T=_E3\=_E3d=_E3l=_E3t=_E3|=_E3=_E3=_E3=_E3=_E3=_E3=_E3=_E3=_E3=_E3=_E3=_E3=_E3=_E3=_E3=_E3=_E3>_E3 >_E3\>_E3>`E3>`E3> `E3> `E3>`E3>`E3>`E3>`E3>!`E3>%`E3>)`E3>-`E3>1`E3>5`E3>9`E3>=`E3?A`E3E`E3I`E3M`E3Q`E3|U`E3Y`E3 ?]`E3?a`E3?e`E3$?i`E3,?m`E34?q`E3?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\]^_`abcdefghijklmnopqrstuvwxyz{|}~      !"#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\]^_`abcdefghijklmnopqrstuvwxyz{|}~]>rFxml:spaceprsrv]rFxmlns:v14htp/'sUcea .i-ro oUf.-o/GUf=e'v=s=]o'200'Eet1 ln]rFxmlns:vdhtp/%scea .i+ro of.+o/i iA/20*c6%et/ \nbQueMarkrEvntsol=BASFLO /cmd#DcUCetd`QueMarkrEvntsol=BASFLO /cWmd#Dc/5pe9"CF14/cmd="wCF14/cmd=2"/ ]C/T _[C/a/a/a/a/a/a/a/a/0a/2a/5a/7a/:a/<a/a/a/a/$C/,ތ%C Ut4Y,b'@~/@ MC-9  W AUPQV%t4 MdC>-dkGx7"AJ@NR@#Gz7RH<(H<(JEߐREd$GR$qFv14:ValidtonZ<>' Prpeges Sh_wIgog}e='0]/E Rue{es D1 NSmTmFuwsiSggm Uc}aoDucgikbmpgfRmnyt,yoK) -{akxdugwgmmyv*mbdYsI.e UTrCnmcCeyK&kvtP*<^!ryV$z#iwn"Ba[LZEkbFl/"rEROL ()1G//*/TutEAD0AGS0C UT0G U D{HI0P S040)y=By1eR?d?v35}:) ?=:?e+?2{t%WQ"~u@1r$/6'Af"g%Z/l+awp$Ik"! 
borgbackup-1.1.15/docs/misc/benchmark-crud.txt0000644000175000017500000000677413771325506021224 0ustar useruser00000000000000borg benchmark crud
===================

Here is some example of borg benchmark crud output. I ran it on my laptop,
Core i5-4200u, 8GB RAM, SATA SSD, Linux, ext4 fs.

"src" as well as repo is local, on this SSD.

$ BORG_PASSPHRASE=secret borg init --encryption repokey-blake2 repo
$ BORG_PASSPHRASE=secret borg benchmark crud repo src

C-Z-BIG       116.06 MB/s (10 * 100.00 MB all-zero files: 8.62s)
R-Z-BIG       197.00 MB/s (10 * 100.00 MB all-zero files: 5.08s)
U-Z-BIG       418.07 MB/s (10 * 100.00 MB all-zero files: 2.39s)
D-Z-BIG       724.94 MB/s (10 * 100.00 MB all-zero files: 1.38s)
C-R-BIG        42.21 MB/s (10 * 100.00 MB random files: 23.69s)
R-R-BIG       134.45 MB/s (10 * 100.00 MB random files: 7.44s)
U-R-BIG       316.83 MB/s (10 * 100.00 MB random files: 3.16s)
D-R-BIG       251.10 MB/s (10 * 100.00 MB random files: 3.98s)
C-Z-MEDIUM    118.53 MB/s (1000 * 1.00 MB all-zero files: 8.44s)
R-Z-MEDIUM    218.49 MB/s (1000 * 1.00 MB all-zero files: 4.58s)
U-Z-MEDIUM    591.59 MB/s (1000 * 1.00 MB all-zero files: 1.69s)
D-Z-MEDIUM    730.04 MB/s (1000 * 1.00 MB all-zero files: 1.37s)
C-R-MEDIUM     31.46 MB/s (1000 * 1.00 MB random files: 31.79s)
R-R-MEDIUM    129.64 MB/s (1000 * 1.00 MB random files: 7.71s)
U-R-MEDIUM    621.86 MB/s (1000 * 1.00 MB random files: 1.61s)
D-R-MEDIUM    234.82 MB/s (1000 * 1.00 MB random files: 4.26s)
C-Z-SMALL      19.81 MB/s (10000 * 10.00 kB all-zero files: 5.05s)
R-Z-SMALL      97.69 MB/s (10000 * 10.00 kB all-zero files: 1.02s)
U-Z-SMALL      36.35 MB/s (10000 * 10.00 kB all-zero files: 2.75s)
D-Z-SMALL      57.04 MB/s (10000 * 10.00 kB all-zero files: 1.75s)
C-R-SMALL       9.81 MB/s (10000 * 10.00 kB random files: 10.19s)
R-R-SMALL      92.21 MB/s (10000 * 10.00 kB random files: 1.08s)
U-R-SMALL      64.62 MB/s (10000 * 10.00 kB random files: 1.55s)
D-R-SMALL      51.62 MB/s (10000 * 10.00 kB random files: 1.94s)

A second run some time later gave:

C-Z-BIG       115.22 MB/s (10 * 100.00 MB all-zero files: 8.68s)
R-Z-BIG       196.06 MB/s (10 * 100.00 MB all-zero files: 5.10s)
U-Z-BIG       439.50 MB/s (10 * 100.00 MB all-zero files: 2.28s)
D-Z-BIG       671.11 MB/s (10 * 100.00 MB all-zero files: 1.49s)
C-R-BIG        43.40 MB/s (10 * 100.00 MB random files: 23.04s)
R-R-BIG       133.17 MB/s (10 * 100.00 MB random files: 7.51s)
U-R-BIG       464.50 MB/s (10 * 100.00 MB random files: 2.15s)
D-R-BIG       245.19 MB/s (10 * 100.00 MB random files: 4.08s)
C-Z-MEDIUM    110.82 MB/s (1000 * 1.00 MB all-zero files: 9.02s)
R-Z-MEDIUM    217.96 MB/s (1000 * 1.00 MB all-zero files: 4.59s)
U-Z-MEDIUM    601.54 MB/s (1000 * 1.00 MB all-zero files: 1.66s)
D-Z-MEDIUM    686.99 MB/s (1000 * 1.00 MB all-zero files: 1.46s)
C-R-MEDIUM     39.91 MB/s (1000 * 1.00 MB random files: 25.06s)
R-R-MEDIUM    128.91 MB/s (1000 * 1.00 MB random files: 7.76s)
U-R-MEDIUM    599.00 MB/s (1000 * 1.00 MB random files: 1.67s)
D-R-MEDIUM    230.69 MB/s (1000 * 1.00 MB random files: 4.33s)
C-Z-SMALL      14.78 MB/s (10000 * 10.00 kB all-zero files: 6.76s)
R-Z-SMALL      96.86 MB/s (10000 * 10.00 kB all-zero files: 1.03s)
U-Z-SMALL      35.22 MB/s (10000 * 10.00 kB all-zero files: 2.84s)
D-Z-SMALL      64.93 MB/s (10000 * 10.00 kB all-zero files: 1.54s)
C-R-SMALL      11.08 MB/s (10000 * 10.00 kB random files: 9.02s)
R-R-SMALL      92.34 MB/s (10000 * 10.00 kB random files: 1.08s)
U-R-SMALL      64.49 MB/s (10000 * 10.00 kB random files: 1.55s)
D-R-SMALL      46.96 MB/s (10000 * 10.00 kB random files: 2.13s)
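
In the test labels above, the first letter is the operation being timed (C/R/U/D for create, read, update, delete), the second letter is the test data pattern (Z = all-zero files, R = random files), and the suffix is the file-size class. The sketch below is not part of borg; it is a minimal, hedged Python helper for turning two pasted runs like the ones above into a side-by-side comparison (run1.txt and run2.txt are hypothetical files holding one pasted run each)::

    import re
    from pathlib import Path

    # One result line looks like:
    #   C-Z-BIG       116.06 MB/s (10 * 100.00 MB all-zero files: 8.62s)
    LINE_RE = re.compile(
        r'(?P<op>[CRUD])-(?P<data>[ZR])-(?P<size>BIG|MEDIUM|SMALL)\s+'
        r'(?P<speed>[0-9.]+) MB/s'
    )

    def parse_run(text):
        """Map each test label to its throughput in MB/s."""
        return {
            f"{m['op']}-{m['data']}-{m['size']}": float(m['speed'])
            for m in LINE_RE.finditer(text)
        }

    # Hypothetical files containing the pasted benchmark output of two runs.
    run1 = parse_run(Path('run1.txt').read_text())
    run2 = parse_run(Path('run2.txt').read_text())
    for label in sorted(run1):
        delta = 100.0 * (run2[label] - run1[label]) / run1[label]
        print(f'{label:<12} {run1[label]:8.2f} -> {run2[label]:8.2f} MB/s ({delta:+.1f}%)')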
borgbackup-1.1.15/docs/misc/asciinema/0000755000175000017500000000000013771325773017527 5ustar useruser00000000000000borgbackup-1.1.15/docs/misc/asciinema/basic.sh0000644000175000017500000000434613771325506021145 0ustar useruser00000000000000# Here you'll see some basic commands to start working with borg.

# Note: This teaser screencast was made with borg version 1.1.0 – older or newer borg versions may behave differently.

# But let's start.

# First of all, you can always get help:
borg help

# These are a lot of commands, so we'd better start with a few:
# Let's create a repo on an external drive…
borg init --encryption=repokey /media/backup/borgdemo

# This uses the repokey encryption. You may look at "borg help init" or the online doc at https://borgbackup.readthedocs.io/ for other modes.

# So now, let's create our first (compressed) backup.
borg create --stats --progress --compression lz4 /media/backup/borgdemo::backup1 Wallpaper

# That's nice, so far.

# So let's add a new file…
echo "new nice file" > Wallpaper/newfile.txt
borg create --stats --progress --compression lz4 /media/backup/borgdemo::backup2 Wallpaper

# Wow, this was a lot faster!
# Notice the "Deduplicated size" for "This archive"!
# Borg recognized that most files did not change and deduplicated them.

# But what happens when we move a dir and create a new backup?
mv …
borg create --stats --progress --compression lz4 /media/backup/borgdemo::backup3 Wallpaper

# Still quite fast…
# But when you look at the "deduplicated file size" again, you see that borg also recognized that only the dir and not the files changed in this backup.

# Now let's look into a repo.
borg list /media/backup/borgdemo

# You'll see a list of all backups.
# You can also use the same command to look into an archive. But we'd better filter the output here:
borg list /media/backup/borgdemo::backup3 | grep 'deer.jpg'

# Oh, we found our picture. Now extract it…
mv Wallpaper Wallpaper.orig
borg extract /media/backup/borgdemo::backup3

# And check that it's the same:
diff -s Wallpaper/deer.jpg Wallpaper.orig/deer.jpg

# And, of course, we can also create remote repos via ssh when borg is set up there. This command creates a new remote repo in a subdirectory called "demo":
borg init --encryption=repokey borgdemo@remoteserver.example:./demo

# Easy, isn't it? That's all you need to know for basic usage.
# If you want to see more, have a look at the screencast showing the "advanced usage".
# In any case, enjoy using borg!
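
The walkthrough above is meant to be typed interactively, so borg prompts for the passphrase at each step. When the same steps are scripted (e.g. from cron), the passphrase can instead be supplied via the BORG_PASSPHRASE environment variable, as in the benchmark example earlier. A minimal, hedged Python sketch of that idea — it assumes borg is on the PATH, reuses only the repository path and flags shown above, and uses 'backup4' and 'secret' purely as placeholder values::

    import os
    import subprocess

    REPO = '/media/backup/borgdemo'   # repository created in the walkthrough above

    def borg(*args):
        """Run one borg command non-interactively, passing the passphrase via the environment."""
        env = dict(os.environ, BORG_PASSPHRASE='secret')   # demo value only
        subprocess.run(('borg',) + args, env=env, check=True)

    # Same create/list steps as the screencast, without prompts.
    # 'backup4' is just a hypothetical next archive name.
    borg('create', '--stats', '--compression', 'lz4', REPO + '::backup4', 'Wallpaper')
    borg('list', REPO)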
borgbackup-1.1.15/docs/misc/asciinema/basic.json0000644000175000017500000033244213771325506021505 0ustar useruser00000000000000{ "version": 1, "width": 78, "height": 25, "duration": 379.234504, "command": null, "title": null, "env": { "TERM": "xterm-256color", "SHELL": "/bin/zsh" }, "stdout": [ [ 0.000155, "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[K" ], [ 0.000133, "\u001b[?1h\u001b=" ], [ 0.000183, "\u001b[?2004h" ], [ 0.468833, "\u001b[1m#\u001b[0m\u001b[39m" ], [ 0.413214, "\b\u001b[1m#\u001b[1m \u001b[0m\u001b[39m" ], [ 0.440799, "\b\b\u001b[1m#\u001b[1m \u001b[1mH\u001b[0m\u001b[39m" ], [ 0.155436, "\b\u001b[1mH\u001b[1me\u001b[0m\u001b[39m" ], [ 0.153888, "\b\u001b[1me\u001b[1mr\u001b[0m\u001b[39m" ], [ 0.145046, "\b\u001b[1mr\u001b[1me\u001b[0m\u001b[39m" ], [ 0.191005, "\b\u001b[1me\u001b[1m you'll see some basic commands to start working with borg.\u001b[0m\u001b[39m" ], [ 0.328571, "\u001b[?1l\u001b>" ], [ 0.000462, "\u001b[?2004l\r\r\n" ], [ 0.000787, "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" ], [ 7.2e-05, "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[K" ], [ 1.4e-05, "\u001b[?1h\u001b=" ], [ 0.0003, "\u001b[?2004h" ], [ 0.553943, "\u001b[1m#\u001b[0m\u001b[39m" ], [ 0.254153, "\b\u001b[1m#\u001b[1m \u001b[0m\u001b[39m" ], [ 0.205346, "\b\b\u001b[1m#\u001b[1m \u001b[1mN\u001b[0m\u001b[39m" ], [ 0.164037, "\b\u001b[1mN\u001b[1mo\u001b[0m\u001b[39m" ], [ 0.198817, "\b\u001b[1mo\u001b[1mt\u001b[0m\u001b[39m" ], [ 0.157487, "\b\u001b[1mt\u001b[1me\u001b[0m\u001b[39m" ], [ 0.348855, "\b\u001b[1me\u001b[1m:\u001b[0m\u001b[39m" ], [ 0.308837, "\b\u001b[1m:\u001b[1m This teaser screencast was made with borg version 1.1.0 – older or n\u001b[1me\u001b[1mwer borg versions may behave differently.\u001b[0m\u001b[39m\u001b[K" ], [ 0.760183, "\u001b[?1l\u001b>" ], [ 0.001229, "\u001b[?2004l\r\r\n" ], [ 0.001043, "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" ], [ 0.000111, "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[K" ], [ 8.6e-05, "\u001b[?1h\u001b=" ], [ 0.000699, "\u001b[?2004h" ], [ 0.617302, "\u001b[1m#\u001b[0m\u001b[39m" ], [ 0.269944, "\b\u001b[1m#\u001b[1m \u001b[0m\u001b[39m" ], [ 0.231147, "\b\b\u001b[1m#\u001b[1m \u001b[1mB\u001b[0m\u001b[39m" ], [ 0.157768, "\b\u001b[1mB\u001b[1mu\u001b[0m\u001b[39m" ], [ 0.145012, "\b\u001b[1mu\u001b[1mt\u001b[0m\u001b[39m" ], [ 0.360132, "\b\u001b[1mt\u001b[1m let's start.\u001b[0m\u001b[39m" ], [ 0.808076, "\u001b[?1l\u001b>" ], [ 0.000384, "\u001b[?2004l\r\r\n" ], [ 0.001063, "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" ], [ 1e-05, "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[K" ], [ 8.2e-05, "\u001b[?1h\u001b=" ], [ 0.000271, "\u001b[?2004h" ], [ 1.213811, "\u001b[?1l\u001b>" ], [ 0.000271, "\u001b[?2004l\r\r\n" ], [ 0.001041, "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" ], [ 7e-06, "\u001b]1;~/Pictures\u0007" ], [ 4.2e-05, "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[K" ], [ 5.3e-05, "\u001b[?1h\u001b=" ], [ 0.000167, "\u001b[?2004h" ], [ 0.326924, "\u001b[1m#\u001b[0m\u001b[39m" ], [ 0.245919, "\b\u001b[1m#\u001b[1m \u001b[0m\u001b[39m" ], [ 0.173421, "\b\b\u001b[1m#\u001b[1m \u001b[1mF\u001b[0m\u001b[39m" ], [ 0.121947, "\b\u001b[1mF\u001b[1mi\u001b[0m\u001b[39m" ], [ 0.196316, "\b\u001b[1mi\u001b[1mr\u001b[1ms\u001b[0m\u001b[39m" ], [ 0.224037, "\b\u001b[1ms\u001b[1mt\u001b[0m\u001b[39m" ], [ 0.323925, "\b\u001b[1mt\u001b[1m of all, you can always get help:\u001b[0m\u001b[39m" ], [ 0.738987, "\u001b[?1l\u001b>" ], [ 0.000395, "\u001b[?2004l\r\r\n" ], [ 0.000643, 
"\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" ], [ 0.000107, "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[K" ], [ 3.8e-05, "\u001b[?1h\u001b=" ], [ 0.00031, "\u001b[?2004h" ], [ 1.268663, "\u001b[1m\u001b[31mb\u001b[0m\u001b[39m" ], [ 0.19562, "\b\u001b[1m\u001b[31mb\u001b[1m\u001b[31mo\u001b[0m\u001b[39m" ], [ 0.100091, "\b\b\u001b[1m\u001b[31mb\u001b[1m\u001b[31mo\u001b[1m\u001b[31mr\u001b[0m\u001b[39m" ], [ 0.157538, "\b\b\b\u001b[0m\u001b[32mb\u001b[0m\u001b[32mo\u001b[0m\u001b[32mr\u001b[32mg\u001b[39m" ], [ 0.196595, " " ], [ 0.210071, "h" ], [ 0.124892, "e" ], [ 0.177906, "l" ], [ 0.121006, "p" ], [ 0.314487, "\u001b[?1l\u001b>" ], [ 0.000695, "\u001b[?2004l\r\r\n" ], [ 0.000777, "\u001b]2;borg help\u0007\u001b]1;borg\u0007" ], [ 0.538908, "usage: borg [-V] [-h] [--critical] [--error] [--warning] [--info] [--debug]\r\n [--debug-topic TOPIC] [-p] [--log-json] [--lock-wait N]\r\n [--show-version] [--show-rc] [--no-files-cache] [--umask M]\r\n [--remote-path PATH] [--remote-ratelimit rate]\r\n [--consider-part-files] [--debug-profile FILE]\r\n ...\r\n\r\nBorg - Deduplicated Backups\r\n\r\noptional arguments:\r\n -V, --version show version number and exit\r\n\r\nCommon options:\r\n -h, --help show this help message and exit\r\n --critical work on log level CRITICAL\r\n --error work on log level ERROR\r\n --warning work on log level WARNING (default)\r\n --info, -v, --verbose\r\n work on log level INFO\r\n --debug enable debug output, work on log level DEBUG\r\n --debug-topic TOPIC enable TOPIC debugging (can be specified multiple\r\n times). The logger path is borg.debug. if TOPIC\r\n " ], [ 4.1e-05, " is not fully qualified.\r\n -p, --progress show progress information\r\n --log-json Output one JSON object per log line instead of\r\n formatted text.\r\n --lock-wait N wait for the lock, but max. N seconds (default: 1).\r\n --show-version show/log the borg version\r\n --show-rc show/log the return code (rc)\r\n --no-files-cache do not load/update the file metadata cache used to\r\n detect unchanged files\r\n --umask M set umask to M (local and remote, default: 0077)\r\n --remote-path PATH use PATH as borg executable on the remote (default:\r\n \"borg\")\r\n --remote-ratelimit rate\r\n set remote network upload rate limit in kiByte/s\r\n (default: 0=unlimited)\r\n --consider-part-files\r\n treat part files like normal files (e.g. to\r\n list/extract them)\r\n --debug-profile FILE Write execution profile" ], [ 1.6e-05, " in Borg format into FILE. 
For\r\n local use a Python-compatible file can be generated by\r\n suffixing FILE with \".pyprof\".\r\n\r\nrequired arguments:\r\n \r\n serve start repository server process\r\n init initialize empty repository\r\n check verify repository\r\n key manage repository key\r\n change-passphrase change repository passphrase\r\n create create backup\r\n extract extract archive contents\r\n export-tar create tarball from archive\r\n diff find differences in archive contents\r\n rename rename archive\r\n delete delete archive\r\n list list archive or repository contents\r\n mount mount repository\r\n umount umount repository\r\n info show repository or archive information\r\n break-lock break repository and cache locks\r\n prune " ], [ 2e-05, " prune archives\r\n upgrade upgrade repository format\r\n recreate Re-create archives\r\n with-lock run user command with lock held\r\n debug debugging command (not intended for normal use)\r\n benchmark benchmark command\r\n" ], [ 0.043747, "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" ], [ 8e-06, "\u001b]1;~/Pictures\u0007" ], [ 5.2e-05, "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[K" ], [ 4.6e-05, "\u001b[?1h\u001b=" ], [ 0.000163, "\u001b[?2004h" ], [ 1.509225, "\u001b[1m#\u001b[0m\u001b[39m" ], [ 0.593308, "\b\u001b[1m#\u001b[1m \u001b[0m\u001b[39m" ], [ 0.334291, "\b\b\u001b[1m#\u001b[1m \u001b[1mT\u001b[0m\u001b[39m" ], [ 0.170683, "\b\u001b[1mT\u001b[1mh\u001b[0m\u001b[39m" ], [ 0.07295, "\b\u001b[1mh\u001b[1me\u001b[0m\u001b[39m" ], [ 0.184509, "\b\u001b[1me\u001b[1ms\u001b[0m\u001b[39m" ], [ 0.136032, "\b\u001b[1ms\u001b[1me\u001b[0m\u001b[39m" ], [ 0.250718, "\b\u001b[1me\u001b[1m are a lot of commands, so better we start with a few:\u001b[0m\u001b[39m" ], [ 1.088446, "\u001b[?1l\u001b>" ], [ 0.000396, "\u001b[?2004l\r\r\n" ], [ 0.000604, "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" ], [ 0.000101, "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[K" ], [ 4.5e-05, "\u001b[?1h\u001b=" ], [ 0.000296, "\u001b[?2004h" ], [ 0.921744, "\u001b[1m#\u001b[0m\u001b[39m" ], [ 0.276219, "\b\u001b[1m#\u001b[1m \u001b[0m\u001b[39m" ], [ 0.204903, "\b\b\u001b[1m#\u001b[1m \u001b[1mL\u001b[0m\u001b[39m" ], [ 0.137064, "\b\u001b[1mL\u001b[1me\u001b[0m\u001b[39m" ], [ 0.16386, "\b\u001b[1me\u001b[1mt\u001b[0m\u001b[39m" ], [ 0.340061, "\b\u001b[1mt\u001b[1m'\u001b[0m\u001b[39m" ], [ 0.115905, "\b\u001b[1m'\u001b[1ms\u001b[0m\u001b[39m" ], [ 0.213255, "\b\u001b[1ms\u001b[1m create a repo on an external drive:\u001b[0m\u001b[39m" ], [ 1.086717, "\u001b[?1l\u001b>" ], [ 0.000391, "\u001b[?2004l\r\r\n" ], [ 0.000606, "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" ], [ 0.000133, "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[K" ], [ 7.5e-05, "\u001b[?1h\u001b=" ], [ 0.000274, "\u001b[?2004h" ], [ 1.935612, "\u001b[1m\u001b[31mb\u001b[0m\u001b[39m" ], [ 0.184978, "\b\u001b[1m\u001b[31mb\u001b[1m\u001b[31mo\u001b[0m\u001b[39m" ], [ 0.115803, "\b\b\u001b[1m\u001b[31mb\u001b[1m\u001b[31mo\u001b[1m\u001b[31mr\u001b[0m\u001b[39m" ], [ 0.134282, "\b\b\b\u001b[0m\u001b[32mb\u001b[0m\u001b[32mo\u001b[0m\u001b[32mr\u001b[32mg\u001b[39m" ], [ 0.266061, " " ], [ 0.599046, "i" ], [ 0.183493, "n" ], [ 0.181453, "i" ], [ 0.258375, "t" ], [ 0.712329, " " ], [ 0.381053, "" ], [ 0.381053, "-" ], [ 0.119206, "-" ], [ 0.18993, "e" ], [ 0.175168, "n" ], [ 0.258977, "c" ], [ 0.139364, "r" ], [ 0.111012, "y" ], [ 0.55406, "p" ], [ 0.261667, "t" ], [ 0.284611, "i" ], [ 0.142087, "o" ], [ 0.195185, "n" ], [ 0.23882, "=" ], [ 0.31059, "r" ], [ 0.151355, "e" ], [ 
0.165925, "p" ], [ 0.132833, "o" ], [ 0.253402, "k" ], [ 0.174711, "e" ], [ 0.245888, "y" ], [ 0.759586, " " ], [ 0.383355, "\u001b[4m/\u001b[24m" ], [ 0.189694, "\b\u001b[4m/\u001b[4mm\u001b[24m" ], [ 0.16364, "\b\u001b[4mm\u001b[4me\u001b[24m" ], [ 0.151451, "\b\u001b[4me\u001b[4md\u001b[24m" ], [ 0.239109, "\u001b[?7l\u001b[31m......\u001b[39m\u001b[?7h" ], [ 0.006487, "\r\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[32mborg\u001b[39m init --encryption=repokey \u001b[4m/media\u001b[1m\u001b[4m/\u001b[0m\u001b[24m\u001b[K" ], [ 0.268216, "\u001b[?7l\u001b[31m......\u001b[39m\u001b[?7h" ], [ 0.003429, "\r\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[32mborg\u001b[39m init --encryption=repokey \u001b[4m/media/backup\u001b[1m\u001b[4m/\u001b[0m\u001b[24m\u001b[K" ], [ 0.232352, "\u001b[?7l\u001b[31m......\u001b[39m\u001b[?7h" ], [ 0.003575, "\r\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[32mborg\u001b[39m init --encryption=repokey \u001b[4m/media/backup/borgdemo\u001b[1m\u001b[4m/\u001b[0m\u001b[24m\u001b[K" ], [ 0.492094, "\b\b\u001b[4mo\u001b[24m\u001b[0m\u001b[24m \b" ], [ 0.748712, "\u001b[?1l\u001b>" ], [ 0.001017, "\u001b[?2004l\r\r\n" ], [ 0.000712, "\u001b]2;borg init --encryption=repokey /media/backup/borgdemo\u0007\u001b]1;borg\u0007" ], [ 0.548105, "Enter new passphrase: " ], [ 2.119749, "\r\n" ], [ 0.000155, "Enter same passphrase again: " ], [ 1.606761, "\r\n" ], [ 5.8e-05, "Do you want your passphrase to be displayed for verification? [yN]: " ], [ 0.901237, "\r\n" ], [ 0.362453, "\r\nBy default repositories initialized with this version will produce security\r\nerrors if written to with an older version (up to and including Borg 1.0.8).\r\n\r\nIf you want to use these older versions, you can disable the check by running:\r\nborg upgrade --disable-tam '/media/backup/borgdemo'\r\n\r\nSee https://borgbackup.readthedocs.io/en/stable/changes.html#pre-1-0-9-manifest-spoofing-vulnerability for details about the security implications.\r\n" ], [ 0.050488, "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" ], [ 5e-05, "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[K" ], [ 5.9e-05, "\u001b[?1h\u001b=" ], [ 0.000166, "\u001b[?2004h" ], [ 2.49308, "\u001b[1m#\u001b[0m\u001b[39m" ], [ 0.308744, "\b\u001b[1m#\u001b[1m \u001b[0m\u001b[39m" ], [ 0.256774, "\b\b\u001b[1m#\u001b[1m \u001b[1mT\u001b[0m\u001b[39m" ], [ 0.157732, "\b\u001b[1mT\u001b[1mh\u001b[0m\u001b[39m" ], [ 0.127107, "\b\u001b[1mh\u001b[1mi\u001b[0m\u001b[39m" ], [ 0.178449, "\b\u001b[1mi\u001b[1ms\u001b[0m\u001b[39m" ], [ 0.179372, "\b\u001b[1ms\u001b[1m \u001b[0m\u001b[39m" ], [ 0.383584, "\b\u001b[1m \u001b[1mu\u001b[0m\u001b[39m" ], [ 0.103361, "\b\u001b[1mu\u001b[1ms\u001b[0m\u001b[39m" ], [ 0.155066, "\b\u001b[1ms\u001b[1me\u001b[0m\u001b[39m" ], [ 0.133308, "\b\u001b[1me\u001b[1ms\u001b[0m\u001b[39m" ], [ 0.23615, "\b\u001b[1ms\u001b[1m the repokey encryption. 
You may look at \"borg help init\" or the \u001b[1mo\u001b[1mnline doc at https://borgbackup.readthedocs.io/ for other modes.\u001b[0m\u001b[39m\u001b[K" ], [ 1.159159, "\u001b[?1l\u001b>" ], [ 0.0004, "\u001b[?2004l\r\r\n" ], [ 0.000738, "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" ], [ 0.000111, "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[K" ], [ 8.5e-05, "\u001b[?1h\u001b=" ], [ 0.000286, "\u001b[?2004h" ], [ 1.645569, "\u001b[?1l\u001b>" ], [ 0.000452, "\u001b[?2004l\r\r\n" ], [ 0.000619, "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" ], [ 0.0001, "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[K" ], [ 7.3e-05, "\u001b[?1h\u001b=" ], [ 0.000161, "\u001b[?2004h" ], [ 1.17234, "\u001b[1m#\u001b[0m\u001b[39m" ], [ 0.575706, "\b\u001b[1m#\u001b[1m \u001b[0m\u001b[39m" ], [ 0.205759, "\b\b\u001b[1m#\u001b[1m \u001b[1mS\u001b[0m\u001b[39m" ], [ 0.343517, "\b\u001b[1mS\u001b[1mo\u001b[0m\u001b[39m" ], [ 0.245497, "\b\u001b[1mo\u001b[1m \u001b[0m\u001b[39m" ], [ 0.218486, "\b\u001b[1m \u001b[1mn\u001b[0m\u001b[39m" ], [ 0.171258, "\b\u001b[1mn\u001b[1mo\u001b[0m\u001b[39m" ], [ 0.146364, "\b\u001b[1mo\u001b[1mw\u001b[0m\u001b[39m" ], [ 0.25775, "\b\u001b[1mw\u001b[1m \u001b[0m\u001b[39m" ], [ 0.271708, "\b\b\u001b[1mw\u001b[0m\u001b[39m\u001b[0m\u001b[39m \b" ], [ 0.213838, "\b\u001b[1mw\u001b[1m,\u001b[0m\u001b[39m" ], [ 0.422324, "\b\u001b[1m,\u001b[1m let's create our first (compressed) backup.\u001b[0m\u001b[39m" ], [ 0.561514, "\u001b[?1l\u001b>" ], [ 0.000855, "\u001b[?2004l\r\r\n" ], [ 0.000773, "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" ], [ 4.8e-05, "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[K" ], [ 6.2e-05, "\u001b[?1h\u001b=" ], [ 0.000411, "\u001b[?2004h" ], [ 1.326196, "\u001b[1m\u001b[31mb\u001b[0m\u001b[39m" ], [ 0.191851, "\b\u001b[1m\u001b[31mb\u001b[1m\u001b[31mo\u001b[0m\u001b[39m" ], [ 0.136657, "\b\b\u001b[1m\u001b[31mb\u001b[1m\u001b[31mo\u001b[1m\u001b[31mr\u001b[0m\u001b[39m" ], [ 0.142499, "\b\b\b\u001b[0m\u001b[32mb\u001b[0m\u001b[32mo\u001b[0m\u001b[32mr\u001b[32mg\u001b[39m" ], [ 0.173217, " " ], [ 0.294445, "c" ], [ 0.200519, "r" ], [ 0.153078, "e" ], [ 0.133383, "a" ], [ 0.12891, "t" ], [ 0.151491, "e" ], [ 0.728709, " " ], [ 0.592118, "-" ], [ 0.118108, "-" ], [ 0.277349, "s" ], [ 0.134588, "t" ], [ 0.148057, "a" ], [ 0.090202, "t" ], [ 0.150971, "s" ], [ 0.307217, " " ], [ 0.481688, "-" ], [ 0.112243, "-" ], [ 0.234317, "p" ], [ 0.12453, "r" ], [ 0.116446, "o" ], [ 0.213657, "g" ], [ 0.12239, "r" ], [ 0.165156, "e" ], [ 0.256082, "s" ], [ 0.175158, "s" ], [ 0.302493, " " ], [ 0.490303, "-" ], [ 0.117279, "-" ], [ 0.130499, "c" ], [ 0.146261, "o" ], [ 0.139848, "m" ], [ 0.156108, "p" ], [ 0.190058, "r" ], [ 0.166862, "e" ], [ 0.261225, "s" ], [ 0.157133, "s" ], [ 0.281205, "i" ], [ 0.142487, "o" ], [ 0.179023, "n" ], [ 0.854723, " " ], [ 0.580178, "l" ], [ 0.29757, "z" ], [ 0.3111, "4" ], [ 1.085772, " " ], [ 0.635539, "\u001b[4m/\u001b[24m" ], [ 0.268857, "\b\u001b[4m/\u001b[4mm\u001b[24m" ], [ 0.121341, "\b\u001b[4mm\u001b[4me\u001b[24m" ], [ 0.141645, "\b\u001b[4me\u001b[4md\u001b[24m" ], [ 0.230858, "\u001b[?7l\u001b[31m......\u001b[39m\u001b[?7h" ], [ 0.010346, "\r\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[32mborg\u001b[39m create --stats --progress --compression lz4 \u001b[4m/media\u001b[1m\u001b[4m/\u001b[0m\u001b[24m\u001b[K" ], [ 0.416084, "\u001b[?7l\u001b[31m......\u001b[39m\u001b[?7h" ], [ 0.004048, "\r\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[32mborg\u001b[39m create --stats --progress 
--compression lz4 \u001b[4m/media/backup\u001b[1m\u001b[4m/\u001b[0m\u001b[24m\u001b[K" ], [ 0.346657, "\u001b[?7l" ], [ 2.7e-05, "\u001b[31m......\u001b[39m\u001b[?7h" ], [ 0.003996, "\r\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[32mborg\u001b[39m create --stats --progress --compression lz4 \u001b[4m/media/backup/borgdemo\u001b[1m\u001b[4m/\u001b[0m\u001b[24m\u001b[K" ], [ 1.068791, "\b\b\u001b[4mo\u001b[24m\u001b[0m\u001b[24m \b" ], [ 1.210608, "\u001b[22D\u001b[24m/\u001b[24mm\u001b[24me\u001b[24md\u001b[24mi\u001b[24ma\u001b[24m/\u001b[24mb\u001b[24ma\u001b[24mc\u001b[24mk\u001b[24mu\u001b[24mp\u001b[24m/\u001b[24mb\u001b[24mo\u001b[24mr\u001b[24mg\u001b[24md\u001b[24me\u001b[24mm\u001b[24mo:" ], [ 0.125995, ":" ], [ 0.376036, "b" ], [ 0.101011, "a" ], [ 0.178171, "c \r\u001b[K" ], [ 0.133561, "k" ], [ 0.162923, "\rku" ], [ 0.241519, "p" ], [ 1.426974, "1" ], [ 0.432275, " " ], [ 0.295102, "\u001b[4mW\u001b[24m" ], [ 0.158768, "\b\u001b[4mW\u001b[4ma\u001b[24m" ], [ 0.270666, "\b\u001b[4ma\u001b[4ml\u001b[24m" ], [ 0.13015, "\b\u001b[4ml\u001b[4ml\u001b[24m" ], [ 0.267749, "\b\u001b[4ml\u001b[4mp\u001b[24m" ], [ 0.173461, "\u001b[?7l\u001b[31m......\u001b[39m\u001b[?7h" ], [ 0.003997, "\u001b[A\r\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ " ], [ 3.5e-05, "\u001b[32mborg\u001b[39m create --stats --progress --compression lz4 /media/backup/borgdemo::backup1 \u001b[4mWallpaper\u001b[1m\u001b[4m/\u001b[0m\u001b[24m\u001b[K" ], [ 0.997225, "\b\b\u001b[4mr\u001b[24m\u001b[K" ], [ 0.447022, "\u001b[?1l\u001b>" ], [ 0.002978, "\u001b[?2004l\r\r\n" ], [ 0.000917, "\u001b]2;borg create --stats --progress --compression lz4 Wallpaper\u0007\u001b]1;borg\u0007" ], [ 0.630228, "Enter passphrase for key /media/backup/borgdemo: " ], [ 2.264647, "\r\n" ], [ 0.108689, "0 B O 0 B C 0 B D 0 N Wallpaper \r" ], [ 0.024194, "Initializing cache transaction: Reading config \r" ], [ 0.000234, "Initializing cache transaction: Reading chunks \r" ], [ 0.000225, "Initializing cache transaction: Reading files \r" ], [ 0.000215, " \r" ], [ 0.179719, "5.21 MB O 5.21 MB C 5.21 MB D 8 N Wallpaper/bigcollec...em_Wasserloch_im_Q.jpg\r" ], [ 0.206362, "17.55 MB O 17.51 MB C 15.18 MB D 31 N Wallpaper/bigcoll...ckr_OpenGetty_Im.jpg\r" ], [ 0.202173, "28.63 MB O 28.59 MB C 26.26 MB D 49 N Wallpaper/bigcoll...ortugal____Stefa.jpg\r" ], [ 0.201105, "41.30 MB O 41.26 MB C 38.94 MB D 71 N Wallpaper/bigcoll...e_in_der_Gro_auf.jpg\r" ], [ 0.205913, "53.63 MB O 53.54 MB C 51.21 MB D 93 N Wallpaper/bigcoll...De_Janeiro__Bras.jpg\r" ], [ 0.201657, "66.10 MB O 65.99 MB C 63.66 MB D 115 N Wallpaper/bigcol...Kenai-Fjords-Nat.jpg\r" ], [ 0.222663, "78.06 MB O 77.92 MB C 75.59 MB D 135 N Wallpaper/bigcol...ien____Nora_De_A.jpg\r" ], [ 0.206809, "89.99 MB O 89.82 MB C 85.43 MB D 155 N Wallpaper/bigcol...__De_Pere__Wisco.jpg\r" ], [ 0.204475, "101.51 MB O 101.32 MB C 96.93 MB D 175 N Wallpaper/bigco..._Silver_Falls_S.jpg\r" ], [ 0.206201, "115.08 MB O 114.89 MB C 110.50 MB D 199 N Wallpaper/bigco..._Garret_Suhrie.jpg\r" ], [ 0.202147, "126.51 MB O 126.28 MB C 119.47 MB D 220 N Wallpaper/bigco...fenmesserfisch.jpg\r" ], [ 0.206629, "138.74 MB O 138.50 MB C 131.69 MB D 243 N Wallpaper/bigco...tswana____Mich.jpg\r" ], [ 0.214855, "152.84 MB O 152.60 MB C 142.74 MB D 269 N Wallpaper/bigco...fest__Munich__.jpg\r" ], [ 0.200083, "163.05 MB O 162.80 MB C 152.94 MB D 288 N Wallpaper/bigco..._Marco_RomaniG.jpg\r" ], [ 0.208535, "175.85 MB O 175.57 MB C 164.47 MB D 308 N Wallpaper/bigco...gway__Colorado.jpg\r" ], [ 0.21234, "184.65 MB O 184.36 MB C 
173.25 MB D 324 N Wallpaper/bigco...nstanz__Baden-.jpg\r" ], [ 0.200087, "194.92 MB O 194.59 MB C 183.49 MB D 343 N Wallpaper/bigco...op__Caledon__P.jpg\r" ], [ 0.201257, "204.71 MB O 204.38 MB C 191.68 MB D 361 N Wallpaper/bigco...izian_in_Jamni.jpg\r" ], [ 0.213355, "217.22 MB O 216.88 MB C 202.98 MB D 382 N Wallpaper/bigco...appadokien__T_.jpg\r" ], [ 0.202274, "230.56 MB O 230.16 MB C 212.45 MB D 404 N Wallpaper/bigco...eleiGetty_Imag.jpg\r" ], [ 0.204836, "242.95 MB O 242.53 MB C 224.34 MB D 426 N Wallpaper/bigco...g__Thailand___.jpg\r" ], [ 0.205093, "254.42 MB O 254.02 MB C 232.75 MB D 446 N Wallpaper/bigco...ame_Reserve__O.jpg\r" ], [ 0.201488, "265.77 MB O 265.39 MB C 242.76 MB D 466 N Wallpaper/bigco...e_Republik____.jpg\r" ], [ 0.20036, "278.64 MB O 278.26 MB C 254.62 MB D 488 N Wallpaper/bigco...ien____Patty_P.jpg\r" ], [ 0.209301, "288.82 MB O 288.45 MB C 264.81 MB D 505 N Wallpaper/bigco...Ruhpolding__Ch.jpg\r" ], [ 0.214561, "298.04 MB O 297.68 MB C 274.04 MB D 520 N Wallpaper/bigco...wo__Landkreis_.jpg\r" ], [ 0.222111, "311.03 MB O 310.66 MB C 287.02 MB D 543 N Wallpaper/bigco...a__Portugal___.jpg\r" ], [ 0.204945, "319.53 MB O 319.17 MB C 295.53 MB D 558 N Wallpaper/bigco...hinos__Hondura.jpg\r" ], [ 0.213928, "328.19 MB O 327.77 MB C 304.13 MB D 574 N Wallpaper/bigco...ndon__Gro_brit.jpg\r" ], [ 0.206827, "338.25 MB O 337.81 MB C 314.17 MB D 591 N Wallpaper/bigco...l_Forest__Bund.jpg\r" ], [ 0.209094, "347.40 MB O 346.96 MB C 323.32 MB D 606 N Wallpaper/bigco...tlantischen_Oz.jpg\r" ], [ 0.200671, "361.16 MB O 360.71 MB C 334.04 MB D 628 N Wallpaper/bigco...lpark__British.jpg\r" ], [ 0.208778, "375.20 MB O 374.77 MB C 348.09 MB D 650 N Wallpaper/bigco...swagen_beim_Ro.jpg\r" ], [ 0.2023, "385.94 MB O 385.47 MB C 358.79 MB D 669 N Wallpaper/bigco...-Bessin__Frank.jpg\r" ], [ 0.201448, "396.55 MB O 396.10 MB C 368.89 MB D 687 N Wallpaper/bigco...nian_Switzerla.jpg\r" ], [ 0.200229, "411.96 MB O 411.41 MB C 373.94 MB D 711 N Wallpaper/bigco...CREATISTAGetty.jpg\r" ], [ 0.202083, "420.92 MB O 420.38 MB C 382.91 MB D 727 N Wallpaper/bigco...LLCCorbisVCG_G.jpg\r" ], [ 0.202677, "430.76 MB O 430.21 MB C 392.74 MB D 745 N Wallpaper/bigco...r__Tansania___.jpg\r" ], [ 0.206733, "441.45 MB O 440.87 MB C 400.76 MB D 763 N Wallpaper/bigco...andenburg__Deu.jpg\r" ], [ 0.205541, "449.42 MB O 448.83 MB C 408.72 MB D 776 N Wallpaper/bigco...Wind_Cave_Nati.jpg\r" ], [ 0.201764, "458.56 MB O 457.97 MB C 417.20 MB D 792 N Wallpaper/bigco...dney_Harbour_B.jpg\r" ], [ 0.206272, "470.73 MB O 470.08 MB C 428.74 MB D 815 N Wallpaper/bigco...hland____Patri.jpg\r" ], [ 0.210875, "485.80 MB O 485.15 MB C 443.01 MB D 843 N Wallpaper/bigco...Hokkaido__Japa.jpg\r" ], [ 0.227162, "498.93 MB O 498.27 MB C 450.80 MB D 867 N Wallpaper/bigco...topher_Collins.jpg\r" ], [ 0.206293, "510.73 MB O 510.07 MB C 462.15 MB D 887 N Wallpaper/bigco...itzeinschlag_i.jpg\r" ], [ 0.200265, "520.54 MB O 519.86 MB C 471.39 MB D 903 N Wallpaper/bigco..._zwischen_Boli.jpg\r" ], [ 0.204067, "528.01 MB O 527.33 MB C 478.86 MB D 916 N Wallpaper/bigco...jall__Island__.jpg\r" ], [ 0.209223, "539.61 MB O 538.94 MB C 490.47 MB D 934 N Wallpaper/bigco..._amares__Provi.jpg\r" ], [ 0.215843, "551.16 MB O 550.49 MB C 501.50 MB D 952 N Wallpaper/bigco...tionalpark__Ut.jpg\r" ], [ 0.212909, "561.29 MB O 560.60 MB C 511.22 MB D 970 N Wallpaper/bigco..._Inseln__Niede.jpg\r" ], [ 0.209655, "571.59 MB O 570.86 MB C 520.92 MB D 989 N Wallpaper/bigco...rbeskopf__Huns.jpg\r" ], [ 0.232431, "582.52 MB O 581.80 MB C 525.99 MB D 1006 N 
Wallpaper/bigc...n__an_art_in 2.jpg\r" ], [ 0.201199, "593.36 MB O 592.12 MB C 536.31 MB D 1036 N Wallpaper/more/Green_Curves.jpg \r" ], [ 0.205747, "604.80 MB O 603.52 MB C 547.71 MB D 1044 N Wallpaper/evenmore/ChipDE 06.jpg \r" ], [ 0.23016, "Compacting segments 0% \r" ], [ 0.174726, "Compacting segments 50% \r" ], [ 4.5e-05, " \r" ], [ 0.04695, "Saving files cache \r" ], [ 0.005688, "Saving chunks cache \r" ], [ 0.000299, "Saving cache config \r" ], [ 0.107527, " \r" ], [ 3.7e-05, " \r" ], [ 0.000355, "------------------------------------------------------------------------------\r\n" ], [ 3.7e-05, "Archive name: backup1\r\n" ], [ 1.4e-05, "Archive fingerprint: 9758c7db339a066360bffad17b2ffac4fb368c6722c0be3a47a7a9b631f06407\r\n" ], [ 0.000106, "Time (start): Fri, 2017-07-14 21:54:06\r\nTime (end): Fri, 2017-07-14 21:54:17\r\n" ], [ 3.9e-05, "Duration: 11.40 seconds\r\n" ], [ 3.4e-05, "Number of files: 1050\r\n" ], [ 7.2e-05, "Utilization of maximum supported archive size: 0%\r\n------------------------------------------------------------------------------\r\n" ], [ 4.7e-05, " Original size Compressed size Deduplicated size\r\n" ], [ 1.1e-05, "This archive: 618.96 MB 617.47 MB 561.67 MB\r\n" ], [ 2.7e-05, "All archives: 618.96 MB 617.47 MB 561.67 MB\r\n" ], [ 2.4e-05, "\r\n" ], [ 2.3e-05, " Unique chunks Total chunks\r\n" ], [ 1.3e-05, "Chunk index: 999 1093\r\n" ], [ 2.4e-05, "------------------------------------------------------------------------------\r\n" ], [ 0.04885, "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" ], [ 0.000195, "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[K" ], [ 7.4e-05, "\u001b[?1h\u001b=" ], [ 0.000196, "\u001b[?2004h" ], [ 1.403148, "\u001b[1m#\u001b[0m\u001b[39m" ], [ 0.918581, "\b\u001b[1m#\u001b[1m \u001b[0m\u001b[39m" ], [ 0.361872, "\b\b\u001b[1m#\u001b[1m \u001b[1mT\u001b[0m\u001b[39m" ], [ 0.12148, "\b\u001b[1mT\u001b[1mh\u001b[0m\u001b[39m" ], [ 0.21559, "\b\u001b[1mh\u001b[1ma\u001b[0m\u001b[39m" ], [ 0.152309, "\b\u001b[1ma\u001b[1mt\u001b[0m\u001b[39m" ], [ 0.941741, "\b\u001b[1mt\u001b[1m's nice, so far.\u001b[0m\u001b[39m" ], [ 1.005262, "\u001b[?1l\u001b>" ], [ 0.00039, "\u001b[?2004l\r\r\n" ], [ 0.001061, "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" ], [ 8.3e-05, "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[K" ], [ 6.1e-05, "\u001b[?1h\u001b=" ], [ 0.000287, "\u001b[?2004h" ], [ 2.564637, "\u001b[1m#\u001b[0m\u001b[39m" ], [ 0.34769, "\b\u001b[1m#\u001b[1m \u001b[0m\u001b[39m" ], [ 0.160447, "\b\b\u001b[1m#\u001b[1m \u001b[1mS\u001b[0m\u001b[39m" ], [ 0.153165, "\b\u001b[1mS\u001b[1mo\u001b[0m\u001b[39m" ], [ 0.17514, "\b\u001b[1mo\u001b[1m \u001b[0m\u001b[39m" ], [ 0.198658, "\b\u001b[1m \u001b[1ml\u001b[0m\u001b[39m" ], [ 0.204631, "\b\u001b[1ml\u001b[1me\u001b[0m\u001b[39m" ], [ 0.250815, "\b\u001b[1me\u001b[1mt\u001b[0m\u001b[39m" ], [ 1.190059, "\b\u001b[1mt\u001b[1m's add a new file…\u001b[0m\u001b[39m" ], [ 1.216941, "\u001b[?1l\u001b>" ], [ 0.000401, "\u001b[?2004l\r\r\n" ], [ 0.000756, "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" ], [ 2.1e-05, "\u001b]1;~/Pictures\u0007" ], [ 8.1e-05, "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[K" ], [ 5.3e-05, "\u001b[?1h\u001b=" ], [ 0.000277, "\u001b[?2004h" ], [ 1.289557, "\u001b[1m\u001b[31me\u001b[0m\u001b[39m" ], [ 0.216875, "\b\u001b[1m\u001b[31me\u001b[1m\u001b[31mc\u001b[0m\u001b[39m" ], [ 0.184187, "\b\b\u001b[1m\u001b[31me\u001b[1m\u001b[31mc\u001b[1m\u001b[31mh\u001b[0m\u001b[39m" ], [ 0.177444, 
"\b\b\b\u001b[0m\u001b[32me\u001b[0m\u001b[32mc\u001b[0m\u001b[32mh\u001b[32mo\u001b[39m" ], [ 0.226152, " " ], [ 0.320216, "\u001b[33m\"\u001b[39m" ], [ 0.404454, "\b\u001b[33m\"\u001b[33ma\u001b[39m" ], [ 0.267657, "\b\u001b[33ma\u001b[33md\u001b[39m" ], [ 0.130258, "\b\u001b[33md\u001b[33md\u001b[39m" ], [ 1.613237, "\b\u001b[33md\u001b[33me\u001b[39m" ], [ 0.175381, "\b\u001b[33me\u001b[33md\u001b[39m" ], [ 0.404248, "\b\u001b[33md\u001b[33m \u001b[39m" ], [ 0.669276, "\b\u001b[33m \u001b[33mn\u001b[39m" ], [ 0.128663, "\b\u001b[33mn\u001b[33me\u001b[39m" ], [ 0.132483, "\b\u001b[33me\u001b[33mw\u001b[39m" ], [ 0.175823, "\b\u001b[33mw\u001b[33m \u001b[39m" ], [ 0.220023, "\b\u001b[33m \u001b[33mn\u001b[39m" ], [ 0.156931, "\b\u001b[33mn\u001b[33mi\u001b[39m" ], [ 0.10604, "\b\u001b[33mi\u001b[33mc\u001b[39m" ], [ 0.166585, "\b\u001b[33mc\u001b[33me\u001b[39m" ], [ 0.306911, "\b\u001b[33me\u001b[33m \u001b[39m" ], [ 0.228895, "\b\u001b[33m \u001b[33mf\u001b[39m" ], [ 0.160772, "\b\u001b[33mf\u001b[33mi\u001b[39m" ], [ 0.144448, "\b\u001b[33mi\u001b[33ml\u001b[39m" ], [ 0.125193, "\b\u001b[33ml\u001b[33me\u001b[39m" ], [ 0.828758, "\b\u001b[33me\u001b[33m\"\u001b[39m" ], [ 0.566156, " " ], [ 0.349791, ">" ], [ 0.577663, " " ], [ 0.28936, "\u001b[4mW\u001b[24m" ], [ 0.157708, "\b\u001b[4mW\u001b[4ma\u001b[24m" ], [ 0.226616, "\b\u001b[4ma\u001b[4ml\u001b[24m" ], [ 0.106124, "\b\u001b[4ml\u001b[4ml\u001b[24m" ], [ 0.099397, "\u001b[?7l\u001b[31m......\u001b[39m\u001b[?7h" ], [ 0.00361, "\r\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[32mecho\u001b[39m \u001b[33m\"added new nice file\"\u001b[39m > \u001b[4mWallpaper\u001b[1m\u001b[4m/\u001b[0m\u001b[24m\u001b[K" ], [ 0.822747, "\u001b[?7l\u001b[31m......\u001b[39m\u001b[?7h" ], [ 0.003743, "\r\r\u001b[40C\u001b[0m\u001b[4m/\u001b[24m" ], [ 0.00018, "\r\r\n\u001b[J" ], [ 5.1e-05, "\u001b[38;5;33m2048example\u001b[0m/ \u001b[38;5;13mdeer.jpg\u001b[0m \u001b[38;5;33mmore\u001b[0m/ \r\n\u001b[J\u001b[38;5;33mbigcollection\u001b[0m/ \u001b[J\u001b[38;5;33mevenmore\u001b[0m/ \u001b[J \u001b[A\u001b[A\u001b[0m\u001b[27m\u001b[24m\r\u001b[2C\u001b[32mecho\u001b[39m \u001b[33m\"added new nice file\"\u001b[39m > \u001b[4mWallpaper/\u001b[24m\u001b[K" ], [ 1.173525, "\u001b[10D\u001b[24mW\u001b[24ma\u001b[24ml\u001b[24ml\u001b[24mp\u001b[24ma\u001b[24mp\u001b[24me\u001b[24mr\u001b[24m/n" ], [ 0.118482, "e" ], [ 0.130187, "w" ], [ 0.499912, "f" ], [ 0.161863, "i" ], [ 0.13679, "l" ], [ 0.093681, "e" ], [ 0.261183, "." 
], [ 0.312651, "t" ], [ 0.10665, "x" ], [ 0.131562, "t" ], [ 0.79879, "\u001b[?1l\u001b>" ], [ 0.001397, "\u001b[?2004l\r\r\n\u001b[J" ], [ 0.000679, "\u001b]2;echo \"added new nice file\" > Wallpaper/newfile.txt\u0007\u001b]1;echo\u0007" ], [ 0.000151, "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" ], [ 5.4e-05, "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[K" ], [ 8.6e-05, "\u001b[?1h\u001b=" ], [ 0.000183, "\u001b[?2004h" ], [ 2.785656, "\u001b[32mecho\u001b[39m \u001b[33m\"added new nice file\"\u001b[39m > \u001b[4mWallpaper/newfile.txt\u001b[24m" ], [ 0.206019, "\u001b[50D\u001b[1m#\u001b[1m \u001b[1mS\u001b[1mo\u001b[1m \u001b[1ml\u001b[1me\u001b[1mt\u001b[1m'\u001b[1ms\u001b[1m \u001b[1ma\u001b[1md\u001b[1md\u001b[1m \u001b[1ma\u001b[1m \u001b[1mn\u001b[1me\u001b[1mw\u001b[1m \u001b[1mf\u001b[1mi\u001b[1ml\u001b[1me\u001b[1m…\u001b[0m\u001b[39m \u001b[24m \u001b[24m \u001b[24m \u001b[24m \u001b[24m \u001b[24m \u001b[24m \u001b[24m \u001b[24m \u001b[24m \u001b[24m \u001b[24m \u001b[24m \u001b[24m \u001b[24m \u001b[24m \u001b[24m \u001b[24m \u001b[24m \u001b[24m \u001b[24m \u001b[24D" ], [ 0.251309, "\u001b[24D\u001b[1mT\u001b[1mh\u001b[1ma\u001b[1mt\u001b[1m'\u001b[1ms\u001b[1m \u001b[1mn\u001b[1mi\u001b[1mc\u001b[1me\u001b[1m,\u001b[1m \u001b[1ms\u001b[1mo\u001b[1m \u001b[1mf\u001b[1ma\u001b[1mr\u001b[1m.\u001b[0m\u001b[39m\u001b[0m\u001b[39m \u001b[0m\u001b[39m \u001b[0m\u001b[39m \u001b[0m\u001b[39m \b\b\b\b" ], [ 0.372268, "\u001b[22D\u001b[0m\u001b[32mb\u001b[0m\u001b[32mo\u001b[0m\u001b[32mr\u001b[0m\u001b[32mg\u001b[39m\u001b[0m\u001b[39m \u001b[0m\u001b[39mc\u001b[0m\u001b[39mr\u001b[0m\u001b[39me\u001b[0m\u001b[39ma\u001b[0m\u001b[39mt\u001b[0m\u001b[39me\u001b[0m\u001b[39m \u001b[0m\u001b[39m-\u001b[0m\u001b[39m-\u001b[0m\u001b[39ms\u001b[0m\u001b[39mt\u001b[0m\u001b[39ma\u001b[0m\u001b[39mt\u001b[0m\u001b[39ms\u001b[0m\u001b[39m \u001b[0m\u001b[39m-\u001b[0m\u001b[39m-progress --compression lz4 /media/backup/borgdemo::backup1 \u001b[4mWallpaper\u001b[24m\u001b[K" ], [ 0.686798, "\b" ], [ 0.49974, "\b" ], [ 0.029256, "\b" ], [ 0.030383, "\b" ], [ 0.030965, "\b" ], [ 0.02928, "\b" ], [ 0.030139, "\b" ], [ 0.029254, "\b" ], [ 0.03083, "\b" ], [ 0.030284, "\b" ], [ 0.030187, "\b" ], [ 0.030317, "\b" ], [ 0.439014, "\u001b[1C" ], [ 0.357869, "\u001b[P\u001b[10C \u001b[11D" ], [ 0.141225, "2\u001b[24m \u001b[4mW\u001b[4ma\u001b[4ml\u001b[4ml\u001b[4mp\u001b[4ma\u001b[4mp\u001b[4me\u001b[4mr\u001b[24m\u001b[10D" ], [ 0.615794, "\u001b[?1l\u001b>" ], [ 0.001653, "\u001b[?2004l\r\r\n" ], [ 0.000779, "\u001b]2;borg create --stats --progress --compression lz4 Wallpaper\u0007\u001b]1;borg\u0007" ], [ 0.627474, "Enter passphrase for key /media/backup/borgdemo: " ], [ 3.666123, "\r\n" ], [ 0.128711, "0 B O 0 B C 0 B D 0 N Wallpaper \r" ], [ 0.006399, "Initializing cache transaction: Reading config \r" ], [ 0.000208, "Initializing cache transaction: Reading chunks \r" ], [ 0.000253, "Initializing cache transaction: Reading files \r" ], [ 0.000269, " \r" ], [ 0.247567, "584.80 MB O 584.09 MB C 65 B D 1011 N Wallpaper/newfile.txt \r" ], [ 0.264517, "Compacting segments 0% \r" ], [ 0.000942, "Compacting segments 50% \r" ], [ 4e-05, " \r" ], [ 0.0606, "Saving files cache \r" ], [ 0.005405, "Saving chunks cache \r" ], [ 0.000411, "Saving cache config \r" ], [ 0.079766, " \r" ], [ 4.7e-05, " \r" ], [ 0.000375, "------------------------------------------------------------------------------\r\n" ], [ 2.4e-05, "Archive name: backup2\r\n" ], [ 2.7e-05, "Archive fingerprint: 
5aaf03d1c710cf774f9c9ff1c6317b621c14e519c6bac459f6d64b31e3bbd200\r\n" ], [ 0.000102, "Time (start): Fri, 2017-07-14 21:54:56\r\n" ], [ 2.1e-05, "Time (end): Fri, 2017-07-14 21:54:56\r\nDuration: 0.33 seconds\r\n" ], [ 7.4e-05, "Number of files: 1051\r\n" ], [ 8.3e-05, "Utilization of maximum supported archive size: 0%\r\n------------------------------------------------------------------------------\r\n" ], [ 7e-06, " Original size Compressed size Deduplicated size\r\n" ], [ 2.8e-05, "This archive: 618.96 MB 617.47 MB 106.70 kB\r\n" ], [ 2.2e-05, "All archives: 1.24 GB 1.23 GB 561.77 MB\r\n" ], [ 5.3e-05, "\r\n" ], [ 7e-06, " Unique chunks Total chunks\r\n" ], [ 2.2e-05, "Chunk index: 1002 2187\r\n" ], [ 2.3e-05, "------------------------------------------------------------------------------\r\n" ], [ 0.046167, "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" ], [ 8.1e-05, "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[K" ], [ 8.5e-05, "\u001b[?1h\u001b=" ], [ 0.000212, "\u001b[?2004h" ], [ 1.922718, "\u001b[1m#\u001b[0m\u001b[39m" ], [ 0.225243, "\b\u001b[1m#\u001b[1m \u001b[0m\u001b[39m" ], [ 0.166756, "\b\b\u001b[1m#\u001b[1m \u001b[1mW\u001b[0m\u001b[39m" ], [ 0.162323, "\b\u001b[1mW\u001b[1mo\u001b[0m\u001b[39m" ], [ 0.097757, "\b\u001b[1mo\u001b[1mw\u001b[0m\u001b[39m" ], [ 0.265877, "\b\u001b[1mw\u001b[1m, this was a lot faster!\u001b[0m\u001b[39m" ], [ 0.789811, "\u001b[?1l\u001b>" ], [ 0.000392, "\u001b[?2004l\r\r\n" ], [ 0.000754, "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" ], [ 7.2e-05, "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[K" ], [ 7.2e-05, "\u001b[?1h\u001b=" ], [ 0.000273, "\u001b[?2004h" ], [ 1.15181, "\u001b[1m#\u001b[0m\u001b[39m" ], [ 0.234049, "\b\u001b[1m#\u001b[1m \u001b[0m\u001b[39m" ], [ 0.209548, "\b\b\u001b[1m#\u001b[1m \u001b[1mN\u001b[0m\u001b[39m" ], [ 0.168421, "\b\u001b[1mN\u001b[1mo\u001b[0m\u001b[39m" ], [ 0.232312, "\b\u001b[1mo\u001b[1mt\u001b[0m\u001b[39m" ], [ 0.201133, "\b\u001b[1mt\u001b[1mi\u001b[0m\u001b[39m" ], [ 0.338758, "\b\u001b[1mi\u001b[1mce the \"Deduplicated size\" in \"This archive\"?\u001b[0m\u001b[39m" ], [ 2.236964, "\u001b[?1l\u001b>" ], [ 0.000951, "\u001b[?2004l\r\r\n" ], [ 0.001084, "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" ], [ 9.7e-05, "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[K" ], [ 9.6e-05, "\u001b[?1h\u001b=" ], [ 0.000505, "\u001b[?2004h" ], [ 2.51909, "\u001b[1m#\u001b[0m\u001b[39m" ], [ 0.240091, "\b\u001b[1m#\u001b[1m \u001b[0m\u001b[39m" ], [ 0.216793, "\b\b\u001b[1m#\u001b[1m \u001b[1mB\u001b[0m\u001b[39m" ], [ 0.192027, "\b\u001b[1mB\u001b[1mo\u001b[0m\u001b[39m" ], [ 0.138706, "\b\u001b[1mo\u001b[1mr\u001b[0m\u001b[39m" ], [ 0.129501, "\b\u001b[1mr\u001b[1mg\u001b[0m\u001b[39m" ], [ 0.536844, "\b\u001b[1mg\u001b[1m \u001b[0m\u001b[39m" ], [ 0.143314, "\b\u001b[1m \u001b[1mr\u001b[0m\u001b[39m" ], [ 0.138384, "\b\u001b[1mr\u001b[1me\u001b[0m\u001b[39m" ], [ 0.197658, "\b\u001b[1me\u001b[1mcognized that most files did not change and deduplicated them.\u001b[0m\u001b[39m" ], [ 1.432604, "\u001b[?1l\u001b>" ], [ 0.000397, "\u001b[?2004l\r\r\n" ], [ 0.00069, "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" ], [ 6.4e-05, "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[K" ], [ 7.1e-05, "\u001b[?1h\u001b=" ], [ 0.000319, "\u001b[?2004h" ], [ 1.153873, "\u001b[?1l\u001b>" ], [ 0.000537, "\u001b[?2004l\r\r\n" ], [ 0.000623, "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" ], [ 0.000101, "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[K" ], [ 0.000108, 
"\u001b[?1h\u001b=" ], [ 0.000309, "\u001b[?2004h" ], [ 0.447325, "\u001b[1m#\u001b[0m\u001b[39m" ], [ 0.257975, "\b\u001b[1m#\u001b[1m \u001b[0m\u001b[39m" ], [ 0.210602, "\b\b\u001b[1m#\u001b[1m \u001b[1mB\u001b[0m\u001b[39m" ], [ 0.182148, "\b\u001b[1mB\u001b[1mu\u001b[0m\u001b[39m" ], [ 0.159923, "\b\u001b[1mu\u001b[1mt\u001b[0m\u001b[39m" ], [ 0.165905, "\b\u001b[1mt\u001b[1m \u001b[0m\u001b[39m" ], [ 0.175925, "\b\u001b[1m \u001b[1mw\u001b[0m\u001b[39m" ], [ 0.116184, "\b\u001b[1mw\u001b[1mh\u001b[0m\u001b[39m" ], [ 0.125029, "\b\u001b[1mh\u001b[1ma\u001b[0m\u001b[39m" ], [ 0.110311, "\b\u001b[1ma\u001b[1mt\u001b[0m\u001b[39m" ], [ 0.26718, "\b\u001b[1mt\u001b[1m \u001b[0m\u001b[39m" ], [ 0.393846, "\b\u001b[1m \u001b[1mhappens, when we move a dir and create a new backup?\u001b[0m\u001b[39m" ], [ 1.840157, "\u001b[?1l\u001b>" ], [ 0.000398, "\u001b[?2004l\r\r\n" ], [ 0.000678, "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" ], [ 0.000105, "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[K" ], [ 0.000102, "\u001b[?1h\u001b=" ], [ 0.000242, "\u001b[?2004h" ], [ 1.044202, "\u001b[1m\u001b[31mm\u001b[0m\u001b[39m" ], [ 0.167573, "\b\u001b[0m\u001b[32mm\u001b[32mv\u001b[39m" ], [ 0.203794, " " ], [ 0.199502, "\u001b[?7l\u001b[31m......\u001b[39m\u001b[?7h" ], [ 0.002962, "\r\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[32mmv\u001b[39m \u001b[4mWallpaper\u001b[1m\u001b[4m/\u001b[0m\u001b[24m\u001b[K" ], [ 0.399299, "\u001b[?7l\u001b[31m......\u001b[39m\u001b[?7h" ], [ 0.004451, "\r\r\u001b[14C\u001b[0m\u001b[4m/\u001b[24m" ], [ 0.000168, "\r\r\n\u001b[J" ], [ 3.2e-05, "\u001b[38;5;33m2048example\u001b[0m/ \u001b[38;5;13mdeer.jpg\u001b[0m \u001b[38;5;33mmore\u001b[0m/ \r\n\u001b[J\u001b[38;5;33mbigcollection\u001b[0m/ \u001b[J\u001b[38;5;33mevenmore\u001b[0m/ \u001b[Jnewfile.txt \u001b[J \u001b[A\u001b[A\u001b[0m\u001b[27m\u001b[24m\r\u001b[2C\u001b[32mmv\u001b[39m \u001b[4mWallpaper/\u001b[24m\u001b[K" ], [ 0.416097, "\u001b[?7l" ], [ 1.3e-05, "\u001b[31m......\u001b[39m\u001b[?7h" ], [ 0.002339, "\u001b[10D\u001b[24mW\u001b[24ma\u001b[24ml\u001b[24ml\u001b[24mp\u001b[24ma\u001b[24mp\u001b[24me\u001b[24mr\u001b[24m/2048example\u001b[1m/\u001b[0m" ], [ 0.000184, "\r\r\n" ], [ 0.000156, "\u001b[7m2048example/ \u001b[0m \u001b[38;5;13mdeer.jpg\u001b[0m \u001b[38;5;33mmore\u001b[0m/ \u001b[K\r\n\u001b[J\u001b[38;5;33mbigcollection\u001b[0m/ \u001b[J\u001b[38;5;33mevenmore\u001b[0m/ \u001b[Jnewfile.txt \u001b[J \u001b[A\u001b[A\u001b[0m\u001b[27m\u001b[24m\r\u001b[2C\u001b[32mmv\u001b[39m Wallpaper/2048example\u001b[1m/\u001b[0m\u001b[K" ], [ 0.23342, "\r\r\n" ], [ 1.4e-05, "\u001b[7m2048example/ \u001b[0m \r\u001b[7m2048example/ \u001b[0m \r\u001b[A\u001b[0m\u001b[27m\u001b[24m\r\u001b[2C\u001b[32mmv\u001b[39m Wallpaper/2048example\u001b[1m/\u001b[0m\u001b[K\u001b[12Dbigcollecti\u001b[0mon\u001b[1m/\u001b[0m" ], [ 0.000154, "\r\r\n" ], [ 2.5e-05, "\u001b[38;5;33m2048example\u001b[0m/ \r\u001b[1B\u001b[7mbigcollection/\u001b[0m \r\u001b[A\u001b[A\u001b[0m\u001b[27m\u001b[24m\r\u001b[2C\u001b[32mmv\u001b[39m Wallpaper/bigcollection\u001b[1m/\u001b[0m\u001b[K" ], [ 0.378809, "\r\r\n\u001b[J\u001b[A\u001b[29C" ], [ 0.002159, "\r\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[32mmv\u001b[39m \u001b[4mWallpaper/bigcollection\u001b[24m\u001b[K\u001b[1C" ], [ 0.35586, "\u001b[?7l\u001b[31m......\u001b[39m\u001b[?7h" ], [ 0.007824, "\r\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[32mmv\u001b[39m \u001b[4mWallpaper/bigcollection\u001b[24m 
\u001b[4mWallpaper\u001b[1m\u001b[4m/\u001b[0m\u001b[24m\u001b[K" ], [ 0.248908, "\u001b[?7l\u001b[31m......\u001b[39m\u001b[?7h" ], [ 0.002608, "\r\r\u001b[38C\u001b[0m\u001b[4m/\u001b[24m" ], [ 0.000171, "\r\r\n\u001b[J" ], [ 5.4e-05, "\u001b[38;5;33m2048example\u001b[0m/ \u001b[38;5;13mdeer.jpg\u001b[0m \u001b[38;5;33mmore\u001b[0m/ \r\n\u001b[J\u001b[38;5;33mbigcollection\u001b[0m/ \u001b[J\u001b[38;5;33mevenmore\u001b[0m/ \u001b[Jnewfile.txt \u001b[J \u001b[A\u001b[A\u001b[0m\u001b[27m\u001b[24m\r\u001b[2C\u001b[32mmv\u001b[39m \u001b[4mWallpaper/bigcollection\u001b[24m \u001b[4mWallpaper/\u001b[24m\u001b[K" ], [ 0.248788, "\u001b[?7l\u001b[31m......\u001b[39m\u001b[?7h" ], [ 0.004567, "\u001b[10D\u001b[24mW\u001b[24ma\u001b[24ml\u001b[24ml\u001b[24mp\u001b[24ma\u001b[24mp\u001b[24me\u001b[24mr\u001b[24m/2048example\u001b[1m/\u001b[0m" ], [ 0.000182, "\r\r\n" ], [ 9.1e-05, "\u001b[7m2048example/ \u001b[0m \u001b[38;5;13mdeer.jpg\u001b[0m \u001b[38;5;33mmore\u001b[0m/ \u001b[K\r\n\u001b[J\u001b[38;5;33mbigcollection\u001b[0m/ \u001b[J\u001b[38;5;33mevenmore\u001b[0m/ \u001b[Jnewfile.txt \u001b[J \u001b[A\u001b[A\u001b[0m\u001b[27m\u001b[24m\r\u001b[2C\u001b[32mmv\u001b[39m \u001b[4mWallpaper/bigcollection\u001b[24m Wallpaper/2048example\u001b[1m/\u001b[0m\u001b[K" ], [ 0.24704, "\r\r\n" ], [ 3.2e-05, "\u001b[7m2048example/ \u001b[0m \r\u001b[7m2048example/ \u001b[0m \r\u001b[A\u001b[0m\u001b[27m\u001b[24m\r\u001b[2C\u001b[32mmv\u001b[39m \u001b[4mWallpaper/bigcollection\u001b[24m Wallpaper/2048example\u001b[1m/\u001b[0m\u001b[K\u001b[12Dbigcollecti\u001b[0mon\u001b[1m/\u001b[0m" ], [ 0.000389, "\r\r\n" ], [ 3e-05, "\u001b[38;5;33m2048example\u001b[0m/ \r\u001b[1B\u001b[7mbigcollection/\u001b[0m \r\u001b[A\u001b[A\u001b[0m\u001b[27m\u001b[24m\r\u001b[2C\u001b[32mmv\u001b[39m \u001b[4mWallpaper/bigcollection\u001b[24m Wallpaper/bigcollection\u001b[1m/\u001b[0m\u001b[K" ], [ 0.595335, "\r\r\n\u001b[J\u001b[A\u001b[53C" ], [ 0.003755, "\r\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[32mmv\u001b[39m \u001b[4mWallpaper/bigcollection\u001b[24m \u001b[4mWallpaper/bigcollection\u001b[24m\u001b[K\u001b[1C" ], [ 0.271014, "\b" ], [ 0.554135, "\u001b[23D\u001b[24mW\u001b[24ma\u001b[24ml\u001b[24ml\u001b[24mp\u001b[24ma\u001b[24mp\u001b[24me\u001b[24mr\u001b[24m/\u001b[24mb\u001b[24mi\u001b[24mg\u001b[24mc\u001b[24mo\u001b[24ml\u001b[24ml\u001b[24me\u001b[24mc\u001b[24mt\u001b[24mi\u001b[24mo\u001b[24mn_" ], [ 0.317529, "N" ], [ 0.104435, "E" ], [ 0.175308, "W" ], [ 0.956051, "\u001b[?1l\u001b>" ], [ 0.001192, "\u001b[?2004l\r\r\n" ], [ 0.000754, "\u001b]2;mv -i Wallpaper/bigcollection Wallpaper/bigcollection_NEW\u0007\u001b]1;mv\u0007" ], [ 0.001182, "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" ], [ 9.8e-05, "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[K" ], [ 7.7e-05, "\u001b[?1h\u001b=" ], [ 0.000188, "\u001b[?2004h" ], [ 1.857261, "\u001b[32mmv\u001b[39m Wallpaper/bigcollection \u001b[4mWallpaper/bigcollection_NEW\u001b[24m" ], [ 0.208181, "\u001b[54D\u001b[1m#\u001b[1m \u001b[1mB\u001b[1mu\u001b[1mt\u001b[1m \u001b[1mw\u001b[1mh\u001b[1ma\u001b[1mt\u001b[1m \u001b[1mh\u001b[1ma\u001b[1mp\u001b[1mp\u001b[1me\u001b[1mn\u001b[1ms\u001b[1m,\u001b[1m \u001b[1mw\u001b[1mh\u001b[1me\u001b[1mn\u001b[1m \u001b[1mw\u001b[1me\u001b[24m\u001b[1m \u001b[24m\u001b[1mm\u001b[24m\u001b[1mo\u001b[24m\u001b[1mv\u001b[24m\u001b[1me\u001b[24m\u001b[1m \u001b[24m\u001b[1ma\u001b[24m\u001b[1m \u001b[24m\u001b[1md\u001b[24m\u001b[1mi\u001b[24m\u001b[1mr\u001b[24m\u001b[1m 
\u001b[24m\u001b[1ma\u001b[24m\u001b[1mn\u001b[24m\u001b[1md\u001b[24m\u001b[1m \u001b[24m\u001b[1mc\u001b[24m\u001b[1mr\u001b[24m\u001b[1me\u001b[24m\u001b[1ma\u001b[24m\u001b[1mt\u001b[24m\u001b[1me\u001b[24m\u001b[1m \u001b[24m\u001b[1ma\u001b[24m\u001b[1m \u001b[24m\u001b[1mn\u001b[24m\u001b[1me\u001b[1mw backup?\u001b[0m\u001b[39m" ], [ 0.2399, "\u001b[60D\u001b[1mo\u001b[1mr\u001b[1mg\u001b[1m \u001b[1mr\u001b[1me\u001b[1mc\u001b[1mo\u001b[1mg\u001b[1mn\u001b[1mi\u001b[1mz\u001b[1me\u001b[1md\u001b[1m \u001b[1mt\u001b[1mh\u001b[1ma\u001b[1mt\u001b[1m \u001b[1mm\u001b[1mo\u001b[1ms\u001b[1mt\u001b[1m \u001b[1mf\u001b[1mi\u001b[1ml\u001b[1me\u001b[1ms\u001b[1m \u001b[1md\u001b[1mi\u001b[1md\u001b[1m \u001b[1mn\u001b[1mo\u001b[1mt\u001b[1m \u001b[1mc\u001b[1mh\u001b[1ma\u001b[1mn\u001b[1mg\u001b[1me\u001b[1m \u001b[1ma\u001b[1mn\u001b[1md\u001b[1m \u001b[1md\u001b[1me\u001b[1md\u001b[1mu\u001b[1mp\u001b[1ml\u001b[1mi\u001b[1mc\u001b[1ma\u001b[1mt\u001b[1med them.\u001b[0m\u001b[39m" ], [ 0.227963, "\u001b[69D\u001b[1mN\u001b[1mo\u001b[1mt\u001b[1mi\u001b[1mc\u001b[1me\u001b[1m \u001b[1mt\u001b[1mh\u001b[1me\u001b[1m \u001b[1m\"\u001b[1mD\u001b[2C\u001b[0m\u001b[39m\u001b[39P\u001b[10C\u001b[1ms\u001b[1mi\u001b[1mz\u001b[1me\u001b[1m\"\u001b[1m in \"This archive\"?\u001b[0m\u001b[39m \u001b[20D" ], [ 0.344233, "\u001b[49D\u001b[1mW\u001b[1mo\u001b[1mw\u001b[1m,\u001b[1m \u001b[1mt\u001b[1mh\u001b[1mi\u001b[1ms\u001b[1m \u001b[1mw\u001b[1ma\u001b[1ms\u001b[1m \u001b[1ma\u001b[1m \u001b[1ml\u001b[1mo\u001b[1mt\u001b[1m \u001b[1mf\u001b[1ma\u001b[1ms\u001b[1mt\u001b[1me\u001b[1mr\u001b[1m!\u001b[0m\u001b[39m\u001b[0m\u001b[39m \u001b[0m\u001b[39m \u001b[0m\u001b[39m \u001b[0m\u001b[39m \u001b[0m\u001b[39m \u001b[0m\u001b[39m \u001b[0m\u001b[39m \u001b[0m\u001b[39m \u001b[0m\u001b[39m \u001b[0m\u001b[39m \u001b[0m\u001b[39m \u001b[0m\u001b[39m \u001b[0m\u001b[39m \u001b[0m\u001b[39m \u001b[0m\u001b[39m \u001b[0m\u001b[39m \u001b[0m\u001b[39m \u001b[0m\u001b[39m \u001b[0m\u001b[39m \u001b[0m\u001b[39m \u001b[0m\u001b[39m \u001b[0m\u001b[39m \u001b[22D" ], [ 0.396096, "\u001b[29D\u001b[0m\u001b[32mb\u001b[0m\u001b[32mo\u001b[0m\u001b[32mr\u001b[0m\u001b[32mg\u001b[39m\u001b[0m\u001b[39m \u001b[0m\u001b[39mc\u001b[0m\u001b[39mr\u001b[0m\u001b[39me\u001b[0m\u001b[39ma\u001b[0m\u001b[39mt\u001b[0m\u001b[39me\u001b[0m\u001b[39m \u001b[0m\u001b[39m-\u001b[0m\u001b[39m-\u001b[0m\u001b[39ms\u001b[0m\u001b[39mt\u001b[0m\u001b[39ma\u001b[0m\u001b[39mt\u001b[0m\u001b[39ms\u001b[0m\u001b[39m \u001b[0m\u001b[39m-\u001b[0m\u001b[39m-\u001b[0m\u001b[39mp\u001b[0m\u001b[39mr\u001b[0m\u001b[39mo\u001b[0m\u001b[39mg\u001b[0m\u001b[39mr\u001b[0m\u001b[39me\u001b[0m\u001b[39mss --compression lz4 /media/backup/borgdemo::backup2 \u001b[4mWallpaper\u001b[24m\u001b[K" ], [ 0.854343, "\b" ], [ 0.192067, "\b" ], [ 0.161921, "\b" ], [ 0.152949, "\b" ], [ 0.158914, "\b" ], [ 0.150013, "\b" ], [ 0.168061, "\b" ], [ 0.170964, "\b" ], [ 0.156237, "\b" ], [ 0.161813, "\b" ], [ 0.698972, "\b\u001b[P\u001b[10C \u001b[11D" ], [ 0.185005, "3\u001b[24m \u001b[4mW\u001b[4ma\u001b[4ml\u001b[4ml\u001b[4mp\u001b[4ma\u001b[4mp\u001b[4me\u001b[4mr\u001b[24m\u001b[10D" ], [ 0.670037, "\u001b[?1l\u001b>" ], [ 0.002029, "\u001b[?2004l\r\r\n" ], [ 0.000793, "\u001b]2;borg create --stats --progress --compression lz4 Wallpaper\u0007\u001b]1;borg\u0007" ], [ 0.621587, "Enter passphrase for key /media/backup/borgdemo: " ], [ 5.034162, "\r\n" ], [ 0.136527, "0 B O 0 B C 0 B D 0 N Wallpaper \r" ], [ 0.028491, "Initializing cache transaction: 
Reading config \r" ], [ 0.000245, "Initializing cache transaction: Reading chunks \r" ], [ 0.000278, "Initializing cache transaction: Reading files \r" ], [ 0.000296, " \r" ], [ 0.173817, "10.07 MB O 10.04 MB C 0 B D 17 N Wallpaper/bigcollec...rland__England____A.jpg\r" ], [ 0.20311, "29.10 MB O 29.05 MB C 0 B D 50 N Wallpaper/bigcollec...Creek_Redwoods_Stat.jpg\r" ], [ 0.202422, "47.67 MB O 47.62 MB C 0 B D 83 N Wallpaper/bigcollec...rson-Wildschutzgebi.jpg\r" ], [ 0.216811, "64.30 MB O 64.19 MB C 0 B D 112 N Wallpaper/bigcollec..._Planten_un_Blomen.jpg\r" ], [ 0.214409, "80.89 MB O 80.75 MB C 0 B D 140 N Wallpaper/bigcollec...g__Cologne__German.jpg\r" ], [ 0.202244, "100.45 MB O 100.26 MB C 0 B D 173 N Wallpaper/bigcolle..._Menorca__Spanien.jpg\r" ], [ 0.202027, "116.80 MB O 116.61 MB C 0 B D 202 N Wallpaper/bigcolle...artenkirchen__Bay.jpg\r" ], [ 0.202003, "130.38 MB O 130.15 MB C 0 B D 227 N Wallpaper/bigcolle..._zur_Felsenkirche.jpg\r" ], [ 0.234918, "143.32 MB O 143.09 MB C 0 B D 251 N Wallpaper/bigcolle...land__Antarktis__.jpg\r" ], [ 0.204976, "156.31 MB O 156.07 MB C 0 B D 275 N Wallpaper/bigcolle...-Stadion__Rio_de_.jpg\r" ], [ 0.205408, "173.36 MB O 173.09 MB C 0 B D 304 N Wallpaper/bigcolle...lpark__Alaska__US.jpg\r" ], [ 0.221776, "183.65 MB O 183.35 MB C 0 B D 322 N Wallpaper/bigcolle...lmeer____Pasquale.jpg\r" ], [ 0.201052, "195.95 MB O 195.63 MB C 0 B D 345 N Wallpaper/bigcolle...Schutzgebiet_Mary.jpg\r" ], [ 0.240687, "217.22 MB O 216.88 MB C 0 B D 382 N Wallpaper/bigcolle...__Kappadokien__T_.jpg\r" ], [ 0.20767, "233.09 MB O 232.68 MB C 0 B D 409 N Wallpaper/bigcolle...epublic_ImagesShu.jpg\r" ], [ 0.210433, "250.21 MB O 249.81 MB C 0 B D 439 N Wallpaper/bigcolle...ter__Pr_fektur_Fu.jpg\r" ], [ 0.200954, "268.90 MB O 268.51 MB C 0 B D 472 N Wallpaper/bigcolle...uth_Carolina__USA.jpg\r" ], [ 0.212828, "286.72 MB O 286.35 MB C 0 B D 502 N Wallpaper/bigcolle...l_Park__Cobham__E.jpg\r" ], [ 0.206527, "296.84 MB O 296.47 MB C 0 B D 518 N Wallpaper/bigcolle...entAlamy______Bin.jpg\r" ], [ 0.205003, "310.38 MB O 310.00 MB C 0 B D 542 N Wallpaper/bigcolle...ationalpark__Flor.jpg\r" ], [ 0.209538, "320.38 MB O 320.03 MB C 0 B D 559 N Wallpaper/bigcolle...ma__Bahamas____Ji.jpg\r" ], [ 0.201896, "331.76 MB O 331.35 MB C 0 B D 580 N Wallpaper/bigcolle...rd_Bay__Eyre-Halb.jpg\r" ], [ 0.207585, "347.40 MB O 346.96 MB C 0 B D 606 N Wallpaper/bigcolle...s_Atlantischen_Oz.jpg\r" ], [ 0.200781, "369.05 MB O 368.62 MB C 0 B D 640 N Wallpaper/bigcolle...ankreich____John_.jpg\r" ], [ 0.202326, "379.22 MB O 378.78 MB C 0 B D 657 N Wallpaper/bigcolle...chtanemone__Insel.jpg\r" ], [ 0.211929, "389.83 MB O 389.36 MB C 0 B D 676 N Wallpaper/bigcolle...ugal____Mikael_Sv.jpg\r" ], [ 0.219553, "402.12 MB O 401.68 MB C 0 B D 695 N Wallpaper/bigcolle...rk_Sarek__Schwede.jpg\r" ], [ 0.20375, "416.03 MB O 415.48 MB C 0 B D 718 N Wallpaper/bigcolle...em_taubenetzten_G.jpg\r" ], [ 0.201474, "428.93 MB O 428.38 MB C 0 B D 742 N Wallpaper/bigcolle...Francisco_Bay__Ka.jpg\r" ], [ 0.200248, "437.92 MB O 437.35 MB C 0 B D 756 N Wallpaper/bigcolle..._der_N_he_von_Tro.jpg\r" ], [ 0.215254, "446.04 MB O 445.46 MB C 0 B D 770 N Wallpaper/bigcolle...enver__Colorado__.jpg\r" ], [ 0.202133, "455.95 MB O 455.36 MB C 0 B D 787 N Wallpaper/bigcolle..._Son_Doong-H_hle_.jpg\r" ], [ 0.208499, "471.36 MB O 470.71 MB C 0 B D 816 N Wallpaper/bigcolle...ly_National_Monum.jpg\r" ], [ 0.205116, "491.46 MB O 490.81 MB C 0 B D 853 N Wallpaper/bigcolle...ted_during_the_ 1.jpg\r" ], [ 0.220215, "510.73 MB O 510.07 MB C 0 B D 887 N 
Wallpaper/bigcolle..._Blitzeinschlag_i.jpg\r" ], [ 0.201825, "522.32 MB O 521.65 MB C 0 B D 906 N Wallpaper/bigcolle...vador__Santiago__.jpg\r" ], [ 0.202937, "534.02 MB O 533.34 MB C 0 B D 925 N Wallpaper/bigcolle...doah_National_Par.jpg\r" ], [ 0.202635, "550.50 MB O 549.83 MB C 0 B D 951 N Wallpaper/bigcolle...liffs_National_Mo.jpg\r" ], [ 0.202296, "564.18 MB O 563.47 MB C 0 B D 976 N Wallpaper/bigcolle...n_in_Aktion____Va.jpg\r" ], [ 0.203791, "576.43 MB O 575.71 MB C 0 B D 996 N Wallpaper/bigcolle...______WRIGHTSuper.jpg\r" ], [ 0.439796, "Compacting segments 0% \r" ], [ 0.000919, "Compacting segments 50% \r" ], [ 3.7e-05, " \r" ], [ 0.040817, "Saving files cache \r" ], [ 0.010023, "Saving chunks cache \r" ], [ 0.000278, "Saving cache config \r" ], [ 0.093829, " \r" ], [ 1.6e-05, " \r" ], [ 0.000308, "------------------------------------------------------------------------------\r\n" ], [ 9e-06, "Archive name: backup3\r\n" ], [ 3.8e-05, "Archive fingerprint: 36cd8fdf9b8b2e3bbb3fc2bb600acd48609efaf3a0880f900e0701a47ff69d4d\r\n" ], [ 2e-05, "Time (start): Fri, 2017-07-14 21:55:37\r\n" ], [ 2.4e-05, "Time (end): Fri, 2017-07-14 21:55:46\r\n" ], [ 2.2e-05, "Duration: 8.58 seconds\r\n" ], [ 2.6e-05, "Number of files: 1051\r\n" ], [ 2.6e-05, "Utilization of maximum supported archive size: 0%\r\n" ], [ 2.1e-05, "------------------------------------------------------------------------------\r\n" ], [ 2.6e-05, " Original size Compressed size Deduplicated size\r\n" ], [ 2.4e-05, "This archive: 618.96 MB 617.47 MB 107.55 kB\r\n" ], [ 2.1e-05, "All archives: 1.86 GB 1.85 GB 561.88 MB\r\n" ], [ 2.5e-05, "\r\n" ], [ 3.9e-05, " Unique chunks Total chunks\r\n" ], [ 1.1e-05, "Chunk index: 1006 3283\r\n" ], [ 4.8e-05, "------------------------------------------------------------------------------\r\n" ], [ 0.048607, "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" ], [ 1.9e-05, "\u001b]1;~/Pictures\u0007" ], [ 7.7e-05, "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[K" ], [ 6.2e-05, "\u001b[?1h\u001b=" ], [ 0.00017, "\u001b[?2004h" ], [ 1.509372, "\u001b[1m#\u001b[0m\u001b[39m" ], [ 0.261334, "\b\u001b[1m#\u001b[1m \u001b[0m\u001b[39m" ], [ 0.25826, "\b\b\u001b[1m#\u001b[1m \u001b[1mS\u001b[0m\u001b[39m" ], [ 0.162616, "\b\u001b[1mS\u001b[1mt\u001b[0m\u001b[39m" ], [ 0.27891, "\b\u001b[1mt\u001b[1mi\u001b[0m\u001b[39m" ], [ 0.174723, "\b\u001b[1mi\u001b[1ml\u001b[0m\u001b[39m" ], [ 0.124142, "\b\u001b[1ml\u001b[1ml\u001b[0m\u001b[39m" ], [ 1.012371, "\b\u001b[1ml\u001b[1m quite fast…\u001b[0m\u001b[39m" ], [ 0.74493, "\u001b[?1l\u001b>" ], [ 0.000416, "\u001b[?2004l\r\r\n" ], [ 0.000686, "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" ], [ 6e-06, "\u001b]1;~/Pictures\u0007" ], [ 8.8e-05, "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[K" ], [ 0.000111, "\u001b[?1h\u001b=" ], [ 0.000271, "\u001b[?2004h" ], [ 2.038818, "\u001b[1m#\u001b[0m\u001b[39m" ], [ 0.861519, "\b\u001b[1m#\u001b[1m \u001b[0m\u001b[39m" ], [ 2.235116, "\b\b\u001b[1m#\u001b[1m \u001b[1mB\u001b[0m\u001b[39m" ], [ 0.20981, "\b\u001b[1mB\u001b[1mu\u001b[0m\u001b[39m" ], [ 0.216676, "\b\u001b[1mu\u001b[1mt\u001b[0m\u001b[39m" ], [ 0.72822, "\b\u001b[1mt\u001b[1m \u001b[0m\u001b[39m" ], [ 1.094756, "\b\u001b[1m \u001b[1mw\u001b[0m\u001b[39m" ], [ 0.315528, "\b\u001b[1mw\u001b[1mh\u001b[0m\u001b[39m" ], [ 0.23713, "\b\u001b[1mh\u001b[1me\u001b[0m\u001b[39m" ], [ 0.286805, "\b\u001b[1me\u001b[1mn\u001b[0m\u001b[39m" ], [ 0.638764, "\b\u001b[1mn\u001b[1m \u001b[0m\u001b[39m" ], [ 0.81778, "\b\u001b[1m 
\u001b[1my\u001b[0m\u001b[39m" ], [ 0.245269, "\b\u001b[1my\u001b[1mou look at the \"deduplicated file size\" again, you see that borg\u001b[1m \u001b[1malso recognized that only the dir and not the files changed in this backup.\u001b[0m\u001b[39m\u001b[K" ], [ 2.34618, "\u001b[?1l\u001b>" ], [ 0.000453, "\u001b[?2004l\r\r\n" ], [ 0.000631, "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" ], [ 0.00011, "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[K" ], [ 0.000113, "\u001b[?1h\u001b=" ], [ 0.000262, "\u001b[?2004h" ], [ 3.418707, "\u001b[1m#\u001b[0m\u001b[39m" ], [ 0.275819, "\b\u001b[1m#\u001b[1m \u001b[0m\u001b[39m" ], [ 0.2004, "\b\b\u001b[1m#\u001b[1m \u001b[1mN\u001b[0m\u001b[39m" ], [ 0.172829, "\b\u001b[1mN\u001b[1mo\u001b[0m\u001b[39m" ], [ 0.308378, "\b\u001b[1mo\u001b[1mw\u001b[0m\u001b[39m" ], [ 0.703684, "\b\u001b[1mw\u001b[1m \u001b[0m\u001b[39m" ], [ 0.8183, "\b\u001b[1m \u001b[1ml\u001b[0m\u001b[39m" ], [ 0.193322, "\b\u001b[1ml\u001b[1me\u001b[0m\u001b[39m" ], [ 0.18438, "\b\u001b[1me\u001b[1mt\u001b[0m\u001b[39m" ], [ 0.389996, "\b\u001b[1mt\u001b[1m's look into a repo.\u001b[0m\u001b[39m" ], [ 0.857879, "\u001b[?1l\u001b>" ], [ 0.000349, "\u001b[?2004l\r\r\n" ], [ 0.000564, "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" ], [ 2.9e-05, "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[K" ], [ 7.9e-05, "\u001b[?1h\u001b=" ], [ 0.000246, "\u001b[?2004h" ], [ 1.60039, "\u001b[1m\u001b[31mb\u001b[0m\u001b[39m" ], [ 0.177554, "\b\u001b[1m\u001b[31mb\u001b[1m\u001b[31mo\u001b[0m\u001b[39m" ], [ 0.117613, "\b\b\u001b[1m\u001b[31mb\u001b[1m\u001b[31mo\u001b[1m\u001b[31mr\u001b[0m\u001b[39m" ], [ 0.12982, "\b\b\b\u001b[0m\u001b[32mb\u001b[0m\u001b[32mo\u001b[0m\u001b[32mr\u001b[32mg\u001b[39m" ], [ 0.145309, " " ], [ 0.256078, "l" ], [ 0.145029, "i" ], [ 0.100415, "s" ], [ 0.137667, "t" ], [ 0.172051, " " ], [ 0.490083, "\u001b[4m/\u001b[24m" ], [ 0.190449, "\b\u001b[4m/\u001b[4mm\u001b[24m" ], [ 0.216676, "\b\u001b[4mm\u001b[4me\u001b[24m" ], [ 0.174909, "\b\u001b[4me\u001b[4md\u001b[24m" ], [ 0.242368, "\u001b[?7l\u001b[31m......\u001b[39m" ], [ 3.2e-05, "\u001b[?7h" ], [ 0.00599, "\r\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[32mborg\u001b[39m list \u001b[4m/media\u001b[1m\u001b[4m/\u001b[0m\u001b[24m\u001b[K" ], [ 0.345758, "\u001b[?7l\u001b[31m......\u001b[39m\u001b[?7h" ], [ 0.003294, "\r\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[32mborg\u001b[39m list \u001b[4m/media/backup\u001b[1m\u001b[4m/\u001b[0m\u001b[24m\u001b[K" ], [ 0.253376, "\u001b[?7l\u001b[31m......\u001b[39m\u001b[?7h" ], [ 0.003389, "\r\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[32mborg\u001b[39m list \u001b[4m/media/backup/borgdemo\u001b[1m\u001b[4m/\u001b[0m\u001b[24m\u001b[K" ], [ 1.036958, "\b\b\u001b[4mo\u001b[24m\u001b[0m\u001b[24m \b" ], [ 2.6e-05, "\u001b[?1l\u001b>" ], [ 0.000854, "\u001b[?2004l\r\r\n" ], [ 0.000717, "\u001b]2;borg list /media/backup/borgdemo\u0007\u001b]1;borg\u0007" ], [ 0.624291, "Enter passphrase for key /media/backup/borgdemo: " ], [ 2.363577, "\r\n" ], [ 0.158203, "backup1 Fri, 2017-07-14 21:54:06 [9758c7db339a066360bffad17b2ffac4fb368c6722c0be3a47a7a9b631f06407]\r\nbackup2 Fri, 2017-07-14 21:54:56 [5aaf03d1c710cf774f9c9ff1c6317b621c14e519c6bac459f6d64b31e3bbd200]\r\nbackup3 Fri, 2017-07-14 21:55:37 [36cd8fdf9b8b2e3bbb3fc2bb600acd48609efaf3a0880f900e0701a47ff69d4d]\r\n" ], [ 0.044143, "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" ], [ 5.2e-05, "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[K" ], [ 8.4e-05, "\u001b[?1h\u001b=" ], [ 
0.000207, "\u001b[?2004h" ], [ 5.582312, "\u001b[1m#\u001b[0m\u001b[39m" ], [ 0.371134, "\b\u001b[1m#\u001b[1m \u001b[0m\u001b[39m" ], [ 0.184918, "\b\b\u001b[1m#\u001b[1m \u001b[1mY\u001b[0m\u001b[39m" ], [ 0.177123, "\b\u001b[1mY\u001b[1mo\u001b[0m\u001b[39m" ], [ 0.148041, "\b\u001b[1mo\u001b[1mu\u001b[0m\u001b[39m" ], [ 0.461676, "\b\u001b[1mu\u001b[1m'\u001b[0m\u001b[39m" ], [ 0.668888, "\b\u001b[1m'\u001b[1mll see a list of all backups.\u001b[0m\u001b[39m" ], [ 0.876235, "\u001b[?1l\u001b>" ], [ 0.000363, "\u001b[?2004l\r\r\n" ], [ 0.001075, "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" ], [ 8.2e-05, "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[K" ], [ 8.8e-05, "\u001b[?1h\u001b=" ], [ 0.000297, "\u001b[?2004h" ], [ 2.475491, "\u001b[1m#\u001b[0m\u001b[39m" ], [ 0.382591, "\b\u001b[1m#\u001b[1m \u001b[0m\u001b[39m" ], [ 0.23474, "\b\b\u001b[1m#\u001b[1m \u001b[1mY\u001b[0m\u001b[39m" ], [ 0.210269, "\b\u001b[1mY\u001b[1mo\u001b[0m\u001b[39m" ], [ 0.196151, "\b\u001b[1mo\u001b[1mu\u001b[0m\u001b[39m" ], [ 0.460253, "\b\u001b[1mu\u001b[1m \u001b[0m\u001b[39m" ], [ 0.305764, "\b\u001b[1m \u001b[1mc\u001b[0m\u001b[39m" ], [ 0.184098, "\b\u001b[1mc\u001b[1ma\u001b[0m\u001b[39m" ], [ 0.212534, "\b\u001b[1ma\u001b[1mn\u001b[0m\u001b[39m" ], [ 0.305097, "\b\u001b[1mn\u001b[1m \u001b[0m\u001b[39m" ], [ 0.163485, "\b\u001b[1m \u001b[1ma\u001b[0m\u001b[39m" ], [ 0.194803, "\b\u001b[1ma\u001b[1ml\u001b[0m\u001b[39m" ], [ 0.282791, "\b\u001b[1ml\u001b[1mso use the same command to look into an archive. But we better f\u001b[1mi\u001b[1mlter the output here:\u001b[0m\u001b[39m\u001b[K" ], [ 2.679252, "\u001b[?1l\u001b>" ], [ 0.000434, "\u001b[?2004l\r\r\n" ], [ 0.000646, "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" ], [ 3e-05, "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[K" ], [ 0.000107, "\u001b[?1h\u001b=" ], [ 0.000302, "\u001b[?2004h" ], [ 1.162094, "\u001b[1m\u001b[31mb\u001b[0m\u001b[39m" ], [ 0.184756, "\b\u001b[1m\u001b[31mb\u001b[1m\u001b[31mo\u001b[0m\u001b[39m" ], [ 0.114887, "\b\b\u001b[1m\u001b[31mb\u001b[1m\u001b[31mo\u001b[1m\u001b[31mr\u001b[0m\u001b[39m" ], [ 0.143983, "\b\b\b\u001b[0m\u001b[32mb\u001b[0m\u001b[32mo\u001b[0m\u001b[32mr\u001b[32mg\u001b[39m" ], [ 0.230507, " " ], [ 0.414382, "l" ], [ 0.153591, "i" ], [ 0.044178, "s" ], [ 0.236299, "t" ], [ 0.330148, " " ], [ 0.70018, "\u001b[4m/\u001b[24m" ], [ 0.193582, "\b\u001b[4m/\u001b[4mm\u001b[24m" ], [ 0.172118, "\b\u001b[4mm\u001b[4me\u001b[24m" ], [ 0.134283, "\b\u001b[4me\u001b[4md\u001b[24m" ], [ 0.250757, "\u001b[?7l\u001b[31m......\u001b[39m\u001b[?7h" ], [ 0.006227, "\r\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[32mborg\u001b[39m list \u001b[4m/media\u001b[1m\u001b[4m/\u001b[0m\u001b[24m\u001b[K" ], [ 0.374078, "\u001b[?7l\u001b[31m......\u001b[39m\u001b[?7h" ], [ 0.003992, "\r\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[32mborg\u001b[39m list \u001b[4m/media/backup\u001b[1m\u001b[4m/\u001b[0m\u001b[24m\u001b[K" ], [ 0.2609, "\u001b[?7l\u001b[31m......\u001b[39m\u001b[?7h" ], [ 0.003434, "\r\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[32mborg\u001b[39m list \u001b[4m/media/backup/borgdemo\u001b[1m\u001b[4m/\u001b[0m\u001b[24m\u001b[K" ], [ 0.237963, "\u001b[?7l\u001b[31m......\u001b[39m\u001b[?7h" ], [ 0.003371, "\r\r\u001b[34C\u001b[0m\u001b[4m/\u001b[24m" ], [ 0.000178, "\r\r\n\u001b[J" ], [ 4.2e-05, "\u001b[0mREADME \u001b[38;5;33mdata\u001b[0m/ index.14 nonce \r\n\u001b[Jconfig \u001b[Jhints.14 \u001b[Jintegrity.14 \u001b[J 
\u001b[A\u001b[A\u001b[0m\u001b[27m\u001b[24m\r\u001b[2C\u001b[32mborg\u001b[39m list \u001b[4m/media/backup/borgdemo/\u001b[24m\u001b[K" ], [ 0.833604, "\b\b\u001b[4mo\u001b[24m\u001b[24m \b" ], [ 1.042199, "\u001b[22D\u001b[24m/\u001b[24mm\u001b[24me\u001b[24md\u001b[24mi\u001b[24ma\u001b[24m/\u001b[24mb\u001b[24ma\u001b[24mc\u001b[24mk\u001b[24mu\u001b[24mp\u001b[24m/\u001b[24mb\u001b[24mo\u001b[24mr\u001b[24mg\u001b[24md\u001b[24me\u001b[24mm\u001b[24mo:" ], [ 0.139477, ":" ], [ 0.711096, "b" ], [ 0.099664, "a" ], [ 0.149912, "c" ], [ 0.16888, "k" ], [ 0.923931, "\u001b[?7l\u001b[31m......\u001b[39m\u001b[?7h" ], [ 0.005451, "\r\r\r\r\n\u001b[J\u001b[A\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[32mborg\u001b[39m list /media/backup/borgdemo::back\u001b[K" ], [ 0.885297, "u" ], [ 0.29853, "p" ], [ 0.456244, "3" ], [ 1.061844, " " ], [ 0.589511, "|" ], [ 0.527539, " " ], [ 0.343662, "\u001b[32mg\u001b[39m" ], [ 0.117117, "\b\u001b[32mg\u001b[32mr\u001b[39m" ], [ 0.124331, "\b\b\u001b[1m\u001b[31mg\u001b[1m\u001b[31mr\u001b[1m\u001b[31me\u001b[0m\u001b[39m" ], [ 0.726149, "\b\b\b\u001b[0m\u001b[32mg\u001b[0m\u001b[32mr\u001b[0m\u001b[32me\u001b[32mp\u001b[39m" ], [ 0.198601, " " ], [ 0.476336, "\u001b[33m'\u001b[39m" ], [ 0.392009, "\b\u001b[33m'\u001b[33md\u001b[39m" ], [ 0.627529, "\b\u001b[33md\u001b[33me\u001b[39m" ], [ 0.142332, "\b\u001b[33me\u001b[33me\u001b[39m" ], [ 0.322681, "\b\u001b[33me\u001b[33mr\u001b[39m" ], [ 0.916328, "\b\u001b[33mr\u001b[33m.\u001b[39m" ], [ 0.50653, "\b\u001b[33m.\u001b[33mj\u001b[39m" ], [ 0.242318, "\b\u001b[33mj\u001b[33mp\u001b[39m" ], [ 0.272214, "\b\u001b[33mp\u001b[33mg\u001b[39m" ], [ 0.581098, "\b\u001b[33mg\u001b[33m'\u001b[39m" ], [ 2.559186, "\u001b[?1l\u001b>" ], [ 0.001382, "\u001b[?2004l\r\r\n" ], [ 0.000773, "\u001b]2;borg list /media/backup/borgdemo::backup3 | grep --color 'deer.jpg'\u0007\u001b]1;borg\u0007" ], [ 0.628501, "Enter passphrase for key /media/backup/borgdemo: " ], [ 2.584332, "\r\n" ], [ 0.141205, "-rw-rw-r-- rugk rugk 3781749 Fri, 2017-07-14 17:01:45 Wallpaper/\u001b[01;31m\u001b[Kdeer.jpg\u001b[m\u001b[K\r\n" ], [ 0.054041, "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" ], [ 0.000135, "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[K" ], [ 2.7e-05, "\u001b[?1h\u001b=" ], [ 0.00017, "\u001b[?2004h" ], [ 2.222435, "\u001b[1m#\u001b[0m\u001b[39m" ], [ 0.269828, "\b\u001b[1m#\u001b[1m \u001b[0m\u001b[39m" ], [ 0.211035, "\b\b\u001b[1m#\u001b[1m \u001b[1mO\u001b[0m\u001b[39m" ], [ 0.184712, "\b\u001b[1mO\u001b[1mh\u001b[0m\u001b[39m" ], [ 0.374912, "\b\u001b[1mh\u001b[1m, we found our picture. 
Now extract it:\u001b[0m\u001b[39m" ], [ 1.545747, "\u001b[?1l\u001b>" ], [ 0.000418, "\u001b[?2004l\r\r\n" ], [ 0.00063, "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" ], [ 3.1e-05, "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[K" ], [ 0.000135, "\u001b[?1h\u001b=" ], [ 0.000463, "\u001b[?2004h" ], [ 1.638625, "\u001b[1m\u001b[31mm\u001b[0m\u001b[39m" ], [ 0.156977, "\b\u001b[0m\u001b[32mm\u001b[32mv\u001b[39m" ], [ 0.220013, " " ], [ 0.151118, "\u001b[?7l\u001b[31m......\u001b[39m\u001b[?7h" ], [ 0.002944, "\r\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[32mmv\u001b[39m \u001b[4mWallpaper\u001b[1m\u001b[4m/\u001b[0m\u001b[24m\u001b[K" ], [ 0.668654, "\b\b\u001b[4mr\u001b[24m\u001b[0m\u001b[24m " ], [ 0.297169, "\u001b[?7l\u001b[31m......\u001b[39m\u001b[?7h" ], [ 0.005693, "\r\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[32mmv\u001b[39m \u001b[4mWallpaper\u001b[24m \u001b[4mWallpaper\u001b[1m\u001b[4m/\u001b[0m\u001b[24m\u001b[K" ], [ 0.672973, "\b\b\u001b[4mr\u001b[24m\u001b[0m\u001b[24m \b" ], [ 0.263416, "\u001b[9D\u001b[24mW\u001b[24ma\u001b[24ml\u001b[24ml\u001b[24mp\u001b[24ma\u001b[24mp\u001b[24me\u001b[24mr." ], [ 0.334671, "o" ], [ 0.19768, "r" ], [ 0.142283, "i" ], [ 0.17833, "g" ], [ 0.688576, "\u001b[?1l\u001b>" ], [ 0.001806, "\u001b[?2004l\r\r\n" ], [ 0.000954, "\u001b]2;mv -i Wallpaper Wallpaper.orig\u0007\u001b]1;mv\u0007" ], [ 0.002076, "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" ], [ 5.7e-05, "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[K" ], [ 7e-05, "\u001b[?1h\u001b=" ], [ 0.000153, "\u001b[?2004h" ], [ 1.864942, "\u001b[1m\u001b[31mb\u001b[0m\u001b[39m" ], [ 0.18048, "\b\u001b[1m\u001b[31mb\u001b[1m\u001b[31mo\u001b[0m\u001b[39m" ], [ 0.143872, "\b\b\u001b[1m\u001b[31mb\u001b[1m\u001b[31mo\u001b[1m\u001b[31mr\u001b[0m\u001b[39m" ], [ 0.161829, "\b\b\b\u001b[0m\u001b[32mb\u001b[0m\u001b[32mo\u001b[0m\u001b[32mr\u001b[32mg\u001b[39m" ], [ 0.170439, " " ], [ 0.248909, "c" ], [ 0.365319, "\b \b" ], [ 0.142233, "e" ], [ 0.157272, "x" ], [ 0.166861, "t" ], [ 0.115114, "r" ], [ 0.103674, "a" ], [ 0.102162, "c" ], [ 0.163264, "t" ], [ 0.308166, " " ], [ 1.386497, "\u001b[4m/\u001b[24m" ], [ 0.183134, "\b\u001b[4m/\u001b[4mm\u001b[24m" ], [ 0.115533, "\b\u001b[4mm\u001b[4me\u001b[24m" ], [ 0.12416, "\b\u001b[4me\u001b[4md\u001b[24m" ], [ 0.206989, "\u001b[?7l\u001b[31m......\u001b[39m\u001b[?7h" ], [ 0.003179, "\r\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[32mborg\u001b[39m extract \u001b[4m/media\u001b[1m\u001b[4m/\u001b[0m\u001b[24m\u001b[K" ], [ 0.241808, "\u001b[?7l\u001b[31m......\u001b[39m\u001b[?7h" ], [ 0.003324, "\r\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[32mborg\u001b[39m extract \u001b[4m/media/backup\u001b[1m\u001b[4m/\u001b[0m\u001b[24m\u001b[K" ], [ 0.193552, "\u001b[?7l" ], [ 2.6e-05, "\u001b[31m......\u001b[39m\u001b[?7h" ], [ 0.003368, "\r\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[32mborg\u001b[39m extract \u001b[4m/media/backup/borgdemo\u001b[1m\u001b[4m/\u001b[0m\u001b[24m\u001b[K" ], [ 0.700774, "\b\b\u001b[4mo\u001b[24m\u001b[0m\u001b[24m \b" ], [ 1.151074, "\u001b[22D\u001b[24m/\u001b[24mm\u001b[24me\u001b[24md\u001b[24mi\u001b[24ma\u001b[24m/\u001b[24mb\u001b[24ma\u001b[24mc\u001b[24mk\u001b[24mu\u001b[24mp\u001b[24m/\u001b[24mb\u001b[24mo\u001b[24mr\u001b[24mg\u001b[24md\u001b[24me\u001b[24mm\u001b[24mo:" ], [ 0.146222, ":" ], [ 0.579644, "b" ], [ 0.102789, "a" ], [ 0.178851, "c" ], [ 0.133936, "k" ], [ 0.124089, "u" ], [ 0.229823, "p" ], [ 0.174738, "3" ], [ 0.306821, " " ], [ 4.287483, 
"\u001b[45D\u001b[39mb\u001b[39mo\u001b[39mr\u001b[39mg\u001b[41C\u001b[7mWallpaper/deer.jpg\u001b[27m" ], [ 1.718396, "\u001b[63D\u001b[32mb\u001b[32mo\u001b[32mr\u001b[32mg\u001b[39m\u001b[41C\u001b[27mW\u001b[27ma\u001b[27ml\u001b[27ml\u001b[27mp\u001b[27ma\u001b[27mp\u001b[27me\u001b[27mr\u001b[27m/\u001b[27md\u001b[27me\u001b[27me\u001b[27mr\u001b[27m.\u001b[27mj\u001b[27mp\u001b[27mg" ], [ 6.4e-05, "\u001b[?1l\u001b>" ], [ 0.001749, "\u001b[?2004l\r\r\n" ], [ 0.000991, "\u001b]2;borg extract /media/backup/borgdemo::backup3 Wallpaper/deer.jpg\u0007\u001b]1;borg\u0007" ], [ 0.633044, "Enter passphrase for key /media/backup/borgdemo: " ], [ 2.659432, "\r\n" ], [ 0.198939, "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" ], [ 0.000134, "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[K" ], [ 7.9e-05, "\u001b[?1h\u001b=" ], [ 0.000169, "\u001b[?2004h" ], [ 4.506682, "\u001b[1m#\u001b[0m\u001b[39m" ], [ 0.287992, "\b\u001b[1m#\u001b[1m \u001b[0m\u001b[39m" ], [ 0.13604, "\b\b\u001b[1m#\u001b[1m \u001b[1mA\u001b[0m\u001b[39m" ], [ 0.132241, "\b\u001b[1mA\u001b[1mn\u001b[0m\u001b[39m" ], [ 0.115152, "\b\u001b[1mn\u001b[1md\u001b[0m\u001b[39m" ], [ 0.190449, "\b\u001b[1md\u001b[1m \u001b[0m\u001b[39m" ], [ 0.168765, "\b\u001b[1m \u001b[1mc\u001b[0m\u001b[39m" ], [ 0.248816, "\b\u001b[1mc\u001b[1mheck that it's the same:\u001b[0m\u001b[39m" ], [ 1.093037, "\u001b[?1l\u001b>" ], [ 0.000401, "\u001b[?2004l\r\r\n" ], [ 0.000745, "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" ], [ 7.7e-05, "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[K" ], [ 0.0001, "\u001b[?1h\u001b=" ], [ 0.000321, "\u001b[?2004h" ], [ 1.350298, "\u001b[32md\u001b[39m" ], [ 0.181769, "\b\u001b[1m\u001b[31md\u001b[1m\u001b[31mi\u001b[0m\u001b[39m" ], [ 0.148155, "\b\b\u001b[1m\u001b[31md\u001b[1m\u001b[31mi\u001b[1m\u001b[31mf\u001b[0m\u001b[39m" ], [ 0.13874, "\b\b\b\u001b[0m\u001b[32md\u001b[0m\u001b[32mi\u001b[0m\u001b[32mf\u001b[32mf\u001b[39m" ], [ 0.321772, " " ], [ 0.410311, "-" ], [ 0.160707, "s" ], [ 0.223167, " " ], [ 0.856546, "\u001b[4mW\u001b[24m" ], [ 0.184551, "\b\u001b[4mW\u001b[4ma\u001b[24m" ], [ 0.211734, "\b\u001b[4ma\u001b[4ml\u001b[24m" ], [ 0.115481, "\b\u001b[4ml\u001b[4ml\u001b[24m" ], [ 0.13804, "\u001b[?7l\u001b[31m......\u001b[39m\u001b[?7h" ], [ 0.007132, "\r\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[32mdiff\u001b[39m -s \u001b[4mWallpaper\u001b[24m\u001b[K" ], [ 0.620064, "\u001b[?7l\u001b[31m......\u001b[39m\u001b[?7h" ], [ 0.004082, "\r\r\u001b[19C" ], [ 0.000148, "\r\r\n\u001b[J\u001b[J\u001b[38;5;33mWallpaper\u001b[0m/ \u001b[J\u001b[38;5;33mWallpaper.orig\u001b[0m/\u001b[J\u001b[A\u001b[0m\u001b[27m\u001b[24m\r\u001b[2C\u001b[32mdiff\u001b[39m -s \u001b[4mWallpaper\u001b[24m\u001b[K" ], [ 0.83944, "\u001b[?7l" ], [ 2.4e-05, "\u001b[31m......\u001b[39m\u001b[?7h" ], [ 0.003487, "\u001b[9D\u001b[24mW\u001b[24ma\u001b[24ml\u001b[24ml\u001b[24mp\u001b[24ma\u001b[24mp\u001b[24me\u001b[24mr\u001b[1m/\u001b[0m" ], [ 0.000166, "\r\r\n\u001b[J\u001b[7mWallpaper/ \u001b[0m \u001b[J\u001b[38;5;33mWallpaper.orig\u001b[0m/\u001b[J\u001b[A\u001b[0m\u001b[27m\u001b[24m\r\u001b[2C\u001b[32mdiff\u001b[39m -s Wallpaper\u001b[1m/\u001b[0m\u001b[K" ], [ 0.488495, "\r\r\n" ], [ 1.6e-05, "\u001b[7mWallpaper/ \u001b[0m \r\u001b[7mWallpaper/ \u001b[0m \r\u001b[A\u001b[0m\u001b[27m\u001b[24m\r\u001b[2C\u001b[32mdiff\u001b[39m -s Wallpaper\u001b[1m/\u001b[0m\u001b[K\r\r\n\u001b[J\u001b[A\u001b[20C" ], [ 0.001959, "\r\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[32mdiff\u001b[39m -s 
\u001b[4mWallpaper\u001b[24m\u001b[K\u001b[1C" ], [ 0.285593, "\b" ], [ 0.303988, "\b\u001b[4mr\u001b[4m/\u001b[24m" ], [ 0.798187, "\b\u001b[4m/\u001b[4md\u001b[24m" ], [ 0.241007, "\b\u001b[4md\u001b[4me\u001b[24m" ], [ 0.21286, "\u001b[?7l\u001b[31m......\u001b[39m\u001b[?7h" ], [ 0.00579, "\r\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[32mdiff\u001b[39m -s \u001b[4mWallpaper/deer.jpg\u001b[24m\u001b[1m \u001b[0m\u001b[K" ], [ 1.289271, "\b\u001b[0m \u001b[4mW\u001b[24m" ], [ 0.148557, "\b\u001b[4mW\u001b[4ma\u001b[24m" ], [ 0.16621, "\b\u001b[4ma\u001b[4ml\u001b[24m" ], [ 0.097599, "\b\u001b[4ml\u001b[4ml\u001b[24m" ], [ 0.111176, "\u001b[?7l\u001b[31m......\u001b[39m\u001b[?7h" ], [ 0.005059, "\r\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[32mdiff\u001b[39m -s \u001b[4mWallpaper/deer.jpg\u001b[24m \u001b[4mWallpaper\u001b[24m\u001b[K" ], [ 0.431538, "\u001b[?7l\u001b[31m......\u001b[39m\u001b[?7h" ], [ 0.005176, "\r\r\u001b[38C" ], [ 0.000155, "\r\r\n\u001b[J\u001b[J\u001b[38;5;33mWallpaper\u001b[0m/ \u001b[J\u001b[38;5;33mWallpaper.orig\u001b[0m/\u001b[J\u001b[A\u001b[0m\u001b[27m\u001b[24m\r\u001b[2C\u001b[32mdiff\u001b[39m -s \u001b[4mWallpaper/deer.jpg\u001b[24m \u001b[4mWallpaper\u001b[24m\u001b[K" ], [ 0.389092, "\u001b[?7l\u001b[31m......\u001b[39m\u001b[?7h" ], [ 0.004561, "\u001b[9D\u001b[24mW\u001b[24ma\u001b[24ml\u001b[24ml\u001b[24mp\u001b[24ma\u001b[24mp\u001b[24me\u001b[24mr\u001b[1m/\u001b[0m" ], [ 0.000155, "\r\r\n\u001b[J\u001b[7mWallpaper/ \u001b[0m \u001b[J\u001b[38;5;33mWallpaper.orig\u001b[0m/\u001b[J\u001b[A\u001b[0m\u001b[27m\u001b[24m\r\u001b[2C" ], [ 1.3e-05, "\u001b[32mdiff\u001b[39m -s \u001b[4mWallpaper/deer.jpg\u001b[24m Wallpaper\u001b[1m/\u001b[0m\u001b[K" ], [ 0.260844, "\r\r\n" ], [ 3.6e-05, "\u001b[7mWallpaper/ \u001b[0m \r\u001b[7mWallpaper/ \u001b[0m \r\u001b[A\u001b[0m\u001b[27m\u001b[24m\r\u001b[2C\u001b[32mdiff\u001b[39m -s \u001b[4mWallpaper/deer.jpg\u001b[24m Wallpaper\u001b[1m/\u001b[0m\u001b[K\b\u001b[0m.orig\u001b[1m/\u001b[0m" ], [ 0.000163, "\r\r\n\u001b[17C\u001b[7mWallpaper.orig/\u001b[0m\r\u001b[38;5;33mWallpaper\u001b[0m/ \r\u001b[A\u001b[0m\u001b[27m\u001b[24m\r\u001b[2C\u001b[32mdiff\u001b[39m -s \u001b[4mWallpaper/deer.jpg\u001b[24m Wallpaper.orig\u001b[1m/\u001b[0m\u001b[K" ], [ 0.598634, "\r\r\n\u001b[J\u001b[A\u001b[44C" ], [ 0.002461, "\r\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[32mdiff\u001b[39m -s \u001b[4mWallpaper/deer.jpg\u001b[24m \u001b[4mWallpaper.orig\u001b[24m\u001b[K\u001b[1C" ], [ 0.275896, "\b" ], [ 0.321512, "\b\u001b[4mg\u001b[4m/\u001b[24m" ], [ 1.499007, "\b\u001b[4m/\u001b[4md\u001b[24m" ], [ 0.165243, "\b\u001b[4md\u001b[4me\u001b[24m" ], [ 0.260397, "\u001b[?7l\u001b[31m......\u001b[39m\u001b[?7h" ], [ 0.005274, "\r\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[32mdiff\u001b[39m -s \u001b[4mWallpaper/deer.jpg\u001b[24m \u001b[4mWallpaper.orig/deer.jpg\u001b[24m\u001b[1m \u001b[0m\u001b[K" ], [ 1.658125, "\b\u001b[0m \b" ], [ 1.5e-05, "\u001b[?1l\u001b>" ], [ 0.001138, "\u001b[?2004l\r\r\n" ], [ 0.000783, "\u001b]2;diff -s Wallpaper/deer.jpg Wallpaper.orig/deer.jpg\u0007\u001b]1;diff\u0007" ], [ 0.057035, "Files Wallpaper/deer.jpg and Wallpaper.orig/deer.jpg are identical\r\n" ], [ 0.000183, "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" ], [ 0.000114, "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[K" ], [ 9.1e-05, "\u001b[?1h\u001b=" ], [ 0.000199, "\u001b[?2004h" ], [ 3.579542, "\u001b[1m#\u001b[0m\u001b[39m" ], [ 0.624347, "\b\u001b[0m\u001b[39m \b" ], [ 0.353186, "\u001b[?1l\u001b>" 
], [ 0.000351, "\u001b[?2004l\r\r\n" ], [ 0.0006, "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" ], [ 2.1e-05, "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[K" ], [ 0.00013, "\u001b[?1h\u001b=" ], [ 0.000185, "\u001b[?2004h" ], [ 0.726522, "\u001b[1m#\u001b[0m\u001b[39m" ], [ 0.358332, "\b\u001b[1m#\u001b[1m \u001b[0m\u001b[39m" ], [ 0.183839, "\b\b\u001b[1m#\u001b[1m \u001b[1mA\u001b[0m\u001b[39m" ], [ 0.150451, "\b\u001b[1mA\u001b[1mn\u001b[0m\u001b[39m" ], [ 0.128839, "\b\u001b[1mn\u001b[1md\u001b[0m\u001b[39m" ], [ 0.583652, "\b\u001b[1md\u001b[1m,\u001b[0m\u001b[39m" ], [ 0.152149, "\b\u001b[1m,\u001b[1m \u001b[0m\u001b[39m" ], [ 0.240696, "\b\u001b[1m \u001b[1mo\u001b[0m\u001b[39m" ], [ 0.130032, "\b\u001b[1mo\u001b[1mf\u001b[0m\u001b[39m" ], [ 0.306901, "\b\u001b[1mf\u001b[1m \u001b[0m\u001b[39m" ], [ 0.181176, "\b\u001b[1m \u001b[1mc\u001b[0m\u001b[39m" ], [ 0.271007, "\b\u001b[1mc\u001b[1mourse, we can also create remote repos via ssh when borg is setup\u001b[1m \u001b[1mthere. This command creates a new remote repo in a subdirectory called \"demo\"\u001b[1m:\u001b[0m\u001b[39m\u001b[K" ], [ 2.040444, "\u001b[?1l\u001b>" ], [ 0.000423, "\u001b[?2004l\r\r\n" ], [ 0.000711, "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" ], [ 6.8e-05, "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[K" ], [ 6.8e-05, "\u001b[?1h\u001b=" ], [ 0.000297, "\u001b[?2004h" ], [ 1.613372, "\u001b[1m\u001b[31mb\u001b[0m\u001b[39m" ], [ 0.204618, "\b\u001b[1m\u001b[31mb\u001b[1m\u001b[31mo\u001b[0m\u001b[39m" ], [ 0.121257, "\b\b\u001b[1m\u001b[31mb\u001b[1m\u001b[31mo\u001b[1m\u001b[31mr\u001b[0m\u001b[39m" ], [ 0.228506, "\b\b\b\u001b[0m\u001b[32mb\u001b[0m\u001b[32mo\u001b[0m\u001b[32mr\u001b[32mg\u001b[39m" ], [ 0.469213, " " ], [ 0.23811, "i" ], [ 0.139149, "n" ], [ 0.157285, "i" ], [ 0.219101, "t" ], [ 0.389153, " " ], [ 0.633813, "-" ], [ 0.102895, "-" ], [ 0.267338, "e" ], [ 0.244036, "n" ], [ 0.303722, "c" ], [ 0.117325, "r" ], [ 0.112606, "y" ], [ 0.250891, "p" ], [ 0.258828, "t" ], [ 0.276877, "i" ], [ 0.131491, "o" ], [ 0.206852, "n" ], [ 0.966102, "=" ], [ 0.388021, "r" ], [ 0.146133, "e" ], [ 0.176939, "p" ], [ 0.139187, "o" ], [ 0.273188, "k" ], [ 0.172429, "e" ], [ 0.306306, "y" ], [ 0.851125, " " ], [ 0.868971, "b" ], [ 0.261136, "o" ], [ 0.12143, "r" ], [ 0.15507, "g" ], [ 0.186684, "d" ], [ 0.141974, "e" ], [ 0.13004, "m" ], [ 0.172673, "o" ], [ 1.041475, "@" ], [ 0.536019, "r" ], [ 0.02293, "e" ], [ 0.223755, "m" ], [ 0.152859, "o" ], [ 0.222368, "t" ], [ 0.095106, "e" ], [ 0.33914, "s" ], [ 0.213902, "e" ], [ 0.136448, "r" ], [ 0.196228, "v" ], [ 0.171447, "e" ], [ 0.154296, "r" ], [ 1.151168, "." ], [ 0.198973, "e" ], [ 0.195428, "x" ], [ 0.163512, "a" ], [ 0.157805, "m" ], [ 0.174865, "p" ], [ 0.103133, "l" ], [ 0.145276, "e" ], [ 2.109373, ":" ], [ 0.494126, "." ], [ 0.315325, "/" ], [ 0.182218, "d" ], [ 0.138815, "e" ], [ 0.143066, "m" ], [ 0.17136, "o" ], [ 1.831712, "\u001b[?1l\u001b>" ], [ 0.001025, "\u001b[?2004l\r\r\n" ], [ 0.000824, "\u001b]2;borg init --encryption=repokey borgdemo@remoteserver.example:./demo\u0007\u001b]1;borg\u0007" ], [ 6.069586, "Enter new passphrase: " ], [ 2.598936, "\r\n" ], [ 0.000189, "Enter same passphrase again: " ], [ 2.044707, "\r\n" ], [ 0.000198, "Do you want your passphrase to be displayed for verification? 
[yN]: " ], [ 1.415539, "\r\n" ], [ 1.950077, "\r\nBy default repositories initialized with this version will produce security\r\nerrors if written to with an older version (up to and including Borg 1.0.8).\r\n\r\nIf you want to use these older versions, you can disable the check by running:\r\nborg upgrade --disable-tam 'ssh://borgdemo@remoteserver.example/./demo'\r\n\r\nSee https://borgbackup.readthedocs.io/en/stable/changes.html#pre-1-0-9-manifest-spoofing-vulnerability for details about the security implications.\r\n" ], [ 0.548386, "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" ], [ 9.5e-05, "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[K" ], [ 0.000105, "\u001b[?1h\u001b=" ], [ 0.000221, "\u001b[?2004h" ], [ 0.82377, "\u001b[1m#\u001b[0m\u001b[39m" ], [ 0.662248, "\b\u001b[1m#\u001b[1m \u001b[0m\u001b[39m" ], [ 0.610999, "\b\b\u001b[1m#\u001b[1m \u001b[1mE\u001b[0m\u001b[39m" ], [ 0.267513, "\b\u001b[1mE\u001b[1ma\u001b[0m\u001b[39m" ], [ 0.185698, "\b\u001b[1ma\u001b[1ms\u001b[0m\u001b[39m" ], [ 0.161855, "\b\u001b[1ms\u001b[1my\u001b[0m\u001b[39m" ], [ 0.46273, "\b\u001b[1my\u001b[1m, isn't it? That's all you need to know for basic usage.\u001b[0m\u001b[39m" ], [ 1.861984, "\u001b[?1l\u001b>" ], [ 0.001044, "\u001b[?2004l\r\r\n" ], [ 0.001525, "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" ], [ 6.3e-05, "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[K" ], [ 8e-05, "\u001b[?1h\u001b=" ], [ 0.000316, "\u001b[?2004h" ], [ 1.009133, "\u001b[1m#\u001b[0m\u001b[39m" ], [ 0.240205, "\b\u001b[1m#\u001b[1m \u001b[0m\u001b[39m" ], [ 0.217287, "\b\b\u001b[1m#\u001b[1m \u001b[1mI\u001b[0m\u001b[39m" ], [ 0.163888, "\b\u001b[1mI\u001b[1mf\u001b[0m\u001b[39m" ], [ 0.349458, "\b\u001b[1mf\u001b[1m you want to see more, have a look at the screencast showing the \"advanc\u001b[1me\u001b[1md usage\".\u001b[0m\u001b[39m\u001b[K" ], [ 2.780664, "\u001b[?1l\u001b>" ], [ 0.000734, "\u001b[?2004l\r\r\n" ], [ 0.000812, "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" ], [ 4.4e-05, "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[K" ], [ 0.000113, "\u001b[?1h\u001b=" ], [ 0.000299, "\u001b[?2004h" ], [ 1.119856, "\u001b[1m#\u001b[0m\u001b[39m" ], [ 0.281915, "\b\u001b[1m#\u001b[1m \u001b[0m\u001b[39m" ], [ 0.244389, "\b\b\u001b[1m#\u001b[1m \u001b[1mI\u001b[0m\u001b[39m" ], [ 0.143064, "\b\u001b[1mI\u001b[1mn\u001b[0m\u001b[39m" ], [ 0.171731, "\b\u001b[1mn\u001b[1m \u001b[0m\u001b[39m" ], [ 0.139438, "\b\u001b[1m \u001b[1ma\u001b[0m\u001b[39m" ], [ 0.388834, "\b\u001b[1ma\u001b[1mny case, enjoy using borg!\u001b[0m\u001b[39m" ], [ 1.502218, "\u001b[?1l\u001b>" ], [ 0.000883, "\u001b[?2004l\r\r\n" ], [ 0.000735, "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" ], [ 9.9e-05, "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[K" ], [ 0.000113, "\u001b[?1h\u001b=" ], [ 0.000498, "\u001b[?2004h" ], [ 1.273251, "\u001b[?2004l\r\r\n" ] ] } borgbackup-1.1.15/docs/misc/asciinema/advanced.json0000644000175000017500000044522513771325506022175 0ustar useruser00000000000000{ "version": 1, "width": 78, "height": 24, "duration": 446.783754, "command": null, "title": null, "env": { "TERM": "xterm-256color", "SHELL": "/bin/zsh" }, "stdout": [ [ 0.29658, "\b\u001b[1m$ # \u001b[1mFor the pro users, here are some advanced features of borg, so you can imp\u001b[1mr\u001b[1mess your friends. 
;)\u001b[0m\u001b[39m\u001b[K" ], [ 1.025674, "\u001b[?1l\u001b>" ], [ 0.000375, "\u001b[?2004l\r\r\n" ], [ 0.000796, "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" ], [ 0.000953, "\u001b]7;\u0007" ], [ 0.000799, "\u001b]7;\u0007" ], [ 7.7e-05, "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[K" ], [ 4.6e-05, "\u001b[?1h\u001b=" ], [ 0.000368, "\u001b[?2004h" ], [ 0.857202, "\u001b[1m#\u001b[0m\u001b[39m" ], [ 0.269836, "\b\u001b[1m#\u001b[1m \u001b[0m\u001b[39m" ], [ 0.277016, "\b\b\u001b[1m#\u001b[1m \u001b[1mN\u001b[0m\u001b[39m" ], [ 0.185115, "\b\u001b[1mN\u001b[1mo\u001b[0m\u001b[39m" ], [ 0.222294, "\b\u001b[1mo\u001b[1mt\u001b[0m\u001b[39m" ], [ 0.098908, "\b\u001b[1mt\u001b[1me\u001b[0m\u001b[39m" ], [ 0.471037, "\b\u001b[1me\u001b[1m:\u001b[0m\u001b[39m" ], [ 0.276132, "\b\u001b[1m:\u001b[1m This screencast was made with borg version 1.1.0 – older or newer bo\u001b[1mr\u001b[1mg versions may behave differently.\u001b[0m\u001b[39m\u001b[K" ], [ 1.063392, "\u001b[?1l\u001b>" ], [ 0.001402, "\u001b[?2004l\r\r\n" ], [ 0.001228, "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" ], [ 0.002846, "\u001b]7;\u0007" ], [ 0.002554, "\u001b]7;\u0007" ], [ 6.6e-05, "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[K" ], [ 0.000229, "\u001b[?1h\u001b=" ], [ 0.000858, "\u001b[?2004h" ], [ 0.944947, "\u001b[?1l\u001b>" ], [ 0.000319, "\u001b[?2004l\r\r\n" ], [ 0.000652, "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" ], [ 0.001131, "\u001b]7;\u0007" ], [ 0.000871, "\u001b]7;\u0007" ], [ 9.6e-05, "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[K" ], [ 0.000117, "\u001b[?1h\u001b=" ], [ 0.00014, "\u001b[?2004h" ], [ 0.91046, "\u001b[1m#\u001b[0m\u001b[39m" ], [ 0.350642, "\b\u001b[1m#\u001b[1m \u001b[0m\u001b[39m" ], [ 0.226284, "\b\b\u001b[1m#\u001b[1m \u001b[1mF\u001b[0m\u001b[39m" ], [ 0.190635, "\b\u001b[1mF\u001b[1mi\u001b[0m\u001b[39m" ], [ 0.226298, "\b\u001b[1mi\u001b[1mr\u001b[0m\u001b[39m" ], [ 0.094075, "\b\u001b[1mr\u001b[1ms\u001b[0m\u001b[39m" ], [ 0.125931, "\b\u001b[1ms\u001b[1mt\u001b[0m\u001b[39m" ], [ 0.210409, "\b\u001b[1mt\u001b[1m \u001b[0m\u001b[39m" ], [ 0.333349, "\b\u001b[1m \u001b[1mof all, we can use several environment variables for borg.\u001b[0m\u001b[39m" ], [ 1.115007, "\u001b[?1l\u001b>" ], [ 0.000418, "\u001b[?2004l\r\r\n" ], [ 0.000665, "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" ], [ 0.001185, "\u001b]7;\u0007" ], [ 0.00091, "\u001b]7;\u0007" ], [ 2.5e-05, "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[K" ], [ 8.9e-05, "\u001b[?1h\u001b=" ], [ 0.000298, "\u001b[?2004h" ], [ 1.193161, "\u001b[1m#\u001b[0m\u001b[39m" ], [ 0.249128, "\b\u001b[1m#\u001b[1m \u001b[0m\u001b[39m" ], [ 0.253119, "\b\b\u001b[1m#\u001b[1m \u001b[1mE\u001b[0m\u001b[39m" ], [ 0.328187, "\b\u001b[1mE\u001b[1m.\u001b[0m\u001b[39m" ], [ 0.873845, "\b\u001b[1m.\u001b[1mg\u001b[0m\u001b[39m" ], [ 0.164238, "\b\u001b[1mg\u001b[1m.\u001b[0m\u001b[39m" ], [ 0.211331, "\b\u001b[1m.\u001b[1m \u001b[0m\u001b[39m" ], [ 0.15971, "\b\u001b[1m \u001b[1mw\u001b[0m\u001b[39m" ], [ 0.133833, "\b\u001b[1mw\u001b[1me\u001b[0m\u001b[39m" ], [ 2.95423, "\b\u001b[1me\u001b[1m do not want to type in our repo path and password again and again…" ], [ 1.769654, "\u001b[K" ], [ 2.7e-05, "\u001b[?1l\u001b>" ], [ 0.000616, "\u001b[?2004l\r\r\n" ], [ 0.000594, "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" ], [ 0.00144, "\u001b]7;\u0007" ], [ 0.001172, "\u001b]7;\u0007" ], [ 3.7e-05, "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[K" ], [ 7.2e-05, "\u001b[?1h\u001b=" ], [ 
0.000419, "\u001b[?2004h" ], [ 0.975676, "\u001b[1m\u001b[31me\u001b[0m\u001b[39m" ], [ 0.156719, "\b\u001b[0m\u001b[32me\u001b[32mx\u001b[39m" ], [ 0.121911, "\b\b\u001b[1m\u001b[31me\u001b[1m\u001b[31mx\u001b[1m\u001b[31mp\u001b[0m\u001b[39m" ], [ 0.15502, "\b\u001b[1m\u001b[31mp\u001b[1m\u001b[31mo\u001b[0m\u001b[39m" ], [ 0.26241, "\b\u001b[1m\u001b[31mo\u001b[1m\u001b[31mr\u001b[0m\u001b[39m" ], [ 0.126933, "\b\b\b\b\b\u001b[0m\u001b[33me\u001b[0m\u001b[33mx\u001b[0m\u001b[33mp\u001b[0m\u001b[33mo\u001b[0m\u001b[33mr\u001b[33mt\u001b[39m" ], [ 0.192182, " " ], [ 0.304561, "B" ], [ 0.192073, "O" ], [ 0.136183, "R" ], [ 0.114362, "G" ], [ 0.576349, "_" ], [ 0.103719, "R" ], [ 0.113626, "E" ], [ 0.159395, "P" ], [ 0.141942, "O" ], [ 0.554082, "=" ], [ 0.74644, "'" ], [ 0.69222, "/" ], [ 0.20093, "m" ], [ 0.108068, "e" ], [ 0.125576, "d" ], [ 0.161298, "i" ], [ 0.107949, "a" ], [ 0.423969, "/" ], [ 0.623591, "b" ], [ 0.102775, "a" ], [ 0.146442, "c" ], [ 0.116202, "k" ], [ 0.133034, "u" ], [ 0.282831, "p" ], [ 0.436512, "/" ], [ 0.551147, "b" ], [ 0.208373, "o" ], [ 0.108883, "r" ], [ 0.137272, "g" ], [ 0.218057, "d" ], [ 0.122586, "e" ], [ 0.133605, "m" ], [ 0.170095, "o" ], [ 0.795644, "'" ], [ 0.928899, "\u001b[?1l\u001b>" ], [ 0.001469, "\u001b[?2004l\r\r\n" ], [ 0.000802, "\u001b]2;export BORG_REPO='/media/backup/borgdemo' \u0007\u001b]1;export\u0007" ], [ 0.000109, "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" ], [ 0.001347, "\u001b]7;\u0007" ], [ 0.001006, "\u001b]7;\u0007" ], [ 5.1e-05, "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[K" ], [ 7.2e-05, "\u001b[?1h\u001b=" ], [ 0.000186, "\u001b[?2004h" ], [ 0.718289, "\u001b[1m\u001b[31me\u001b[0m\u001b[39m" ], [ 0.19628, "\b\u001b[0m\u001b[32me\u001b[32mx\u001b[39m" ], [ 0.269637, "\b\b\u001b[1m\u001b[31me\u001b[1m\u001b[31mx\u001b[1m\u001b[31mp\u001b[0m\u001b[39m" ], [ 0.164388, "\b\u001b[1m\u001b[31mp\u001b[1m\u001b[31mo\u001b[0m\u001b[39m" ], [ 0.332999, "\b\u001b[1m\u001b[31mo\u001b[1m\u001b[31mr\u001b[0m\u001b[39m" ], [ 0.121063, "\b\b\b\b\b\u001b[0m\u001b[33me\u001b[0m\u001b[33mx\u001b[0m\u001b[33mp\u001b[0m\u001b[33mo\u001b[0m\u001b[33mr\u001b[33mt\u001b[39m" ], [ 0.265335, " " ], [ 0.311313, "B" ], [ 0.205307, "O" ], [ 0.159682, "R" ], [ 0.141683, "G" ], [ 0.553563, "_" ], [ 0.225583, "P" ], [ 0.10739, "A" ], [ 0.204722, "S" ], [ 0.145905, "S" ], [ 0.312666, "P" ], [ 0.311469, "H" ], [ 0.209393, "R" ], [ 0.069618, "A" ], [ 0.208505, "S" ], [ 0.202229, "E" ], [ 0.719142, "=" ], [ 0.61979, "'" ], [ 0.414834, "1" ], [ 0.208777, "2" ], [ 0.193519, "3" ], [ 0.171001, "4" ], [ 0.542373, "'" ], [ 0.876006, "\u001b[?1l\u001b>" ], [ 0.002877, "\u001b[?2004l\r\r\n" ], [ 0.001161, "\u001b]2;export BORG_PASSPHRASE='1234' \u0007\u001b]1;export\u0007" ], [ 8.5e-05, "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" ], [ 0.003438, "\u001b]7;\u0007" ], [ 0.002065, "\u001b]7;\u0007" ], [ 0.000146, "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[K" ], [ 8.9e-05, "\u001b[?1h\u001b=" ], [ 0.000508, "\u001b[?2004h" ], [ 1.238676, "\u001b[1m#\u001b[0m\u001b[39m" ], [ 0.273221, "\b\u001b[1m#\u001b[1m \u001b[0m\u001b[39m" ], [ 0.248131, "\b\b\u001b[1m#\u001b[1m \u001b[1mP\u001b[0m\u001b[39m" ], [ 0.142137, "\b\u001b[1mP\u001b[1mr\u001b[0m\u001b[39m" ], [ 0.089312, "\b\u001b[1mr\u001b[1mo\u001b[0m\u001b[39m" ], [ 0.19919, "\b\u001b[1mo\u001b[1mb\u001b[0m\u001b[39m" ], [ 0.207691, "\b\u001b[1mb\u001b[1ml\u001b[0m\u001b[39m" ], [ 0.105529, "\b\u001b[1ml\u001b[1me\u001b[0m\u001b[39m" ], [ 0.075159, 
"\b\u001b[1me\u001b[1mm\u001b[0m\u001b[39m" ], [ 0.625428, "\b\u001b[1mm\u001b[1m solved, borg will use this automatically… :)\u001b[0m\u001b[39m" ], [ 0.442303, "\u001b[?1l\u001b>" ], [ 0.0004, "\u001b[?2004l\r\r\n" ], [ 0.00077, "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" ], [ 0.001065, "\u001b]7;\u0007" ], [ 0.001105, "\u001b]7;\u0007" ], [ 2.2e-05, "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[K" ], [ 7.3e-05, "\u001b[?1h\u001b=" ], [ 0.000266, "\u001b[?2004h" ], [ 1.570802, "\u001b[1m#\u001b[0m\u001b[39m" ], [ 0.218966, "\b\u001b[1m#\u001b[1m \u001b[0m\u001b[39m" ], [ 0.191279, "\b\b\u001b[1m#\u001b[1m \u001b[1mW\u001b[0m\u001b[39m" ], [ 0.144698, "\b\u001b[1mW\u001b[1me\u001b[0m\u001b[39m" ], [ 0.313061, "\b\u001b[1me\u001b[1m'\u001b[0m\u001b[39m" ], [ 0.245196, "\b\u001b[1m'\u001b[1mll use this right away…\u001b[0m\u001b[39m" ], [ 0.532339, "\u001b[?1l\u001b>" ], [ 0.000412, "\u001b[?2004l\r\r\n" ], [ 0.00062, "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" ], [ 0.001169, "\u001b]7;\u0007" ], [ 0.00087, "\u001b]7;\u0007" ], [ 2.3e-05, "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[K" ], [ 0.000101, "\u001b[?1h\u001b=" ], [ 0.000279, "\u001b[?2004h" ], [ 0.63892, "\u001b[?1l\u001b>" ], [ 0.000369, "\u001b[?2004l\r\r\n" ], [ 0.00044, "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" ], [ 0.002911, "\u001b]7;\u0007" ], [ 0.002442, "\u001b]7;\u0007" ], [ 0.000162, "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[K" ], [ 8.8e-05, "\u001b[?1h\u001b=" ], [ 0.00059, "\u001b[?2004h" ], [ 0.548725, "\u001b[1m#\u001b[0m\u001b[39m" ], [ 0.113549, "\b\u001b[1m#\u001b[1m#\u001b[0m\u001b[39m" ], [ 0.290577, "\b\b\u001b[1m#\u001b[1m#\u001b[1m \u001b[0m\u001b[39m" ], [ 0.262532, "\b\u001b[1m \u001b[1mA\u001b[0m\u001b[39m" ], [ 0.41846, "\b\u001b[1mA\u001b[1mDVANCED CREATION ##\u001b[0m\u001b[39m" ], [ 0.535376, "\u001b[?1l\u001b>" ], [ 0.001234, "\u001b[?2004l\r\r\n" ], [ 0.000938, "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" ], [ 0.002912, "\u001b]7;\u0007" ], [ 0.001987, "\u001b]7;\u0007" ], [ 7e-05, "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[K" ], [ 0.000134, "\u001b[?1h\u001b=" ], [ 0.000671, "\u001b[?2004h" ], [ 0.759129, "\u001b[?1l\u001b>" ], [ 0.000397, "\u001b[?2004l\r\r\n" ], [ 0.000757, "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" ], [ 0.001297, "\u001b]7;\u0007" ], [ 0.00131, "\u001b]7;\u0007" ], [ 3.1e-05, "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[K" ], [ 0.0001, "\u001b[?1h\u001b=" ], [ 0.000135, "\u001b[?2004h" ], [ 0.425509, "\u001b[1m#\u001b[0m\u001b[39m" ], [ 0.233111, "\b\u001b[1m#\u001b[1m \u001b[0m\u001b[39m" ], [ 0.185443, "\b\b\u001b[1m#\u001b[1m \u001b[1mW\u001b[0m\u001b[39m" ], [ 0.151433, "\b\u001b[1mW\u001b[1me\u001b[0m\u001b[39m" ], [ 0.157168, "\b\u001b[1me\u001b[1m \u001b[0m\u001b[39m" ], [ 0.148414, "\b\u001b[1m \u001b[1mc\u001b[0m\u001b[39m" ], [ 0.200586, "\b\u001b[1mc\u001b[1ma\u001b[0m\u001b[39m" ], [ 0.145343, "\b\u001b[1ma\u001b[1mn\u001b[0m\u001b[39m" ], [ 0.414343, "\b\u001b[1mn\u001b[1m also use some placeholders in our archive name…\u001b[0m\u001b[39m" ], [ 1.198174, "\u001b[?1l\u001b>" ], [ 0.000433, "\u001b[?2004l\r\r\n" ], [ 0.000647, "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" ], [ 0.001494, "\u001b]7;\u0007" ], [ 0.001069, "\u001b]7;\u0007" ], [ 8.2e-05, "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[K" ], [ 4.6e-05, "\u001b[?1h\u001b=" ], [ 0.000395, "\u001b[?2004h" ], [ 0.832499, "\u001b[1m\u001b[31mb\u001b[0m\u001b[39m" ], [ 0.186742, 
"\b\u001b[1m\u001b[31mb\u001b[1m\u001b[31mo\u001b[0m\u001b[39m" ], [ 0.076839, "\b\b\u001b[1m\u001b[31mb\u001b[1m\u001b[31mo\u001b[1m\u001b[31mr\u001b[0m\u001b[39m" ], [ 0.15706, "\b\b\b\u001b[0m\u001b[32mb\u001b[0m\u001b[32mo\u001b[0m\u001b[32mr\u001b[32mg\u001b[39m" ], [ 0.175773, " " ], [ 0.265231, "c" ], [ 0.198791, "r" ], [ 0.162497, "e" ], [ 0.08856, "a" ], [ 0.135865, "t" ], [ 0.112707, "e" ], [ 0.634063, " " ], [ 0.621186, "-" ], [ 0.112118, "-" ], [ 0.270276, "s" ], [ 0.135637, "t" ], [ 0.130994, "a" ], [ 0.086801, "t" ], [ 0.119778, "s" ], [ 0.24882, " " ], [ 0.47677, "-" ], [ 0.112232, "-" ], [ 0.26855, "p" ], [ 0.218974, "r" ], [ 0.14527, "o" ], [ 0.21975, "g" ], [ 0.104406, "r" ], [ 0.168975, "e" ], [ 0.224875, "s" ], [ 0.161557, "s" ], [ 0.556139, " " ], [ 0.90841, "-" ], [ 0.117065, "-" ], [ 0.268496, "c" ], [ 0.118758, "o" ], [ 0.13892, "m" ], [ 0.17322, "p" ], [ 0.146756, "r" ], [ 0.196139, "e" ], [ 0.249655, "s" ], [ 0.157202, "s" ], [ 0.236521, "i" ], [ 0.120624, "o" ], [ 0.175143, "n" ], [ 0.321073, " " ], [ 0.249849, "l" ], [ 0.281988, "z" ], [ 0.281179, "4" ], [ 1.223567, " " ], [ 0.604439, ":" ], [ 0.099497, ":" ], [ 0.760652, "{" ], [ 0.504646, "u" ], [ 0.249702, "s" ], [ 0.310204, "e" ], [ 0.156776, "r" ], [ 0.927624, "}" ], [ 0.972074, "-" ], [ 0.979824, "{" ], [ 0.397346, "n" ], [ 0.195251, "o" ], [ 0.203266, "w" ], [ 0.716944, "}" ], [ 0.992466, " " ], [ 0.404348, "\u001b[4mW\u001b[24m" ], [ 0.098053, "\b\u001b[4mW\u001b[4ma\u001b[24m \b" ], [ 0.440872, "\b\u001b[4ma\u001b[4ml\u001b[24m" ], [ 0.130433, "\b\u001b[4ml\u001b[4ml\u001b[24m" ], [ 0.079918, "\u001b[?7l\u001b[31m......\u001b[39m\u001b[?7h" ], [ 0.009903, "\r\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[32mborg\u001b[39m create --stats --progress --compression lz4 ::{user}-{now} \u001b[4mWallpaper\u001b[24m\u001b[K" ], [ 1.432747, "\u001b[?1l\u001b>" ], [ 0.006238, "\u001b[?2004l\r\r\n" ], [ 0.001309, "\u001b]2;borg create --stats --progress --compression lz4 ::{user}-{now} Wallpaper\u0007\u001b]1;borg\u0007" ], [ 0.703285, "0 B O 0 B C 0 B D 0 N Wallpaper \r" ], [ 0.059704, "Initializing cache transaction: Reading config \r" ], [ 0.000259, "Initializing cache transaction: Reading chunks \r" ], [ 0.000283, "Initializing cache transaction: Reading files \r" ], [ 0.00035, " \r" ], [ 0.302813, "Compacting segments 0% \r" ], [ 0.000422, "Compacting segments 50% \r" ], [ 2.6e-05, " \r" ], [ 0.053481, "Saving files cache \r" ], [ 0.010102, "Saving chunks cache \r" ], [ 0.000354, "Saving cache config \r" ], [ 0.08865, " \r" ], [ 2.6e-05, " \r" ], [ 0.000371, "------------------------------------------------------------------------------\r\n" ], [ 3.4e-05, "Archive name: rugk-2017-07-16T18:51:34\r\n" ], [ 8e-06, "Archive fingerprint: d054cc411324d4bd848b39d1c9cad909073f9ff1a1a503a676d3e050be140396\r\n" ], [ 0.000101, "Time (start): Sun, 2017-07-16 18:51:34\r\nTime (end): Sun, 2017-07-16 18:51:35\r\n" ], [ 7.5e-05, "Duration: 0.18 seconds\r\nNumber of files: 1\r\n" ], [ 8.8e-05, "Utilization of maximum supported archive size: 0%\r\n" ], [ 7e-05, "------------------------------------------------------------------------------\r\n Original size Compressed size Deduplicated size\r\n" ], [ 1.6e-05, "This archive: 3.78 MB 3.80 MB 916 B\r\n" ], [ 5.2e-05, "All archives: 1.86 GB 1.86 GB 561.88 MB\r\n" ], [ 1.3e-05, "\r\n" ], [ 2.4e-05, " Unique chunks Total chunks\r\n" ], [ 2.4e-05, "Chunk index: 1008 3288\r\n" ], [ 2.4e-05, "------------------------------------------------------------------------------\r\n" ], [ 
0.049018, "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" ], [ 0.00124, "\u001b]7;\u0007" ], [ 0.000936, "\u001b]7;\u0007" ], [ 0.000124, "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[K" ], [ 8.9e-05, "\u001b[?1h\u001b=" ], [ 0.00019, "\u001b[?2004h" ], [ 0.814358, "\u001b[1m#\u001b[0m\u001b[39m" ], [ 0.326066, "\b\u001b[1m#\u001b[1m \u001b[0m\u001b[39m" ], [ 0.279288, "\b\b\u001b[1m#\u001b[1m \u001b[1mN\u001b[0m\u001b[39m" ], [ 0.200695, "\b\u001b[1mN\u001b[1mo\u001b[0m\u001b[39m" ], [ 0.2241, "\b\u001b[1mo\u001b[1mt\u001b[0m\u001b[39m" ], [ 0.221056, "\b\u001b[1mt\u001b[1mi\u001b[0m\u001b[39m" ], [ 0.341582, "\b\u001b[1mi\u001b[1mce the backup name.\u001b[0m\u001b[39m" ], [ 1.40396, "\u001b[?1l\u001b>" ], [ 0.000442, "\u001b[?2004l\r\r\n" ], [ 0.000701, "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" ], [ 0.00108, "\u001b]7;\u0007" ], [ 0.000942, "\u001b]7;\u0007" ], [ 5e-05, "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[K" ], [ 4.3e-05, "\u001b[?1h\u001b=" ], [ 0.00028, "\u001b[?2004h" ], [ 1.540998, "\u001b[?1l\u001b>" ], [ 0.000288, "\u001b[?2004l\r\r\n" ], [ 0.000571, "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" ], [ 0.0013, "\u001b]7;\u0007" ], [ 0.000852, "\u001b]7;\u0007" ], [ 0.000106, "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[K" ], [ 8.6e-05, "\u001b[?1h\u001b=" ], [ 0.000164, "\u001b[?2004h" ], [ 0.402376, "\u001b[1m#\u001b[0m\u001b[39m" ], [ 0.27499, "\b\u001b[1m#\u001b[1m \u001b[0m\u001b[39m" ], [ 0.220032, "\b\b\u001b[1m#\u001b[1m \u001b[1mA\u001b[0m\u001b[39m" ], [ 0.127907, "\b\u001b[1mA\u001b[1mn\u001b[0m\u001b[39m" ], [ 0.092357, "\b\u001b[1mn\u001b[1md\u001b[0m\u001b[39m" ], [ 0.145572, "\b\u001b[1md\u001b[1m \u001b[0m\u001b[39m" ], [ 0.222962, "\b\u001b[1m \u001b[1mw\u001b[0m\u001b[39m" ], [ 0.178534, "\b\u001b[1mw\u001b[1me\u001b[0m\u001b[39m" ], [ 0.196668, "\b\u001b[1me\u001b[1m \u001b[0m\u001b[39m" ], [ 0.225933, "\b\u001b[1m \u001b[1mc\u001b[0m\u001b[39m" ], [ 0.175493, "\b\u001b[1mc\u001b[1ma\u001b[0m\u001b[39m" ], [ 0.119503, "\b\u001b[1ma\u001b[1mn\u001b[0m\u001b[39m" ], [ 0.425112, "\b\u001b[1mn\u001b[1m put completely different data, with different backup settings, i\u001b[1mn\u001b[1m our backup. 
It will be deduplicated, anyway:\u001b[0m\u001b[39m\u001b[K" ], [ 1.421849, "\u001b[?1l\u001b>" ], [ 0.000749, "\u001b[?2004l\r\r\n" ], [ 0.00066, "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" ], [ 0.00197, "\u001b]7;\u0007" ], [ 0.001476, "\u001b]7;\u0007" ], [ 5.7e-05, "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[K" ], [ 0.000124, "\u001b[?1h\u001b=" ], [ 0.000525, "\u001b[?2004h" ], [ 1.444268, "\u001b[1m\u001b[31mb\u001b[0m\u001b[39m" ], [ 0.209812, "\b\u001b[1m\u001b[31mb\u001b[1m\u001b[31mo\u001b[0m\u001b[39m" ], [ 0.118788, "\b\b\u001b[1m\u001b[31mb\u001b[1m\u001b[31mo\u001b[1m\u001b[31mr\u001b[0m\u001b[39m" ], [ 0.145792, "\b\b\b\u001b[0m\u001b[32mb\u001b[0m\u001b[32mo\u001b[0m\u001b[32mr\u001b[32mg\u001b[39m" ], [ 0.20446, " " ], [ 0.309592, "c" ], [ 0.201447, "r" ], [ 0.151315, "e" ], [ 0.084953, "a" ], [ 0.156918, "t" ], [ 0.091724, "e" ], [ 0.324287, " " ], [ 0.861486, "-" ], [ 0.134231, "-" ], [ 0.491182, "s" ], [ 0.195253, "t" ], [ 0.097572, "a" ], [ 0.09545, "t" ], [ 0.111782, "s" ], [ 0.301387, " " ], [ 0.524478, "-" ], [ 0.112538, "-" ], [ 0.397406, "p" ], [ 0.175509, "r" ], [ 0.203203, "o" ], [ 0.257392, "g" ], [ 0.1453, "r" ], [ 0.174285, "e" ], [ 0.353531, "s" ], [ 0.176989, "s" ], [ 0.386157, " " ], [ 0.510691, "-" ], [ 0.115919, "-" ], [ 0.225102, "c" ], [ 0.145577, "o" ], [ 0.133821, "m" ], [ 0.171364, "p" ], [ 0.157255, "r" ], [ 0.162989, "e" ], [ 0.256274, "s" ], [ 0.167254, "s" ], [ 0.253369, "i" ], [ 0.1197, "o" ], [ 0.178105, "n" ], [ 0.824434, " " ], [ 0.734608, "z" ], [ 0.237239, "l" ], [ 0.158877, "i" ], [ 0.148988, "b" ], [ 0.289236, "," ], [ 0.349273, "6" ], [ 0.618231, " " ], [ 0.449031, "-" ], [ 0.119307, "-" ], [ 0.451923, "e" ], [ 0.330743, "x" ], [ 0.232655, "c" ], [ 0.197384, "l" ], [ 0.176276, "u" ], [ 0.104427, "d" ], [ 0.141163, "e" ], [ 0.359309, " " ], [ 1.198529, "\u001b[4m~\u001b[24m" ], [ 0.338729, "\b\u001b[4m~\u001b[4m/\u001b[24m" ], [ 0.352573, "\b\u001b[4m/\u001b[4mD\u001b[24m" ], [ 0.190254, "\b\u001b[4mD\u001b[4mo\u001b[24m" ], [ 0.113631, "\b\u001b[4mo\u001b[4mw\u001b[24m" ], [ 0.743216, "\b\u001b[4mw\u001b[4mn\u001b[24m" ], [ 0.613852, "\b\u001b[4mn\u001b[4ml\u001b[24m" ], [ 0.121501, "\b\u001b[4ml\u001b[4mo\u001b[24m" ], [ 0.068625, "\b\u001b[4mo\u001b[4ma\u001b[24m" ], [ 0.183855, "\b\u001b[4ma\u001b[4md\u001b[24m" ], [ 0.152099, "\b\u001b[4md\u001b[4ms\u001b[24m" ], [ 0.793349, "\b\u001b[4ms\u001b[4m/\u001b[24m" ], [ 0.477575, "\b\u001b[4m/\u001b[4mb\u001b[24m" ], [ 0.198072, "\b\u001b[4mb\u001b[4mi\u001b[24m \r\u001b[K" ], [ 0.175276, "\u001b[A\u001b[77C\u001b[4mi\u001b[4mg\u001b[24m" ], [ 0.647369, "\r\u001b[4mg\u001b[24m " ], [ 0.439418, ":" ], [ 0.108932, ":" ], [ 0.556615, "{" ], [ 0.244626, "u" ], [ 0.097534, "s" ], [ 0.187502, "e" ], [ 0.16023, "r" ], [ 0.675542, "}" ], [ 0.988946, "-" ], [ 0.545789, "{" ], [ 0.33121, "n" ], [ 0.204667, "o" ], [ 0.141818, "w" ], [ 0.397217, "}" ], [ 0.979478, " " ], [ 0.768118, "\u001b[4m~\u001b[24m" ], [ 0.589532, "\b\u001b[4m~\u001b[4m/\u001b[24m" ], [ 0.515186, "\b\u001b[4m/\u001b[4mD\u001b[24m" ], [ 0.17703, "\b\u001b[4mD\u001b[4mo\u001b[24m" ], [ 0.121294, "\b\u001b[4mo\u001b[4mw\u001b[24m" ], [ 0.153543, "\b\u001b[4mw\u001b[4mn\u001b[24m" ], [ 0.282343, "\b\u001b[4mn\u001b[4ml\u001b[24m" ], [ 0.129573, "\b\u001b[4ml\u001b[4mo\u001b[24m" ], [ 0.095125, "\b\u001b[4mo\u001b[4ma\u001b[24m" ], [ 0.19963, "\b\u001b[4ma\u001b[4md\u001b[24m" ], [ 0.142667, "\b\u001b[4md\u001b[4ms\u001b[24m" ], [ 1.499285, "\u001b[?1l\u001b>" ], [ 0.003081, "\u001b[?2004l\r\r\n" ], [ 0.000637, 
"\u001b]2;borg create --stats --progress --compression zlib,6 --exclude ~/Downloads/big\u0007\u001b]1;borg\u0007" ], [ 0.687457, "0 B O 0 B C 0 B D 0 N home/rugk/Downloads \r" ], [ 0.025551, "Initializing cache transaction: Reading config \r" ], [ 0.000326, "Initializing cache transaction: Reading chunks \r" ], [ 0.000273, "Initializing cache transaction: Reading files \r" ], [ 0.000394, " \r" ], [ 0.220691, "1.31 MB O 1.29 MB C 1.29 MB D 1 N home/rugk/Downloads...chiveWithStuffHere.zip\r" ], [ 0.26224, "7.70 MB O 6.91 MB C 6.91 MB D 2 N home/rugk/Downloads...droid.gms-11059462.apk\r" ], [ 0.32599, "Compacting segments 0% \r" ], [ 0.026073, "Compacting segments 50% \r" ], [ 0.001982, " \r" ], [ 0.058565, "Saving files cache \r" ], [ 0.011363, "Saving chunks cache \r" ], [ 0.000378, "Saving cache config \r" ], [ 0.12955, " \r" ], [ 3.4e-05, " \r" ], [ 0.00039, "------------------------------------------------------------------------------\r\n" ], [ 1.6e-05, "Archive name: rugk-2017-07-16T18:52:19\r\n" ], [ 3.1e-05, "Archive fingerprint: 0de98f590b004ad7545f2013c4c9f2d4e3eed1415d177c89d6c2b7ff05918d2e\r\n" ], [ 2.2e-05, "Time (start): Sun, 2017-07-16 18:52:19\r\n" ], [ 7.2e-05, "Time (end): Sun, 2017-07-16 18:52:20\r\nDuration: 0.63 seconds\r\n" ], [ 3e-05, "Number of files: 6\r\n" ], [ 2.5e-05, "Utilization of maximum supported archive size: 0%\r\n" ], [ 2.4e-05, "------------------------------------------------------------------------------\r\n" ], [ 1.8e-05, " Original size Compressed size Deduplicated size\r\n" ], [ 2.5e-05, "This archive: 9.55 MB 8.04 MB 8.04 MB\r\n" ], [ 2.4e-05, "All archives: 1.87 GB 1.86 GB 569.92 MB\r\n" ], [ 2.5e-05, "\r\n" ], [ 2.4e-05, " Unique chunks Total chunks\r\n" ], [ 2.4e-05, "Chunk index: 1023 3303\r\n" ], [ 2.4e-05, "------------------------------------------------------------------------------\r\n" ], [ 0.063104, "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" ], [ 0.001326, "\u001b]7;\u0007" ], [ 0.001145, "\u001b]7;\u0007" ], [ 8.9e-05, "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[K" ], [ 8.9e-05, "\u001b[?1h\u001b=" ], [ 0.0002, "\u001b[?2004h" ], [ 3.131399, "\u001b[?1l\u001b>" ], [ 0.000281, "\u001b[?2004l\r\r\n" ], [ 0.00048, "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" ], [ 0.001354, "\u001b]7;\u0007" ], [ 0.000923, "\u001b]7;\u0007" ], [ 6.6e-05, "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[K" ], [ 5.4e-05, "\u001b[?1h\u001b=" ], [ 0.000161, "\u001b[?2004h" ], [ 0.285262, "\u001b[1m#\u001b[0m\u001b[39m" ], [ 0.419379, "\b\u001b[1m#\u001b[1m \u001b[0m\u001b[39m" ], [ 0.277555, "\b\b\u001b[1m#\u001b[1m \u001b[1mO\u001b[0m\u001b[39m" ], [ 0.015676, "\b\u001b[1mO\u001b[0m\u001b[39m" ], [ 0.119839, "\u001b[1mr\u001b[0m\u001b[39m" ], [ 0.315418, "\b\u001b[1mr\u001b[1m \u001b[0m\u001b[39m" ], [ 0.224426, "\b\u001b[1m \u001b[1ml\u001b[0m\u001b[39m" ], [ 0.10624, "\b\u001b[1ml\u001b[1me\u001b[0m\u001b[39m" ], [ 0.170324, "\b\u001b[1me\u001b[1mt\u001b[0m\u001b[39m" ], [ 0.995665, "\b\u001b[1mt\u001b[1m'\u001b[0m\u001b[39m" ], [ 0.139331, "\b\u001b[1m'\u001b[1ms\u001b[0m\u001b[39m" ], [ 0.174188, "\b\u001b[1ms\u001b[1m backup a device via STDIN.\u001b[0m\u001b[39m" ], [ 1.117059, "\u001b[?1l\u001b>" ], [ 0.000376, "\u001b[?2004l\r\r\n" ], [ 0.000566, "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" ], [ 0.001272, "\u001b]7;\u0007" ], [ 0.000893, "\u001b]7;\u0007" ], [ 8.1e-05, "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[K" ], [ 3.9e-05, "\u001b[?1h\u001b=" ], [ 0.000291, "\u001b[?2004h" ], [ 2.390246, 
"\u001b[1m\u001b[31ms\u001b[0m\u001b[39m" ], [ 0.179283, "\b\u001b[0m\u001b[32ms\u001b[32mu\u001b[39m" ], [ 0.08919, "\b\b\u001b[1m\u001b[31ms\u001b[1m\u001b[31mu\u001b[1m\u001b[31md\u001b[0m\u001b[39m" ], [ 0.156134, "\b\b\b\u001b[0m\u001b[4m\u001b[32ms\u001b[0m\u001b[4m\u001b[32mu\u001b[0m\u001b[4m\u001b[32md\u001b[4m\u001b[32mo\u001b[24m\u001b[39m" ], [ 0.939511, " " ], [ 0.219491, "\u001b[32md\u001b[39m" ], [ 0.128817, "\b\u001b[32md\u001b[32md\u001b[39m" ], [ 0.317081, " " ], [ 0.206442, "i" ], [ 0.127682, "f" ], [ 0.497718, "=" ], [ 0.79125, "/" ], [ 0.162326, "d" ], [ 0.141147, "e" ], [ 0.17081, "v" ], [ 0.229501, "/" ], [ 0.309668, "s" ], [ 0.201626, "d" ], [ 0.121565, "x" ], [ 1.112764, " " ], [ 0.458342, "b" ], [ 0.13412, "s" ], [ 0.426796, "=" ], [ 0.325514, "1" ], [ 0.182735, "0" ], [ 0.635284, "M" ], [ 0.571527, " " ], [ 0.644682, "|" ], [ 0.668689, " " ], [ 0.368219, "\u001b[1m\u001b[31mb\u001b[0m\u001b[39m" ], [ 0.197192, "\b\u001b[1m\u001b[31mb\u001b[1m\u001b[31mo\u001b[0m\u001b[39m" ], [ 0.069454, "\b\u001b[1m\u001b[31mo\u001b[1m\u001b[31mr\u001b[0m\u001b[39m" ], [ 0.15983, "\b\b\b\u001b[0m\u001b[32mb\u001b[0m\u001b[32mo\u001b[0m\u001b[32mr\u001b[32mg\u001b[39m" ], [ 0.193693, " " ], [ 0.342177, "c" ], [ 0.213502, "r" ], [ 0.165989, "e" ], [ 0.101269, "a" ], [ 0.20561, "t" ], [ 0.172574, "e" ], [ 0.302751, " " ], [ 0.524261, "-" ], [ 0.112867, "-" ], [ 0.358854, "p" ], [ 0.158933, "r" ], [ 0.146881, "o" ], [ 0.235592, "g" ], [ 0.153909, "r" ], [ 0.187519, "e" ], [ 0.278997, "s" ], [ 0.161351, "s" ], [ 0.536239, " " ], [ 0.472536, "-" ], [ 0.103445, "-" ], [ 0.315142, "s" ], [ 0.188015, "t" ], [ 0.092463, "a" ], [ 0.121697, "t" ], [ 0.108331, "s" ], [ 0.863705, " " ], [ 0.547363, ":" ], [ 0.101957, ":" ], [ 0.713103, "s" ], [ 0.172527, "p" ], [ 0.143374, "e" ], [ 0.495475, "c" ], [ 0.184747, "i" ], [ 0.118626, "a" ], [ 0.21782, "l" ], [ 0.61779, "b" ], [ 0.056813, "a" ], [ 0.18761, "c" ], [ 0.116227, "k" ], [ 0.143399, "u \r\u001b[K" ], [ 0.31621, "p" ], [ 0.174943, "\rp " ], [ 0.964699, "-" ], [ 1.23368, "\u001b[?1l\u001b>" ], [ 0.003628, "\u001b[?2004l\r\r\n" ], [ 0.000824, "\u001b]2;sudo dd if=/dev/sdx bs=10M | borg create --progress --stats ::specialbackup\u0007\u001b]1;dd\u0007" ], [ 0.023411, "[sudo] password for rugk: " ], [ 3.286582, "\r\n" ], [ 0.077852, "Initializing cache transaction: Reading config \r" ], [ 0.000267, "Initializing cache transaction: Reading chunks \r" ], [ 0.000293, "Initializing cache transaction: Reading files \r" ], [ 0.00045, " \r" ], [ 0.083816, "8.39 MB O 34.25 kB C 34.25 kB D 0 N stdin \r" ], [ 0.228267, "41.94 MB O 166.40 kB C 100.50 kB D 0 N stdin \r" ], [ 0.216716, "75.50 MB O 298.20 kB C 100.50 kB D 0 N stdin \r" ], [ 0.218476, "109.05 MB O 430.00 kB C 100.50 kB D 0 N stdin \r" ], [ 0.219164, "142.61 MB O 562.12 kB C 133.77 kB D 0 N stdin \r" ], [ 0.216368, "176.16 MB O 693.92 kB C 133.77 kB D 0 N stdin \r" ], [ 0.222311, "209.72 MB O 825.72 kB C 133.77 kB D 0 N stdin \r" ], [ 0.217156, "243.27 MB O 957.52 kB C 133.77 kB D 0 N stdin \r" ], [ 0.22399, "276.82 MB O 1.09 MB C 166.77 kB D 0 N stdin \r" ], [ 0.223827, "310.38 MB O 1.22 MB C 166.77 kB D 0 N stdin \r" ], [ 0.220959, "343.93 MB O 1.35 MB C 166.77 kB D 0 N stdin \r" ], [ 0.223439, "377.49 MB O 1.48 MB C 166.77 kB D 0 N stdin \r" ], [ 0.226226, "411.04 MB O 1.62 MB C 200.04 kB D 0 N stdin \r" ], [ 0.239743, "444.60 MB O 1.75 MB C 200.04 kB D 0 N stdin \r" ], [ 0.229508, "478.15 MB O 1.88 MB C 200.04 kB D 0 N stdin \r" ], [ 0.220491, "511.71 MB O 2.01 MB C 200.04 kB D 0 N stdin 
\r" ], [ 0.2504, "545.26 MB O 2.14 MB C 200.04 kB D 0 N stdin \r" ], [ 0.241044, "578.81 MB O 2.28 MB C 200.04 kB D 0 N stdin \r" ], [ 0.215372, "612.37 MB O 2.41 MB C 200.04 kB D 0 N stdin \r" ], [ 0.113508, "60+0 records in\r\n60+0 records out\r\n" ], [ 3.9e-05, "629145600 bytes (629 MB, 600 MiB) copied, 4.31277 s, 146 MB/s\r\n" ], [ 0.231874, "Compacting segments 0% \r" ], [ 0.001188, "Compacting segments 50% \r" ], [ 3.7e-05, " \r" ], [ 0.078344, "Saving chunks cache \r" ], [ 0.000348, "Saving cache config \r" ], [ 0.087821, " \r" ], [ 2.8e-05, " \r" ], [ 0.000346, "------------------------------------------------------------------------------\r\n" ], [ 2.2e-05, "Archive name: specialbackup\r\n" ], [ 9.7e-05, "Archive fingerprint: 68e942cc4a48402e48ba87f4887c24e5b9fe06e881b0ca241c791810a108bec0\r\nTime (start): Sun, 2017-07-16 18:52:58\r\n" ], [ 0.000133, "Time (end): Sun, 2017-07-16 18:53:05\r\nDuration: 6.99 seconds\r\n" ], [ 1.3e-05, "Number of files: 1\r\n" ], [ 2.2e-05, "Utilization of maximum supported archive size: 0%\r\n" ], [ 7.3e-05, "------------------------------------------------------------------------------\r\n" ], [ 1.1e-05, " Original size Compressed size Deduplicated size\r\n" ], [ 2.7e-05, "This archive: 629.15 MB 2.47 MB 234.02 kB\r\n" ], [ 3.3e-05, "All archives: 2.50 GB 1.87 GB 570.15 MB\r\n" ], [ 3.3e-05, "\r\n" ], [ 3.3e-05, " Unique chunks Total chunks\r\n" ], [ 2.4e-05, "Chunk index: 1032 3380\r\n" ], [ 2.4e-05, "------------------------------------------------------------------------------\r\n" ], [ 0.047256, "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" ], [ 0.001383, "\u001b]7;\u0007" ], [ 0.001024, "\u001b]7;\u0007" ], [ 8.3e-05, "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ " ], [ 7e-06, "\u001b[K" ], [ 7.1e-05, "\u001b[?1h\u001b=" ], [ 0.00021, "\u001b[?2004h" ], [ 3.669021, "\u001b[?1l\u001b>" ], [ 0.000291, "\u001b[?2004l\r\r\n" ], [ 0.000719, "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" ], [ 0.001178, "\u001b]7;\u0007" ], [ 0.0009, "\u001b]7;\u0007" ], [ 9.6e-05, "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[K" ], [ 6.6e-05, "\u001b[?1h\u001b=" ], [ 0.00022, "\u001b[?2004h" ], [ 0.311851, "\u001b[1m#\u001b[0m\u001b[39m" ], [ 0.290767, "\b\u001b[1m#\u001b[1m \u001b[0m\u001b[39m" ], [ 0.23476, "\b\b\u001b[1m#\u001b[1m \u001b[1mL\u001b[0m\u001b[39m" ], [ 0.188456, "\b\u001b[1mL\u001b[1me\u001b[0m\u001b[39m" ], [ 0.139916, "\b\u001b[1me\u001b[1mt\u001b[0m\u001b[39m" ], [ 0.522516, "\b\u001b[1mt\u001b[1m'\u001b[0m\u001b[39m" ], [ 0.157443, "\b\u001b[1m'\u001b[1ms\u001b[0m\u001b[39m" ], [ 0.460729, "\b\u001b[1ms\u001b[1m \u001b[0m\u001b[39m" ], [ 0.1201, "\b\u001b[1m \u001b[1mc\u001b[0m\u001b[39m" ], [ 0.324466, "\b\u001b[1mc\u001b[1montinue with some simple things:\u001b[0m\u001b[39m" ], [ 0.634167, "\u001b[?1l\u001b>" ], [ 0.000434, "\u001b[?2004l\r\r\n" ], [ 0.0006, "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" ], [ 0.00124, "\u001b]7;\u0007" ], [ 0.001113, "\u001b]7;\u0007" ], [ 0.00012, "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[K" ], [ 0.000136, "\u001b[?1h\u001b=" ], [ 0.000274, "\u001b[?2004h" ], [ 1.724466, "\u001b[1m#\u001b[0m\u001b[39m" ], [ 0.116327, "\b\u001b[1m#\u001b[1m#\u001b[0m\u001b[39m" ], [ 0.26172, "\b\b\u001b[1m#\u001b[1m#\u001b[1m \u001b[0m\u001b[39m" ], [ 0.250198, "\b\u001b[1m \u001b[1mU\u001b[0m\u001b[39m" ], [ 0.746624, "\b\u001b[1mU\u001b[1mSEFUL COMMANDS ##\u001b[0m\u001b[39m" ], [ 0.5602, "\u001b[?1l\u001b>" ], [ 0.001411, "\u001b[?2004l\r\r\n" ], [ 0.001009, 
"\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" ], [ 0.003137, "\u001b]7;\u0007" ], [ 0.002454, "\u001b]7;\u0007" ], [ 0.000167, "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[K" ], [ 0.000157, "\u001b[?1h\u001b=" ], [ 0.000746, "\u001b[?2004h" ], [ 1.207899, "\u001b[?1l\u001b>" ], [ 0.000322, "\u001b[?2004l\r\r\n" ], [ 0.000472, "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" ], [ 0.001289, "\u001b]7;\u0007" ], [ 0.000891, "\u001b]7;\u0007" ], [ 9.5e-05, "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ " ], [ 1.8e-05, "\u001b[K" ], [ 0.000115, "\u001b[?1h\u001b=" ], [ 0.000246, "\u001b[?2004h" ], [ 0.734707, "\u001b[1m#\u001b[0m\u001b[39m" ], [ 0.247085, "\b\u001b[1m#\u001b[1m \u001b[0m\u001b[39m" ], [ 0.182467, "\b\b\u001b[1m#\u001b[1m \u001b[1mY\u001b[0m\u001b[39m" ], [ 0.123582, "\b\u001b[1mY\u001b[1mo\u001b[0m\u001b[39m" ], [ 0.16343, "\b\u001b[1mo\u001b[1mu\u001b[0m\u001b[39m" ], [ 0.183388, "\b\u001b[1mu\u001b[1m \u001b[0m\u001b[39m" ], [ 0.083055, "\b\u001b[1m \u001b[1mc\u001b[0m\u001b[39m" ], [ 0.187526, "\b\u001b[1mc\u001b[1ma\u001b[0m\u001b[39m" ], [ 0.130988, "\b\u001b[1ma\u001b[1mn\u001b[0m\u001b[39m" ], [ 0.142246, "\b\u001b[1mn\u001b[1m \u001b[0m\u001b[39m" ], [ 0.145489, "\b\u001b[1m \u001b[1ms\u001b[0m\u001b[39m" ], [ 0.132155, "\b\u001b[1ms\u001b[1mh\u001b[0m\u001b[39m" ], [ 0.192915, "\b\u001b[1mh\u001b[1mo\u001b[0m\u001b[39m" ], [ 0.142644, "\b\u001b[1mo\u001b[1mw\u001b[0m\u001b[39m" ], [ 0.149707, "\b\u001b[1mw\u001b[1m \u001b[0m\u001b[39m" ], [ 0.134515, "\b\u001b[1m \u001b[1ms\u001b[0m\u001b[39m" ], [ 0.085942, "\b\u001b[1ms\u001b[1mo\u001b[0m\u001b[39m" ], [ 0.160772, "\b\u001b[1mo\u001b[1mm\u001b[0m\u001b[39m" ], [ 0.132016, "\b\u001b[1mm\u001b[1me\u001b[0m\u001b[39m" ], [ 0.219601, "\b\u001b[1me\u001b[1m information about an archive. 
You can even do it without \u001b[1mn\u001b[1meeding to specify the archive name:\u001b[0m\u001b[39m\u001b[K" ], [ 0.644657, "\u001b[?1l\u001b>" ], [ 0.000392, "\u001b[?2004l\r\r\n" ], [ 0.000705, "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" ], [ 0.001347, "\u001b]7;\u0007" ], [ 0.001099, "\u001b]7;\u0007" ], [ 4.2e-05, "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[K" ], [ 0.0001, "\u001b[?1h\u001b=" ], [ 0.000372, "\u001b[?2004h" ], [ 2.264862, "\u001b[1m\u001b[31mb\u001b[0m\u001b[39m" ], [ 0.182056, "\b\u001b[1m\u001b[31mb\u001b[1m\u001b[31mo\u001b[0m\u001b[39m" ], [ 0.083939, "\b\b\u001b[1m\u001b[31mb\u001b[1m\u001b[31mo\u001b[1m\u001b[31mr\u001b[0m\u001b[39m" ], [ 0.152072, "\b\b\b\u001b[0m\u001b[32mb\u001b[0m\u001b[32mo\u001b[0m\u001b[32mr\u001b[32mg\u001b[39m" ], [ 0.142791, " " ], [ 0.224315, "i" ], [ 0.130651, "n" ], [ 0.100647, "f" ], [ 0.155636, "o" ], [ 0.716063, " " ], [ 0.736635, ":" ], [ 0.107352, ":" ], [ 0.289804, " " ], [ 0.436564, "-" ], [ 0.131871, "-" ], [ 0.824072, "l" ], [ 0.061945, "a" ], [ 0.136723, "s" ], [ 0.143197, "t" ], [ 0.186833, " " ], [ 0.125784, "1" ], [ 0.924568, "\u001b[?1l\u001b>" ], [ 0.002555, "\u001b[?2004l\r\r\n" ], [ 0.00096, "\u001b]2;borg info :: --last 1\u0007\u001b]1;borg\u0007" ], [ 0.693043, "Archive name: specialbackup\r\nArchive fingerprint: 68e942cc4a48402e48ba87f4887c24e5b9fe06e881b0ca241c791810a108bec0\r\nComment: \r\nHostname: tux\r\nUsername: rugk\r\nTime (start): Sun, 2017-07-16 18:52:58\r\nTime (end): Sun, 2017-07-16 18:53:05\r\nDuration: 6.99 seconds\r\nNumber of files: 1\r\nCommand line: borg create --progress --stats ::specialbackup -\r\nUtilization of maximum supported archive size: 0%\r\n------------------------------------------------------------------------------\r\n Original size Compressed size Deduplicated size\r\nThis archive: 629.15 MB 2.47 MB 234.02 kB\r\nAll archives: 2.50 GB 1.87 GB 570.15 MB\r\n\r\n Unique chunks Total chunks\r\nChunk index: 1032 3380\r\n" ], [ 0.045207, "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" ], [ 0.001204, "\u001b]7;\u0007" ], [ 0.000923, "\u001b]7;\u0007" ], [ 3.5e-05, "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[K" ], [ 0.000129, "\u001b[?1h\u001b=" ], [ 0.000196, "\u001b[?2004h" ], [ 1.70302, "\u001b[?1l\u001b>" ], [ 0.000314, "\u001b[?2004l\r\r\n" ], [ 0.000475, "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" ], [ 0.001262, "\u001b]7;\u0007" ], [ 0.00098, "\u001b]7;\u0007" ], [ 4.4e-05, "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[K" ], [ 6.1e-05, "\u001b[?1h\u001b=" ], [ 0.000164, "\u001b[?2004h" ], [ 0.281651, "\u001b[1m#\u001b[0m\u001b[39m" ], [ 0.234109, "\b\u001b[1m#\u001b[1m \u001b[0m\u001b[39m" ], [ 0.181326, "\b\b\u001b[1m#\u001b[1m \u001b[1mS\u001b[0m\u001b[39m" ], [ 0.12398, "\b\u001b[1mS\u001b[1mo\u001b[0m\u001b[39m" ], [ 0.166912, "\b\u001b[1mo\u001b[1m \u001b[0m\u001b[39m" ], [ 0.490114, "\b\u001b[1m \u001b[1ml\u001b[0m\u001b[39m" ], [ 0.160581, "\b\u001b[1ml\u001b[1me\u001b[0m\u001b[39m" ], [ 0.148283, "\b\u001b[1me\u001b[1mt\u001b[0m\u001b[39m" ], [ 0.453708, "\b\u001b[1mt\u001b[1m'\u001b[0m\u001b[39m" ], [ 0.118956, "\b\u001b[1m'\u001b[1ms\u001b[0m\u001b[39m" ], [ 0.125062, "\b\u001b[1ms\u001b[1m \u001b[0m\u001b[39m" ], [ 0.130519, "\b\u001b[1m \u001b[1mr\u001b[0m\u001b[39m" ], [ 0.130132, "\b\u001b[1mr\u001b[1me\u001b[0m\u001b[39m" ], [ 0.265033, "\b\u001b[1me\u001b[1mname our last archive:\u001b[0m\u001b[39m" ], [ 1.001935, "\u001b[?1l\u001b>" ], [ 0.000416, "\u001b[?2004l\r\r\n" ], [ 0.0006, 
"\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" ], [ 0.00114, "\u001b]7;\u0007" ], [ 0.000898, "\u001b]7;\u0007" ], [ 2.7e-05, "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[K" ], [ 9.5e-05, "\u001b[?1h\u001b=" ], [ 0.000286, "\u001b[?2004h" ], [ 1.253113, "\u001b[1m\u001b[31mb\u001b[0m\u001b[39m" ], [ 0.202007, "\b\u001b[1m\u001b[31mb\u001b[1m\u001b[31mo\u001b[0m\u001b[39m" ], [ 0.105752, "\b\b\u001b[1m\u001b[31mb\u001b[1m\u001b[31mo\u001b[1m\u001b[31mr\u001b[0m\u001b[39m" ], [ 0.134948, "\b\b\b\u001b[0m\u001b[32mb\u001b[0m\u001b[32mo\u001b[0m\u001b[32mr\u001b[32mg\u001b[39m" ], [ 0.14764, " " ], [ 0.157682, "r" ], [ 0.124491, "e" ], [ 0.118993, "n" ], [ 0.140445, "a" ], [ 0.101365, "m" ], [ 0.115953, "e" ], [ 1.107064, " " ], [ 0.561405, ":" ], [ 0.103305, ":" ], [ 0.263633, "s" ], [ 0.142089, "p" ], [ 0.134253, "e" ], [ 0.240688, "c" ], [ 0.136782, "i" ], [ 0.128372, "a" ], [ 0.170065, "l" ], [ 0.592209, "b" ], [ 0.348417, "a" ], [ 0.210896, "c" ], [ 0.259528, "k" ], [ 0.171523, "u" ], [ 0.245786, "p" ], [ 0.582735, " " ], [ 0.568884, "b" ], [ 0.101982, "a" ], [ 0.162673, "c" ], [ 0.104218, "k" ], [ 0.132828, "u" ], [ 0.245157, "p" ], [ 0.266242, "-" ], [ 0.316388, "b" ], [ 0.43535, "l" ], [ 0.133908, "o" ], [ 0.047013, "c" ], [ 0.622041, "k" ], [ 0.82215, "-" ], [ 0.183882, "d" ], [ 0.189034, "e" ], [ 0.181902, "v" ], [ 0.18728, "i" ], [ 0.052242, "c" ], [ 0.160462, "e" ], [ 0.645053, "\u001b[?1l\u001b>" ], [ 0.001146, "\u001b[?2004l\r\r\n" ], [ 0.000741, "\u001b]2;borg rename ::specialbackup backup-block-device\u0007\u001b]1;borg\u0007" ], [ 1.136038, "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" ], [ 0.001149, "\u001b]7;\u0007" ], [ 0.000968, "\u001b]7;\u0007" ], [ 7.4e-05, "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[K" ], [ 0.000107, "\u001b[?1h\u001b=" ], [ 0.000193, "\u001b[?2004h" ], [ 1.203902, "\u001b[32mborg\u001b[39m rename ::specialbackup backup-block-device" ], [ 0.192203, "\u001b[47D\u001b[1m#\u001b[1m \u001b[1mS\u001b[1mo\u001b[1m \u001b[1ml\u001b[1me\u001b[1mt\u001b[1m'\u001b[1ms\u001b[1m \u001b[1mr\u001b[1me\u001b[1mn\u001b[1ma\u001b[1mm\u001b[1me\u001b[1m \u001b[1mo\u001b[1mu\u001b[1mr\u001b[1m \u001b[1ml\u001b[1ma\u001b[1ms\u001b[1mt\u001b[1m \u001b[1ma\u001b[1mr\u001b[1mc\u001b[1mh\u001b[1mi\u001b[1mv\u001b[1me\u001b[1m:\u001b[0m\u001b[39m \u001b[12D" ], [ 0.528657, "\u001b[35D\u001b[0m\u001b[32mb\u001b[0m\u001b[32mo\u001b[0m\u001b[32mr\u001b[0m\u001b[32mg\u001b[39m\u001b[0m\u001b[39m \u001b[0m\u001b[39mi\u001b[0m\u001b[39mn\u001b[0m\u001b[39mf\u001b[0m\u001b[39mo\u001b[0m\u001b[39m \u001b[0m\u001b[39m:\u001b[0m\u001b[39m:\u001b[0m\u001b[39m \u001b[0m\u001b[39m-\u001b[0m\u001b[39m-\u001b[0m\u001b[39ml\u001b[0m\u001b[39ma\u001b[0m\u001b[39ms\u001b[0m\u001b[39mt\u001b[0m\u001b[39m \u001b[0m\u001b[39m1\u001b[0m\u001b[39m \u001b[0m\u001b[39m \u001b[0m\u001b[39m \u001b[0m\u001b[39m \u001b[0m\u001b[39m \u001b[0m\u001b[39m \u001b[0m\u001b[39m \u001b[0m\u001b[39m \u001b[0m\u001b[39m \u001b[0m\u001b[39m \u001b[0m\u001b[39m \u001b[0m\u001b[39m \u001b[0m\u001b[39m \u001b[0m\u001b[39m \u001b[14D" ], [ 0.548884, "\u001b[?1l\u001b>" ], [ 0.003595, "\u001b[?2004l\r\r\n" ], [ 0.000857, "\u001b]2;borg info :: --last 1\u0007\u001b]1;borg\u0007" ], [ 0.689879, "Archive name: backup-block-device\r\nArchive fingerprint: 5fd9732b4809252742a7cb3fadf2a971dd6371afd11a07944c0b5803d57c240f\r\nComment: \r\nHostname: tux\r\nUsername: rugk\r\nTime (start): Sun, 2017-07-16 18:52:58\r\nTime (end): Sun, 2017-07-16 18:53:05\r\nDuration: 6.99 seconds\r\nNumber of files: 
1\r\nCommand line: borg create --progress --stats ::specialbackup -\r\nUtilization of maximum supported archive size: 0%\r\n------------------------------------------------------------------------------\r\n Original size Compressed size Deduplicated size\r\nThis archive: 629.15 MB 2.47 MB 234.04 kB\r\nAll archives: 2.50 GB 1.87 GB 570.15 MB\r\n\r\n Unique chunks Total chunks\r\nChunk index: 1032 3380\r\n" ], [ 0.044772, "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" ], [ 0.001259, "\u001b]7;\u0007" ], [ 0.001013, "\u001b]7;\u0007" ], [ 8.6e-05, "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[K" ], [ 0.000109, "\u001b[?1h\u001b=" ], [ 0.000191, "\u001b[?2004h" ], [ 2.415375, "\u001b[?1l\u001b>" ], [ 0.000379, "\u001b[?2004l\r\r\n" ], [ 0.000632, "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" ], [ 0.001347, "\u001b]7;\u0007" ], [ 0.001044, "\u001b]7;\u0007" ], [ 8.9e-05, "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[K" ], [ 0.000101, "\u001b[?1h\u001b=" ], [ 0.000183, "\u001b[?2004h" ], [ 0.412865, "\u001b[1m#\u001b[0m\u001b[39m" ], [ 0.250988, "\b\u001b[1m#\u001b[1m \u001b[0m\u001b[39m" ], [ 0.245192, "\b\b\u001b[1m#\u001b[1m \u001b[1mA\u001b[0m\u001b[39m" ], [ 0.706056, "\b\u001b[1mA\u001b[1m \u001b[0m\u001b[39m" ], [ 0.273409, "\b\u001b[1m \u001b[1mv\u001b[0m\u001b[39m" ], [ 0.194462, "\b\u001b[1mv\u001b[1me\u001b[0m\u001b[39m" ], [ 0.114445, "\b\u001b[1me\u001b[1mr\u001b[0m\u001b[39m" ], [ 0.097756, "\b\u001b[1mr\u001b[1my\u001b[0m\u001b[39m" ], [ 0.149155, "\b\u001b[1my\u001b[1m \u001b[0m\u001b[39m" ], [ 0.258303, "\b\u001b[1m \u001b[1mi\u001b[0m\u001b[39m" ], [ 0.133528, "\b\u001b[1mi\u001b[1mm\u001b[0m\u001b[39m" ], [ 0.225062, "\b\u001b[1mm\u001b[1mp\u001b[0m\u001b[39m" ], [ 0.352638, "\b\u001b[1mp\u001b[1mortant step if you choose keyfile mode (where the keyfile is onl\u001b[1my\u001b[1m saved locally) is to export your keyfile and possibly print it, etc.\u001b[0m\u001b[39m\u001b[K" ], [ 1.170303, "\u001b[?1l\u001b>" ], [ 0.000524, "\u001b[?2004l\r\r\n" ], [ 0.000714, "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" ], [ 0.001487, "\u001b]7;\u0007" ], [ 0.001303, "\u001b]7;\u0007" ], [ 3.9e-05, "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[K" ], [ 6.5e-05, "\u001b[?1h\u001b=" ], [ 0.000291, "\u001b[?2004h" ], [ 2.080689, "\u001b[1m\u001b[31mb\u001b[0m\u001b[39m" ], [ 0.197142, "\b\u001b[1m\u001b[31mb\u001b[1m\u001b[31mo\u001b[0m\u001b[39m" ], [ 0.172626, "\b\b\u001b[1m\u001b[31mb\u001b[1m\u001b[31mo\u001b[1m\u001b[31mr\u001b[0m\u001b[39m" ], [ 0.145083, "\b\b\b\u001b[0m\u001b[32mb\u001b[0m\u001b[32mo\u001b[0m\u001b[32mr\u001b[32mg\u001b[39m" ], [ 0.943024, " " ], [ 0.511742, "k" ], [ 0.274338, "e" ], [ 0.308416, "y" ], [ 0.568141, " " ], [ 0.62626, "e" ], [ 0.224255, "x" ], [ 2.028973, "p" ], [ 0.220629, "o" ], [ 0.395617, "r" ], [ 0.127004, "t" ], [ 0.635262, " " ], [ 0.728631, ":" ], [ 0.116567, ":" ], [ 0.347323, " " ], [ 1.713208, "-" ], [ 0.134471, "-" ], [ 0.298094, "q" ], [ 0.316108, "r" ], [ 0.373821, "-" ], [ 0.416623, "c" ], [ 0.400783, "o" ], [ 0.107762, "d" ], [ 0.134276, "e" ], [ 0.384438, " " ], [ 0.447909, "f" ], [ 0.162017, "i" ], [ 0.113187, "l" ], [ 0.069321, "e" ], [ 0.627894, "." 
], [ 0.32877, "h" ], [ 0.137354, "t" ], [ 0.181468, "m" ], [ 0.156847, "l" ], [ 0.434616, " " ], [ 0.906636, "\u001b[1m#\u001b[0m\u001b[39m" ], [ 0.546016, "\b\u001b[1m#\u001b[1m \u001b[0m\u001b[39m" ], [ 1.755972, "\b\u001b[1m \u001b[1mthis creates a nice HTML, but when \u001b[1my\u001b[1mou want something simpler…\u001b[0m\u001b[39m\u001b[K" ], [ 2.940038, "\b\b\u001b[1mr\u001b[0m\u001b[39m\u001b[K" ], [ 0.691374, "\b\b\u001b[1me\u001b[0m\u001b[39m\u001b[0m\u001b[39m \b" ], [ 0.501031, "\b\b\u001b[1ml\u001b[0m\u001b[39m\u001b[0m\u001b[39m \b" ], [ 0.0295, "\b\b\u001b[1mp\u001b[0m\u001b[39m\u001b[0m\u001b[39m \b" ], [ 0.029695, "\b\b\u001b[1mm\u001b[0m\u001b[39m\u001b[0m\u001b[39m \b" ], [ 0.029437, "\b\b\u001b[1mi\u001b[0m\u001b[39m\u001b[0m\u001b[39m \b" ], [ 0.03032, "\b\b\u001b[1ms\u001b[0m\u001b[39m\u001b[0m\u001b[39m \b" ], [ 0.029433, "\b\b\u001b[1m \u001b[0m\u001b[39m\u001b[0m\u001b[39m \b" ], [ 0.030373, "\b\b\u001b[1mg\u001b[0m\u001b[39m\u001b[0m\u001b[39m \b" ], [ 0.029337, "\b\b\u001b[1mn\u001b[0m\u001b[39m\u001b[0m\u001b[39m \b" ], [ 0.031058, "\b\b\u001b[1mi\u001b[0m\u001b[39m\u001b[0m\u001b[39m \b" ], [ 0.029329, "\b\b\u001b[1mh\u001b[0m\u001b[39m\u001b[0m\u001b[39m \b" ], [ 0.031142, "\b\b\u001b[1mt\u001b[0m\u001b[39m\u001b[0m\u001b[39m \b" ], [ 0.029181, "\b\b\u001b[1me\u001b[0m\u001b[39m\u001b[0m\u001b[39m \b" ], [ 0.029786, "\b\b\u001b[1mm\u001b[0m\u001b[39m\u001b[0m\u001b[39m \b" ], [ 0.030603, "\b\b\u001b[1mo\u001b[0m\u001b[39m\u001b[0m\u001b[39m \b" ], [ 0.029332, "\b\b\u001b[1ms\u001b[0m\u001b[39m\u001b[0m\u001b[39m \b" ], [ 0.030813, "\b\b\u001b[1m \u001b[0m\u001b[39m\u001b[0m\u001b[39m \b" ], [ 0.029428, "\b\b\u001b[1mt\u001b[0m\u001b[39m\u001b[0m\u001b[39m \b" ], [ 0.029368, "\b\b\u001b[1mn\u001b[0m\u001b[39m\u001b[0m\u001b[39m \b" ], [ 0.030166, "\b\b\u001b[1ma\u001b[0m\u001b[39m\u001b[0m\u001b[39m \b" ], [ 0.030524, "\b\b\u001b[1mw\u001b[0m\u001b[39m\u001b[0m\u001b[39m \b" ], [ 0.029333, "\b\b\u001b[1m \u001b[0m\u001b[39m\u001b[0m\u001b[39m \b" ], [ 0.030607, "\b\b\u001b[1mu\u001b[0m\u001b[39m\u001b[0m\u001b[39m \b" ], [ 0.029346, "\r\u001b[1my\u001b[1mo\u001b[0m\u001b[39m\u001b[0m\u001b[39m \b" ], [ 0.031102, "\r\u001b[1my\u001b[0m\u001b[39m\u001b[0m\u001b[39m \b" ], [ 0.029544, "\u001b[A\u001b[76C\u001b[1m \u001b[0m\u001b[39m \u001b[K\r" ], [ 0.029675, "\u001b[A\u001b[76C\u001b[1mn\u001b[0m\u001b[39m\u001b[K\u001b[1B\r\u001b[K\u001b[A\u001b[77C" ], [ 0.030809, "\b\b\u001b[1me\u001b[0m\u001b[39m\u001b[0m\u001b[39m \b" ], [ 0.02987, "\b\b\u001b[1mh\u001b[0m\u001b[39m\u001b[0m\u001b[39m \b" ], [ 0.029707, "\b\b\u001b[1mw\u001b[0m\u001b[39m\u001b[0m\u001b[39m \b" ], [ 0.029901, "\b\b\u001b[1m \u001b[0m\u001b[39m\u001b[0m\u001b[39m \b" ], [ 0.03057, "\b\b\u001b[1mt\u001b[0m\u001b[39m\u001b[0m\u001b[39m \b" ], [ 0.029469, "\b\b\u001b[1mu\u001b[0m\u001b[39m\u001b[0m\u001b[39m \b" ], [ 0.030219, "\b\b\u001b[1mb\u001b[0m\u001b[39m\u001b[0m\u001b[39m \b" ], [ 0.029227, "\b\b\u001b[1m \u001b[0m\u001b[39m\u001b[0m\u001b[39m \b" ], [ 0.030465, "\b\b\u001b[1m,\u001b[0m\u001b[39m\u001b[0m\u001b[39m \b" ], [ 0.029423, "\b\b\u001b[1mL\u001b[0m\u001b[39m\u001b[0m\u001b[39m \b" ], [ 0.030292, "\b\b\u001b[1mM\u001b[0m\u001b[39m\u001b[0m\u001b[39m \b" ], [ 0.030715, "\b\b\u001b[1mT\u001b[0m\u001b[39m\u001b[0m\u001b[39m \b" ], [ 0.029641, "\b\b\u001b[1mH\u001b[0m\u001b[39m\u001b[0m\u001b[39m \b" ], [ 0.029367, "\b\b\u001b[1m \u001b[0m\u001b[39m\u001b[0m\u001b[39m \b" ], [ 0.031235, "\b\b\u001b[1me\u001b[0m\u001b[39m\u001b[0m\u001b[39m \b" ], [ 0.030119, 
"\b\b\u001b[1mc\u001b[0m\u001b[39m\u001b[0m\u001b[39m \b" ], [ 0.030061, "\b\b\u001b[1mi\u001b[0m\u001b[39m\u001b[0m\u001b[39m \b" ], [ 0.030102, "\b\b\u001b[1mn\u001b[0m\u001b[39m\u001b[0m\u001b[39m \b" ], [ 0.029384, "\b\b\u001b[1m \u001b[0m\u001b[39m\u001b[0m\u001b[39m \b" ], [ 0.029499, "\b\b\u001b[1ma\u001b[0m\u001b[39m\u001b[0m\u001b[39m \b" ], [ 0.03047, "\b\b\u001b[1m \u001b[0m\u001b[39m\u001b[0m\u001b[39m \b" ], [ 0.03019, "\b\b\u001b[1ms\u001b[0m\u001b[39m\u001b[0m\u001b[39m \b" ], [ 0.029337, "\b\b\u001b[1me\u001b[0m\u001b[39m\u001b[0m\u001b[39m \b" ], [ 0.030138, "\b\b\u001b[1mt\u001b[0m\u001b[39m\u001b[0m\u001b[39m \b" ], [ 0.030049, "\b\b\u001b[1ma\u001b[0m\u001b[39m\u001b[0m\u001b[39m \b" ], [ 0.030132, "\b\b\u001b[1me\u001b[0m\u001b[39m\u001b[0m\u001b[39m \b" ], [ 0.029948, "\b\b\u001b[1mr\u001b[0m\u001b[39m\u001b[0m\u001b[39m \b" ], [ 0.029428, "\b\b\u001b[1mc\u001b[0m\u001b[39m\u001b[0m\u001b[39m \b" ], [ 0.030197, "\b\b\u001b[1m \u001b[0m\u001b[39m\u001b[0m\u001b[39m \b" ], [ 0.030196, "\b\b\u001b[1ms\u001b[0m\u001b[39m\u001b[0m\u001b[39m \b" ], [ 0.03118, "\b\b\u001b[1mi\u001b[0m\u001b[39m\u001b[0m\u001b[39m \b" ], [ 0.028165, "\b\b\u001b[1mh\u001b[0m\u001b[39m\u001b[0m\u001b[39m \b" ], [ 0.03128, "\b\b\u001b[1mt\u001b[0m\u001b[39m\u001b[0m\u001b[39m \b" ], [ 0.029716, "\b\b\u001b[1m \u001b[0m\u001b[39m\u001b[0m\u001b[39m \b" ], [ 0.03012, "\b\b\u001b[1m#\u001b[0m\u001b[39m\u001b[0m\u001b[39m \b" ], [ 0.346808, "\b\u001b[0m\u001b[39m \b" ], [ 0.19843, "\b" ], [ 0.307235, "\b \b" ], [ 0.499683, "\b \b" ], [ 0.028468, "\b \b" ], [ 0.029472, "\b \b" ], [ 0.030565, "\b \b" ], [ 0.029224, "\b \b" ], [ 0.030493, "\b \b" ], [ 0.030666, "\b \b" ], [ 0.029185, "\b \b" ], [ 0.02989, "\b" ], [ 0.029921, "\b \b" ], [ 0.029657, "\b \b" ], [ 0.154399, "\b \b" ], [ 0.165915, "\b \b" ], [ 0.154316, "\b \b" ], [ 0.154588, "\b \b" ], [ 0.147868, "\b \b" ], [ 1.555865, "p" ], [ 0.446126, "a" ], [ 0.188714, "p" ], [ 0.252833, "e" ], [ 0.142044, "r" ], [ 0.395895, " " ], [ 0.423453, "\u001b[1m# this is a \"manual input\"-only backup (but it is\u001b[1m \u001b[1malso included in the --qr-code option)\u001b[0m\u001b[39m\u001b[K" ], [ 3.71528, "\u001b[?1l\u001b>" ], [ 0.001413, "\u001b[?2004l\r\r\n" ], [ 0.000757, "\u001b]2;borg key export :: --paper\u0007\u001b]1;borg\u0007" ], [ 0.550352, "To restore key use borg key import --paper /path/to/repo\r\n\r\nBORG PAPER KEY v1\r\nid: 20 / 54f957 2d6d72 de8280 / 158a57 45bdc3 - f6\r\n 1: 86a961 6c676f 726974 686da6 736861 323536 - 14\r\n 2: a46461 7461da 00def1 7c9f3c 81ebc6 730a05 - 35\r\n 3: 12453e d02760 ffdeef 4d0daa 231d81 ae10d8 - e5\r\n 4: 7bb0a1 97c30f 312b61 7170ba d1ea91 da2c88 - 30\r\n 5: ca997e 177b74 38f906 709a66 fbf013 40ab3d - c4\r\n 6: 6af94b 8a36a9 e07b9d b0e08d 3935cd f1bbb9 - 5c\r\n 7: 2b10b6 ebb586 4c0967 f682b9 c64358 fbb63c - a4\r\n 8: b9fc94 240d08 072524 98b619 7bd1c5 21094e - ec\r\n 9: ac4f05 d65a6a 7f8a0d 8cc14e 405b36 c248e1 - 79\r\n10: d23b89 c61074 3e68c9 79c683 2384e8 cd9f82 - 50\r\n11: fc76a9 3f2a9e 05d5f1 313f95 ec4313 53e0c1 - 4a\r\n12: 654f1d ab2b51 2ccbe8 80be07 b6132f 86aeb5 - 11\r\n13: 7e6e48 5ff0d4 41e659 a421f0 5123df f88dff - c9\r\n14: 03db58 bbb410 87d7fc 075b14 5108a4 686173 - 9a\r\n15: 68da00 20524b 8769e9 e5bd18 a9b431 c05b49 - ba\r\n16: 505280 9b104a b081c0 f4efd1 1d3771 34c701 - 40\r\n17: aa6974 657261 74696f 6e73ce 000186 a0a473 - 15\r\n18: 616c7" ], [ 7.2e-05, "4 da0020 0be74e e1e9af 7b1364 3ee362 - 32\r\n19: 643069 b57a75 d30eb6 104c28 367e17 7dd4d9 - 79\r\n20: f556a7 766572 73696f 6e01 - 32\r\n\r\n" 
], [ 0.048873, "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" ], [ 0.001193, "\u001b]7;\u0007" ], [ 0.000921, "\u001b]7;\u0007" ], [ 9.3e-05, "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[K" ], [ 8e-05, "\u001b[?1h\u001b=" ], [ 0.000185, "\u001b[?2004h" ], [ 3.146565, "\u001b[?1l\u001b>" ], [ 0.000424, "\u001b[?2004l\r\r\n" ], [ 0.000795, "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" ], [ 0.001307, "\u001b]7;\u0007" ], [ 0.001444, "\u001b]7;\u0007" ], [ 8.9e-05, "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[K" ], [ 0.00011, "\u001b[?1h\u001b=" ], [ 0.000263, "\u001b[?2004h" ], [ 0.441809, "\u001b[1m#\u001b[0m\u001b[39m" ], [ 0.136081, "\b\u001b[1m#\u001b[1m#\u001b[0m\u001b[39m" ], [ 0.375389, "\b\b\u001b[1m#\u001b[1m#\u001b[1m \u001b[0m\u001b[39m" ], [ 0.284554, "\b\u001b[1m \u001b[1mM\u001b[0m\u001b[39m" ], [ 0.395833, "\b\u001b[1mM\u001b[1mA\u001b[0m\u001b[39m" ], [ 0.434316, "\b\u001b[1mA\u001b[1mINTENANCE ##\u001b[0m\u001b[39m" ], [ 1.471226, "\u001b[?1l\u001b>" ], [ 0.00055, "\u001b[?2004l\r\r\n" ], [ 0.000605, "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" ], [ 0.001464, "\u001b]7;\u0007" ], [ 0.00092, "\u001b]7;\u0007" ], [ 9.4e-05, "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[K" ], [ 0.000104, "\u001b[?1h\u001b=" ], [ 0.000309, "\u001b[?2004h" ], [ 0.977805, "\u001b[?1l\u001b>" ], [ 0.000452, "\u001b[?2004l\r\r\n" ], [ 0.000828, "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" ], [ 0.001436, "\u001b]7;\u0007" ], [ 0.001464, "\u001b]7;\u0007" ], [ 3.8e-05, "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[K" ], [ 0.000171, "\u001b[?1h\u001b=" ], [ 0.000247, "\u001b[?2004h" ], [ 0.221358, "\u001b[1m#\u001b[0m\u001b[39m" ], [ 0.374414, "\b\u001b[1m#\u001b[1m \u001b[0m\u001b[39m" ], [ 0.189751, "\b\b\u001b[1m#\u001b[1m \u001b[1mS\u001b[0m\u001b[39m" ], [ 0.087275, "\b\u001b[1mS\u001b[1mo\u001b[0m\u001b[39m" ], [ 0.140008, "\b\u001b[1mo\u001b[1mm\u001b[0m\u001b[39m" ], [ 0.150891, "\b\u001b[1mm\u001b[1me\u001b[0m\u001b[39m" ], [ 0.387855, "\b\u001b[1me\u001b[1mt\u001b[0m\u001b[39m" ], [ 0.204067, "\b\u001b[1mt\u001b[1mi\u001b[0m\u001b[39m" ], [ 0.127209, "\b\u001b[1mi\u001b[1mm\u001b[0m\u001b[39m" ], [ 0.073999, "\b\u001b[1mm\u001b[1me\u001b[0m\u001b[39m" ], [ 0.130356, "\b\u001b[1me\u001b[1ms\u001b[0m\u001b[39m" ], [ 0.224406, "\b\u001b[1ms\u001b[1m backups get broken or we want a regular \"checkup\" that everythin\u001b[1mg\u001b[1m is okay…\u001b[0m\u001b[39m\u001b[K" ], [ 2.361948, "\u001b[?1l\u001b>" ], [ 0.000402, "\u001b[?2004l\r\r\n" ], [ 0.000743, "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" ], [ 0.001212, "\u001b]7;\u0007" ], [ 0.000923, "\u001b]7;\u0007" ], [ 1.3e-05, "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[K" ], [ 8.3e-05, "\u001b[?1h\u001b=" ], [ 0.000321, "\u001b[?2004h" ], [ 2.246766, "\u001b[1m\u001b[31mb\u001b[0m\u001b[39m" ], [ 0.18622, "\b\u001b[1m\u001b[31mb\u001b[1m\u001b[31mo\u001b[0m\u001b[39m" ], [ 0.121068, "\b\b\u001b[1m\u001b[31mb\u001b[1m\u001b[31mo\u001b[1m\u001b[31mr\u001b[0m\u001b[39m" ], [ 0.146401, "\b\b\b\u001b[0m\u001b[32mb\u001b[0m\u001b[32mo\u001b[0m\u001b[32mr\u001b[32mg\u001b[39m" ], [ 0.255479, " " ], [ 0.268833, "c" ], [ 0.154418, "h" ], [ 0.106649, "e" ], [ 0.142762, "c" ], [ 0.306359, "k" ], [ 0.697455, " " ], [ 1.113236, "-" ], [ 0.768765, "v" ], [ 0.477353, " " ], [ 0.387303, ":" ], [ 0.102251, ":" ], [ 0.749971, "\u001b[?1l\u001b>" ], [ 0.001961, "\u001b[?2004l\r\r\n" ], [ 0.000798, "\u001b]2;borg check -v ::\u0007\u001b]1;borg\u0007" ], [ 0.54272, "Starting repository 
check\r\n" ], [ 1.152819, "Starting repository index check\r\n" ], [ 0.00038, "Completed repository check, no problems found.\r\n" ], [ 0.000129, "Starting archive consistency check...\r\n" ], [ 0.095799, "Analyzing archive backup1 (1/6)\r\n" ], [ 0.109358, "Analyzing archive backup2 (2/6)\r\n" ], [ 0.036555, "Analyzing archive backup3 (3/6)\r\n" ], [ 0.03649, "Analyzing archive rugk-2017-07-16T18:51:34 (4/6)\r\n" ], [ 0.000491, "Analyzing archive rugk-2017-07-16T18:52:19 (5/6)\r\n" ], [ 0.000729, "Analyzing archive backup-block-device (6/6)\r\n" ], [ 0.00119, "Archive consistency check complete, no problems found.\r\n" ], [ 0.081895, "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" ], [ 0.001153, "\u001b]7;\u0007" ], [ 0.000924, "\u001b]7;\u0007" ], [ 0.000108, "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[K" ], [ 8.6e-05, "\u001b[?1h\u001b=" ], [ 0.00022, "\u001b[?2004h" ], [ 2.243609, "\u001b[?1l\u001b>" ], [ 0.000511, "\u001b[?2004l\r\r\n" ], [ 0.000535, "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" ], [ 0.001789, "\u001b]7;\u0007" ], [ 0.00157, "\u001b]7;\u0007" ], [ 0.000139, "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[K" ], [ 7.7e-05, "\u001b[?1h\u001b=" ], [ 0.00033, "\u001b[?2004h" ], [ 0.326751, "\u001b[1m#\u001b[0m\u001b[39m" ], [ 0.24289, "\b\u001b[1m#\u001b[1m \u001b[0m\u001b[39m" ], [ 0.285802, "\b\b\u001b[1m#\u001b[1m \u001b[1mN\u001b[0m\u001b[39m" ], [ 0.191158, "\b\u001b[1mN\u001b[1me\u001b[0m\u001b[39m" ], [ 0.184029, "\b\u001b[1me\u001b[1mx\u001b[0m\u001b[39m" ], [ 0.16373, "\b\u001b[1mx\u001b[1mt\u001b[0m\u001b[39m" ], [ 0.239936, "\b\u001b[1mt\u001b[1m \u001b[0m\u001b[39m" ], [ 0.27885, "\b\u001b[1m \u001b[1mp\u001b[0m\u001b[39m" ], [ 0.12665, "\b\u001b[1mp\u001b[1mr\u001b[0m\u001b[39m" ], [ 0.154792, "\b\u001b[1mr\u001b[1mo\u001b[0m\u001b[39m" ], [ 0.372203, "\b\u001b[1mo\u001b[1mblem: Usually you do not have infinite disk space. So you may need\u001b[1m \u001b[1mto prune your archive…\u001b[0m\u001b[39m\u001b[K" ], [ 1.956234, "\u001b[?1l\u001b>" ], [ 0.000446, "\u001b[?2004l\r\r\n" ], [ 0.000607, "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" ], [ 0.001281, "\u001b]7;\u0007" ], [ 0.000983, "\u001b]7;\u0007" ], [ 2.2e-05, "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[K" ], [ 0.000142, "\u001b[?1h\u001b=" ], [ 0.00032, "\u001b[?2004h" ], [ 1.137641, "\u001b[1m#\u001b[0m\u001b[39m" ], [ 0.26675, "\b\u001b[1m#\u001b[1m \u001b[0m\u001b[39m" ], [ 0.151609, "\b\b\u001b[1m#\u001b[1m \u001b[1mY\u001b[0m\u001b[39m" ], [ 0.11765, "\b\u001b[1mY\u001b[1mo\u001b[0m\u001b[39m" ], [ 0.158458, "\b\u001b[1mo\u001b[1mu\u001b[0m\u001b[39m" ], [ 0.149615, "\b\u001b[1mu\u001b[1m \u001b[0m\u001b[39m" ], [ 0.080657, "\b\u001b[1m \u001b[1mc\u001b[0m\u001b[39m" ], [ 0.144379, "\b\u001b[1mc\u001b[1ma\u001b[0m\u001b[39m" ], [ 0.104266, "\b\u001b[1ma\u001b[1mn\u001b[0m\u001b[39m" ], [ 0.132218, "\b\u001b[1mn\u001b[1m \u001b[0m\u001b[39m" ], [ 0.202965, "\b\u001b[1m \u001b[1mt\u001b[0m\u001b[39m" ], [ 0.17807, "\b\u001b[1mt\u001b[1mu\u001b[0m\u001b[39m" ], [ 0.123814, "\b\u001b[1mu\u001b[1mn\u001b[0m\u001b[39m" ], [ 0.325016, "\b\u001b[1mn\u001b[1me this in every detail. See the docs for details. 
Here only a s\u001b[1mi\u001b[1mmple example:\u001b[0m\u001b[39m\u001b[K" ], [ 1.91505, "\u001b[?1l\u001b>" ], [ 0.000406, "\u001b[?2004l\r\r\n" ], [ 0.000684, "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" ], [ 0.001347, "\u001b]7;\u0007" ], [ 0.001084, "\u001b]7;\u0007" ], [ 0.000116, "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[K" ], [ 0.000118, "\u001b[?1h\u001b=" ], [ 0.000246, "\u001b[?2004h" ], [ 2.556304, "\u001b[1m\u001b[31mb\u001b[0m\u001b[39m" ], [ 0.198214, "\b\u001b[1m\u001b[31mb\u001b[1m\u001b[31mo\u001b[0m\u001b[39m" ], [ 0.125589, "\b\b\u001b[1m\u001b[31mb\u001b[1m\u001b[31mo\u001b[1m\u001b[31mr\u001b[0m\u001b[39m" ], [ 0.147156, "\b\b\b\u001b[0m\u001b[32mb\u001b[0m\u001b[32mo\u001b[0m\u001b[32mr\u001b[32mg\u001b[39m" ], [ 0.202848, " " ], [ 0.369539, "p" ], [ 0.228714, "r" ], [ 0.184236, "u" ], [ 0.154014, "n" ], [ 0.136362, "e" ], [ 0.94169, " " ], [ 0.44829, "-" ], [ 0.112062, "-" ], [ 0.37454, "l" ], [ 0.157195, "i" ], [ 0.116633, "s" ], [ 0.193515, "t" ], [ 0.486369, " " ], [ 0.442107, "-" ], [ 0.12257, "-" ], [ 0.403774, "k" ], [ 0.214488, "e" ], [ 0.771743, "e" ], [ 0.349591, "p" ], [ 0.352253, "-" ], [ 0.201267, "l" ], [ 0.109728, "a" ], [ 0.146296, "s" ], [ 0.130476, "t" ], [ 0.234998, " " ], [ 0.264266, "1" ], [ 0.429572, " " ], [ 0.505667, "-" ], [ 0.105697, "-" ], [ 0.294354, "d" ], [ 0.178175, "r" ], [ 0.239011, "y" ], [ 0.561933, "-" ], [ 0.220564, "r" ], [ 0.172983, "u" ], [ 0.138969, "n" ], [ 0.891028, "\u001b[?1l\u001b>" ], [ 0.004152, "\u001b[?2004l\r\r\n" ], [ 0.000975, "\u001b]2;borg prune --list --keep-last 1 --dry-run\u0007\u001b]1;borg\u0007" ], [ 0.658906, "Keeping archive: backup-block-device Sun, 2017-07-16 18:52:58 [5fd9732b4809252742a7cb3fadf2a971dd6371afd11a07944c0b5803d57c240f]\r\n" ], [ 0.000155, "Would prune: rugk-2017-07-16T18:52:19 Sun, 2017-07-16 18:52:19 [0de98f590b004ad7545f2013c4c9f2d4e3eed1415d177c89d6c2b7ff05918d2e]\r\n" ], [ 0.000118, "Would prune: rugk-2017-07-16T18:51:34 Sun, 2017-07-16 18:51:34 [d054cc411324d4bd848b39d1c9cad909073f9ff1a1a503a676d3e050be140396]\r\n" ], [ 6.5e-05, "Would prune: backup3 Fri, 2017-07-14 21:55:37 [36cd8fdf9b8b2e3bbb3fc2bb600acd48609efaf3a0880f900e0701a47ff69d4d]\r\n" ], [ 7.1e-05, "Would prune: backup2 Fri, 2017-07-14 21:54:56 [5aaf03d1c710cf774f9c9ff1c6317b621c14e519c6bac459f6d64b31e3bbd200]\r\n" ], [ 7.1e-05, "Would prune: backup1 Fri, 2017-07-14 21:54:06 [9758c7db339a066360bffad17b2ffac4fb368c6722c0be3a47a7a9b631f06407]\r\n" ], [ 0.047362, "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" ], [ 0.001109, "\u001b]7;\u0007" ], [ 0.00093, "\u001b]7;\u0007" ], [ 7.6e-05, "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[K" ], [ 8.8e-05, "\u001b[?1h\u001b=" ], [ 0.000163, "\u001b[?2004h" ], [ 2.173126, "\u001b[1m#\u001b[0m\u001b[39m" ], [ 0.420696, "\b\u001b[1m#\u001b[1m \u001b[0m\u001b[39m" ], [ 0.658252, "\b\b\u001b[1m#\u001b[1m \u001b[1mW\u001b[0m\u001b[39m" ], [ 0.186236, "\b\u001b[1mW\u001b[1mh\u001b[0m\u001b[39m" ], [ 0.09843, "\b\u001b[1mh\u001b[1me\u001b[0m\u001b[39m" ], [ 0.143515, "\b\u001b[1me\u001b[1mn\u001b[0m\u001b[39m" ], [ 0.153626, "\b\u001b[1mn\u001b[1m \u001b[0m\u001b[39m" ], [ 0.136407, "\b\u001b[1m \u001b[1ma\u001b[0m\u001b[39m" ], [ 0.170555, "\b\u001b[1ma\u001b[1mc\u001b[0m\u001b[39m" ], [ 0.157309, "\b\u001b[1mc\u001b[1mtually executing it in a script, you have to use it without the --dry\u001b[1m-\u001b[1mrun option, of course.\u001b[0m\u001b[39m\u001b[K" ], [ 2.08243, "\u001b[?1l\u001b>" ], [ 0.000512, "\u001b[?2004l\r\r\n" ], [ 0.000552, 
"\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" ], [ 0.001375, "\u001b]7;\u0007" ], [ 0.000922, "\u001b]7;\u0007" ], [ 3.6e-05, "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[K" ], [ 8.1e-05, "\u001b[?1h\u001b=" ], [ 0.00026, "\u001b[?2004h" ], [ 1.169356, "\u001b[?1l\u001b>" ], [ 0.000602, "\u001b[?2004l\r\r\n" ], [ 0.000917, "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" ], [ 0.001594, "\u001b]7;\u0007" ], [ 0.001826, "\u001b]7;\u0007" ], [ 7.1e-05, "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[K" ], [ 0.000204, "\u001b[?1h\u001b=" ], [ 0.000349, "\u001b[?2004h" ], [ 0.464206, "\u001b[1m#\u001b[0m\u001b[39m" ], [ 0.135956, "\b\u001b[1m#\u001b[1m#\u001b[0m\u001b[39m" ], [ 0.484249, "\b\b\u001b[1m#\u001b[1m#\u001b[1m \u001b[0m\u001b[39m" ], [ 0.299809, "\b\u001b[1m \u001b[1mR\u001b[0m\u001b[39m" ], [ 0.199072, "\b\u001b[1mR\u001b[1mE\u001b[0m\u001b[39m" ], [ 0.620669, "\b\u001b[1mE\u001b[1mSTORE ##\u001b[0m\u001b[39m" ], [ 0.924028, "\u001b[?1l\u001b>" ], [ 0.000399, "\u001b[?2004l\r\r\n" ], [ 0.000744, "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" ], [ 0.001142, "\u001b]7;\u0007" ], [ 0.000834, "\u001b]7;\u0007" ], [ 9.1e-05, "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[K" ], [ 0.000124, "\u001b[?1h\u001b=" ], [ 0.000294, "\u001b[?2004h" ], [ 0.797042, "\u001b[?1l\u001b>" ], [ 0.000325, "\u001b[?2004l\r\r\n" ], [ 0.001543, "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" ], [ 0.002662, "\u001b]7;\u0007" ], [ 0.001568, "\u001b]7;\u0007" ], [ 4.2e-05, "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[K" ], [ 9.4e-05, "\u001b[?1h\u001b=" ], [ 0.000185, "\u001b[?2004h" ], [ 0.705049, "\u001b[1m#\u001b[0m\u001b[39m" ], [ 0.50212, "\b\u001b[1m#\u001b[1m \u001b[0m\u001b[39m" ], [ 1.210452, "\b\b\u001b[1m#\u001b[1m \u001b[1mW\u001b[0m\u001b[39m" ], [ 0.1987, "\b\u001b[1mW\u001b[1mh\u001b[0m\u001b[39m" ], [ 0.12116, "\b\u001b[1mh\u001b[1me\u001b[0m\u001b[39m" ], [ 0.152173, "\b\u001b[1me\u001b[1mn\u001b[0m\u001b[39m" ], [ 0.16582, "\b\u001b[1mn\u001b[1m \u001b[0m\u001b[39m" ], [ 0.378037, "\b\u001b[1m \u001b[1my\u001b[0m\u001b[39m" ], [ 0.330829, "\b\u001b[1my\u001b[1mo\u001b[0m\u001b[39m" ], [ 0.180945, "\b\u001b[1mo\u001b[1mu\u001b[0m\u001b[39m" ], [ 0.152701, "\b\u001b[1mu\u001b[1m \u001b[0m\u001b[39m" ], [ 0.121298, "\b\u001b[1m \u001b[1mw\u001b[0m\u001b[39m" ], [ 0.148067, "\b\u001b[1mw\u001b[1ma\u001b[0m\u001b[39m" ], [ 0.233865, "\b\u001b[1ma\u001b[1mnt to see the diff between two archives use this command.\u001b[0m\u001b[39m" ], [ 1.947763, "\u001b[?1l\u001b>" ], [ 0.000408, "\u001b[?2004l\r\r\n" ], [ 0.000607, "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" ], [ 0.001033, "\u001b]7;\u0007" ], [ 0.000979, "\u001b]7;\u0007" ], [ 0.000127, "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[K" ], [ 7.2e-05, "\u001b[?1h\u001b=" ], [ 0.000278, "\u001b[?2004h" ], [ 0.693036, "\u001b[1m#\u001b[0m\u001b[39m" ], [ 0.275798, "\b\u001b[1m#\u001b[1m \u001b[0m\u001b[39m" ], [ 0.281158, "\b\b\u001b[1m#\u001b[1m \u001b[1mE\u001b[0m\u001b[39m" ], [ 0.386709, "\b\u001b[1mE\u001b[1m.\u001b[0m\u001b[39m" ], [ 0.136187, "\b\u001b[1m.\u001b[1mg\u001b[0m\u001b[39m" ], [ 0.262011, "\b\u001b[1mg\u001b[1m.\u001b[0m\u001b[39m" ], [ 0.234889, "\b\u001b[1m.\u001b[1m \u001b[0m\u001b[39m" ], [ 0.361971, "\b\u001b[1m \u001b[1mw\u001b[0m\u001b[39m" ], [ 0.162798, "\b\u001b[1mw\u001b[1mh\u001b[0m\u001b[39m" ], [ 0.077265, "\b\u001b[1mh\u001b[1ma\u001b[0m\u001b[39m" ], [ 0.148774, "\b\u001b[1ma\u001b[1mt\u001b[0m\u001b[39m" ], [ 0.34541, "\b\u001b[1mt\u001b[1m happened 
between the first two backups?\u001b[0m\u001b[39m" ], [ 1.295996, "\u001b[?1l\u001b>" ], [ 0.000733, "\u001b[?2004l\r\r\n" ], [ 0.001102, "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" ], [ 0.001634, "\u001b]7;\u0007" ], [ 0.000634, "\u001b]7;\u0007" ], [ 5.6e-05, "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[K" ], [ 7.4e-05, "\u001b[?1h\u001b=" ], [ 0.000303, "\u001b[?2004h" ], [ 0.441685, "\u001b[1m\u001b[31mb\u001b[0m\u001b[39m" ], [ 0.182795, "\b\u001b[1m\u001b[31mb\u001b[1m\u001b[31mo\u001b[0m\u001b[39m" ], [ 0.072867, "\b\b\u001b[1m\u001b[31mb\u001b[1m\u001b[31mo\u001b[1m\u001b[31mr\u001b[0m\u001b[39m" ], [ 0.161104, "\b\b\b\u001b[0m\u001b[32mb\u001b[0m\u001b[32mo\u001b[0m\u001b[32mr\u001b[32mg\u001b[39m" ], [ 0.179655, " " ], [ 0.154676, "d" ], [ 0.132421, "i" ], [ 0.124239, "f" ], [ 0.13999, "f" ], [ 0.624444, " " ], [ 0.862302, ":" ], [ 0.1169, ":" ], [ 0.274626, "b" ], [ 0.100778, "a" ], [ 0.188526, "c" ], [ 0.097402, "k" ], [ 0.144999, "u" ], [ 0.22317, "p" ], [ 0.167969, "1" ], [ 0.44642, " " ], [ 0.240129, "b" ], [ 0.164579, "a" ], [ 0.190471, "c" ], [ 0.136211, "k" ], [ 0.12257, "u" ], [ 0.258587, "p" ], [ 0.215453, "2" ], [ 1.160869, "\u001b[?1l\u001b>" ], [ 0.001983, "\u001b[?2004l\r\r\n" ], [ 0.000801, "\u001b]2;borg diff ::backup1 backup2\u0007\u001b]1;borg\u0007" ], [ 0.717522, "added 20 B Wallpaper/newfile.txt\r\n" ], [ 0.044186, "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" ], [ 0.001157, "\u001b]7;\u0007" ], [ 0.000949, "\u001b]7;\u0007" ], [ 0.000108, "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[K" ], [ 9.3e-05, "\u001b[?1h\u001b=" ], [ 0.000147, "\u001b[?2004h" ], [ 1.545435, "\u001b[1m#\u001b[0m\u001b[39m" ], [ 0.26435, "\b\u001b[1m#\u001b[1m \u001b[0m\u001b[39m" ], [ 0.178864, "\b\b\u001b[1m#\u001b[1m \u001b[1mA\u001b[0m\u001b[39m" ], [ 0.161899, "\b\u001b[1mA\u001b[1mh\u001b[0m\u001b[39m" ], [ 0.240289, "\b\u001b[1mh\u001b[1m,\u001b[0m\u001b[39m" ], [ 0.132971, "\b\u001b[1m,\u001b[1m \u001b[0m\u001b[39m" ], [ 0.115812, "\b\u001b[1m \u001b[1mw\u001b[0m\u001b[39m" ], [ 0.111227, "\b\u001b[1mw\u001b[1me\u001b[0m\u001b[39m" ], [ 0.159647, "\b\u001b[1me\u001b[1m added a file, right…\u001b[0m\u001b[39m" ], [ 0.97686, "\u001b[?1l\u001b>" ], [ 0.000441, "\u001b[?2004l\r\r\n" ], [ 0.00091, "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" ], [ 0.001031, "\u001b]7;\u0007" ], [ 0.000995, "\u001b]7;\u0007" ], [ 2.5e-05, "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[K" ], [ 0.000141, "\u001b[?1h\u001b=" ], [ 0.000303, "\u001b[?2004h" ], [ 6.370198, "\u001b[?1l\u001b>" ], [ 0.000854, "\u001b[?2004l\r\r\n" ], [ 0.000815, "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" ], [ 0.003101, "\u001b]7;\u0007" ], [ 0.002831, "\u001b]7;\u0007" ], [ 0.000107, "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[K" ], [ 0.000314, "\u001b[?1h\u001b=" ], [ 0.000499, "\u001b[?2004h" ], [ 0.580198, "\u001b[1m#\u001b[0m\u001b[39m" ], [ 0.240323, "\b\u001b[1m#\u001b[1m \u001b[0m\u001b[39m" ], [ 0.29592, "\b\b\u001b[1m#\u001b[1m \u001b[1mT\u001b[0m\u001b[39m" ], [ 0.135389, "\b\u001b[1mT\u001b[1mh\u001b[0m\u001b[39m" ], [ 0.115437, "\b\u001b[1mh\u001b[1me\u001b[0m\u001b[39m" ], [ 0.157526, "\b\u001b[1me\u001b[1mr\u001b[0m\u001b[39m" ], [ 0.624235, "\b\u001b[1mr\u001b[1me\u001b[0m\u001b[39m" ], [ 0.282742, "\b\u001b[1me\u001b[1m \u001b[0m\u001b[39m" ], [ 0.133006, "\b\u001b[1m \u001b[1ma\u001b[0m\u001b[39m" ], [ 0.206434, "\b\u001b[1ma\u001b[1mr\u001b[0m\u001b[39m" ], [ 0.13301, "\b\u001b[1mr\u001b[1me\u001b[0m\u001b[39m" ], [ 0.255991, "\b\u001b[1me\u001b[1m 
\u001b[0m\u001b[39m" ], [ 0.196416, "\b\u001b[1m \u001b[1ma\u001b[0m\u001b[39m" ], [ 0.275594, "\b\u001b[1ma\u001b[1mlso other ways to extract the data.\u001b[0m\u001b[39m" ], [ 0.932018, "\u001b[?1l\u001b>" ], [ 0.001354, "\u001b[?2004l\r\r\n" ], [ 0.001071, "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" ], [ 0.00297, "\u001b]7;\u0007" ], [ 0.002675, "\u001b]7;\u0007" ], [ 0.000154, "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[K" ], [ 0.000231, "\u001b[?1h\u001b=" ], [ 0.000895, "\u001b[?2004h" ], [ 1.021752, "\u001b[1m#\u001b[0m\u001b[39m" ], [ 0.238058, "\b\u001b[1m#\u001b[1m \u001b[0m\u001b[39m" ], [ 0.245484, "\b\b\u001b[1m#\u001b[1m \u001b[1mE\u001b[0m\u001b[39m" ], [ 0.719467, "\b\u001b[1mE\u001b[1m.\u001b[0m\u001b[39m" ], [ 0.151468, "\b\u001b[1m.\u001b[1mg\u001b[0m\u001b[39m" ], [ 0.183213, "\b\u001b[1mg\u001b[1m.\u001b[0m\u001b[39m" ], [ 0.599958, "\b\u001b[1m.\u001b[1m \u001b[0m\u001b[39m" ], [ 0.316279, "\b\u001b[1m \u001b[1ma\u001b[0m\u001b[39m" ], [ 0.166858, "\b\u001b[1ma\u001b[1ms\u001b[0m\u001b[39m" ], [ 0.551272, "\b\u001b[1ms\u001b[1m a tar archive.\u001b[0m\u001b[39m" ], [ 0.938861, "\u001b[?1l\u001b>" ], [ 0.000638, "\u001b[?2004l\r\r\n" ], [ 0.000793, "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" ], [ 0.001159, "\u001b]7;\u0007" ], [ 0.000867, "\u001b]7;\u0007" ], [ 9.1e-05, "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[K" ], [ 8.5e-05, "\u001b[?1h\u001b=" ], [ 0.000282, "\u001b[?2004h" ], [ 0.860998, "\u001b[1m\u001b[31mb\u001b[0m\u001b[39m" ], [ 0.189263, "\b\u001b[1m\u001b[31mb\u001b[1m\u001b[31mo\u001b[0m\u001b[39m" ], [ 0.11245, "\b\b\u001b[1m\u001b[31mb\u001b[1m\u001b[31mo\u001b[1m\u001b[31mr\u001b[0m\u001b[39m" ], [ 0.133531, "\b\b\b\u001b[0m\u001b[32mb\u001b[0m\u001b[32mo\u001b[0m\u001b[32mr\u001b[32mg\u001b[39m" ], [ 0.62438, " " ], [ 0.295845, "e" ], [ 0.165874, "x" ], [ 0.180501, "p" ], [ 0.166254, "o" ], [ 0.27793, "r" ], [ 0.113477, "t" ], [ 0.46559, "-" ], [ 0.34577, "t" ], [ 0.148398, "a" ], [ 0.17144, "r" ], [ 0.920527, " " ], [ 0.40208, "-" ], [ 0.108683, "-" ], [ 0.326944, "p" ], [ 0.195982, "r" ], [ 0.175632, "o" ], [ 0.229442, "g" ], [ 0.133505, "r" ], [ 0.171995, "e" ], [ 0.244119, "s" ], [ 0.154514, "s" ], [ 0.579295, " " ], [ 0.575201, ":" ], [ 0.112098, ":" ], [ 0.355392, "b" ], [ 0.110008, "a" ], [ 0.172393, "c" ], [ 0.080739, "k" ], [ 0.134163, "u" ], [ 0.221434, "p" ], [ 0.276712, "2" ], [ 0.6747, " " ], [ 0.372614, "b" ], [ 0.09319, "a" ], [ 0.152876, "c" ], [ 0.089531, "k" ], [ 0.150747, "u" ], [ 0.233879, "p" ], [ 0.273301, "." ], [ 0.354416, "t" ], [ 0.107034, "a" ], [ 0.144993, "r" ], [ 0.463039, "." 
], [ 0.352906, "g" ], [ 0.133262, "z" ], [ 1.083854, "\u001b[?1l\u001b>" ], [ 0.004197, "\u001b[?2004l\r\r\n" ], [ 0.001109, "\u001b]2;borg export-tar --progress ::backup2 backup.tar.gz\u0007" ], [ 4.7e-05, "\u001b]1;borg\u0007" ], [ 0.679042, "Calculating size \r" ], [ 0.036244, " 0.0% Processing: Wallpaper/bigcollection/Macaws_USBingImage.jpg \r" ], [ 0.020857, " 0.1% Processing: Wallpaper/bigcollection/_A...r_the_town_of_Herrischried.jpg\r" ], [ 0.030544, " 0.2% Processing: Wallpaper/bigcollection/_A...her_Atlas__Marokko____Doug.jpg\r" ], [ 0.030864, " 0.3% Processing: Wallpaper/bigcollection/_A...ssar__S_d-Sulawesi__Indone.jpg\r" ], [ 0.030144, " 0.4% Processing: Wallpaper/bigcollection/_A..._N_he_von_Timimoun__Algeri.jpg\r" ], [ 0.027643, " 0.5% Processing: Wallpaper/bigcollection/_A...t_kleinen__aus_Servietten_.jpg\r" ], [ 0.03121, " 0.6% Processing: Wallpaper/bigcollection/_A...g_Norderoog__small_island_.jpg\r" ], [ 0.031343, " 0.7% Processing: Wallpaper/bigcollection/_A...im_Snowdonia-Nationalpark_.jpg\r" ], [ 0.031862, " 0.8% Processing: Wallpaper/bigcollection/_A...l_an_einem_Wasserloch_im_Q.jpg\r" ], [ 0.034847, " 0.9% Processing: Wallpaper/bigcollection/_A...nten____James_HagerOffset_.jpg\r" ], [ 0.033989, " 1.0% Processing: Wallpaper/bigcollection/_A...nen__Masai_Mara_National_R.jpg\r" ], [ 0.027388, " 1.1% Processing: Wallpaper/bigcollection/_A...ard_im_Londolozi-Wildreser.jpg\r" ], [ 0.026632, " 1.2% Processing: Wallpaper/bigcollection/_A...orning_fog__Aidling__Pfaff.jpg\r" ], [ 0.030864, " 1.3% Processing: Wallpaper/bigcollection/_A...hutzgebiet_Lewa_Wildlife_C.jpg\r" ], [ 0.029943, " 1.4% Processing: Wallpaper/bigcollection/_A...in_Delhi__Indien____AirPan.jpg\r" ], [ 0.035404, " 1.5% Processing: Wallpaper/bigcollection/_A...morial_Gardens_in_den_Dand.jpg\r" ], [ 0.030931, " 1.6% Processing: Wallpaper/bigcollection/_A...rthumberland__England____A.jpg\r" ], [ 0.035605, " 1.7% Processing: Wallpaper/bigcollection/_A...berg__Bayern__Deutschland_.jpg\r" ], [ 0.026827, " 1.8% Processing: Wallpaper/bigcollection/_A...ns_am_Little_Missouri_Rive.jpg\r" ], [ 0.030196, " 1.9% Processing: Wallpaper/bigcollection/_A...toberfest_in_Munich__Ger 1.jpg\r" ], [ 0.025763, " 2.0% Processing: Wallpaper/bigcollection/_A...toberfest_in_Munich__Ger 2.jpg\r" ], [ 0.025306, " 2.1% Processing: Wallpaper/bigcollection/_A...toberfest_in_Munich__Ger 3.jpg\r" ], [ 0.027286, " 2.2% Processing: Wallpaper/bigcollection/_A...Oktoberfest_in_Munich__Ger.jpg\r" ], [ 0.02806, " 2.3% Processing: Wallpaper/bigcollection/_A..._Florida-Scheibenanemone__.jpg\r" ], [ 0.032994, " 2.4% Processing: Wallpaper/bigcollection/_A...n__Nationalpark_Ankarafant.jpg\r" ], [ 0.033538, " 2.5% Processing: Wallpaper/bigcollection/_A..._der_N_he_von_Page__Arizon.jpg\r" ], [ 0.030034, " 2.6% Processing: Wallpaper/bigcollection/_A...r_Inselgruppe_L_archipel_d.jpg\r" ], [ 0.030477, " 2.7% Processing: Wallpaper/bigcollection/_A...land____Hercules_MilasAlam.jpg\r" ], [ 0.033376, " 2.8% Processing: Wallpaper/bigcollection/_A...maguchiFlickr_OpenGetty_Im.jpg\r" ], [ 0.032919, " 2.9% Processing: Wallpaper/bigcollection/_A...imetersubmillimeter_Array_.jpg\r" ], [ 0.027034, " 4.7% Processing: Wallpaper/bigcollection/_B...rairie_Creek_Redwoods_Stat.jpg\r" ], [ 0.034892, " 4.8% Processing: Wallpaper/bigcollection/_B...__Montana__USA____Jeff_Kro.jpg\r" ], [ 0.031042, " 4.9% Processing: Wallpaper/bigcollection/_B...gatta__Golf_von_Triest__It.jpg\r" ], [ 0.030521, " 5.0% Processing: Wallpaper/bigcollection/_B...and__Schleswig-Holstein__D.jpg\r" ], [ 
0.028755, " 5.1% Processing: Wallpaper/bigcollection/_B..._Islands__Irland____Bart_B.jpg\r" ], [ 0.031129, " 5.2% Processing: Wallpaper/bigcollection/_B...e_im_Glacier-Nationalpark_.jpg\r" ], [ 0.032588, " 5.3% Processing: Wallpaper/bigcollection/_B...Nationalpark_Bayerischer_W.jpg\r" ], [ 0.025077, " 5.4% Processing: Wallpaper/bigcollection/_B...Arena_bei_Nacht__Stockholm.jpg\r" ], [ 0.027803, " 5.5% Processing: Wallpaper/bigcollection/_B...ner_Fernsehturm_w_hrend_de.jpg\r" ], [ 0.031262, " 5.6% Processing: Wallpaper/bigcollection/_B...nd__Bayern__Deutschland_mi.jpg\r" ], [ 0.031721, " 5.7% Processing: Wallpaper/bigcollection/_B...er__Schwarzwald__Baden-W_r.jpg\r" ], [ 0.032768, " 5.8% Processing: Wallpaper/bigcollection/_B...ebirge_oberhalb_der_Dad_s-.jpg\r" ], [ 0.030763, " 5.9% Processing: Wallpaper/bigcollection/_B...ngerburgbahn__Innsbruck___.jpg\r" ], [ 0.028673, " 7.6% Processing: Wallpaper/bigcollection/_B...rn_des__Wilson_Stump___ein.jpg\r" ], [ 0.029182, " 7.7% Processing: Wallpaper/bigcollection/_B...t_Jefferson-Wildschutzgebi.jpg\r" ], [ 0.029225, " 11.2% Processing: Wallpaper/bigcollection/_B...Saloum-Delta__Senegal____B.jpg\r" ], [ 0.030837, " 11.3% Processing: Wallpaper/bigcollection/_B..._Venedig__Italien____Digit.jpg\r" ], [ 0.034033, " 11.4% Processing: Wallpaper/bigcollection/_B..._Koblenz_und_Trier__Rheinl.jpg\r" ], [ 0.028958, " 11.5% Processing: Wallpaper/bigcollection/_B..._Baden-W_rttemberg__Deutsc.jpg\r" ], [ 0.025933, " 11.6% Processing: Wallpaper/bigcollection/_B..._Bisingen__Baden-W_rttembe.jpg\r" ], [ 0.030318, " 11.7% Processing: Wallpaper/bigcollection/_B...in_Koknese__Lettland____An.jpg\r" ], [ 0.029535, " 11.8% Processing: Wallpaper/bigcollection/_C...__Deutschland____R_diger_H.jpg\r" ], [ 0.032432, " 11.9% Processing: Wallpaper/bigcollection/_C...Toulouse__D_partement_Haut.jpg\r" ], [ 0.032966, " 12.0% Processing: Wallpaper/bigcollection/_C...pring__Germany____Boris_St.jpg\r" ], [ 0.024881, " 12.1% Processing: Wallpaper/bigcollection/_C...pring__Germany____Boris_St.jpg\r" ], [ 0.02818, " 12.2% Processing: Wallpaper/bigcollection/_C...Mallorca__Balearische_Inse.jpg\r" ], [ 0.029353, " 12.3% Processing: Wallpaper/bigcollection/_C...A__ESA__N._Smith__Universi.jpg\r" ], [ 0.03626, " 12.4% Processing: Wallpaper/bigcollection/_C...gebr_cke_bei_Ballintoy__Co.jpg\r" ], [ 0.025838, " 12.5% Processing: Wallpaper/bigcollection/_C...gebr_cke_bei_Ballintoy__Co.jpg\r" ], [ 0.027176, " 12.6% Processing: Wallpaper/bigcollection/_C...lona__Spanien____Nora_De_A.jpg\r" ], [ 0.0298, " 12.7% Processing: Wallpaper/bigcollection/_C...rcia__Nationalpark_Monti_S.jpg\r" ], [ 0.027672, " 12.8% Processing: Wallpaper/bigcollection/_C...vinz_Potenza__Italien____F.jpg\r" ], [ 0.032259, " 12.9% Processing: Wallpaper/bigcollection/_C...semite-Nationalpark__Kalif.jpg\r" ], [ 0.031451, " 13.0% Processing: Wallpaper/bigcollection/_C...um_Ludwig__Cologne__German.jpg\r" ], [ 0.030096, " 13.1% Processing: Wallpaper/bigcollection/_C..._Ludwig__Cologne__North_ 1.jpg\r" ], [ 0.028235, " 15.1% Processing: Wallpaper/bigcollection/_D...n_Hannover_bei_Nacht____Ma.jpg\r" ], [ 0.028761, " 15.2% Processing: Wallpaper/bigcollection/_D...rieb_befindliche_Opernhaus.jpg\r" ], [ 0.027439, " 15.3% Processing: Wallpaper/bigcollection/_D...esert_View_Watchtower__Gra.jpg\r" ], [ 0.028598, " 15.4% Processing: Wallpaper/bigcollection/_D..._Provinz_Shaanxi__Volksrep.jpg\r" ], [ 0.031617, " 15.5% Processing: Wallpaper/bigcollection/_D...gr__t_den_Hund_Sudo_in_Mel.jpg\r" ], [ 0.032865, " 17.5% Processing: 
Wallpaper/bigcollection/_D...s_du_Tarn__Nationalpark_Ce.jpg\r" ], [ 0.031736, " 17.6% Processing: Wallpaper/bigcollection/_D..._he_von_Sens__D_partement_.jpg\r" ], [ 0.030474, " 17.7% Processing: Wallpaper/bigcollection/_D...__Wales__Vereinigtes_K_nig.jpg\r" ], [ 0.026112, " 20.5% Processing: Wallpaper/bigcollection/_E...Junges_schnuppert_an_einer.jpg\r" ], [ 0.027898, " 20.6% Processing: Wallpaper/bigcollection/_E...chen_versteckt_sich_in_ein.jpg\r" ], [ 0.027202, " 20.7% Processing: Wallpaper/bigcollection/_E...r_Frosch_in_einem_Wassertr.jpg\r" ], [ 0.027615, " 20.8% Processing: Wallpaper/bigcollection/_E...ekorierter_Saguaro-Kaktus_.jpg\r" ], [ 0.028446, " 20.9% Processing: Wallpaper/bigcollection/_E...e__berquert_den_Luangwa-Fl.jpg\r" ], [ 0.031808, " 21.0% Processing: Wallpaper/bigcollection/_E...ngstunnel_zur_Felsenkirche.jpg\r" ], [ 0.031065, " 22.7% Processing: Wallpaper/bigcollection/_E...n_Koblenz_and_Trier__Germa.jpg\r" ], [ 0.033059, " 22.8% Processing: Wallpaper/bigcollection/_E...n_Angola_und_Namibia____Fr.jpg\r" ], [ 0.035115, " 22.9% Processing: Wallpaper/bigcollection/_E...r_Olympischen_Spiele_1896_.jpg\r" ], [ 0.032507, " 23.0% Processing: Wallpaper/bigcollection/_E..._Fr_hlingskrokus__Almwiese.jpg\r" ], [ 0.028219, " 23.1% Processing: Wallpaper/bigcollection/_E...in_der_Meeresbucht_Cathedr.jpg\r" ], [ 0.029551, " 23.2% Processing: Wallpaper/bigcollection/_E..._Nationalpark_Bayerischer_.jpg\r" ], [ 0.02746, " 23.3% Processing: Wallpaper/bigcollection/_E...im_Nationalpark_Bayerische.jpg\r" ], [ 0.028081, " 23.4% Processing: Wallpaper/bigcollection/_E...im__umava-Nationalpark__Ts.jpg\r" ], [ 0.027796, " 23.5% Processing: Wallpaper/bigcollection/_E..._Emsland__Germany____Erh 1.jpg\r" ], [ 0.026053, " 25.4% Processing: Wallpaper/bigcollection/_F...chersee_J_kuls_rl_n__Islan.jpg\r" ], [ 0.029312, " 25.5% Processing: Wallpaper/bigcollection/_F...Yellowstone_Nationalpark__.jpg\r" ], [ 0.029189, " 25.6% Processing: Wallpaper/bigcollection/_F...yi__Provinz_Phang-nga__Tha.jpg\r" ], [ 0.029535, " 25.7% Processing: Wallpaper/bigcollection/_F..._Tree_River__Kitikmeot_Reg.jpg\r" ], [ 0.031935, " 25.8% Processing: Wallpaper/bigcollection/_F...ystad__Niederlande____Erns.jpg\r" ], [ 0.034076, " 25.9% Processing: Wallpaper/bigcollection/_F...kyline_von_Baku__Aserbaids.jpg\r" ], [ 0.028655, " 26.0% Processing: Wallpaper/bigcollection/_F..._New_York_City__Bundesstaa.jpg\r" ], [ 0.030152, " 26.1% Processing: Wallpaper/bigcollection/_F...wals__Cierva_Cove__Antarkt.jpg\r" ], [ 0.030983, " 26.2% Processing: Wallpaper/bigcollection/_F..._des_Norman_River__Queensl.jpg\r" ], [ 0.027019, " 27.4% Processing: Wallpaper/bigcollection/_G..._Ger_llhang_im_Rondane-Nat.jpg\r" ], [ 0.027058, " 27.5% Processing: Wallpaper/bigcollection/_G...tzgebiet_Sacramento_Nation.jpg\r" ], [ 0.038515, " 27.6% Processing: Wallpaper/bigcollection/_G...Villandry__Loiretal__Frank.jpg\r" ], [ 0.024219, " 27.7% Processing: Wallpaper/bigcollection/_G...Villandry__Loiretal__Frank.jpg\r" ], [ 0.028063, " 27.8% Processing: Wallpaper/bigcollection/_G...__Champion-Insel__Floreana.jpg\r" ], [ 0.030237, " 27.9% Processing: Wallpaper/bigcollection/_G...__R_bida__Gal_pagosinseln_.jpg\r" ], [ 0.031455, " 28.0% Processing: Wallpaper/bigcollection/_G...c-Nationalpark__Alaska__US.jpg\r" ], [ 0.028409, " 28.1% Processing: Wallpaper/bigcollection/_G...um_Bridge__Newcastle_upon_.jpg\r" ], [ 0.031595, " 28.2% Processing: Wallpaper/bigcollection/_G..._Kanal_in_Venedig__Italien.jpg\r" ], [ 0.031079, " 28.3% Processing: 
Wallpaper/bigcollection/_G..._Rock_Canyon__Waterton-Lak.jpg\r" ], [ 0.028272, " 30.5% Processing: Wallpaper/bigcollection/_G...oos_in_der_Gro_aufnahme___.jpg\r" ], [ 0.034208, " 30.6% Processing: Wallpaper/bigcollection/_G...iesel__Bayern__Deutschland.jpg\r" ], [ 0.034016, " 30.7% Processing: Wallpaper/bigcollection/_G...__ber_dem_Thunersee__Berne.jpg\r" ], [ 0.0292, " 30.8% Processing: Wallpaper/bigcollection/_G...ell-St.-Elias-Nationalpark.jpg\r" ], [ 0.024942, " 32.8% Processing: Wallpaper/bigcollection/_G..._bei_Mettlach__Saarland__D.jpg\r" ], [ 0.031677, " 32.9% Processing: Wallpaper/bigcollection/_G...ngxia-Zuchtstation__Ya_an_.jpg\r" ], [ 0.031108, " 33.9% Processing: Wallpaper/bigcollection/_H...kaido__Japan____JTB_Media_.jpg\r" ], [ 0.030964, " 35.9% Processing: Wallpaper/bigcollection/_H...ew_RussellVisuals_Unlimite.jpg\r" ], [ 0.026577, " 38.9% Processing: Wallpaper/bigcollection/_I...eutschen_Doms__Gendarmenma.jpg\r" ], [ 0.031898, " 39.0% Processing: Wallpaper/bigcollection/_I...ukuoka_Tower__Fukuoka__Jap.jpg\r" ], [ 0.031693, " 39.1% Processing: Wallpaper/bigcollection/_I...__Bermeo__Provinz_Bizkaia_.jpg\r" ], [ 0.026825, " 39.2% Processing: Wallpaper/bigcollection/_I...P_tzcuaro-See__Bundesstaat.jpg\r" ], [ 0.030749, " 41.0% Processing: Wallpaper/bigcollection/_J...ia-Nationalpark__Maine__US.jpg\r" ], [ 0.032301, " 41.1% Processing: Wallpaper/bigcollection/_J..._im_Moremi_Game_Reserve__O.jpg\r" ], [ 0.031689, " 42.2% Processing: Wallpaper/bigcollection/_K...n_in_der_Antarktis____Jan_.jpg\r" ], [ 0.029222, " 42.3% Processing: Wallpaper/bigcollection/_K...e_Washington__Antarktis___.jpg\r" ], [ 0.174039, " 42.4% Processing: Wallpaper/bigcollection/_K...K_ken__Snow_Hill_Island__A.jpg\r" ], [ 0.03322, " 42.5% Processing: Wallpaper/bigcollection/_K...SCO-Welterbest_tte__Trier_.jpg\r" ], [ 0.031657, " 43.4% Processing: Wallpaper/bigcollection/_K...ufort__South_Carolina__USA.jpg\r" ], [ 0.026738, " 43.5% Processing: Wallpaper/bigcollection/_K...chfang_vor_Port_St._Johns_.jpg\r" ], [ 0.033834, " 44.4% Processing: Wallpaper/bigcollection/_K...eide__Schottisches_Hochlan.jpg\r" ], [ 0.034061, " 44.5% Processing: Wallpaper/bigcollection/_K...m_Schlossgarten_Schwetzing.jpg\r" ], [ 0.033845, " 44.6% Processing: Wallpaper/bigcollection/_K...dscha__Kachetien__Georgien.jpg\r" ], [ 0.031383, " 44.7% Processing: Wallpaper/bigcollection/_K..._Baden-W_rttemberg__Deutsc.jpg\r" ], [ 0.027515, " 44.8% Processing: Wallpaper/bigcollection/_K..._Zanskar__Region_Ladakh__B.jpg\r" ], [ 0.031935, " 44.9% Processing: Wallpaper/bigcollection/_K...Meteora__Griechenland____S.jpg\r" ], [ 0.030994, " 45.0% Processing: Wallpaper/bigcollection/_K...-Ville__Belgien____Patty_P.jpg\r" ], [ 0.031632, " 46.8% Processing: Wallpaper/bigcollection/_L...ionalpark__Simbabwe____Jer.jpg\r" ], [ 0.032645, " 46.9% Processing: Wallpaper/bigcollection/_L...Hochland_von_Cuenca__Auton.jpg\r" ], [ 0.028682, " 47.0% Processing: Wallpaper/bigcollection/_L...Hochland_von_Cuenca__Auton.jpg\r" ], [ 0.030087, " 47.1% Processing: Wallpaper/bigcollection/_L...__Axel_Flasbarth500px_____.jpg\r" ], [ 0.030684, " 47.2% Processing: Wallpaper/bigcollection/_L...athedrale_von_Chartres__Fr.jpg\r" ], [ 0.029522, " 47.3% Processing: Wallpaper/bigcollection/_L...und_Aiguilles_de_Chamonix_.jpg\r" ], [ 0.032174, " 47.4% Processing: Wallpaper/bigcollection/_L...Nutthavood_Punpeng500px___.jpg\r" ], [ 0.029075, " 47.5% Processing: Wallpaper/bigcollection/_L...nd__Great_Barrier_Reef__Au.jpg\r" ], [ 0.028973, " 47.6% Processing: 
Wallpaper/bigcollection/_L...__Insel_Corvo__Portugal___.jpg\r" ], [ 0.030047, " 47.7% Processing: Wallpaper/bigcollection/_L...ationalpark__British_Colum.jpg\r" ], [ 0.031497, " 49.3% Processing: Wallpaper/bigcollection/_L...hof__Great_Court__des_Brit.jpg\r" ], [ 0.029466, " 49.4% Processing: Wallpaper/bigcollection/_L...em_Wald_auf_der_Insel_Shik.jpg\r" ], [ 0.025178, " 49.5% Processing: Wallpaper/bigcollection/_L...er_K_ste_von_Ixtapa_Zihuat.jpg\r" ], [ 0.030228, " 49.6% Processing: Wallpaper/bigcollection/_L...e_Itapu__in_Salvador__Bahi.jpg\r" ], [ 0.027644, " 49.7% Processing: Wallpaper/bigcollection/_L...l_Point_in_der_N_he_von_Po.jpg\r" ], [ 0.026513, " 49.8% Processing: Wallpaper/bigcollection/_L...eversand__Westerhever__Sch.jpg\r" ], [ 0.032316, " 49.9% Processing: Wallpaper/bigcollection/_L...i__Provinz_Jiangsu__Volksr.jpg\r" ], [ 0.026983, " 50.0% Processing: Wallpaper/bigcollection/_L...g__aufgenommen_von_der_Int.jpg\r" ], [ 0.03107, " 51.7% Processing: Wallpaper/bigcollection/_M..._Cay__Exuma__Bahamas____Ji.jpg\r" ], [ 0.028123, " 51.8% Processing: Wallpaper/bigcollection/_M...ationalpark_Jardines_de_la.jpg\r" ], [ 0.028547, " 51.9% Processing: Wallpaper/bigcollection/_M...au____WaterFrameAlamy_____.jpg\r" ], [ 0.030092, " 53.1% Processing: Wallpaper/bigcollection/_M...ands-Nationalpark__Utah__U.jpg\r" ], [ 0.027589, " 53.2% Processing: Wallpaper/bigcollection/_M...useum_in_den_Wolken__Monte.jpg\r" ], [ 0.029779, " 53.3% Processing: Wallpaper/bigcollection/_M...Plaza_de_la_Encarnaci_n__S.jpg\r" ], [ 0.031154, " 54.6% Processing: Wallpaper/bigcollection/_M...lmie_National_Forest__Bund.jpg\r" ], [ 0.03317, " 54.7% Processing: Wallpaper/bigcollection/_M...t_Edziza_Provincial_Park__.jpg\r" ], [ 0.031631, " 54.8% Processing: Wallpaper/bigcollection/_M...__Washington__USA____Diane.jpg\r" ], [ 0.025722, " 56.1% Processing: Wallpaper/bigcollection/_N..._K_ste_des_Atlantischen_Oz.jpg\r" ], [ 0.029888, " 56.2% Processing: Wallpaper/bigcollection/_N...hee__Schiras__Iran____R.Cr.jpg\r" ], [ 0.022761, " 57.5% Processing: Wallpaper/bigcollection/_N...Fischotter_im_Yellowstone-.jpg\r" ], [ 0.030469, " 57.6% Processing: Wallpaper/bigcollection/_N..._Baumstachler____Minden_Pi.jpg\r" ], [ 0.032258, " 58.9% Processing: Wallpaper/bigcollection/_O...-Park__Bomarzo__Italien___.jpg\r" ], [ 0.028556, " 59.0% Processing: Wallpaper/bigcollection/_O...-Park__Bomarzo__Italien___.jpg\r" ], [ 0.029665, " 60.4% Processing: Wallpaper/bigcollection/_P..._der_Boardman_Tree_Farm__B.jpg\r" ], [ 0.030072, " 60.5% Processing: Wallpaper/bigcollection/_P...o-Ebene__Italien____Eddy_G.jpg\r" ], [ 0.034601, " 60.6% Processing: Wallpaper/bigcollection/_P...nem_Karnevalswagen_beim_Ro.jpg\r" ], [ 0.029305, " 61.9% Processing: Wallpaper/bigcollection/_P...der_argentinischen_Atlanti.jpg\r" ], [ 0.03045, " 62.0% Processing: Wallpaper/bigcollection/_P...m__Pilsum__Niedersachsen__.jpg\r" ], [ 0.02941, " 63.4% Processing: Wallpaper/bigcollection/_P...rk_Torres_del_Paine__Chile.jpg\r" ], [ 0.033345, " 63.5% Processing: Wallpaper/bigcollection/_P...i-Nationalpark__New_South_.jpg\r" ], [ 0.031818, " 64.9% Processing: Wallpaper/bigcollection/_R...ationalpark_Sarek__Schwede.jpg\r" ], [ 0.025656, " 65.0% Processing: Wallpaper/bigcollection/_R...ationalpark_Sarek__Schwede.jpg\r" ], [ 0.030751, " 66.6% Processing: Wallpaper/bigcollection/_R...nyang__Provinz_Yunnan__Chi.jpg\r" ], [ 0.030313, " 66.7% Processing: Wallpaper/bigcollection/_R...n_Ludwig_XIV._auf_dem_Plac.jpg\r" ], [ 0.032915, " 68.6% Processing: 
Wallpaper/bigcollection/_R...r____Getty_Images______Bin.jpg\r" ], [ 0.029504, " 70.1% Processing: Wallpaper/bigcollection/_S...tional_Park__Germany____ 3.jpg\r" ], [ 0.026571, " 70.2% Processing: Wallpaper/bigcollection/_S...tional_Park__Germany____ 4.jpg\r" ], [ 0.032136, " 71.7% Processing: Wallpaper/bigcollection/_S...e_t_sich_als_Wasserfall_vo.jpg\r" ], [ 0.032883, " 72.6% Processing: Wallpaper/bigcollection/_S...riehunde_im_Wind_Cave_Nati.jpg\r" ], [ 0.031602, " 72.7% Processing: Wallpaper/bigcollection/_S...erkstatt__Hexenlochm_hle__.jpg\r" ], [ 0.030634, " 73.6% Processing: Wallpaper/bigcollection/_S...en_in_der_Son_Doong-H_hle_.jpg\r" ], [ 0.027026, " 74.5% Processing: Wallpaper/bigcollection/_S..._at_sunset__Attendorn__Sau.jpg\r" ], [ 0.038777, " 75.4% Processing: Wallpaper/bigcollection/_S..._Dartmoor-Nationalpark__De.jpg\r" ], [ 0.027422, " 75.5% Processing: Wallpaper/bigcollection/_S..._der_Halong-Bucht__Vietnam.jpg\r" ], [ 0.027539, " 76.3% Processing: Wallpaper/bigcollection/_S...em_See__Bergpark_Wilhelmsh.jpg\r" ], [ 0.031058, " 76.4% Processing: Wallpaper/bigcollection/_S...ge_in_den_Ausl_ufern_der_R.jpg\r" ], [ 0.036506, " 77.6% Processing: Wallpaper/bigcollection/_S..._Geothermalgebiet_Haukadal.jpg\r" ], [ 0.025063, " 77.7% Processing: Wallpaper/bigcollection/_S...ampagne-Ardennes__Frankrei.jpg\r" ], [ 0.029054, " 77.8% Processing: Wallpaper/bigcollection/_S...r__ber_West_Point__Nebrask.jpg\r" ], [ 0.028908, " 77.9% Processing: Wallpaper/bigcollection/_S...n-Bodenstation__Longyearby.jpg\r" ], [ 0.029276, " 78.0% Processing: Wallpaper/bigcollection/_S..._und_Solidarit_t_Kerzen__K.jpg\r" ], [ 0.024812, " 79.0% Processing: Wallpaper/bigcollection/_T..._Blatt_eines_Per_ckenstrau.jpg\r" ], [ 0.031898, " 80.0% Processing: Wallpaper/bigcollection/_T...er_Bavaria__Germany____F 3.jpg\r" ], [ 0.029189, " 80.1% Processing: Wallpaper/bigcollection/_T...pper_Bavaria__Germany____F.jpg\r" ], [ 0.028065, " 81.2% Processing: Wallpaper/bigcollection/_T...Image_BrokerRex_Features 1.jpg\r" ], [ 0.03116, " 81.3% Processing: Wallpaper/bigcollection/_T...n__Baden-W_rttemberg__Deut.jpg\r" ], [ 0.026524, " 82.3% Processing: Wallpaper/bigcollection/_U..._seltene_Blattschwanzgecko.jpg\r" ], [ 0.028383, " 82.4% Processing: Wallpaper/bigcollection/_V...en_und_Altstadt_von_Chania.jpg\r" ], [ 0.032476, " 83.5% Processing: Wallpaper/bigcollection/_V..._Hirta__St._Kilda__Schottl.jpg\r" ], [ 0.030701, " 84.7% Processing: Wallpaper/bigcollection/_W...wald__Insel_Sula__Solund__.jpg\r" ], [ 0.034129, " 84.8% Processing: Wallpaper/bigcollection/_W...nschafe__Kanton_Wallis__ 1.jpg\r" ], [ 0.03033, " 85.8% Processing: Wallpaper/bigcollection/_W...ionalpark_Plitvicer_Seen__.jpg\r" ], [ 0.031761, " 87.2% Processing: Wallpaper/bigcollection/_W..._N_he_von_Ca_amares__Provi.jpg\r" ], [ 0.031627, " 87.3% Processing: Wallpaper/bigcollection/_W..._N_he_von_Cuenca__Spanien_.jpg\r" ], [ 0.024242, " 88.3% Processing: Wallpaper/bigcollection/_W...jeu__D_partement_Rh_ne__Re.jpg\r" ], [ 0.027362, " 89.3% Processing: Wallpaper/bigcollection/_W...guna_Colorada__Bolivien___.jpg\r" ], [ 0.031448, " 90.5% Processing: Wallpaper/bigcollection/_Z..._von_Autobahnen_in_Bangkok.jpg\r" ], [ 0.027535, " 90.6% Processing: Wallpaper/bigcollection/_Z...abara-Bucht__Rio_de_Janeir.jpg\r" ], [ 0.025329, " 92.1% Processing: Wallpaper/bigcollection/__...ptur_der_Landart-K_nstleri.jpg\r" ], [ 0.044106, " 92.2% Processing: Wallpaper/bigcollection/__...__Magic_Mountain_-Landmark.jpg\r" ], [ 0.03068, " 93.5% Processing: 
Wallpaper/bigcollection/_F...rte_Marina_Bay_zum_50._Nat.jpg\r" ], [ 0.031039, " 93.6% Processing: Wallpaper/bigcollection/_H...ing_Crane_Pond_Conservancy.jpg\r" ], [ 0.020685, " 95.0% Processing: Wallpaper/2048example/Palo...t_by_Beth___Jeremy_Jonkman.jpg\r" ], [ 0.019863, " 96.3% Processing: Wallpaper/evenmore/ChipDE ...jpg \r" ], [ 0.056069, " 96.4% Processing: Wallpaper/evenmore/ChipDE ...jpg \r" ], [ 0.049869, " 97.4% Processing: Wallpaper/evenmore/ChipDE 06.jpg \r" ], [ 0.021021, " 97.5% Processing: Wallpaper/evenmore/ChipDE ...jpg \r" ], [ 0.019135, " 98.4% Processing: Wallpaper/evenmore/ChipDE ...jpg \r" ], [ 0.021483, " 99.6% Processing: Wallpaper/deer.jpg ... \r" ], [ 0.021593, " 99.7% Processing: Wallpaper/deer.jpg ... \r" ], [ 0.02037, " 99.8% Processing: Wallpaper/deer.jpg ... \r" ], [ 0.027858, " 99.9% Processing: Wallpaper/deer.jpg ... \r" ], [ 0.020864, " \r" ], [ 0.077955, "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" ], [ 0.001068, "\u001b]7;\u0007" ], [ 0.000836, "\u001b]7;\u0007" ], [ 0.000104, "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[K" ], [ 8.3e-05, "\u001b[?1h\u001b=" ], [ 0.000234, "\u001b[?2004h" ], [ 2.471911, "\u001b[32ml\u001b[39m" ], [ 0.102688, "\b\u001b[32ml\u001b[32ms\u001b[39m" ], [ 0.272296, " " ], [ 0.220114, "-" ], [ 0.157165, "l" ], [ 0.074368, "a" ], [ 0.353976, "\u001b[?1l\u001b>" ], [ 0.000755, "\u001b[?2004l\r\r\n" ], [ 0.000778, "\u001b]2;ls --color=tty -la\u0007\u001b]1;ls\u0007" ], [ 0.001633, "total 573616\r\n" ], [ 1.9e-05, "drwxr-xr-x. 4 rugk rugk 4096 Jul 16 18:56 \u001b[0m\u001b[38;5;33m.\u001b[0m\r\ndrwxr-x---. 55 rugk rugk 4096 Jul 16 18:57 \u001b[38;5;33m..\u001b[0m\r\ndrwx------. 2 rugk rugk 4096 Jul 14 21:57 \u001b[38;5;33mWallpaper\u001b[0m\r\ndrwxr-xr-x. 6 rugk rugk 4096 Jul 14 21:55 \u001b[38;5;33mWallpaper.orig\u001b[0m\r\n-rw-------. 
1 rugk rugk 587361454 Jul 16 18:57 \u001b[38;5;9mbackup.tar.gz\u001b[0m\r\n" ], [ 0.000404, "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" ], [ 0.001103, "\u001b]7;\u0007" ], [ 0.000992, "\u001b]7;\u0007" ], [ 7.3e-05, "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[K" ], [ 8.9e-05, "\u001b[?1h\u001b=" ], [ 0.000158, "\u001b[?2004h" ], [ 3.04506, "\u001b[?1l\u001b>" ], [ 0.000385, "\u001b[?2004l\r\r\n" ], [ 0.000485, "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" ], [ 0.001512, "\u001b]7;\u0007" ], [ 0.001245, "\u001b]7;\u0007" ], [ 6.9e-05, "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[K" ], [ 0.000129, "\u001b[?1h\u001b=" ], [ 0.000247, "\u001b[?2004h" ], [ 0.325892, "\u001b[1m#\u001b[0m\u001b[39m" ], [ 0.228892, "\b\u001b[1m#\u001b[1m \u001b[0m\u001b[39m" ], [ 0.186392, "\b\b\u001b[1m#\u001b[1m \u001b[1mY\u001b[0m\u001b[39m" ], [ 0.112073, "\b\u001b[1mY\u001b[1mo\u001b[0m\u001b[39m" ], [ 0.139024, "\b\u001b[1mo\u001b[1mu\u001b[0m\u001b[39m" ], [ 0.151793, "\b\u001b[1mu\u001b[1m \u001b[0m\u001b[39m" ], [ 0.106484, "\b\u001b[1m \u001b[1mc\u001b[0m\u001b[39m" ], [ 0.147932, "\b\u001b[1mc\u001b[1ma\u001b[0m\u001b[39m" ], [ 0.181458, "\b\u001b[1ma\u001b[1mn\u001b[0m\u001b[39m" ], [ 0.137456, "\b\u001b[1mn\u001b[1m \u001b[0m\u001b[39m" ], [ 0.21885, "\b\u001b[1m \u001b[1me\u001b[0m\u001b[39m" ], [ 0.170788, "\b\u001b[1me\u001b[1mv\u001b[0m\u001b[39m" ], [ 0.133285, "\b\u001b[1mv\u001b[1me\u001b[0m\u001b[39m" ], [ 0.28717, "\b\u001b[1me\u001b[1mn\u001b[0m\u001b[39m" ], [ 0.485291, "\b\u001b[1mn\u001b[1m mount an archive or even the whole repository:\u001b[0m\u001b[39m" ], [ 1.036008, "\u001b[?1l\u001b>" ], [ 0.001535, "\u001b[?2004l\r\r\n" ], [ 0.001777, "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" ], [ 0.002934, "\u001b]7;\u0007" ], [ 0.002695, "\u001b]7;\u0007" ], [ 0.00014, "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[K" ], [ 9.4e-05, "\u001b[?1h\u001b=" ], [ 0.000881, "\u001b[?2004h" ], [ 1.264493, "\u001b[1m\u001b[31mm\u001b[0m\u001b[39m" ], [ 0.158178, "\b\u001b[1m\u001b[31mm\u001b[1m\u001b[31mk\u001b[0m\u001b[39m" ], [ 0.129344, "\b\b\u001b[1m\u001b[31mm\u001b[1m\u001b[31mk\u001b[1m\u001b[31md\u001b[0m\u001b[39m" ], [ 0.153746, "\b\u001b[1m\u001b[31md\u001b[1m\u001b[31mi\u001b[0m\u001b[39m" ], [ 0.106254, "\b\b\b\b\u001b[0m\u001b[32mm\u001b[0m\u001b[32mk\u001b[0m\u001b[32md\u001b[0m\u001b[32mi\u001b[32mr\u001b[39m" ], [ 0.178794, " " ], [ 0.328222, "\u001b[4m/\u001b[24m" ], [ 0.202794, "\b\u001b[4m/\u001b[4mt\u001b[24m" ], [ 0.246443, "\b\u001b[4mt\u001b[4mm\u001b[24m" ], [ 0.207634, "\b\u001b[4mm\u001b[4mp\u001b[24m" ], [ 0.88273, "\b\u001b[4mp\u001b[4m/\u001b[24m" ], [ 0.339887, "\b\u001b[4m/\u001b[4mm\u001b[24m" ], [ 0.210076, "\b\u001b[4mm\u001b[4mo\u001b[24m" ], [ 0.16667, "\b\b\b\b\b\b\b\u001b[24m/\u001b[24mt\u001b[24mm\u001b[24mp\u001b[24m/\u001b[24mm\u001b[24mou" ], [ 0.141564, "n" ], [ 0.184, "t" ], [ 1.4607, "\u001b[?1l\u001b>" ], [ 0.001306, "\u001b[?2004l\r\r\n" ], [ 0.000794, "\u001b]2;mkdir /tmp/mount\u0007" ], [ 6.6e-05, "\u001b]1;mkdir\u0007" ], [ 0.00176, "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" ], [ 0.00142, "\u001b]7;\u0007" ], [ 0.001308, "\u001b]7;\u0007" ], [ 7.2e-05, "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ " ], [ 1.3e-05, "\u001b[K" ], [ 9.1e-05, "\u001b[?1h\u001b=" ], [ 0.000273, "\u001b[?2004h" ], [ 1.09686, "\u001b[4mb\u001b[24m" ], [ 0.187046, "\b\u001b[24m\u001b[1m\u001b[31mb\u001b[1m\u001b[31mo\u001b[0m\u001b[39m" ], [ 0.10907, 
"\b\b\u001b[1m\u001b[31mb\u001b[1m\u001b[31mo\u001b[1m\u001b[31mr\u001b[0m\u001b[39m" ], [ 0.12414, "\b\b\b\u001b[0m\u001b[32mb\u001b[0m\u001b[32mo\u001b[0m\u001b[32mr\u001b[32mg\u001b[39m" ], [ 0.187573, " " ], [ 0.229364, "m" ], [ 0.195942, "o" ], [ 0.183861, "u" ], [ 0.138559, "n" ], [ 0.207537, "t" ], [ 1.01571, " " ], [ 0.55086, ":" ], [ 0.110713, ":" ], [ 0.27265, " " ], [ 0.462869, "\u001b[4m/\u001b[24m" ], [ 0.795464, "\b\u001b[4m/\u001b[4mt\u001b[24m" ], [ 0.295092, "\b\u001b[4mt\u001b[4mm\u001b[24m" ], [ 0.200509, "\b\u001b[4mm\u001b[4mp\u001b[24m" ], [ 0.878464, "\b\u001b[4mp\u001b[4m/\u001b[24m" ], [ 0.306666, "\b\u001b[4m/\u001b[4mm\u001b[24m" ], [ 0.24341, "\b\u001b[4mm\u001b[4mo\u001b[24m" ], [ 0.166203, "\b\u001b[4mo\u001b[4mu\u001b[24m" ], [ 0.138953, "\b\u001b[4mu\u001b[4mn\u001b[24m" ], [ 0.177723, "\b\u001b[4mn\u001b[4mt\u001b[24m" ], [ 1.371278, "\u001b[?1l\u001b>" ], [ 0.001184, "\u001b[?2004l\r\r\n" ], [ 0.000603, "\u001b]2;borg mount :: /tmp/mount\u0007\u001b]1;borg\u0007" ], [ 0.651025, "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" ], [ 0.001453, "\u001b]7;\u0007" ], [ 0.000984, "\u001b]7;\u0007" ], [ 7.4e-05, "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[K" ], [ 2.6e-05, "\u001b[?1h\u001b=" ], [ 0.0002, "\u001b[?2004h" ], [ 1.860515, "\u001b[32ml\u001b[39m" ], [ 0.107896, "\b\u001b[32ml\u001b[32ms\u001b[39m" ], [ 0.253911, " " ], [ 0.203092, "-" ], [ 0.178525, "l" ], [ 0.111795, "a" ], [ 0.200138, " " ], [ 0.353001, "\u001b[4m/\u001b[24m" ], [ 0.264827, "\b\u001b[4m/\u001b[4mt\u001b[24m" ], [ 0.205749, "\b\u001b[4mt\u001b[4mm\u001b[24m" ], [ 0.168679, "\b\u001b[4mm\u001b[4mp\u001b[24m" ], [ 0.016649, "\b\b\b\b\u001b[24m/\u001b[24mt\u001b[24mm\u001b[24mp" ], [ 0.712108, "\b\b\b\b\u001b[4m/\u001b[4mt\u001b[4mm\u001b[4mp\u001b[24m \b" ], [ 0.383057, "\b\u001b[4mp\u001b[4m/\u001b[24m" ], [ 0.159994, "\b\u001b[4m/\u001b[4mm\u001b[24m" ], [ 0.187645, "\b\u001b[4mm\u001b[4mo\u001b[24m" ], [ 0.168813, "\b\u001b[4mo\u001b[4mu\u001b[24m" ], [ 0.12933, "\b\u001b[4mu\u001b[4mn\u001b[24m" ], [ 0.421583, "\u001b[?7l\u001b[31m......\u001b[39m\u001b[?7h" ], [ 0.018359, "\r\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[32mls\u001b[39m -la \u001b[4m/tmp/mount\u001b[1m\u001b[4m/\u001b[0m\u001b[24m\u001b[K" ], [ 0.602087, "\b\b\u001b[4mt\u001b[24m\u001b[0m\u001b[24m \b" ], [ 2.5e-05, "\u001b[?1l\u001b>" ], [ 0.000874, "\u001b[?2004l\r\r\n" ], [ 0.000682, "\u001b]2;ls --color=tty -la /tmp/mount\u0007\u001b]1;ls\u0007" ], [ 0.002495, "total 0\r\n" ], [ 4.4e-05, "drwxr-xr-x. 1 rugk rugk 0 Jul 16 18:58 \u001b[0m\u001b[38;5;33m.\u001b[0m\r\ndrwxrwxrwt. 27 root root 660 Jul 16 18:58 \u001b[48;5;10;38;5;16m..\u001b[0m\r\ndrwxr-xr-x. 1 rugk rugk 0 Jul 16 18:58 \u001b[38;5;33mbackup-block-device\u001b[0m\r\ndrwxr-xr-x. 1 rugk rugk 0 Jul 16 18:58 \u001b[38;5;33mbackup1\u001b[0m\r\ndrwxr-xr-x. 1 rugk rugk 0 Jul 16 18:58 \u001b[38;5;33mbackup2\u001b[0m\r\ndrwxr-xr-x. 1 rugk rugk 0 Jul 16 18:58 \u001b[38;5;33mbackup3\u001b[0m\r\ndrwxr-xr-x. 1 rugk rugk 0 Jul 16 18:58 \u001b[38;5;33mrugk-2017-07-16T18:51:34\u001b[0m\r\ndrwxr-xr-x. 
1 rugk rugk 0 Jul 16 18:58 \u001b[38;5;33mrugk-2017-07-16T18:52:19\u001b[0m\r\n" ], [ 0.000169, "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" ], [ 0.000984, "\u001b]7;\u0007" ], [ 0.00097, "\u001b]7;\u0007" ], [ 2.4e-05, "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[K" ], [ 0.000122, "\u001b[?1h\u001b=" ], [ 0.000251, "\u001b[?2004h" ], [ 0.482339, "\u001b[4mb\u001b[24m" ], [ 0.179808, "\b\u001b[24m\u001b[1m\u001b[31mb\u001b[1m\u001b[31mo\u001b[0m\u001b[39m" ], [ 0.105817, "\b\b\u001b[1m\u001b[31mb\u001b[1m\u001b[31mo\u001b[1m\u001b[31mr\u001b[0m\u001b[39m" ], [ 0.116974, "\b\b\b\u001b[0m\u001b[32mb\u001b[0m\u001b[32mo\u001b[0m\u001b[32mr\u001b[32mg\u001b[39m" ], [ 0.173004, " " ], [ 0.216291, "u" ], [ 0.168732, "m" ], [ 0.228004, "o" ], [ 0.19268, "u" ], [ 0.14303, "n" ], [ 0.175557, "t" ], [ 0.406596, " " ], [ 0.69215, "\u001b[4m/\u001b[24m" ], [ 0.242216, "\b\u001b[4m/\u001b[4mt\u001b[24m" ], [ 0.260453, "\b\u001b[4mt\u001b[4mm\u001b[24m" ], [ 0.2605, "\b\u001b[4mm\u001b[4mp\u001b[24m" ], [ 0.014483, "\b\b\b\b\u001b[24m/\u001b[24mt\u001b[24mm\u001b[24mp" ], [ 0.597766, "\b\b\b\b\u001b[4m/\u001b[4mt\u001b[4mm\u001b[4mp\u001b[24m \b" ], [ 0.482551, "\b\u001b[4mp\u001b[4m/\u001b[24m" ], [ 0.236361, "\b\u001b[4m/\u001b[4mm\u001b[24m" ], [ 0.212317, "\b\u001b[4mm\u001b[4mo\u001b[24m" ], [ 0.160611, "\b\u001b[4mo\u001b[4mu\u001b[24m" ], [ 0.142036, "\b\u001b[4mu\u001b[4mn\u001b[24m" ], [ 0.335664, "\b\u001b[4mn\u001b[4mt\u001b[24m" ], [ 1.159614, "\u001b[?1l\u001b>" ], [ 0.001057, "\u001b[?2004l\r\r\n" ], [ 0.000642, "\u001b]2;borg umount /tmp/mount\u0007\u001b]1;borg\u0007" ], [ 0.596849, "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" ], [ 0.001387, "\u001b]7;\u0007" ], [ 0.001067, "\u001b]7;\u0007" ], [ 9.1e-05, "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[K" ], [ 7.6e-05, "\u001b[?1h\u001b=" ], [ 0.000187, "\u001b[?2004h" ], [ 1.467084, "\u001b[1m#\u001b[0m\u001b[39m" ], [ 0.264583, "\b\u001b[1m#\u001b[1m \u001b[0m\u001b[39m" ], [ 0.277487, "\b\b\u001b[1m#\u001b[1m \u001b[1mT\u001b[0m\u001b[39m" ], [ 0.12184, "\b\u001b[1mT\u001b[1mh\u001b[0m\u001b[39m" ], [ 0.103403, "\b\u001b[1mh\u001b[1ma\u001b[0m\u001b[39m" ], [ 0.125651, "\b\u001b[1ma\u001b[1mt\u001b[0m\u001b[39m" ], [ 0.465663, "\b\u001b[1mt\u001b[1m'\u001b[0m\u001b[39m" ], [ 0.298764, "\b\u001b[1m'\u001b[1ms it, but of course there is more to explore, so have a look at the d\u001b[1mo\u001b[1mcs.\u001b[0m\u001b[39m\u001b[K\r\r\n\u001b[K\u001b[A\u001b[4C" ], [ 1.453815, "\u001b[1B\r\u001b[K\u001b[A\u001b[4C" ], [ 2e-05, "\u001b[?1l\u001b>" ], [ 0.000725, "\u001b[?2004l\u001b[1B\r\r\n" ], [ 0.00054, "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" ], [ 0.00118, "\u001b]7;\u0007" ], [ 0.000909, "\u001b]7;\u0007" ], [ 7.8e-05, "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[K" ], [ 3e-05, "\u001b[?1h\u001b=" ], [ 0.000524, "\u001b[?2004h" ], [ 1.74407, "\u001b[?2004l\r\r\n" ] ] } borgbackup-1.1.15/docs/misc/asciinema/install.json0000644000175000017500000007225513771325506022075 0ustar useruser00000000000000{ "version": 1, "width": 78, "height": 25, "duration": 140.275038, "command": null, "title": null, "env": { "TERM": "xterm-256color", "SHELL": "/bin/zsh" }, "stdout": [ [ 9.1e-05, "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[K" ], [ 0.000108, "\u001b[?1h\u001b=" ], [ 0.000182, "\u001b[?2004h" ], [ 0.45774, "\u001b[1m#\u001b[0m\u001b[39m" ], [ 0.31515, "\b\u001b[1m#\u001b[1m \u001b[0m\u001b[39m" ], [ 0.220208, "\b\b\u001b[1m#\u001b[1m \u001b[1mT\u001b[0m\u001b[39m" ], [ 0.121752, 
"\b\u001b[1mT\u001b[1mh\u001b[0m\u001b[39m" ], [ 0.142781, "\b\u001b[1mh\u001b[1mi\u001b[0m\u001b[39m" ], [ 0.117367, "\b\u001b[1mi\u001b[1ms\u001b[0m\u001b[39m" ], [ 0.255471, "\b\u001b[1ms\u001b[1m asciinema will show you the installation of borg as a standalone bina\u001b[1mr\u001b[1my. Usually you only need this if you want to have an up-to-date version of bo\u001b[1mr\u001b[1mg or no package is available for your distro/OS.\u001b[0m\u001b[39m\u001b[K" ], [ 0.563803, "\u001b[?1l\u001b>" ], [ 0.000412, "\u001b[?2004l\r\r\n" ], [ 0.000823, "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" ], [ 5.9e-05, "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[K" ], [ 5.2e-05, "\u001b[?1h\u001b=" ], [ 0.00027, "\u001b[?2004h" ], [ 2.191111, "\u001b[1m#\u001b[0m\u001b[39m" ], [ 0.301924, "\b\u001b[1m#\u001b[1m \u001b[0m\u001b[39m" ], [ 0.21419, "\b\b\u001b[1m#\u001b[1m \u001b[1mF\u001b[0m\u001b[39m" ], [ 0.117654, "\b\u001b[1mF\u001b[1mi\u001b[0m\u001b[39m" ], [ 0.198616, "\b\u001b[1mi\u001b[1mr\u001b[0m\u001b[39m" ], [ 0.101113, "\b\u001b[1mr\u001b[1ms\u001b[0m\u001b[39m" ], [ 0.107485, "\b\u001b[1ms\u001b[1mt\u001b[0m\u001b[39m" ], [ 0.357443, "\b\u001b[1mt\u001b[1m, we need to download the version, we'd like to install…\u001b[0m\u001b[39m" ], [ 0.516614, "\u001b[?1l\u001b>" ], [ 0.000826, "\u001b[?2004l\r\r\n" ], [ 0.000757, "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" ], [ 4.4e-05, "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[K" ], [ 0.000134, "\u001b[?1h\u001b=" ], [ 0.000598, "\u001b[?2004h" ], [ 1.411874, "\u001b[32mw\u001b[39m" ], [ 0.119593, "\b\u001b[1m\u001b[31mw\u001b[1m\u001b[31mg\u001b[0m\u001b[39m" ], [ 0.13329, "\b\b\u001b[1m\u001b[31mw\u001b[1m\u001b[31mg\u001b[1m\u001b[31me\u001b[0m\u001b[39m" ], [ 0.127861, "\b\b\b\u001b[0m\u001b[32mw\u001b[0m\u001b[32mg\u001b[0m\u001b[32me\u001b[32mt\u001b[39m" ], [ 0.324708, " -q --show-progress https://github.com/borgbackup/borg/releases/download/1.1.0b6/borg-linux64\u001b[K" ], [ 0.797801, "\u001b[?1l\u001b>" ], [ 0.000964, "\u001b[?2004l\r\r\n" ], [ 0.000631, "\u001b]2;wget -q --show-progress \u0007\u001b]1;wget\u0007" ], [ 1.306534, "\rborg-linux64 0%[ ] 0 --.-KB/s " ], [ 0.23185, "\rborg-linux64 0%[ ] 24.58K 106KB/s " ], [ 0.341907, "\rborg-linux64 0%[ ] 92.58K 161KB/s " ], [ 0.230021, "\rborg-linux64 1%[ ] 160.58K 200KB/s " ], [ 0.22577, "\rborg-linux64 1%[ ] 211.58K 206KB/s " ], [ 0.229246, "\rborg-linux64 2%[ ] 279.58K 222KB/s " ], [ 0.347713, "\rborg-linux64 2%[ ] 347.58K 216KB/s " ], [ 0.224636, "\rborg-linux64 98%[================> ] 12.41M 404KB/s eta 2s " ], [ 0.205977, "\rborg-linux64 99%[================> ] 12.50M 401KB/s eta 0s " ], [ 0.137036, "\rborg-linux64 100%[=================>] 12.56M 417KB/s in 39s \r\n" ], [ 0.000872, "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" ], [ 0.000103, "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[K" ], [ 0.000117, "\u001b[?1h\u001b=" ], [ 0.000208, "\u001b[?2004h" ], [ 2.118269, "\u001b[1m#\u001b[0m\u001b[39m" ], [ 0.266901, "\b\u001b[1m#\u001b[1m \u001b[0m\u001b[39m" ], [ 0.142975, "\b\b\u001b[1m#\u001b[1m \u001b[1ma\u001b[0m\u001b[39m" ], [ 0.074155, "\b\u001b[1ma\u001b[1mn\u001b[0m\u001b[39m" ], [ 0.167144, "\b\u001b[1mn\u001b[1md\u001b[0m\u001b[39m" ], [ 0.2241, "\b\u001b[1md\u001b[1m do not forget the GPG signature…!\u001b[0m\u001b[39m" ], [ 0.596854, "\u001b[?1l\u001b>" ], [ 0.000696, "\u001b[?2004l\r\r\n" ], [ 0.000691, "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" ], [ 8.2e-05, "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[K" ], [ 3e-05, 
"\u001b[?1h\u001b=" ], [ 0.000286, "\u001b[?2004h" ], [ 1.51737, "\u001b[1m# and do not forget the GPG signature…!\u001b[0m\u001b[39m" ], [ 0.314759, "\u001b[39D\u001b[0m\u001b[32mw\u001b[0m\u001b[32mg\u001b[0m\u001b[32me\u001b[0m\u001b[32mt\u001b[39m\u001b[0m\u001b[39m \u001b[0m\u001b[39m-\u001b[0m\u001b[39mq\u001b[0m\u001b[39m \u001b[0m\u001b[39m-\u001b[0m\u001b[39m-\u001b[0m\u001b[39ms\u001b[0m\u001b[39mh\u001b[0m\u001b[39mo\u001b[0m\u001b[39mw\u001b[0m\u001b[39m-\u001b[0m\u001b[39mp\u001b[0m\u001b[39mr\u001b[0m\u001b[39mo\u001b[0m\u001b[39mg\u001b[0m\u001b[39mr\u001b[0m\u001b[39me\u001b[0m\u001b[39ms\u001b[0m\u001b[39ms\u001b[0m\u001b[39m \u001b[0m\u001b[39mh\u001b[0m\u001b[39mt\u001b[0m\u001b[39mt\u001b[0m\u001b[39mp\u001b[0m\u001b[39ms\u001b[0m\u001b[39m:\u001b[0m\u001b[39m/\u001b[0m\u001b[39m/\u001b[0m\u001b[39mg\u001b[0m\u001b[39mi\u001b[0m\u001b[39mt\u001b[0m\u001b[39mh\u001b[0m\u001b[39mu\u001b[0m\u001b[39mb\u001b[0m\u001b[39m.com/borgbackup/borg/releases/download/1.1.0b6/borg-linux64\u001b[K" ], [ 1.043903, "." ], [ 0.207322, "a" ], [ 0.16952, "s" ], [ 0.19625, "c" ], [ 0.359073, "\u001b[?1l\u001b>" ], [ 0.001424, "\u001b[?2004l\r\r\n" ], [ 0.000717, "\u001b]2;wget -q --show-progress \u0007\u001b]1;wget\u0007" ], [ 1.236785, "\rborg-linux64.asc 0%[ ] 0 --.-KB/s " ], [ 1.8e-05, "\rborg-linux64.asc 100%[=================>] 819 --.-KB/s in 0s \r\n" ], [ 0.00093, "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" ], [ 7.7e-05, "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[K" ], [ 8.7e-05, "\u001b[?1h\u001b=" ], [ 0.000183, "\u001b[?2004h" ], [ 3.234458, "\u001b[1m#\u001b[0m\u001b[39m" ], [ 1.023301, "\b\u001b[1m#\u001b[1m \u001b[0m\u001b[39m" ], [ 0.331266, "\b\b\u001b[1m#\u001b[1m \u001b[1mI\u001b[0m\u001b[39m" ], [ 0.166799, "\b\u001b[1mI\u001b[1mn\u001b[0m\u001b[39m" ], [ 0.34554, "\b\u001b[1mn\u001b[1m this case, we have already imported the public key of a borg developer.\u001b[1m \u001b[1mSo we only need to verify it:\u001b[0m\u001b[39m\u001b[K" ], [ 1.499971, "\u001b[?1l\u001b>" ], [ 0.001069, "\u001b[?2004l\r\r\n" ], [ 0.000922, "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" ], [ 0.000159, "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[K" ], [ 3.1e-05, "\u001b[?1h\u001b=" ], [ 0.000451, "\u001b[?2004h" ], [ 2.31724, "\u001b[32mg\u001b[39m" ], [ 0.151243, "\b\u001b[32mg\u001b[32mp\u001b[39m" ], [ 0.074305, "\b\b\u001b[32mg\u001b[32mp\u001b[32mg\u001b[39m" ], [ 0.315686, " " ], [ 0.345624, "-" ], [ 0.100203, "-" ], [ 0.291673, "v" ], [ 0.11497, "e" ], [ 0.183055, "r" ], [ 0.146521, "i" ], [ 0.11872, "f" ], [ 0.309865, "y" ], [ 0.346758, " " ], [ 0.264902, "\u001b[4mb\u001b[24m" ], [ 0.307683, "\u001b[?7l" ], [ 2.1e-05, "\u001b[31m......\u001b[39m\u001b[?7h" ], [ 0.011212, "\r\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[32mgpg\u001b[39m --verify \u001b[4mborg-linux64\u001b[24m\u001b[K" ], [ 0.577848, "\u001b[?7l\u001b[31m......\u001b[39m\u001b[?7h" ], [ 0.015636, "\r\r\u001b[27C" ], [ 0.000193, "\r\r\n\u001b[J" ], [ 2e-05, "\u001b[J\u001b[0mborg-linux64 \u001b[Jborg-linux64.asc\u001b[J\u001b[A\u001b[0m\u001b[27m\u001b[24m\r\u001b[2C\u001b[32mgpg\u001b[39m --verify \u001b[4mborg-linux64\u001b[24m\u001b[K" ], [ 0.626316, "\u001b[?7l\u001b[31m......\u001b[39m\u001b[?7h" ], [ 0.012642, "\u001b[12D\u001b[24mb\u001b[24mo\u001b[24mr\u001b[24mg\u001b[24m-\u001b[24ml\u001b[24mi\u001b[24mn\u001b[24mu\u001b[24mx\u001b[24m6\u001b[24m4" ], [ 0.000154, "\r\r\n" ], [ 1.8e-05, "\u001b[J\u001b[7mborg-linux64 \u001b[0m 
\u001b[Jborg-linux64.asc\u001b[J\u001b[A\u001b[0m\u001b[27m\u001b[24m\r\u001b[2C\u001b[32mgpg\u001b[39m --verify borg-linux64\u001b[K" ], [ 0.189964, "\r\r\n" ], [ 1.7e-05, "\u001b[7mborg-linux64 \u001b[0m \r\u001b[7mborg-linux64 \u001b[0m \r\u001b[A\u001b[0m\u001b[27m\u001b[24m\r\u001b[2C\u001b[32mgpg\u001b[39m --verify borg-linux64\u001b[K.asc" ], [ 0.000225, "\r\r\n" ], [ 1.9e-05, "\u001b[18C\u001b[7mborg-linux64.asc\u001b[0m\rborg-linux64 \r\u001b[A\u001b[0m\u001b[27m\u001b[24m\r\u001b[2C\u001b[32mgpg\u001b[39m --verify borg-linux64.asc\u001b[K" ], [ 0.866638, "\r\r\n\u001b[J\u001b[A\u001b[31C\u001b[1m \u001b[0m" ], [ 0.001241, "\r\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[32mgpg\u001b[39m --verify \u001b[4mborg-linux64.asc\u001b[24m\u001b[1m \u001b[0m\u001b[K" ], [ 0.654098, "\b\u001b[0m \b" ], [ 2.7e-05, "\u001b[?1l\u001b>" ], [ 0.001361, "\u001b[?2004l\r\r\n" ], [ 0.000737, "\u001b]2;gpg --verify borg-linux64.asc\u0007" ], [ 2.6e-05, "\u001b]1;gpg\u0007" ], [ 0.002478, "gpg: assuming signed data in `borg-linux64'\r\n" ], [ 0.082679, "gpg: Signature made Sun Jun 18 16:54:19 2017 CEST\r\ngpg: using RSA key 0x243ACFA951F78E01\r\n" ], [ 0.003947, "gpg: Good signature from \"Thomas Waldmann \" [ultimate]\r\ngpg: aka \"Thomas Waldmann \" [ultimate]\r\ngpg: aka \"Thomas Waldmann \" [ultimate]\r\n" ], [ 2.1e-05, "gpg: aka \"Thomas Waldmann \"" ], [ 1.5e-05, " [ultimate]\r\n" ], [ 0.001743, "Primary key fingerprint: 6D5B EF9A DD20 7580 5747 B70F 9F88 FB52 FAF7 B393\r\n Subkey fingerprint: 2F81 AFFB AB04 E11F E8EE 65D4 243A CFA9 51F7 8E01\r\n" ], [ 0.000384, "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" ], [ 9e-05, "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[K" ], [ 9e-05, "\u001b[?1h\u001b=" ], [ 0.000155, "\u001b[?2004h" ], [ 4.627219, "\u001b[1m#\u001b[0m\u001b[39m" ], [ 0.225001, "\b\u001b[1m#\u001b[1m \u001b[0m\u001b[39m" ], [ 0.213579, "\b\b\u001b[1m#\u001b[1m \u001b[1mO\u001b[0m\u001b[39m" ], [ 0.132218, "\b\u001b[1mO\u001b[1mk\u001b[0m\u001b[39m" ], [ 0.061577, "\b\u001b[1mk\u001b[1ma\u001b[0m\u001b[39m" ], [ 0.154786, "\b\u001b[1ma\u001b[1my\u001b[0m\u001b[39m" ], [ 0.172921, "\b\u001b[1my\u001b[1m,\u001b[0m\u001b[39m" ], [ 0.648978, "\b\u001b[1m,\u001b[1m the binary is valid!\u001b[0m\u001b[39m" ], [ 0.822303, "\u001b[?1l\u001b>" ], [ 0.000388, "\u001b[?2004l\r\r\n" ], [ 0.000681, "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" ], [ 0.000113, "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[K" ], [ 4.6e-05, "\u001b[?1h\u001b=" ], [ 0.000252, "\u001b[?2004h" ], [ 2.048081, "\u001b[1m#\u001b[0m\u001b[39m" ], [ 0.243659, "\b\u001b[1m#\u001b[1m \u001b[0m\u001b[39m" ], [ 0.174242, "\b\b\u001b[1m#\u001b[1m \u001b[1mN\u001b[0m\u001b[39m" ], [ 0.131485, "\b\u001b[1mN\u001b[1mo\u001b[0m\u001b[39m" ], [ 0.109555, "\b\u001b[1mo\u001b[1mw\u001b[0m\u001b[39m" ], [ 0.128309, "\b\u001b[1mw\u001b[1m \u001b[0m\u001b[39m" ], [ 0.163064, "\b\u001b[1m \u001b[1mi\u001b[0m\u001b[39m" ], [ 0.138953, "\b\u001b[1mi\u001b[1mn\u001b[0m\u001b[39m" ], [ 0.050135, "\b\u001b[1mn\u001b[1ms\u001b[0m\u001b[39m" ], [ 0.095385, "\b\u001b[1ms\u001b[1mt\u001b[0m\u001b[39m" ], [ 0.114692, "\b\u001b[1mt\u001b[1ma\u001b[0m\u001b[39m" ], [ 0.155821, "\b\u001b[1ma\u001b[1ml\u001b[0m\u001b[39m" ], [ 0.118297, "\b\u001b[1ml\u001b[1ml\u001b[0m\u001b[39m" ], [ 0.165834, "\b\u001b[1ml\u001b[1m \u001b[0m\u001b[39m" ], [ 0.231866, "\b\u001b[1m \u001b[1mi\u001b[0m\u001b[39m" ], [ 0.159893, "\b\u001b[1mi\u001b[1mt\u001b[0m\u001b[39m" ], [ 0.289328, "\b\u001b[1mt\u001b[1m:\u001b[0m\u001b[39m" ], [ 2.713706, 
"\u001b[?1l\u001b>" ], [ 0.000362, "\u001b[?2004l\r\r\n" ], [ 0.000674, "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" ], [ 9e-05, "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[K" ], [ 5.5e-05, "\u001b[?1h\u001b=" ], [ 0.000272, "\u001b[?2004h" ], [ 1.703796, "\u001b[1m\u001b[31ms\u001b[0m\u001b[39m" ], [ 0.12754, "\b\u001b[0m\u001b[32ms\u001b[32mu\u001b[39m" ], [ 0.149508, "\b\b\u001b[1m\u001b[31ms\u001b[1m\u001b[31mu\u001b[1m\u001b[31md\u001b[0m\u001b[39m" ], [ 0.121616, "\b\b\b\u001b[0m\u001b[4m\u001b[32ms\u001b[0m\u001b[4m\u001b[32mu\u001b[0m\u001b[4m\u001b[32md\u001b[4m\u001b[32mo\u001b[24m\u001b[39m" ], [ 0.321903, " \u001b[32mcp\u001b[39m \u001b[4mborg-linux64\u001b[24m \u001b[4m/usr/local/bin/borg\u001b[24m" ], [ 2.352378, "\u001b[?1l\u001b>" ], [ 0.001087, "\u001b[?2004l\r\r\n" ], [ 0.00091, "\u001b]2;sudo cp borg-linux64 /usr/local/bin/borg\u0007\u001b]1;cp\u0007" ], [ 0.013652, "[sudo] password for rugk: " ], [ 2.992379, "\r\n" ], [ 0.031173, "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" ], [ 4e-05, "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[K" ], [ 6.3e-05, "\u001b[?1h\u001b=" ], [ 0.000214, "\u001b[?2004h" ], [ 5.400881, "\u001b[1m\u001b[31ms\u001b[0m\u001b[39m" ], [ 0.138474, "\b\u001b[0m\u001b[32ms\u001b[32mu\u001b[39m" ], [ 0.114266, "\b\b\u001b[1m\u001b[31ms\u001b[1m\u001b[31mu\u001b[1m\u001b[31md\u001b[0m\u001b[39m" ], [ 0.098068, "\b\b\b\u001b[0m\u001b[4m\u001b[32ms\u001b[0m\u001b[4m\u001b[32mu\u001b[0m\u001b[4m\u001b[32md\u001b[4m\u001b[32mo\u001b[24m\u001b[39m" ], [ 0.16926, " " ], [ 0.188874, "\u001b[32mcp\u001b[39m \u001b[4mborg-linux64\u001b[24m \u001b[4m/usr/local/bin/borg\u001b[24m" ], [ 0.413244, "\u001b[34D\u001b[32mh\u001b[32mo\u001b[24m\u001b[32mw\u001b[24m\u001b[32mn\u001b[39m\u001b[24m \u001b[24mr\u001b[24mo\u001b[24mo\u001b[24mt\u001b[24m:\u001b[24mr\u001b[24mo\u001b[24mo\u001b[24mt\u001b[20C" ], [ 1.397429, "\u001b[?1l\u001b>" ], [ 0.00132, "\u001b[?2004l\r\r\n" ], [ 0.00075, "\u001b]2;sudo chown root:root /usr/local/bin/borg\u0007\u001b]1;chown\u0007" ], [ 0.010539, "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" ], [ 5.7e-05, "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[K" ], [ 6.8e-05, "\u001b[?1h\u001b=" ], [ 0.000284, "\u001b[?2004h" ], [ 2.229436, "\u001b[1m#\u001b[0m\u001b[39m" ], [ 0.213191, "\b\u001b[1m#\u001b[1m \u001b[0m\u001b[39m" ], [ 0.099902, "\b\b\u001b[1m#\u001b[1m \u001b[1ma\u001b[0m\u001b[39m" ], [ 0.069437, "\b\u001b[1ma\u001b[1mn\u001b[0m\u001b[39m" ], [ 0.106463, "\b\u001b[1mn\u001b[1md\u001b[0m\u001b[39m" ], [ 0.080598, "\b\u001b[1md\u001b[1m \u001b[0m\u001b[39m" ], [ 0.172381, "\b\u001b[1m \u001b[1mm\u001b[0m\u001b[39m" ], [ 0.096638, "\b\u001b[1mm\u001b[1ma\u001b[0m\u001b[39m" ], [ 0.078606, "\b\u001b[1ma\u001b[1mk\u001b[0m\u001b[39m" ], [ 0.106382, "\b\u001b[1mk\u001b[1me\u001b[0m\u001b[39m" ], [ 0.110174, "\b\u001b[1me\u001b[1m \u001b[0m\u001b[39m" ], [ 0.216964, "\b\u001b[1m \u001b[1mi\u001b[0m\u001b[39m" ], [ 0.183739, "\b\u001b[1mi\u001b[1mt\u001b[0m\u001b[39m" ], [ 0.150872, "\b\u001b[1mt\u001b[1m \u001b[0m\u001b[39m" ], [ 0.188901, "\b\u001b[1m \u001b[1me\u001b[0m\u001b[39m" ], [ 0.099651, "\b\u001b[1me\u001b[1mx\u001b[0m\u001b[39m" ], [ 0.1893, "\b\u001b[1mx\u001b[1me\u001b[0m\u001b[39m" ], [ 0.187999, "\b\u001b[1me\u001b[1mc\u001b[0m\u001b[39m" ], [ 0.128262, "\b\u001b[1mc\u001b[1mu\u001b[0m\u001b[39m" ], [ 0.144851, "\b\u001b[1mu\u001b[1mt\u001b[0m\u001b[39m" ], [ 0.091175, "\b\u001b[1mt\u001b[1ma\u001b[0m\u001b[39m" ], [ 0.135575, "\b\u001b[1ma\u001b[1mb\u001b[0m\u001b[39m" ], [ 
0.18045, "\b\u001b[1mb\u001b[1ml\u001b[0m\u001b[39m" ], [ 0.110687, "\b\u001b[1ml\u001b[1me\u001b[0m\u001b[39m" ], [ 0.360861, "\b\u001b[1me\u001b[1m…\u001b[0m\u001b[39m" ], [ 0.69896, "\u001b[?1l\u001b>" ], [ 0.000433, "\u001b[?2004l\r\r\n" ], [ 0.000544, "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" ], [ 5.1e-05, "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[K" ], [ 6.8e-05, "\u001b[?1h\u001b=" ], [ 0.000306, "\u001b[?2004h" ], [ 1.028139, "\u001b[1m\u001b[31ms\u001b[0m\u001b[39m" ], [ 0.136555, "\b\u001b[0m\u001b[32ms\u001b[32mu\u001b[39m" ], [ 0.115701, "\b\b\u001b[1m\u001b[31ms\u001b[1m\u001b[31mu\u001b[1m\u001b[31md\u001b[0m\u001b[39m" ], [ 0.151048, "\b\b\b\u001b[0m\u001b[4m\u001b[32ms\u001b[0m\u001b[4m\u001b[32mu\u001b[0m\u001b[4m\u001b[32md\u001b[4m\u001b[32mo\u001b[24m\u001b[39m" ], [ 0.276036, " \u001b[32mchown\u001b[39m root:root \u001b[4m/usr/local/bin/borg\u001b[24m" ], [ 0.284509, "\u001b[34D\u001b[32mp\u001b[39m\u001b[39m \u001b[39m\u001b[4mb\u001b[39m\u001b[4mo\u001b[4mr\u001b[4mg\u001b[4m-\u001b[4ml\u001b[4mi\u001b[4mn\u001b[4mu\u001b[4mx\u001b[4m6\u001b[4m4\u001b[24m\u001b[20C" ], [ 0.422112, "\u001b[34D\u001b[32mh\u001b[32mo\u001b[24m\u001b[32mw\u001b[24m\u001b[32mn\u001b[39m\u001b[24m \u001b[24mr\u001b[24mo\u001b[24mo\u001b[24mt\u001b[24m:\u001b[24mr\u001b[24mo\u001b[24mo\u001b[24mt\u001b[20C" ], [ 0.616462, "\u001b[33D\u001b[32mm\u001b[32mo\u001b[32md\u001b[39m 755\u001b[6P\u001b[20C \b\b\b\b\b\b" ], [ 1.090337, "\u001b[?1l\u001b>" ], [ 0.00101, "\u001b[?2004l\r\r\n" ], [ 0.000655, "\u001b]2;sudo chmod 755 /usr/local/bin/borg\u0007" ], [ 1.8e-05, "\u001b]1;chmod\u0007" ], [ 0.009932, "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" ], [ 0.000124, "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[K" ], [ 5.9e-05, "\u001b[?1h\u001b=" ], [ 0.000237, "\u001b[?2004h" ], [ 3.613554, "\u001b[1m#\u001b[0m\u001b[39m" ], [ 0.305561, "\b\u001b[1m#\u001b[1m \u001b[0m\u001b[39m" ], [ 0.451533, "\b\b\u001b[1m#\u001b[1m \u001b[1mN\u001b[0m\u001b[39m" ], [ 0.199295, "\b\u001b[1mN\u001b[1mo\u001b[0m\u001b[39m" ], [ 0.134017, "\b\u001b[1mo\u001b[1mw\u001b[0m\u001b[39m" ], [ 0.232574, "\b\u001b[1mw\u001b[1m \u001b[0m\u001b[39m" ], [ 0.283449, "\b\u001b[1m \u001b[1mc\u001b[0m\u001b[39m" ], [ 0.156927, "\b\u001b[1mc\u001b[1mh\u001b[0m\u001b[39m" ], [ 0.100718, "\b\u001b[1mh\u001b[1me\u001b[0m\u001b[39m" ], [ 0.145048, "\b\u001b[1me\u001b[1mc\u001b[0m\u001b[39m" ], [ 0.238223, "\b\u001b[1mc\u001b[1mk\u001b[0m\u001b[39m" ], [ 0.145393, "\b\u001b[1mk\u001b[1m \u001b[0m\u001b[39m" ], [ 0.195514, "\b\u001b[1m \u001b[1mi\u001b[0m\u001b[39m" ], [ 0.190153, "\b\u001b[1mi\u001b[1mt\u001b[0m\u001b[39m" ], [ 1.202922, "\b\u001b[1mt\u001b[1m:\u001b[0m\u001b[39m" ], [ 0.17572, "\b\u001b[1m:\u001b[1m \u001b[0m\u001b[39m" ], [ 0.209752, "\b\u001b[1m \u001b[1m(\u001b[0m\u001b[39m" ], [ 0.266264, "\b\u001b[1m(\u001b[1mp\u001b[0m\u001b[39m" ], [ 0.136174, "\b\u001b[1mp\u001b[1mo\u001b[0m\u001b[39m" ], [ 0.136549, "\b\u001b[1mo\u001b[1ms\u001b[0m\u001b[39m" ], [ 0.157321, "\b\u001b[1ms\u001b[1ms\u001b[0m\u001b[39m" ], [ 0.134812, "\b\u001b[1ms\u001b[1mi\u001b[0m\u001b[39m" ], [ 0.177707, "\b\u001b[1mi\u001b[1mb\u001b[0m\u001b[39m" ], [ 0.184458, "\b\u001b[1mb\u001b[1ml\u001b[0m\u001b[39m" ], [ 0.104718, "\b\u001b[1ml\u001b[1my\u001b[0m\u001b[39m" ], [ 0.132476, "\b\u001b[1my\u001b[1m \u001b[0m\u001b[39m" ], [ 0.14269, "\b\u001b[1m \u001b[1mn\u001b[0m\u001b[39m" ], [ 0.109627, "\b\u001b[1mn\u001b[1me\u001b[0m\u001b[39m" ], [ 0.150487, "\b\u001b[1me\u001b[1me\u001b[0m\u001b[39m" ], [ 
0.202663, "\b\u001b[1me\u001b[1md\u001b[0m\u001b[39m" ], [ 0.12975, "\b\u001b[1md\u001b[1ms\u001b[0m\u001b[39m" ], [ 0.095469, "\b\u001b[1ms\u001b[1m \u001b[0m\u001b[39m" ], [ 0.160511, "\b\u001b[1m \u001b[1ma\u001b[0m\u001b[39m" ], [ 0.149495, "\b\u001b[1ma\u001b[1m \u001b[0m\u001b[39m" ], [ 0.189727, "\b\u001b[1m \u001b[1mt\u001b[0m\u001b[39m" ], [ 0.098768, "\b\u001b[1mt\u001b[1me\u001b[0m\u001b[39m" ], [ 0.125099, "\b\u001b[1me\u001b[1mr\u001b[0m\u001b[39m" ], [ 0.077112, "\b\u001b[1mr\u001b[1mm\u001b[0m\u001b[39m" ], [ 0.147886, "\b\u001b[1mm\u001b[1mi\u001b[0m\u001b[39m" ], [ 0.124366, "\b\u001b[1mi\u001b[1mn\u001b[0m\u001b[39m" ], [ 0.088118, "\b\u001b[1mn\u001b[1ma\u001b[0m\u001b[39m" ], [ 0.116281, "\b\u001b[1ma\u001b[1ml\u001b[0m\u001b[39m" ], [ 0.146487, "\b\u001b[1ml\u001b[1m \u001b[0m\u001b[39m" ], [ 0.156764, "\b\u001b[1m \u001b[1mr\u001b[0m\u001b[39m" ], [ 0.195688, "\b\u001b[1mr\u001b[1me\u001b[0m\u001b[39m" ], [ 0.40621, "\b\u001b[1me\u001b[1ms\u001b[0m\u001b[39m" ], [ 0.263813, "\b\u001b[1ms\u001b[1mt\u001b[0m\u001b[39m" ], [ 0.087475, "\b\u001b[1mt\u001b[1ma\u001b[0m\u001b[39m" ], [ 0.090176, "\b\u001b[1ma\u001b[1mr\u001b[0m\u001b[39m" ], [ 0.12059, "\b\u001b[1mr\u001b[1mt\u001b[0m\u001b[39m" ], [ 0.232423, "\b\u001b[1mt\u001b[1m)\u001b[0m\u001b[39m" ], [ 1.383975, "\u001b[?1l\u001b>" ], [ 0.000692, "\u001b[?2004l\r\r\n" ], [ 0.001339, "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" ], [ 0.000147, "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[K" ], [ 6.6e-05, "\u001b[?1h\u001b=" ], [ 0.000558, "\u001b[?2004h" ], [ 1.045406, "\u001b[4mb\u001b[24m" ], [ 0.163217, "\b\u001b[4mb\u001b[4mo\u001b[24m" ], [ 0.131464, "\b\b\u001b[4mb\u001b[4mo\u001b[4mr\u001b[24m" ], [ 0.103279, "\b\b\b\u001b[24m\u001b[32mb\u001b[24m\u001b[32mo\u001b[24m\u001b[32mr\u001b[32mg\u001b[39m" ], [ 0.181118, " " ], [ 0.440449, "-" ], [ 0.186299, "V" ], [ 0.522054, "\u001b[?1l\u001b>" ], [ 0.000643, "\u001b[?2004l\r\r\n" ], [ 0.000967, "\u001b]2;borg -V\u0007\u001b]1;borg\u0007" ], [ 0.426128, "borg 1.1.0b6\r\n" ], [ 0.040916, "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" ], [ 0.000101, "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[K" ], [ 6.5e-05, "\u001b[?1h\u001b=" ], [ 0.000176, "\u001b[?2004h" ], [ 1.92655, "\u001b[1m#\u001b[0m\u001b[39m" ], [ 0.247681, "\b\u001b[1m#\u001b[1m \u001b[0m\u001b[39m" ], [ 0.233391, "\b\b\u001b[1m#\u001b[1m \u001b[1mT\u001b[0m\u001b[39m" ], [ 0.127191, "\b\u001b[1mT\u001b[1mh\u001b[0m\u001b[39m" ], [ 0.023053, "\b\u001b[1mh\u001b[1ma\u001b[0m\u001b[39m" ], [ 0.155649, "\b\u001b[1ma\u001b[1mt\u001b[0m\u001b[39m" ], [ 0.3483, "\b\u001b[1mt\u001b[1m's it! Check out the other screencasts to see how to actually use borg\u001b[1mb\u001b[1mackup.\u001b[0m\u001b[39m\u001b[K" ], [ 1.701253, "\u001b[?1l\u001b>" ], [ 0.000707, "\u001b[?2004l\r\r\n" ], [ 0.000682, "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" ], [ 1.1e-05, "\u001b]1;..lder/borgdemo\u0007" ], [ 5.7e-05, "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[K" ], [ 6.8e-05, "\u001b[?1h\u001b=" ], [ 0.000284, "\u001b[?2004h" ], [ 1.579085, "\u001b[?2004l\r\r\n" ] ] } borgbackup-1.1.15/docs/misc/asciinema/advanced.sh0000644000175000017500000000533313771325506021626 0ustar useruser00000000000000# For the pro users, here are some advanced features of borg, so you can impress your friends. ;) # Note: This screencast was made with borg version 1.1.0 – older or newer borg versions may behave differently. # First of all, we can use several environment variables for borg. # E.g. 
we do not want to type in our repo path and password again and again… export BORG_REPO='/media/backup/borgdemo' export BORG_PASSPHRASE='1234' # Problem solved, borg will use this automatically… :) # We'll use this right away… ## ADVANCED CREATION ## # We can also use some placeholders in our archive name… borg create --stats --progress --compression lz4 ::{user}-{now} Wallpaper # Notice the backup name. # And we can put completely different data, with different backup settings, in our backup. It will be deduplicated, anyway: borg create --stats --progress --compression zlib,6 --exclude ~/Downloads/big ::{user}-{now} ~/Downloads # Or let's backup a device via STDIN. sudo dd if=/dev/loop0 bs=10M | borg create --progress --stats ::specialbackup - # Let's continue with some simple things: ## USEFUL COMMANDS ## # You can show some information about an archive. You can even do it without needing to specify the archive name: borg info :: --last 1 # So let's rename our last archive: borg rename ::specialbackup backup-block-device borg info :: --last 1 # A very important step if you choose keyfile mode (where the keyfile is only saved locally) is to export your keyfile and possibly print it, etc. borg key export :: --qr-code file.html # this creates a nice HTML, but when you want something simpler… < remove comment > < let there: borg check > --paper # this is a "manual input"-only backup (but it is also included in the --qr-code option) ## MAINTENANCE ## # Sometimes backups get broken or we want a regular "checkup" that everything is okay… borg check -v :: # Next problem: Usually you do not have infinite disk space. So you may need to prune your archive… # You can tune this in every detail. See the docs for details. Here only a simple example: borg prune --list --keep-last 1 --dry-run # When actually executing it in a script, you have to use it without the --dry-run option, of course. ## RESTORE ## # When you want to see the diff between two archives use this command. # E.g. what happened between the first two backups? borg diff ::backup1 backup2 # Ah, we added a file, right… # There are also other ways to extract the data. # E.g. as a tar archive. borg export-tar --progress ::backup2 backup.tar.gz ls -l # You can mount an archive or even the whole repository: mkdir /tmp/mount borg mount :: /tmp/mount ls -la /tmp/mount borg umount /tmp/mount # That's it, but of course there is more to explore, so have a look at the docs. borgbackup-1.1.15/docs/misc/asciinema/install.sh0000644000175000017500000000173613771325506021532 0ustar useruser00000000000000# This asciinema will show you the installation of borg as a standalone binary. Usually you only need this if you want to have an up-to-date version of borg or no package is available for your distro/OS. # First, we need to download the version, we'd like to install… wget -q --show-progress https://github.com/borgbackup/borg/releases/download/1.1.0b6/borg-linux64 # and do not forget the GPG signature…! wget -q --show-progress https://github.com/borgbackup/borg/releases/download/1.1.0b6/borg-linux64.asc # In this case, we have already imported the public key of a borg developer. So we only need to verify it: gpg --verify borg-linux64.asc # Okay, the binary is valid! # Now install it: sudo cp borg-linux64 /usr/local/bin/borg sudo chown root:root /usr/local/bin/borg # and make it executable… sudo chmod 755 /usr/local/bin/borg # Now check it: (possibly needs a terminal restart) borg -V # That's it! 
Check out the other screencasts to see how to actually use borgbackup. borgbackup-1.1.15/docs/misc/prune-example.txt0000644000175000017500000000713313771325506021117 0ustar useruser00000000000000borg prune visualized ===================== Assume it is 2016-01-01, today's backup has not yet been made and you have created at least one backup on each day in 2015 except on 2015-12-19 (no backup made on that day). This is what borg prune --keep-daily 14 --keep-monthly 6 would keep. Backups kept by the --keep-daily rule are marked by a "d" to the right, backups kept by the --keep-monthly rule are marked by a "m" to the right. Calendar view ------------- 2015 January February March Mo Tu We Th Fr Sa Su Mo Tu We Th Fr Sa Su Mo Tu We Th Fr Sa Su 1 2 3 4 1 1 5 6 7 8 9 10 11 2 3 4 5 6 7 8 2 3 4 5 6 7 8 12 13 14 15 16 17 18 9 10 11 12 13 14 15 9 10 11 12 13 14 15 19 20 21 22 23 24 25 16 17 18 19 20 21 22 16 17 18 19 20 21 22 26 27 28 29 30 31 23 24 25 26 27 28 23 24 25 26 27 28 29 30 31 April May June Mo Tu We Th Fr Sa Su Mo Tu We Th Fr Sa Su Mo Tu We Th Fr Sa Su 1 2 3 4 5 1 2 3 1 2 3 4 5 6 7 6 7 8 9 10 11 12 4 5 6 7 8 9 10 8 9 10 11 12 13 14 13 14 15 16 17 18 19 11 12 13 14 15 16 17 15 16 17 18 19 20 21 20 21 22 23 24 25 26 18 19 20 21 22 23 24 22 23 24 25 26 27 28 27 28 29 30 25 26 27 28 29 30 31 29 30m July August September Mo Tu We Th Fr Sa Su Mo Tu We Th Fr Sa Su Mo Tu We Th Fr Sa Su 1 2 3 4 5 1 2 1 2 3 4 5 6 6 7 8 9 10 11 12 3 4 5 6 7 8 9 7 8 9 10 11 12 13 13 14 15 16 17 18 19 10 11 12 13 14 15 16 14 15 16 17 18 19 20 20 21 22 23 24 25 26 17 18 19 20 21 22 23 21 22 23 24 25 26 27 27 28 29 30 31m 24 25 26 27 28 29 30 28 29 30m 31m October November December Mo Tu We Th Fr Sa Su Mo Tu We Th Fr Sa Su Mo Tu We Th Fr Sa Su 1 2 3 4 1 1 2 3 4 5 6 5 6 7 8 9 10 11 2 3 4 5 6 7 8 7 8 9 10 11 12 13 12 13 14 15 16 17 18 9 10 11 12 13 14 15 14 15 16 17d18d19 20d 19 20 21 22 23 24 25 16 17 18 19 20 21 22 21d22d23d24d25d26d27d 26 27 28 29 30 31m 23 24 25 26 27 28 29 28d29d30d31d 30m List view --------- --keep-daily 14 --keep-monthly 6 ------------------------------------------------- 1. 2015-12-31 (2015-12-31 kept by daily rule) 2. 2015-12-30 1. 2015-11-30 3. 2015-12-29 2. 2015-10-31 4. 2015-12-28 3. 2015-09-30 5. 2015-12-27 4. 2015-08-31 6. 2015-12-26 5. 2015-07-31 7. 2015-12-25 6. 2015-06-30 8. 2015-12-24 9. 2015-12-23 10. 2015-12-22 11. 2015-12-21 12. 2015-12-20 (no backup made on 2015-12-19) 13. 2015-12-18 14. 2015-12-17 Notes ----- 2015-12-31 is kept due to the --keep-daily 14 rule (because it is applied first), not due to the --keep-monthly rule. Because of that, the --keep-monthly 6 rule keeps Nov, Oct, Sep, Aug, Jul and Jun. December is not considered for this rule, because that backup was already kept because of the daily rule. 2015-12-17 is kept to satisfy the --keep-daily 14 rule - because no backup was made on 2015-12-19. If a backup had been made on that day, it would not keep the one from 2015-12-17. We did not include yearly, weekly, hourly, minutely or secondly rules to keep this example simple. They all work in basically the same way. The weekly rule is easy to understand roughly, but hard to understand in all details. If interested, read "ISO 8601:2000 standard week-based year". borgbackup-1.1.15/docs/misc/internals-picture.txt0000644000175000017500000000344113771325506022003 0ustar useruser00000000000000BorgBackup from 10.000m ======================= +--------+ +--------+ +--------+ |archive0| |archive1| ... 
|archiveN| +--------+ +--------+ +--+-----+ | | | | | | | +---+ | | | | | | | +------+-------+ | | | | | /chunk\/chunk\/chunk\... /maybe different chunks lists\ +-----------------------------------------------------------------+ |item list | +-----------------------------------------------------------------+ | +-------------------------------------+--------------+ | | | | | | +-------------+ +-------------+ | |item0 | |item1 | | | - owner | | - owner | | | - size | | - size | ... | - ... | | - ... | | - chunks | | - chunks | +----+--------+ +-----+-------+ | | | +-----+----------------------------+-----------------+ | | | | +-o-----o------------+ | | | | | | /chunk0\/chunk1\ ... /chunkN\ /chunk0\/chunk1\ ... /chunkN'\ +-----------------------------+ +------------------------------+ |file0 | |file0' | +-----------------------------+ +------------------------------+ Thanks to anarcat for drawing the picture! borgbackup-1.1.15/docs/man/0000755000175000017500000000000013771325773015416 5ustar useruser00000000000000borgbackup-1.1.15/docs/man/borg-change-passphrase.10000644000175000017500000000303313771325506022014 0ustar useruser00000000000000.\" Man page generated from reStructuredText. . .TH BORG-CHANGE-PASSPHRASE 1 "2020-12-24" "" "borg backup tool" .SH NAME borg-change-passphrase \- Change repository key file passphrase . .nr rst2man-indent-level 0 . .de1 rstReportMargin \\$1 \\n[an-margin] level \\n[rst2man-indent-level] level margin: \\n[rst2man-indent\\n[rst2man-indent-level]] - \\n[rst2man-indent0] \\n[rst2man-indent1] \\n[rst2man-indent2] .. .de1 INDENT .\" .rstReportMargin pre: . RS \\$1 . nr rst2man-indent\\n[rst2man-indent-level] \\n[an-margin] . nr rst2man-indent-level +1 .\" .rstReportMargin post: .. .de UNINDENT . RE .\" indent \\n[an-margin] .\" old: \\n[rst2man-indent\\n[rst2man-indent-level]] .nr rst2man-indent-level -1 .\" new: \\n[rst2man-indent\\n[rst2man-indent-level]] .in \\n[rst2man-indent\\n[rst2man-indent-level]]u .. .SH SYNOPSIS .sp borg [common options] change\-passphrase [options] [REPOSITORY] .SH DESCRIPTION .sp The key files used for repository encryption are optionally passphrase protected. This command can be used to change this passphrase. .sp Please note that this command only changes the passphrase, but not any secret protected by it (like e.g. encryption/MAC keys or chunker seed). Thus, changing the passphrase after passphrase and borg key got compromised does not protect future (nor past) backups to the same repository. .SH OPTIONS .sp See \fIborg\-common(1)\fP for common options of Borg commands. .SS arguments .sp REPOSITORY .SH SEE ALSO .sp \fIborg\-common(1)\fP .SH AUTHOR The Borg Collective .\" Generated by docutils manpage writer. . borgbackup-1.1.15/docs/man/borg-key-export.10000644000175000017500000000466313771325506020541 0ustar useruser00000000000000.\" Man page generated from reStructuredText. . .TH BORG-KEY-EXPORT 1 "2020-12-24" "" "borg backup tool" .SH NAME borg-key-export \- Export the repository key for backup . .nr rst2man-indent-level 0 . .de1 rstReportMargin \\$1 \\n[an-margin] level \\n[rst2man-indent-level] level margin: \\n[rst2man-indent\\n[rst2man-indent-level]] - \\n[rst2man-indent0] \\n[rst2man-indent1] \\n[rst2man-indent2] .. .de1 INDENT .\" .rstReportMargin pre: . RS \\$1 . nr rst2man-indent\\n[rst2man-indent-level] \\n[an-margin] . nr rst2man-indent-level +1 .\" .rstReportMargin post: .. .de UNINDENT . 
RE .\" indent \\n[an-margin] .\" old: \\n[rst2man-indent\\n[rst2man-indent-level]] .nr rst2man-indent-level -1 .\" new: \\n[rst2man-indent\\n[rst2man-indent-level]] .in \\n[rst2man-indent\\n[rst2man-indent-level]]u .. .SH SYNOPSIS .sp borg [common options] key export [options] [REPOSITORY] [PATH] .SH DESCRIPTION .sp If repository encryption is used, the repository is inaccessible without the key. This command allows to backup this essential key. Note that the backup produced does not include the passphrase itself (i.e. the exported key stays encrypted). In order to regain access to a repository, one needs both the exported key and the original passphrase. .sp There are two backup formats. The normal backup format is suitable for digital storage as a file. The \fB\-\-paper\fP backup format is optimized for printing and typing in while importing, with per line checks to reduce problems with manual input. .sp For repositories using keyfile encryption the key is saved locally on the system that is capable of doing backups. To guard against loss of this key, the key needs to be backed up independently of the main data backup. .sp For repositories using the repokey encryption the key is saved in the repository in the config file. A backup is thus not strictly needed, but guards against the repository becoming inaccessible if the file is damaged for some reason. .SH OPTIONS .sp See \fIborg\-common(1)\fP for common options of Borg commands. .SS arguments .sp REPOSITORY .INDENT 0.0 .TP .B PATH where to store the backup .UNINDENT .SS optional arguments .INDENT 0.0 .TP .B \-\-paper Create an export suitable for printing and later type\-in .TP .B \-\-qr\-html Create an html file suitable for printing and later type\-in or qr scan .UNINDENT .SH SEE ALSO .sp \fIborg\-common(1)\fP, \fIborg\-key\-import(1)\fP .SH AUTHOR The Borg Collective .\" Generated by docutils manpage writer. . borgbackup-1.1.15/docs/man/borg-check.10000644000175000017500000001173013771325506017500 0ustar useruser00000000000000.\" Man page generated from reStructuredText. . .TH BORG-CHECK 1 "2020-12-24" "" "borg backup tool" .SH NAME borg-check \- Check repository consistency . .nr rst2man-indent-level 0 . .de1 rstReportMargin \\$1 \\n[an-margin] level \\n[rst2man-indent-level] level margin: \\n[rst2man-indent\\n[rst2man-indent-level]] - \\n[rst2man-indent0] \\n[rst2man-indent1] \\n[rst2man-indent2] .. .de1 INDENT .\" .rstReportMargin pre: . RS \\$1 . nr rst2man-indent\\n[rst2man-indent-level] \\n[an-margin] . nr rst2man-indent-level +1 .\" .rstReportMargin post: .. .de UNINDENT . RE .\" indent \\n[an-margin] .\" old: \\n[rst2man-indent\\n[rst2man-indent-level]] .nr rst2man-indent-level -1 .\" new: \\n[rst2man-indent\\n[rst2man-indent-level]] .in \\n[rst2man-indent\\n[rst2man-indent-level]]u .. .SH SYNOPSIS .sp borg [common options] check [options] [REPOSITORY_OR_ARCHIVE] .SH DESCRIPTION .sp The check command verifies the consistency of a repository and the corresponding archives. .sp check \-\-repair is a potentially dangerous function and might lead to data loss (for kinds of corruption it is not capable of dealing with). BE VERY CAREFUL! .sp First, the underlying repository data files are checked: .INDENT 0.0 .IP \(bu 2 For all segments, the segment magic header is checked. .IP \(bu 2 For all objects stored in the segments, all metadata (e.g. CRC and size) and all data is read. The read data is checked by size and CRC. Bit rot and other types of accidental damage can be detected this way. 
.IP \(bu 2 In repair mode, if an integrity error is detected in a segment, try to recover as many objects from the segment as possible. .IP \(bu 2 In repair mode, make sure that the index is consistent with the data stored in the segments. .IP \(bu 2 If checking a remote repo via \fBssh:\fP, the repo check is executed on the server without causing significant network traffic. .IP \(bu 2 The repository check can be skipped using the \fB\-\-archives\-only\fP option. .UNINDENT .sp Second, the consistency and correctness of the archive metadata is verified: .INDENT 0.0 .IP \(bu 2 Is the repo manifest present? If not, it is rebuilt from archive metadata chunks (this requires reading and decrypting of all metadata and data). .IP \(bu 2 Check if archive metadata chunk is present; if not, remove archive from manifest. .IP \(bu 2 For all files (items) in the archive, for all chunks referenced by these files, check if chunk is present. In repair mode, if a chunk is not present, replace it with a same\-size replacement chunk of zeroes. If a previously lost chunk reappears (e.g. via a later backup), in repair mode the all\-zero replacement chunk will be replaced by the correct chunk. This requires reading of archive and file metadata, but not data. .IP \(bu 2 In repair mode, when all the archives were checked, orphaned chunks are deleted from the repo. One cause of orphaned chunks are input file related errors (like read errors) in the archive creation process. .IP \(bu 2 If checking a remote repo via \fBssh:\fP, the archive check is executed on the client machine because it requires decryption, and this is always done client\-side as key access is needed. .IP \(bu 2 The archive checks can be time consuming; they can be skipped using the \fB\-\-repository\-only\fP option. .UNINDENT .sp The \fB\-\-verify\-data\fP option will perform a full integrity verification (as opposed to checking the CRC32 of the segment) of data, which means reading the data from the repository, decrypting and decompressing it. This is a cryptographic verification, which will detect (accidental) corruption. For encrypted repositories it is tamper\-resistant as well, unless the attacker has access to the keys. It is also very slow. .SH OPTIONS .sp See \fIborg\-common(1)\fP for common options of Borg commands. .SS arguments .INDENT 0.0 .TP .B REPOSITORY_OR_ARCHIVE repository or archive to check consistency of .UNINDENT .SS optional arguments .INDENT 0.0 .TP .B \-\-repository\-only only perform repository checks .TP .B \-\-archives\-only only perform archives checks .TP .B \-\-verify\-data perform cryptographic archive data integrity verification (conflicts with \fB\-\-repository\-only\fP) .TP .B \-\-repair attempt to repair any inconsistencies found .TP .B \-\-save\-space work slower, but using less space .UNINDENT .SS Archive filters .INDENT 0.0 .TP .BI \-P \ PREFIX\fR,\fB \ \-\-prefix \ PREFIX only consider archive names starting with this prefix. .TP .BI \-a \ GLOB\fR,\fB \ \-\-glob\-archives \ GLOB only consider archive names matching the glob. sh: rules apply, see "borg help patterns". \fB\-\-prefix\fP and \fB\-\-glob\-archives\fP are mutually exclusive. 
.TP .BI \-\-sort\-by \ KEYS Comma\-separated list of sorting keys; valid keys are: timestamp, name, id; default is: timestamp .TP .BI \-\-first \ N consider first N archives after other filters were applied .TP .BI \-\-last \ N consider last N archives after other filters were applied .UNINDENT .SH SEE ALSO .sp \fIborg\-common(1)\fP .SH AUTHOR The Borg Collective .\" Generated by docutils manpage writer. . borgbackup-1.1.15/docs/man/borg-recreate.10000644000175000017500000001715013771325506020217 0ustar useruser00000000000000.\" Man page generated from reStructuredText. . .TH BORG-RECREATE 1 "2020-12-24" "" "borg backup tool" .SH NAME borg-recreate \- Re-create archives . .nr rst2man-indent-level 0 . .de1 rstReportMargin \\$1 \\n[an-margin] level \\n[rst2man-indent-level] level margin: \\n[rst2man-indent\\n[rst2man-indent-level]] - \\n[rst2man-indent0] \\n[rst2man-indent1] \\n[rst2man-indent2] .. .de1 INDENT .\" .rstReportMargin pre: . RS \\$1 . nr rst2man-indent\\n[rst2man-indent-level] \\n[an-margin] . nr rst2man-indent-level +1 .\" .rstReportMargin post: .. .de UNINDENT . RE .\" indent \\n[an-margin] .\" old: \\n[rst2man-indent\\n[rst2man-indent-level]] .nr rst2man-indent-level -1 .\" new: \\n[rst2man-indent\\n[rst2man-indent-level]] .in \\n[rst2man-indent\\n[rst2man-indent-level]]u .. .SH SYNOPSIS .sp borg [common options] recreate [options] [REPOSITORY_OR_ARCHIVE] [PATH...] .SH DESCRIPTION .sp Recreate the contents of existing archives. .sp recreate is a potentially dangerous function and might lead to data loss (if used wrongly). BE VERY CAREFUL! .sp \fB\-\-exclude\fP, \fB\-\-exclude\-from\fP, \fB\-\-exclude\-if\-present\fP, \fB\-\-keep\-exclude\-tags\fP and PATH have the exact same semantics as in "borg create", but they only check for files in the archives and not in the local file system. If PATHs are specified, the resulting archives will only contain files from these PATHs. .sp Note that all paths in an archive are relative, therefore absolute patterns/paths will \fInot\fP match (\fB\-\-exclude\fP, \fB\-\-exclude\-from\fP, PATHs). .sp \fB\-\-recompress\fP allows to change the compression of existing data in archives. Due to how Borg stores compressed size information this might display incorrect information for archives that were not recreated at the same time. There is no risk of data loss by this. .sp \fB\-\-chunker\-params\fP will re\-chunk all files in the archive, this can be used to have upgraded Borg 0.xx or Attic archives deduplicate with Borg 1.x archives. .sp \fBUSE WITH CAUTION.\fP Depending on the PATHs and patterns given, recreate can be used to permanently delete files from archives. When in doubt, use \fB\-\-dry\-run \-\-verbose \-\-list\fP to see how patterns/PATHS are interpreted. See \fIlist_item_flags\fP in \fBborg create\fP for details. .sp The archive being recreated is only removed after the operation completes. The archive that is built during the operation exists at the same time at ".recreate". The new archive will have a different archive ID. .sp With \fB\-\-target\fP the original archive is not replaced, instead a new archive is created. .sp When rechunking space usage can be substantial, expect at least the entire deduplicated size of the archives using the previous chunker params. When recompressing expect approx. (throughput / checkpoint\-interval) in space usage, assuming all chunks are recompressed. 
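.sp
For a rough estimate (a sketch, not an exact prediction), \fBborg info\fP on the
repository shows the deduplicated size of all archives \- plan for at least
that much additional space while re\-chunking:
.INDENT 0.0
.INDENT 3.5
.sp
.nf
.ft C
# check the "All archives" deduplicated size before re\-chunking
$ borg info /path/to/repo
.ft P
.fi
.UNINDENT
.UNINDENT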
.sp If you recently ran borg check \-\-repair and it had to fix lost chunks with all\-zero replacement chunks, please first run another backup for the same data and re\-run borg check \-\-repair afterwards to heal any archives that had lost chunks which are still generated from the input data. .sp Important: running borg recreate to re\-chunk will remove the chunks_healthy metadata of all items with replacement chunks, so healing will not be possible any more after re\-chunking (it is also unlikely it would ever work: due to the change of chunking parameters, the missing chunk likely will never be seen again even if you still have the data that produced it). .SH OPTIONS .sp See \fIborg\-common(1)\fP for common options of Borg commands. .SS arguments .INDENT 0.0 .TP .B REPOSITORY_OR_ARCHIVE repository or archive to recreate .TP .B PATH paths to recreate; patterns are supported .UNINDENT .SS optional arguments .INDENT 0.0 .TP .B \-\-list output verbose list of items (files, dirs, ...) .TP .BI \-\-filter \ STATUSCHARS only display items with the given status characters (listed in borg create \-\-help) .TP .B \-n\fP,\fB \-\-dry\-run do not change anything .TP .B \-s\fP,\fB \-\-stats print statistics at end .UNINDENT .SS Exclusion options .INDENT 0.0 .TP .BI \-e \ PATTERN\fR,\fB \ \-\-exclude \ PATTERN exclude paths matching PATTERN .TP .BI \-\-exclude\-from \ EXCLUDEFILE read exclude patterns from EXCLUDEFILE, one per line .TP .BI \-\-pattern \ PATTERN experimental: include/exclude paths matching PATTERN .TP .BI \-\-patterns\-from \ PATTERNFILE experimental: read include/exclude patterns from PATTERNFILE, one per line .TP .B \-\-exclude\-caches exclude directories that contain a CACHEDIR.TAG file (\fI\%http://www.bford.info/cachedir/spec.html\fP) .TP .BI \-\-exclude\-if\-present \ NAME exclude directories that are tagged by containing a filesystem object with the given NAME .TP .B \-\-keep\-exclude\-tags\fP,\fB \-\-keep\-tag\-files if tag objects are specified with \fB\-\-exclude\-if\-present\fP, don\(aqt omit the tag objects themselves from the backup archive .UNINDENT .SS Archive options .INDENT 0.0 .TP .BI \-\-target \ TARGET create a new archive with the name ARCHIVE, do not replace existing archive (only applies for a single archive) .TP .BI \-c \ SECONDS\fR,\fB \ \-\-checkpoint\-interval \ SECONDS write checkpoint every SECONDS seconds (Default: 1800) .TP .BI \-\-comment \ COMMENT add a comment text to the archive .TP .BI \-\-timestamp \ TIMESTAMP manually specify the archive creation date/time (UTC, yyyy\-mm\-ddThh:mm:ss format). alternatively, give a reference file/directory. .TP .BI \-C \ COMPRESSION\fR,\fB \ \-\-compression \ COMPRESSION select compression algorithm, see the output of the "borg help compression" command for details. .TP .BI \-\-recompress \ MODE recompress data chunks according to \fB\-\-compression\fP\&. MODE \fIif\-different\fP: recompress if current compression is with a different compression algorithm (the level is not considered). MODE \fIalways\fP: recompress even if current compression is with the same compression algorithm (use this to change the compression level). MODE \fInever\fP (default): do not recompress. .TP .BI \-\-chunker\-params \ PARAMS specify the chunker parameters (CHUNK_MIN_EXP, CHUNK_MAX_EXP, HASH_MASK_BITS, HASH_WINDOW_SIZE) or \fIdefault\fP to use the current defaults. default: 19,23,21,4095 .UNINDENT .SH EXAMPLES .INDENT 0.0 .INDENT 3.5 .sp .nf .ft C # Make old (Attic / Borg 0.xx) archives deduplicate with Borg 1.x archives. 
# Archives created with Borg 1.1+ and the default chunker params are skipped # (archive ID stays the same). $ borg recreate /mnt/backup \-\-chunker\-params default \-\-progress # Create a backup with little but fast compression $ borg create /mnt/backup::archive /some/files \-\-compression lz4 # Then compress it \- this might take longer, but the backup has already completed, # so no inconsistencies from a long\-running backup job. $ borg recreate /mnt/backup::archive \-\-recompress \-\-compression zlib,9 # Remove unwanted files from all archives in a repository. # Note the relative path for the \-\-exclude option \- archives only contain relative paths. $ borg recreate /mnt/backup \-\-exclude home/icke/Pictures/drunk_photos # Change archive comment $ borg create \-\-comment "This is a comment" /mnt/backup::archivename ~ $ borg info /mnt/backup::archivename Name: archivename Fingerprint: ... Comment: This is a comment \&... $ borg recreate \-\-comment "This is a better comment" /mnt/backup::archivename $ borg info /mnt/backup::archivename Name: archivename Fingerprint: ... Comment: This is a better comment \&... .ft P .fi .UNINDENT .UNINDENT .SH SEE ALSO .sp \fIborg\-common(1)\fP, \fIborg\-patterns(1)\fP, \fIborg\-placeholders(1)\fP, \fIborg\-compression(1)\fP .SH AUTHOR The Borg Collective .\" Generated by docutils manpage writer. . borgbackup-1.1.15/docs/man/borg-benchmark.10000644000175000017500000000203213771325506020350 0ustar useruser00000000000000.\" Man page generated from reStructuredText. . .TH BORG-BENCHMARK 1 "2020-12-24" "" "borg backup tool" .SH NAME borg-benchmark \- benchmark command . .nr rst2man-indent-level 0 . .de1 rstReportMargin \\$1 \\n[an-margin] level \\n[rst2man-indent-level] level margin: \\n[rst2man-indent\\n[rst2man-indent-level]] - \\n[rst2man-indent0] \\n[rst2man-indent1] \\n[rst2man-indent2] .. .de1 INDENT .\" .rstReportMargin pre: . RS \\$1 . nr rst2man-indent\\n[rst2man-indent-level] \\n[an-margin] . nr rst2man-indent-level +1 .\" .rstReportMargin post: .. .de UNINDENT . RE .\" indent \\n[an-margin] .\" old: \\n[rst2man-indent\\n[rst2man-indent-level]] .nr rst2man-indent-level -1 .\" new: \\n[rst2man-indent\\n[rst2man-indent-level]] .in \\n[rst2man-indent\\n[rst2man-indent-level]]u .. .SH SYNOPSIS .nf borg [common options] benchmark crud ... .fi .sp .SH DESCRIPTION .sp These commands do various benchmarks. .SH SEE ALSO .sp \fIborg\-common(1)\fP, \fIborg\-benchmark\-crud(1)\fP .SH AUTHOR The Borg Collective .\" Generated by docutils manpage writer. . borgbackup-1.1.15/docs/man/borg-upgrade.10000644000175000017500000001363113771325506020054 0ustar useruser00000000000000.\" Man page generated from reStructuredText. . .TH BORG-UPGRADE 1 "2020-12-24" "" "borg backup tool" .SH NAME borg-upgrade \- upgrade a repository from a previous version . .nr rst2man-indent-level 0 . .de1 rstReportMargin \\$1 \\n[an-margin] level \\n[rst2man-indent-level] level margin: \\n[rst2man-indent\\n[rst2man-indent-level]] - \\n[rst2man-indent0] \\n[rst2man-indent1] \\n[rst2man-indent2] .. .de1 INDENT .\" .rstReportMargin pre: . RS \\$1 . nr rst2man-indent\\n[rst2man-indent-level] \\n[an-margin] . nr rst2man-indent-level +1 .\" .rstReportMargin post: .. .de UNINDENT . RE .\" indent \\n[an-margin] .\" old: \\n[rst2man-indent\\n[rst2man-indent-level]] .nr rst2man-indent-level -1 .\" new: \\n[rst2man-indent\\n[rst2man-indent-level]] .in \\n[rst2man-indent\\n[rst2man-indent-level]]u .. 
.SH SYNOPSIS .sp borg [common options] upgrade [options] [REPOSITORY] .SH DESCRIPTION .sp Upgrade an existing, local Borg repository. .SS When you do not need borg upgrade .sp Not every change requires that you run \fBborg upgrade\fP\&. .sp You do \fBnot\fP need to run it when: .INDENT 0.0 .IP \(bu 2 moving your repository to a different place .IP \(bu 2 upgrading to another point release (like 1.0.x to 1.0.y), except when noted otherwise in the changelog .IP \(bu 2 upgrading from 1.0.x to 1.1.x, except when noted otherwise in the changelog .UNINDENT .SS Borg 1.x.y upgrades .sp Use \fBborg upgrade \-\-tam REPO\fP to require manifest authentication introduced with Borg 1.0.9 to address security issues. This means that modifying the repository after doing this with a version prior to 1.0.9 will raise a validation error, so only perform this upgrade after updating all clients using the repository to 1.0.9 or newer. .sp This upgrade should be done on each client for safety reasons. .sp If a repository is accidentally modified with a pre\-1.0.9 client after this upgrade, use \fBborg upgrade \-\-tam \-\-force REPO\fP to remedy it. .sp If you routinely do this you might not want to enable this upgrade (which will leave you exposed to the security issue). You can reverse the upgrade by issuing \fBborg upgrade \-\-disable\-tam REPO\fP\&. .sp See \fI\%https://borgbackup.readthedocs.io/en/stable/changes.html#pre\-1\-0\-9\-manifest\-spoofing\-vulnerability\fP for details. .SS Attic and Borg 0.xx to Borg 1.x .sp This currently supports converting an Attic repository to Borg and also helps with converting Borg 0.xx to 1.0. .sp Currently, only LOCAL repositories can be upgraded (issue #465). .sp Please note that \fBborg create\fP (since 1.0.0) uses bigger chunks by default than old borg or attic did, so the new chunks won\(aqt deduplicate with the old chunks in the upgraded repository. See \fB\-\-chunker\-params\fP option of \fBborg create\fP and \fBborg recreate\fP\&. .sp \fBborg upgrade\fP will change the magic strings in the repository\(aqs segments to match the new Borg magic strings. The keyfiles found in $ATTIC_KEYS_DIR or ~/.attic/keys/ will also be converted and copied to $BORG_KEYS_DIR or ~/.config/borg/keys. .sp The cache files are converted, from $ATTIC_CACHE_DIR or ~/.cache/attic to $BORG_CACHE_DIR or ~/.cache/borg, but the cache layout between Borg and Attic changed, so it is possible the first backup after the conversion takes longer than expected due to the cache resync. .sp Upgrade should be able to resume if interrupted, although it will still iterate over all segments. If you want to start from scratch, use \fIborg delete\fP over the copied repository to make sure the cache files are also removed: .INDENT 0.0 .INDENT 3.5 .sp .nf .ft C borg delete borg .ft P .fi .UNINDENT .UNINDENT .sp Unless \fB\-\-inplace\fP is specified, the upgrade process first creates a backup copy of the repository, in REPOSITORY.before\-upgrade\-DATETIME, using hardlinks. This requires that the repository and its parent directory reside on same filesystem so the hardlink copy can work. This takes longer than in place upgrades, but is much safer and gives progress information (as opposed to \fBcp \-al\fP). Once you are satisfied with the conversion, you can safely destroy the backup copy. .sp WARNING: Running the upgrade in place will make the current copy unusable with older version, with no way of going back to previous versions. This can PERMANENTLY DAMAGE YOUR REPOSITORY! 
Attic CAN NOT READ BORG REPOSITORIES, as the magic strings have changed. You have been warned. .SH OPTIONS .sp See \fIborg\-common(1)\fP for common options of Borg commands. .SS arguments .INDENT 0.0 .TP .B REPOSITORY path to the repository to be upgraded .UNINDENT .SS optional arguments .INDENT 0.0 .TP .B \-n\fP,\fB \-\-dry\-run do not change repository .TP .B \-\-inplace rewrite repository in place, with no chance of going back to older versions of the repository. .TP .B \-\-force Force upgrade .TP .B \-\-tam Enable manifest authentication (in key and cache) (Borg 1.0.9 and later). .TP .B \-\-disable\-tam Disable manifest authentication (in key and cache). .UNINDENT .SH EXAMPLES .INDENT 0.0 .INDENT 3.5 .sp .nf .ft C # Upgrade the borg repository to the most recent version. $ borg upgrade \-v /path/to/repo making a hardlink copy in /path/to/repo.before\-upgrade\-2016\-02\-15\-20:51:55 opening attic repository with borg and converting no key file found for repository converting repo index /path/to/repo/index.0 converting 1 segments... converting borg 0.xx to borg current no key file found for repository .ft P .fi .UNINDENT .UNINDENT .SS Upgrading a passphrase encrypted attic repo .sp attic offered a "passphrase" encryption mode, but this was removed in borg 1.0 and replaced by the "repokey" mode (which stores the passphrase\-protected encryption key into the repository config). .sp Thus, to upgrade a "passphrase" attic repo to a "repokey" borg repo, 2 steps are needed, in this order: .INDENT 0.0 .IP \(bu 2 borg upgrade repo .IP \(bu 2 borg key migrate\-to\-repokey repo .UNINDENT .SH SEE ALSO .sp \fIborg\-common(1)\fP .SH AUTHOR The Borg Collective .\" Generated by docutils manpage writer. . borgbackup-1.1.15/docs/man/borg-with-lock.10000644000175000017500000000365213771325506020330 0ustar useruser00000000000000.\" Man page generated from reStructuredText. . .TH BORG-WITH-LOCK 1 "2020-12-24" "" "borg backup tool" .SH NAME borg-with-lock \- run a user specified command with the repository lock held . .nr rst2man-indent-level 0 . .de1 rstReportMargin \\$1 \\n[an-margin] level \\n[rst2man-indent-level] level margin: \\n[rst2man-indent\\n[rst2man-indent-level]] - \\n[rst2man-indent0] \\n[rst2man-indent1] \\n[rst2man-indent2] .. .de1 INDENT .\" .rstReportMargin pre: . RS \\$1 . nr rst2man-indent\\n[rst2man-indent-level] \\n[an-margin] . nr rst2man-indent-level +1 .\" .rstReportMargin post: .. .de UNINDENT . RE .\" indent \\n[an-margin] .\" old: \\n[rst2man-indent\\n[rst2man-indent-level]] .nr rst2man-indent-level -1 .\" new: \\n[rst2man-indent\\n[rst2man-indent-level]] .in \\n[rst2man-indent\\n[rst2man-indent-level]]u .. .SH SYNOPSIS .sp borg [common options] with\-lock [options] REPOSITORY COMMAND [ARGS...] .SH DESCRIPTION .sp This command runs a user\-specified command while the repository lock is held. .sp It will first try to acquire the lock (make sure that no other operation is running in the repo), then execute the given command as a subprocess and wait for its termination, release the lock and return the user command\(aqs return code as borg\(aqs return code. .sp \fBNOTE:\fP .INDENT 0.0 .INDENT 3.5 If you copy a repository with the lock held, the lock will be present in the copy. Thus, before using borg on the copy from a different host, you need to use "borg break\-lock" on the copied repository, because Borg is cautious and does not automatically remove stale locks made by a different host. 
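.sp
For example (a minimal sketch): copy the repository while the lock is held,
then clear the stale lock that was copied along before using the copy from
another host:
.INDENT 0.0
.INDENT 3.5
.sp
.nf
.ft C
# hold the repo lock while making the copy, then remove the copied lock
$ borg with\-lock /path/to/repo cp \-a /path/to/repo /mnt/copy/repo
$ borg break\-lock /mnt/copy/repo
.ft P
.fi
.UNINDENT
.UNINDENT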
.UNINDENT .UNINDENT .SH OPTIONS .sp See \fIborg\-common(1)\fP for common options of Borg commands. .SS arguments .INDENT 0.0 .TP .B REPOSITORY repository to lock .TP .B COMMAND command to run .TP .B ARGS command arguments .UNINDENT .SH SEE ALSO .sp \fIborg\-common(1)\fP .SH AUTHOR The Borg Collective .\" Generated by docutils manpage writer. . borgbackup-1.1.15/docs/man/borgfs.10000644000175000017500000001072513771325506016761 0ustar useruser00000000000000.\" Man page generated from reStructuredText. . .TH BORGFS 1 "2020-12-24" "" "borg backup tool" .SH NAME borgfs \- Mount archive or an entire repository as a FUSE filesystem . .nr rst2man-indent-level 0 . .de1 rstReportMargin \\$1 \\n[an-margin] level \\n[rst2man-indent-level] level margin: \\n[rst2man-indent\\n[rst2man-indent-level]] - \\n[rst2man-indent0] \\n[rst2man-indent1] \\n[rst2man-indent2] .. .de1 INDENT .\" .rstReportMargin pre: . RS \\$1 . nr rst2man-indent\\n[rst2man-indent-level] \\n[an-margin] . nr rst2man-indent-level +1 .\" .rstReportMargin post: .. .de UNINDENT . RE .\" indent \\n[an-margin] .\" old: \\n[rst2man-indent\\n[rst2man-indent-level]] .nr rst2man-indent-level -1 .\" new: \\n[rst2man-indent\\n[rst2man-indent-level]] .in \\n[rst2man-indent\\n[rst2man-indent-level]]u .. .SH SYNOPSIS .sp borgfs [options] REPOSITORY_OR_ARCHIVE MOUNTPOINT [PATH...] .SH DESCRIPTION .sp This command mounts an archive as a FUSE filesystem. This can be useful for browsing an archive or restoring individual files. Unless the \fB\-\-foreground\fP option is given the command will run in the background until the filesystem is \fBumounted\fP\&. .sp The command \fBborgfs\fP provides a wrapper for \fBborg mount\fP\&. This can also be used in fstab entries: \fB/path/to/repo /mnt/point fuse.borgfs defaults,noauto 0 0\fP .sp To allow a regular user to use fstab entries, add the \fBuser\fP option: \fB/path/to/repo /mnt/point fuse.borgfs defaults,noauto,user 0 0\fP .sp For FUSE configuration and mount options, see the mount.fuse(8) manual page. .sp Additional mount options supported by borg: .INDENT 0.0 .IP \(bu 2 versions: when used with a repository mount, this gives a merged, versioned view of the files in the archives. EXPERIMENTAL, layout may change in future. .IP \(bu 2 allow_damaged_files: by default damaged files (where missing chunks were replaced with runs of zeros by borg check \fB\-\-repair\fP) are not readable and return EIO (I/O error). Set this option to read such files. .IP \(bu 2 ignore_permissions: for security reasons the "default_permissions" mount option is internally enforced by borg. "ignore_permissions" can be given to not enforce "default_permissions". .UNINDENT .sp The BORG_MOUNT_DATA_CACHE_ENTRIES environment variable is meant for advanced users to tweak the performance. It sets the number of cached data chunks; additional memory usage can be up to ~8 MiB times this number. The default is the number of CPU cores. .sp When the daemonized process receives a signal or crashes, it does not unmount. Unmounting in these cases could cause an active rsync or similar process to unintentionally delete data. .sp When running in the foreground ^C/SIGINT unmounts cleanly, but other signals or crashes do not. .SH OPTIONS .sp See \fIborg\-common(1)\fP for common options of Borg commands. 
.SS arguments .INDENT 0.0 .TP .B REPOSITORY_OR_ARCHIVE repository or archive to mount .TP .B MOUNTPOINT where to mount filesystem .TP .B PATH paths to extract; patterns are supported .UNINDENT .SS optional arguments .INDENT 0.0 .TP .B \-V\fP,\fB \-\-version show version number and exit .TP .B \-f\fP,\fB \-\-foreground stay in foreground, do not daemonize .TP .B \-o Extra mount options .UNINDENT .SS Archive filters .INDENT 0.0 .TP .BI \-P \ PREFIX\fR,\fB \ \-\-prefix \ PREFIX only consider archive names starting with this prefix. .TP .BI \-a \ GLOB\fR,\fB \ \-\-glob\-archives \ GLOB only consider archive names matching the glob. sh: rules apply, see "borg help patterns". \fB\-\-prefix\fP and \fB\-\-glob\-archives\fP are mutually exclusive. .TP .BI \-\-sort\-by \ KEYS Comma\-separated list of sorting keys; valid keys are: timestamp, name, id; default is: timestamp .TP .BI \-\-first \ N consider first N archives after other filters were applied .TP .BI \-\-last \ N consider last N archives after other filters were applied .UNINDENT .SS Exclusion options .INDENT 0.0 .TP .BI \-e \ PATTERN\fR,\fB \ \-\-exclude \ PATTERN exclude paths matching PATTERN .TP .BI \-\-exclude\-from \ EXCLUDEFILE read exclude patterns from EXCLUDEFILE, one per line .TP .BI \-\-pattern \ PATTERN experimental: include/exclude paths matching PATTERN .TP .BI \-\-patterns\-from \ PATTERNFILE experimental: read include/exclude patterns from PATTERNFILE, one per line .TP .BI \-\-strip\-components \ NUMBER Remove the specified number of leading path elements. Paths with fewer elements will be silently skipped. .UNINDENT .SH SEE ALSO .sp \fIborg\-common(1)\fP .SH AUTHOR The Borg Collective .\" Generated by docutils manpage writer. . borgbackup-1.1.15/docs/man/borg-delete.10000644000175000017500000000647513771325506017677 0ustar useruser00000000000000.\" Man page generated from reStructuredText. . .TH BORG-DELETE 1 "2020-12-24" "" "borg backup tool" .SH NAME borg-delete \- Delete an existing repository or archives . .nr rst2man-indent-level 0 . .de1 rstReportMargin \\$1 \\n[an-margin] level \\n[rst2man-indent-level] level margin: \\n[rst2man-indent\\n[rst2man-indent-level]] - \\n[rst2man-indent0] \\n[rst2man-indent1] \\n[rst2man-indent2] .. .de1 INDENT .\" .rstReportMargin pre: . RS \\$1 . nr rst2man-indent\\n[rst2man-indent-level] \\n[an-margin] . nr rst2man-indent-level +1 .\" .rstReportMargin post: .. .de UNINDENT . RE .\" indent \\n[an-margin] .\" old: \\n[rst2man-indent\\n[rst2man-indent-level]] .nr rst2man-indent-level -1 .\" new: \\n[rst2man-indent\\n[rst2man-indent-level]] .in \\n[rst2man-indent\\n[rst2man-indent-level]]u .. .SH SYNOPSIS .sp borg [common options] delete [options] [REPOSITORY_OR_ARCHIVE] [ARCHIVE...] .SH DESCRIPTION .sp This command deletes an archive from the repository or the complete repository. Disk space is reclaimed accordingly. If you delete the complete repository, the local cache for it (if any) is also deleted. .sp When using \fB\-\-stats\fP, you will get some statistics about how much data was deleted \- the "Deleted data" deduplicated size there is most interesting as that is how much your repository will shrink. Please note that the "All archives" stats refer to the state after deletion. .SH OPTIONS .sp See \fIborg\-common(1)\fP for common options of Borg commands. 
.SS arguments .INDENT 0.0 .TP .B REPOSITORY_OR_ARCHIVE repository or archive to delete .TP .B ARCHIVE archives to delete .UNINDENT .SS optional arguments .INDENT 0.0 .TP .B \-n\fP,\fB \-\-dry\-run do not change repository .TP .B \-s\fP,\fB \-\-stats print statistics for the deleted archive .TP .B \-\-cache\-only delete only the local cache for the given repository .TP .B \-\-force force deletion of corrupted archives, use \fB\-\-force \-\-force\fP in case \fB\-\-force\fP does not work. .TP .B \-\-save\-space work slower, but using less space .UNINDENT .SS Archive filters .INDENT 0.0 .TP .BI \-P \ PREFIX\fR,\fB \ \-\-prefix \ PREFIX only consider archive names starting with this prefix. .TP .BI \-a \ GLOB\fR,\fB \ \-\-glob\-archives \ GLOB only consider archive names matching the glob. sh: rules apply, see "borg help patterns". \fB\-\-prefix\fP and \fB\-\-glob\-archives\fP are mutually exclusive. .TP .BI \-\-sort\-by \ KEYS Comma\-separated list of sorting keys; valid keys are: timestamp, name, id; default is: timestamp .TP .BI \-\-first \ N consider first N archives after other filters were applied .TP .BI \-\-last \ N consider last N archives after other filters were applied .UNINDENT .SH EXAMPLES .INDENT 0.0 .INDENT 3.5 .sp .nf .ft C # delete a single backup archive: $ borg delete /path/to/repo::Monday # delete the whole repository and the related local cache: $ borg delete /path/to/repo You requested to completely DELETE the repository *including* all archives it contains: repo Mon, 2016\-02\-15 19:26:54 root\-2016\-02\-15 Mon, 2016\-02\-15 19:36:29 newname Mon, 2016\-02\-15 19:50:19 Type \(aqYES\(aq if you understand this and want to continue: YES .ft P .fi .UNINDENT .UNINDENT .SH SEE ALSO .sp \fIborg\-common(1)\fP .SH AUTHOR The Borg Collective .\" Generated by docutils manpage writer. . borgbackup-1.1.15/docs/man/borg-compression.10000644000175000017500000000635613771325506020774 0ustar useruser00000000000000.\" Man page generated from reStructuredText. . .TH BORG-COMPRESSION 1 "2020-12-24" "" "borg backup tool" .SH NAME borg-compression \- Details regarding compression . .nr rst2man-indent-level 0 . .de1 rstReportMargin \\$1 \\n[an-margin] level \\n[rst2man-indent-level] level margin: \\n[rst2man-indent\\n[rst2man-indent-level]] - \\n[rst2man-indent0] \\n[rst2man-indent1] \\n[rst2man-indent2] .. .de1 INDENT .\" .rstReportMargin pre: . RS \\$1 . nr rst2man-indent\\n[rst2man-indent-level] \\n[an-margin] . nr rst2man-indent-level +1 .\" .rstReportMargin post: .. .de UNINDENT . RE .\" indent \\n[an-margin] .\" old: \\n[rst2man-indent\\n[rst2man-indent-level]] .nr rst2man-indent-level -1 .\" new: \\n[rst2man-indent\\n[rst2man-indent-level]] .in \\n[rst2man-indent\\n[rst2man-indent-level]]u .. .SH DESCRIPTION .sp It is no problem to mix different compression methods in one repo, deduplication is done on the source data chunks (not on the compressed or encrypted data). .sp If some specific chunk was once compressed and stored into the repo, creating another backup that also uses this chunk will not change the stored chunk. So if you use different compression specs for the backups, whichever stores a chunk first determines its compression. See also borg recreate. .sp Compression is lz4 by default. If you want something else, you have to specify what you want. .sp Valid compression specifiers are: .INDENT 0.0 .TP .B none Do not compress. .TP .B lz4 Use lz4 compression. Very high speed, very low compression. 
(default) .TP .B zstd[,L] Use zstd ("zstandard") compression, a modern wide\-range algorithm. If you do not explicitely give the compression level L (ranging from 1 to 22), it will use level 3. Archives compressed with zstd are not compatible with borg < 1.1.4. .TP .B zlib[,L] Use zlib ("gz") compression. Medium speed, medium compression. If you do not explicitely give the compression level L (ranging from 0 to 9), it will use level 6. Giving level 0 (means "no compression", but still has zlib protocol overhead) is usually pointless, you better use "none" compression. .TP .B lzma[,L] Use lzma ("xz") compression. Low speed, high compression. If you do not explicitely give the compression level L (ranging from 0 to 9), it will use level 6. Giving levels above 6 is pointless and counterproductive because it does not compress better due to the buffer size used by borg \- but it wastes lots of CPU cycles and RAM. .TP .B auto,C[,L] Use a built\-in heuristic to decide per chunk whether to compress or not. The heuristic tries with lz4 whether the data is compressible. For incompressible data, it will not use compression (uses "none"). For compressible data, it uses the given C[,L] compression \- with C[,L] being any valid compression specifier. .UNINDENT .sp Examples: .INDENT 0.0 .INDENT 3.5 .sp .nf .ft C borg create \-\-compression lz4 REPO::ARCHIVE data borg create \-\-compression zstd REPO::ARCHIVE data borg create \-\-compression zstd,10 REPO::ARCHIVE data borg create \-\-compression zlib REPO::ARCHIVE data borg create \-\-compression zlib,1 REPO::ARCHIVE data borg create \-\-compression auto,lzma,6 REPO::ARCHIVE data borg create \-\-compression auto,lzma ... .ft P .fi .UNINDENT .UNINDENT .SH AUTHOR The Borg Collective .\" Generated by docutils manpage writer. . borgbackup-1.1.15/docs/man/borg-umount.10000644000175000017500000000715213771325506017755 0ustar useruser00000000000000.\" Man page generated from reStructuredText. . .TH BORG-UMOUNT 1 "2020-12-24" "" "borg backup tool" .SH NAME borg-umount \- un-mount the FUSE filesystem . .nr rst2man-indent-level 0 . .de1 rstReportMargin \\$1 \\n[an-margin] level \\n[rst2man-indent-level] level margin: \\n[rst2man-indent\\n[rst2man-indent-level]] - \\n[rst2man-indent0] \\n[rst2man-indent1] \\n[rst2man-indent2] .. .de1 INDENT .\" .rstReportMargin pre: . RS \\$1 . nr rst2man-indent\\n[rst2man-indent-level] \\n[an-margin] . nr rst2man-indent-level +1 .\" .rstReportMargin post: .. .de UNINDENT . RE .\" indent \\n[an-margin] .\" old: \\n[rst2man-indent\\n[rst2man-indent-level]] .nr rst2man-indent-level -1 .\" new: \\n[rst2man-indent\\n[rst2man-indent-level]] .in \\n[rst2man-indent\\n[rst2man-indent-level]]u .. .SH SYNOPSIS .sp borg [common options] umount [options] MOUNTPOINT .SH DESCRIPTION .sp This command un\-mounts a FUSE filesystem that was mounted with \fBborg mount\fP\&. .sp This is a convenience wrapper that just calls the platform\-specific shell command \- usually this is either umount or fusermount \-u. .SH OPTIONS .sp See \fIborg\-common(1)\fP for common options of Borg commands. .SS arguments .INDENT 0.0 .TP .B MOUNTPOINT mountpoint of the filesystem to umount .UNINDENT .SH EXAMPLES .INDENT 0.0 .INDENT 3.5 .sp .nf .ft C # Mounting the repository shows all archives. # Archives are loaded lazily, expect some delay when navigating to an archive # for the first time. 
$ borg mount /path/to/repo /tmp/mymountpoint $ ls /tmp/mymountpoint root\-2016\-02\-14 root\-2016\-02\-15 $ borg umount /tmp/mymountpoint # Mounting a specific archive is possible as well. $ borg mount /path/to/repo::root\-2016\-02\-15 /tmp/mymountpoint $ ls /tmp/mymountpoint bin boot etc home lib lib64 lost+found media mnt opt root sbin srv tmp usr var $ borg umount /tmp/mymountpoint # The experimental "versions view" merges all archives in the repository # and provides a versioned view on files. $ borg mount \-o versions /path/to/repo /tmp/mymountpoint $ ls \-l /tmp/mymountpoint/home/user/doc.txt/ total 24 \-rw\-rw\-r\-\- 1 user group 12357 Aug 26 21:19 doc.cda00bc9.txt \-rw\-rw\-r\-\- 1 user group 12204 Aug 26 21:04 doc.fa760f28.txt $ borg umount /tmp/mymountpoint # Archive filters are supported. # These are especially handy for the "versions view", # which does not support lazy processing of archives. $ borg mount \-o versions \-\-glob\-archives \(aq*\-my\-home\(aq \-\-last 10 /path/to/repo /tmp/mymountpoint # Exclusion options are supported. # These can speed up mounting and lower memory needs significantly. $ borg mount /path/to/repo /tmp/mymountpoint only/that/path $ borg mount \-\-exclude \(aq...\(aq /path/to/repo /tmp/mymountpoint .ft P .fi .UNINDENT .UNINDENT .SS borgfs .INDENT 0.0 .INDENT 3.5 .sp .nf .ft C $ echo \(aq/mnt/backup /tmp/myrepo fuse.borgfs defaults,noauto 0 0\(aq >> /etc/fstab $ echo \(aq/mnt/backup::root\-2016\-02\-15 /tmp/myarchive fuse.borgfs defaults,noauto 0 0\(aq >> /etc/fstab $ mount /tmp/myrepo $ mount /tmp/myarchive $ ls /tmp/myrepo root\-2016\-02\-01 root\-2016\-02\-2015 $ ls /tmp/myarchive bin boot etc home lib lib64 lost+found media mnt opt root sbin srv tmp usr var .ft P .fi .UNINDENT .UNINDENT .sp \fBNOTE:\fP .INDENT 0.0 .INDENT 3.5 \fBborgfs\fP will be automatically provided if you used a distribution package, \fBpip\fP or \fBsetup.py\fP to install Borg. Users of the standalone binary will have to manually create a symlink (see \fIpyinstaller\-binary\fP). .UNINDENT .UNINDENT .SH SEE ALSO .sp \fIborg\-common(1)\fP, \fIborg\-mount(1)\fP .SH AUTHOR The Borg Collective .\" Generated by docutils manpage writer. . borgbackup-1.1.15/docs/man/borg-init.10000644000175000017500000001703113771325506017366 0ustar useruser00000000000000.\" Man page generated from reStructuredText. . .TH BORG-INIT 1 "2020-12-24" "" "borg backup tool" .SH NAME borg-init \- Initialize an empty repository . .nr rst2man-indent-level 0 . .de1 rstReportMargin \\$1 \\n[an-margin] level \\n[rst2man-indent-level] level margin: \\n[rst2man-indent\\n[rst2man-indent-level]] - \\n[rst2man-indent0] \\n[rst2man-indent1] \\n[rst2man-indent2] .. .de1 INDENT .\" .rstReportMargin pre: . RS \\$1 . nr rst2man-indent\\n[rst2man-indent-level] \\n[an-margin] . nr rst2man-indent-level +1 .\" .rstReportMargin post: .. .de UNINDENT . RE .\" indent \\n[an-margin] .\" old: \\n[rst2man-indent\\n[rst2man-indent-level]] .nr rst2man-indent-level -1 .\" new: \\n[rst2man-indent\\n[rst2man-indent-level]] .in \\n[rst2man-indent\\n[rst2man-indent-level]]u .. .SH SYNOPSIS .sp borg [common options] init [options] [REPOSITORY] .SH DESCRIPTION .sp This command initializes an empty repository. A repository is a filesystem directory containing the deduplicated data from zero or more archives. .sp Encryption can be enabled at repository init time. It cannot be changed later. .sp It is not recommended to work without encryption. Repository encryption protects you e.g. 
against the case that an attacker has access to your backup repository. .sp Borg relies on randomly generated key material and uses that for chunking, id generation, encryption and authentication. The key material is encrypted using the passphrase you give before it is stored on\-disk. .sp You need to be careful with the key / the passphrase: .sp If you want "passphrase\-only" security, use one of the repokey modes. The key will be stored inside the repository (in its "config" file). In above mentioned attack scenario, the attacker will have the key (but not the passphrase). .sp If you want "passphrase and having\-the\-key" security, use one of the keyfile modes. The key will be stored in your home directory (in .config/borg/keys). In the attack scenario, the attacker who has just access to your repo won\(aqt have the key (and also not the passphrase). .sp Make a backup copy of the key file (keyfile mode) or repo config file (repokey mode) and keep it at a safe place, so you still have the key in case it gets corrupted or lost. Also keep the passphrase at a safe place. The backup that is encrypted with that key won\(aqt help you with that, of course. .sp Make sure you use a good passphrase. Not too short, not too simple. The real encryption / decryption key is encrypted with / locked by your passphrase. If an attacker gets your key, he can\(aqt unlock and use it without knowing the passphrase. .sp Be careful with special or non\-ascii characters in your passphrase: .INDENT 0.0 .IP \(bu 2 Borg processes the passphrase as unicode (and encodes it as utf\-8), so it does not have problems dealing with even the strangest characters. .IP \(bu 2 BUT: that does not necessarily apply to your OS / VM / keyboard configuration. .UNINDENT .sp So better use a long passphrase made from simple ascii chars than one that includes non\-ascii stuff or characters that are hard/impossible to enter on a different keyboard layout. .sp You can change your passphrase for existing repos at any time, it won\(aqt affect the encryption/decryption key or other secrets. .SS Encryption modes .sp You can choose from the encryption modes seen in the table below on a per\-repo basis. The mode determines encryption algorithm, hash/MAC algorithm and also the key storage location. .sp Example: \fIborg init \-\-encryption repokey ...\fP .\" nanorst: inline-fill . .TS center; |l|l|l|l|. _ T{ Hash/MAC T} T{ Not encrypted no auth T} T{ Not encrypted, but authenticated T} T{ Encrypted (AEAD w/ AES) and authenticated T} _ T{ SHA\-256 T} T{ none T} T{ \fIauthenticated\fP T} T{ repokey keyfile T} _ T{ BLAKE2b T} T{ n/a T} T{ \fIauthenticated\-blake2\fP T} T{ \fIrepokey\-blake2\fP \fIkeyfile\-blake2\fP T} _ .TE .\" nanorst: inline-replace . .sp Modes \fImarked like this\fP in the above table are new in Borg 1.1 and are not backwards\-compatible with Borg 1.0.x. .sp On modern Intel/AMD CPUs (except very cheap ones), AES is usually hardware\-accelerated. BLAKE2b is faster than SHA256 on Intel/AMD 64\-bit CPUs (except AMD Ryzen and future CPUs with SHA extensions), which makes \fIauthenticated\-blake2\fP faster than \fInone\fP and \fIauthenticated\fP\&. .sp On modern ARM CPUs, NEON provides hardware acceleration for SHA256 making it faster than BLAKE2b\-256 there. NEON accelerates AES as well. .sp Hardware acceleration is always used automatically when available. .sp \fIrepokey\fP and \fIkeyfile\fP use AES\-CTR\-256 for encryption and HMAC\-SHA256 for authentication in an encrypt\-then\-MAC (EtM) construction. 
The chunk ID hash is HMAC\-SHA256 as well (with a separate key). These modes are compatible with Borg 1.0.x. .sp \fIrepokey\-blake2\fP and \fIkeyfile\-blake2\fP are also authenticated encryption modes, but use BLAKE2b\-256 instead of HMAC\-SHA256 for authentication. The chunk ID hash is a keyed BLAKE2b\-256 hash. These modes are new and \fInot\fP compatible with Borg 1.0.x. .sp \fIauthenticated\fP mode uses no encryption, but authenticates repository contents through the same HMAC\-SHA256 hash as the \fIrepokey\fP and \fIkeyfile\fP modes (it uses it as the chunk ID hash). The key is stored like \fIrepokey\fP\&. This mode is new and \fInot\fP compatible with Borg 1.0.x. .sp \fIauthenticated\-blake2\fP is like \fIauthenticated\fP, but uses the keyed BLAKE2b\-256 hash from the other blake2 modes. This mode is new and \fInot\fP compatible with Borg 1.0.x. .sp \fInone\fP mode uses no encryption and no authentication. It uses SHA256 as chunk ID hash. This mode is not recommended, you should rather consider using an authenticated or authenticated/encrypted mode. This mode has possible denial\-of\-service issues when running \fBborg create\fP on contents controlled by an attacker. Use it only for new repositories where no encryption is wanted \fBand\fP when compatibility with 1.0.x is important. If compatibility with 1.0.x is not important, use \fIauthenticated\-blake2\fP or \fIauthenticated\fP instead. This mode is compatible with Borg 1.0.x. .SH OPTIONS .sp See \fIborg\-common(1)\fP for common options of Borg commands. .SS arguments .INDENT 0.0 .TP .B REPOSITORY repository to create .UNINDENT .SS optional arguments .INDENT 0.0 .TP .BI \-e \ MODE\fR,\fB \ \-\-encryption \ MODE select encryption key mode \fB(required)\fP .TP .B \-\-append\-only create an append\-only mode repository .TP .BI \-\-storage\-quota \ QUOTA Set storage quota of the new repository (e.g. 5G, 1.5T). Default: no quota. .TP .B \-\-make\-parent\-dirs create the parent directories of the repository directory, if they are missing. .UNINDENT .SH EXAMPLES .INDENT 0.0 .INDENT 3.5 .sp .nf .ft C # Local repository, repokey encryption, BLAKE2b (often faster, since Borg 1.1) $ borg init \-\-encryption=repokey\-blake2 /path/to/repo # Local repository (no encryption) $ borg init \-\-encryption=none /path/to/repo # Remote repository (accesses a remote borg via ssh) # repokey: stores the (encrypted) key into /config $ borg init \-\-encryption=repokey\-blake2 user@hostname:backup # Remote repository (accesses a remote borg via ssh) # keyfile: stores the (encrypted) key into ~/.config/borg/keys/ $ borg init \-\-encryption=keyfile user@hostname:backup .ft P .fi .UNINDENT .UNINDENT .SH SEE ALSO .sp \fIborg\-common(1)\fP, \fIborg\-create(1)\fP, \fIborg\-delete(1)\fP, \fIborg\-check(1)\fP, \fIborg\-list(1)\fP, \fIborg\-key\-import(1)\fP, \fIborg\-key\-export(1)\fP, \fIborg\-key\-change\-passphrase(1)\fP .SH AUTHOR The Borg Collective .\" Generated by docutils manpage writer. . borgbackup-1.1.15/docs/man/borg-benchmark-crud.10000644000175000017500000000671013771325506021312 0ustar useruser00000000000000.\" Man page generated from reStructuredText. . .TH BORG-BENCHMARK-CRUD 1 "2020-12-24" "" "borg backup tool" .SH NAME borg-benchmark-crud \- Benchmark Create, Read, Update, Delete for archives. . .nr rst2man-indent-level 0 . .de1 rstReportMargin \\$1 \\n[an-margin] level \\n[rst2man-indent-level] level margin: \\n[rst2man-indent\\n[rst2man-indent-level]] - \\n[rst2man-indent0] \\n[rst2man-indent1] \\n[rst2man-indent2] .. 
.de1 INDENT .\" .rstReportMargin pre: . RS \\$1 . nr rst2man-indent\\n[rst2man-indent-level] \\n[an-margin] . nr rst2man-indent-level +1 .\" .rstReportMargin post: .. .de UNINDENT . RE .\" indent \\n[an-margin] .\" old: \\n[rst2man-indent\\n[rst2man-indent-level]] .nr rst2man-indent-level -1 .\" new: \\n[rst2man-indent\\n[rst2man-indent-level]] .in \\n[rst2man-indent\\n[rst2man-indent-level]]u .. .SH SYNOPSIS .sp borg [common options] benchmark crud [options] REPOSITORY PATH .SH DESCRIPTION .sp This command benchmarks borg CRUD (create, read, update, delete) operations. .sp It creates input data below the given PATH and backups this data into the given REPO. The REPO must already exist (it could be a fresh empty repo or an existing repo, the command will create / read / update / delete some archives named borg\-benchmark\-crud* there. .sp Make sure you have free space there, you\(aqll need about 1GB each (+ overhead). .sp If your repository is encrypted and borg needs a passphrase to unlock the key, use: .INDENT 0.0 .INDENT 3.5 .sp .nf .ft C BORG_PASSPHRASE=mysecret borg benchmark crud REPO PATH .ft P .fi .UNINDENT .UNINDENT .sp Measurements are done with different input file sizes and counts. The file contents are very artificial (either all zero or all random), thus the measurement results do not necessarily reflect performance with real data. Also, due to the kind of content used, no compression is used in these benchmarks. .INDENT 0.0 .TP .B C\- == borg create (1st archive creation, no compression, do not use files cache) C\-Z\- == all\-zero files. full dedup, this is primarily measuring reader/chunker/hasher. C\-R\- == random files. no dedup, measuring throughput through all processing stages. .TP .B R\- == borg extract (extract archive, dry\-run, do everything, but do not write files to disk) R\-Z\- == all zero files. Measuring heavily duplicated files. R\-R\- == random files. No duplication here, measuring throughput through all processing stages, except writing to disk. .TP .B U\- == borg create (2nd archive creation of unchanged input files, measure files cache speed) The throughput value is kind of virtual here, it does not actually read the file. U\-Z\- == needs to check the 2 all\-zero chunks\(aq existence in the repo. U\-R\- == needs to check existence of a lot of different chunks in the repo. .TP .B D\- == borg delete archive (delete last remaining archive, measure deletion + compaction) D\-Z\- == few chunks to delete / few segments to compact/remove. D\-R\- == many chunks to delete / many segments to compact/remove. .UNINDENT .sp Please note that there might be quite some variance in these measurements. Try multiple measurements and having a otherwise idle machine (and network, if you use it). .SH OPTIONS .sp See \fIborg\-common(1)\fP for common options of Borg commands. .SS arguments .INDENT 0.0 .TP .B REPOSITORY repository to use for benchmark (must exist) .TP .B PATH path were to create benchmark input data .UNINDENT .SH SEE ALSO .sp \fIborg\-common(1)\fP .SH AUTHOR The Borg Collective .\" Generated by docutils manpage writer. . borgbackup-1.1.15/docs/man/borg-create.10000644000175000017500000003776413771325506017705 0ustar useruser00000000000000.\" Man page generated from reStructuredText. . .TH BORG-CREATE 1 "2020-12-24" "" "borg backup tool" .SH NAME borg-create \- Create new archive . .nr rst2man-indent-level 0 . 
.de1 rstReportMargin \\$1 \\n[an-margin] level \\n[rst2man-indent-level] level margin: \\n[rst2man-indent\\n[rst2man-indent-level]] - \\n[rst2man-indent0] \\n[rst2man-indent1] \\n[rst2man-indent2] .. .de1 INDENT .\" .rstReportMargin pre: . RS \\$1 . nr rst2man-indent\\n[rst2man-indent-level] \\n[an-margin] . nr rst2man-indent-level +1 .\" .rstReportMargin post: .. .de UNINDENT . RE .\" indent \\n[an-margin] .\" old: \\n[rst2man-indent\\n[rst2man-indent-level]] .nr rst2man-indent-level -1 .\" new: \\n[rst2man-indent\\n[rst2man-indent-level]] .in \\n[rst2man-indent\\n[rst2man-indent-level]]u .. .SH SYNOPSIS .sp borg [common options] create [options] ARCHIVE [PATH...] .SH DESCRIPTION .sp This command creates a backup archive containing all files found while recursively traversing all paths specified. Paths are added to the archive as they are given, that means if relative paths are desired, the command has to be run from the correct directory. .sp When giving \(aq\-\(aq as path, borg will read data from standard input and create a file \(aqstdin\(aq in the created archive from that data. See section \fIReading from stdin\fP below for details. .sp The archive will consume almost no disk space for files or parts of files that have already been stored in other archives. .sp The archive name needs to be unique. It must not end in \(aq.checkpoint\(aq or \(aq.checkpoint.N\(aq (with N being a number), because these names are used for checkpoints and treated in special ways. .sp In the archive name, you may use the following placeholders: {now}, {utcnow}, {fqdn}, {hostname}, {user} and some others. .sp Backup speed is increased by not reprocessing files that are already part of existing archives and weren\(aqt modified. The detection of unmodified files is done by comparing multiple file metadata values with previous values kept in the files cache. .sp This comparison can operate in different modes as given by \fB\-\-files\-cache\fP: .INDENT 0.0 .IP \(bu 2 ctime,size,inode (default) .IP \(bu 2 mtime,size,inode (default behaviour of borg versions older than 1.1.0rc4) .IP \(bu 2 ctime,size (ignore the inode number) .IP \(bu 2 mtime,size (ignore the inode number) .IP \(bu 2 rechunk,ctime (all files are considered modified \- rechunk, cache ctime) .IP \(bu 2 rechunk,mtime (all files are considered modified \- rechunk, cache mtime) .IP \(bu 2 disabled (disable the files cache, all files considered modified \- rechunk) .UNINDENT .sp inode number: better safety, but often unstable on network filesystems .sp Normally, detecting file modifications will take inode information into consideration to improve the reliability of file change detection. This is problematic for files located on sshfs and similar network file systems which do not provide stable inode numbers, such files will always be considered modified. You can use modes without \fIinode\fP in this case to improve performance, but reliability of change detection might be reduced. .sp ctime vs. mtime: safety vs. speed .INDENT 0.0 .IP \(bu 2 ctime is a rather safe way to detect changes to a file (metadata and contents) as it can not be set from userspace. But, a metadata\-only change will already update the ctime, so there might be some unnecessary chunking/hashing even without content changes. Some filesystems do not support ctime (change time). E.g. doing a chown or chmod to a file will change its ctime. .IP \(bu 2 mtime usually works and only updates if file contents were changed. But mtime can be arbitrarily set from userspace, e.g. 
to set mtime back to the same value it had before a content change happened. This can be used maliciously as well as well\-meant, but in both cases mtime based cache modes can be problematic. .UNINDENT .sp The mount points of filesystems or filesystem snapshots should be the same for every creation of a new archive to ensure fast operation. This is because the file cache that is used to determine changed files quickly uses absolute filenames. If this is not possible, consider creating a bind mount to a stable location. .sp The \fB\-\-progress\fP option shows (from left to right) Original, Compressed and Deduplicated (O, C and D, respectively), then the Number of files (N) processed so far, followed by the currently processed path. .sp When using \fB\-\-stats\fP, you will get some statistics about how much data was added \- the "This Archive" deduplicated size there is most interesting as that is how much your repository will grow. Please note that the "All archives" stats refer to the state after creation. Also, the \fB\-\-stats\fP and \fB\-\-dry\-run\fP options are mutually exclusive because the data is not actually compressed and deduplicated during a dry run. .sp See the output of the "borg help patterns" command for more help on exclude patterns. .sp See the output of the "borg help placeholders" command for more help on placeholders. .SH OPTIONS .sp See \fIborg\-common(1)\fP for common options of Borg commands. .SS arguments .INDENT 0.0 .TP .B ARCHIVE name of archive to create (must be also a valid directory name) .TP .B PATH paths to archive .UNINDENT .SS optional arguments .INDENT 0.0 .TP .B \-n\fP,\fB \-\-dry\-run do not create a backup archive .TP .B \-s\fP,\fB \-\-stats print statistics for the created archive .TP .B \-\-list output verbose list of items (files, dirs, ...) .TP .BI \-\-filter \ STATUSCHARS only display items with the given status characters (see description) .TP .B \-\-json output stats as JSON. Implies \fB\-\-stats\fP\&. .TP .B \-\-no\-cache\-sync experimental: do not synchronize the cache. Implies not using the files cache. 
.TP .B \-\-no\-files\-cache do not load/update the file metadata cache used to detect unchanged files .TP .BI \-\-stdin\-name \ NAME use NAME in archive for stdin data (default: "stdin") .TP .BI \-\-stdin\-user \ USER set user USER in archive for stdin data (default: \(aqroot\(aq) .TP .BI \-\-stdin\-group \ GROUP set group GROUP in archive for stdin data (default: \(aqroot\(aq) .TP .BI \-\-stdin\-mode \ M set mode to M in archive for stdin data (default: 0660) .UNINDENT .SS Exclusion options .INDENT 0.0 .TP .BI \-e \ PATTERN\fR,\fB \ \-\-exclude \ PATTERN exclude paths matching PATTERN .TP .BI \-\-exclude\-from \ EXCLUDEFILE read exclude patterns from EXCLUDEFILE, one per line .TP .BI \-\-pattern \ PATTERN experimental: include/exclude paths matching PATTERN .TP .BI \-\-patterns\-from \ PATTERNFILE experimental: read include/exclude patterns from PATTERNFILE, one per line .TP .B \-\-exclude\-caches exclude directories that contain a CACHEDIR.TAG file (\fI\%http://www.bford.info/cachedir/spec.html\fP) .TP .BI \-\-exclude\-if\-present \ NAME exclude directories that are tagged by containing a filesystem object with the given NAME .TP .B \-\-keep\-exclude\-tags\fP,\fB \-\-keep\-tag\-files if tag objects are specified with \fB\-\-exclude\-if\-present\fP, don\(aqt omit the tag objects themselves from the backup archive .TP .B \-\-exclude\-nodump exclude files flagged NODUMP .UNINDENT .SS Filesystem options .INDENT 0.0 .TP .B \-x\fP,\fB \-\-one\-file\-system stay in the same file system and do not store mount points of other file systems. This might behave different from your expectations, see the docs. .TP .B \-\-numeric\-owner only store numeric user and group identifiers .TP .B \-\-noatime do not store atime into archive .TP .B \-\-noctime do not store ctime into archive .TP .B \-\-nobirthtime do not store birthtime (creation date) into archive .TP .B \-\-nobsdflags do not read and store bsdflags (e.g. NODUMP, IMMUTABLE) into archive .TP .B \-\-ignore\-inode ignore inode data in the file metadata cache used to detect unchanged files. .TP .BI \-\-files\-cache \ MODE operate files cache in MODE. default: ctime,size,inode .TP .B \-\-read\-special open and read block and char device files as well as FIFOs as if they were regular files. Also follows symlinks pointing to these kinds of files. .UNINDENT .SS Archive options .INDENT 0.0 .TP .BI \-\-comment \ COMMENT add a comment text to the archive .TP .BI \-\-timestamp \ TIMESTAMP manually specify the archive creation date/time (UTC, yyyy\-mm\-ddThh:mm:ss format). Alternatively, give a reference file/directory. .TP .BI \-c \ SECONDS\fR,\fB \ \-\-checkpoint\-interval \ SECONDS write checkpoint every SECONDS seconds (Default: 1800) .TP .BI \-\-chunker\-params \ PARAMS specify the chunker parameters (CHUNK_MIN_EXP, CHUNK_MAX_EXP, HASH_MASK_BITS, HASH_WINDOW_SIZE). default: 19,23,21,4095 .TP .BI \-C \ COMPRESSION\fR,\fB \ \-\-compression \ COMPRESSION select compression algorithm, see the output of the "borg help compression" command for details. .UNINDENT .SH EXAMPLES .INDENT 0.0 .INDENT 3.5 .sp .nf .ft C # Backup ~/Documents into an archive named "my\-documents" $ borg create /path/to/repo::my\-documents ~/Documents # same, but list all files as we process them $ borg create \-\-list /path/to/repo::my\-documents ~/Documents # Backup ~/Documents and ~/src but exclude pyc files $ borg create /path/to/repo::my\-files \e ~/Documents \e ~/src \e \-\-exclude \(aq*.pyc\(aq # Backup home directories excluding image thumbnails (i.e. 
only # /home//.thumbnails is excluded, not /home/*/*/.thumbnails etc.) $ borg create /path/to/repo::my\-files /home \e \-\-exclude \(aqsh:/home/*/.thumbnails\(aq # Backup the root filesystem into an archive named "root\-YYYY\-MM\-DD" # use zlib compression (good, but slow) \- default is lz4 (fast, low compression ratio) $ borg create \-C zlib,6 \-\-one\-file\-system /path/to/repo::root\-{now:%Y\-%m\-%d} / # Backup a remote host locally ("pull" style) using sshfs $ mkdir sshfs\-mount $ sshfs root@example.com:/ sshfs\-mount $ cd sshfs\-mount $ borg create /path/to/repo::example.com\-root\-{now:%Y\-%m\-%d} . $ cd .. $ fusermount \-u sshfs\-mount # Make a big effort in fine granular deduplication (big chunk management # overhead, needs a lot of RAM and disk space, see formula in internals # docs \- same parameters as borg < 1.0 or attic): $ borg create \-\-chunker\-params 10,23,16,4095 /path/to/repo::small /smallstuff # Backup a raw device (must not be active/in use/mounted at that time) $ dd if=/dev/sdx bs=10M | borg create /path/to/repo::my\-sdx \- # No compression (none) $ borg create \-\-compression none /path/to/repo::arch ~ # Super fast, low compression (lz4, default) $ borg create /path/to/repo::arch ~ # Less fast, higher compression (zlib, N = 0..9) $ borg create \-\-compression zlib,N /path/to/repo::arch ~ # Even slower, even higher compression (lzma, N = 0..9) $ borg create \-\-compression lzma,N /path/to/repo::arch ~ # Only compress compressible data with lzma,N (N = 0..9) $ borg create \-\-compression auto,lzma,N /path/to/repo::arch ~ # Use short hostname, user name and current time in archive name $ borg create /path/to/repo::{hostname}\-{user}\-{now} ~ # Similar, use the same datetime format that is default as of borg 1.1 $ borg create /path/to/repo::{hostname}\-{user}\-{now:%Y\-%m\-%dT%H:%M:%S} ~ # As above, but add nanoseconds $ borg create /path/to/repo::{hostname}\-{user}\-{now:%Y\-%m\-%dT%H:%M:%S.%f} ~ # Backing up relative paths by moving into the correct directory first $ cd /home/user/Documents # The root directory of the archive will be "projectA" $ borg create /path/to/repo::daily\-projectA\-{now:%Y\-%m\-%d} projectA .ft P .fi .UNINDENT .UNINDENT .SH NOTES .sp The \fB\-\-exclude\fP patterns are not like tar. In tar \fB\-\-exclude\fP .bundler/gems will exclude foo/.bundler/gems. In borg it will not, you need to use \fB\-\-exclude\fP \(aq*/.bundler/gems\(aq to get the same effect. See \fBborg help patterns\fP for more information. .sp In addition to using \fB\-\-exclude\fP patterns, it is possible to use \fB\-\-exclude\-if\-present\fP to specify the name of a filesystem object (e.g. a file or folder name) which, when contained within another folder, will prevent the containing folder from being backed up. By default, the containing folder and all of its contents will be omitted from the backup. If, however, you wish to only include the objects specified by \fB\-\-exclude\-if\-present\fP in your backup, and not include any other contents of the containing folder, this can be enabled through using the \fB\-\-keep\-exclude\-tags\fP option. .sp The \fB\-x\fP or \fB\-\-one\-file\-system\fP option excludes directories, that are mountpoints (and everything in them). It detects mountpoints by comparing the device number from the output of \fBstat()\fP of the directory and its parent directory. Specifically, it excludes directories for which \fBstat()\fP reports a device number different from the device number of their parent. 
Be aware that in Linux (and possibly elsewhere) there are directories with device number different from their parent, which the kernel does not consider a mountpoint and also the other way around. Examples are bind mounts (possibly same device number, but always a mountpoint) and ALL subvolumes of a btrfs (different device number from parent but not necessarily a mountpoint). Therefore when using \fB\-\-one\-file\-system\fP, one should make doubly sure that the backup works as intended especially when using btrfs. This is even more important, if the btrfs layout was created by someone else, e.g. a distribution installer. .SS Item flags .sp \fB\-\-list\fP outputs a list of all files, directories and other file system items it considered (no matter whether they had content changes or not). For each item, it prefixes a single\-letter flag that indicates type and/or status of the item. .sp If you are interested only in a subset of that output, you can give e.g. \fB\-\-filter=AME\fP and it will only show regular files with A, M or E status (see below). .sp A uppercase character represents the status of a regular file relative to the "files" cache (not relative to the repo \-\- this is an issue if the files cache is not used). Metadata is stored in any case and for \(aqA\(aq and \(aqM\(aq also new data chunks are stored. For \(aqU\(aq all data chunks refer to already existing chunks. .INDENT 0.0 .IP \(bu 2 \(aqA\(aq = regular file, added (see also \fIa_status_oddity\fP in the FAQ) .IP \(bu 2 \(aqM\(aq = regular file, modified .IP \(bu 2 \(aqU\(aq = regular file, unchanged .IP \(bu 2 \(aqE\(aq = regular file, an error happened while accessing/reading \fIthis\fP file .UNINDENT .sp A lowercase character means a file type other than a regular file, borg usually just stores their metadata: .INDENT 0.0 .IP \(bu 2 \(aqd\(aq = directory .IP \(bu 2 \(aqb\(aq = block device .IP \(bu 2 \(aqc\(aq = char device .IP \(bu 2 \(aqh\(aq = regular file, hardlink (to already seen inodes) .IP \(bu 2 \(aqs\(aq = symlink .IP \(bu 2 \(aqf\(aq = fifo .UNINDENT .sp Other flags used include: .INDENT 0.0 .IP \(bu 2 \(aqi\(aq = backup data was read from standard input (stdin) .IP \(bu 2 \(aq\-\(aq = dry run, item was \fInot\fP backed up .IP \(bu 2 \(aqx\(aq = excluded, item was \fInot\fP backed up .IP \(bu 2 \(aq?\(aq = missing status code (if you see this, please file a bug report!) .UNINDENT .SS Reading from stdin .sp To read from stdin, specify \fB\-\fP as path and pipe directly to borg: .INDENT 0.0 .INDENT 3.5 .sp .nf .ft C backup\-vm \-\-id myvm \-\-stdout | borg create REPO::ARCHIVE \- .ft P .fi .UNINDENT .UNINDENT .sp Note that piping to borg creates an archive even if the command piping to borg exits with a failure. In this case, \fBone can end up with truncated output being backed up\fP\&. .sp Reading from stdin yields just a stream of data without file metadata associated with it, and the files cache is not needed at all. So it is safe to disable it via \fB\-\-no\-files\-cache\fP and speed up backup creation a bit. .sp By default, the content read from stdin is stored in a file called \(aqstdin\(aq. Use \fB\-\-stdin\-name\fP to change the name. .SH SEE ALSO .sp \fIborg\-common(1)\fP, \fIborg\-delete(1)\fP, \fIborg\-prune(1)\fP, \fIborg\-check(1)\fP, \fIborg\-patterns(1)\fP, \fIborg\-placeholders(1)\fP, \fIborg\-compression(1)\fP .SH AUTHOR The Borg Collective .\" Generated by docutils manpage writer. . 
borgbackup-1.1.15/docs/man/borg-key-change-passphrase.10000644000175000017500000000525513771325506022612 0ustar useruser00000000000000.\" Man page generated from reStructuredText. . .TH BORG-KEY-CHANGE-PASSPHRASE 1 "2020-12-24" "" "borg backup tool" .SH NAME borg-key-change-passphrase \- Change repository key file passphrase . .nr rst2man-indent-level 0 . .de1 rstReportMargin \\$1 \\n[an-margin] level \\n[rst2man-indent-level] level margin: \\n[rst2man-indent\\n[rst2man-indent-level]] - \\n[rst2man-indent0] \\n[rst2man-indent1] \\n[rst2man-indent2] .. .de1 INDENT .\" .rstReportMargin pre: . RS \\$1 . nr rst2man-indent\\n[rst2man-indent-level] \\n[an-margin] . nr rst2man-indent-level +1 .\" .rstReportMargin post: .. .de UNINDENT . RE .\" indent \\n[an-margin] .\" old: \\n[rst2man-indent\\n[rst2man-indent-level]] .nr rst2man-indent-level -1 .\" new: \\n[rst2man-indent\\n[rst2man-indent-level]] .in \\n[rst2man-indent\\n[rst2man-indent-level]]u .. .SH SYNOPSIS .sp borg [common options] key change\-passphrase [options] [REPOSITORY] .SH DESCRIPTION .sp The key files used for repository encryption are optionally passphrase protected. This command can be used to change this passphrase. .sp Please note that this command only changes the passphrase, but not any secret protected by it (like e.g. encryption/MAC keys or chunker seed). Thus, changing the passphrase after passphrase and borg key got compromised does not protect future (nor past) backups to the same repository. .SH OPTIONS .sp See \fIborg\-common(1)\fP for common options of Borg commands. .SS arguments .sp REPOSITORY .SH EXAMPLES .INDENT 0.0 .INDENT 3.5 .sp .nf .ft C # Create a key file protected repository $ borg init \-\-encryption=keyfile \-v /path/to/repo Initializing repository at "/path/to/repo" Enter new passphrase: Enter same passphrase again: Remember your passphrase. Your data will be inaccessible without it. Key in "/root/.config/borg/keys/mnt_backup" created. Keep this key safe. Your data will be inaccessible without it. Synchronizing chunks cache... Archives: 0, w/ cached Idx: 0, w/ outdated Idx: 0, w/o cached Idx: 0. Done. # Change key file passphrase $ borg key change\-passphrase \-v /path/to/repo Enter passphrase for key /root/.config/borg/keys/mnt_backup: Enter new passphrase: Enter same passphrase again: Remember your passphrase. Your data will be inaccessible without it. Key updated .ft P .fi .UNINDENT .UNINDENT .sp Fully automated using environment variables: .INDENT 0.0 .INDENT 3.5 .sp .nf .ft C $ BORG_NEW_PASSPHRASE=old borg init \-e=repokey repo # now "old" is the current passphrase. $ BORG_PASSPHRASE=old BORG_NEW_PASSPHRASE=new borg key change\-passphrase repo # now "new" is the current passphrase. .ft P .fi .UNINDENT .UNINDENT .SH SEE ALSO .sp \fIborg\-common(1)\fP .SH AUTHOR The Borg Collective .\" Generated by docutils manpage writer. . borgbackup-1.1.15/docs/man/borg-common.10000644000175000017500000000466313771325506017722 0ustar useruser00000000000000.\" Man page generated from reStructuredText. . .TH BORG-COMMON 1 "2020-12-24" "" "borg backup tool" .SH NAME borg-common \- Common options of Borg commands . .nr rst2man-indent-level 0 . .de1 rstReportMargin \\$1 \\n[an-margin] level \\n[rst2man-indent-level] level margin: \\n[rst2man-indent\\n[rst2man-indent-level]] - \\n[rst2man-indent0] \\n[rst2man-indent1] \\n[rst2man-indent2] .. .de1 INDENT .\" .rstReportMargin pre: . RS \\$1 . nr rst2man-indent\\n[rst2man-indent-level] \\n[an-margin] . nr rst2man-indent-level +1 .\" .rstReportMargin post: .. .de UNINDENT . 
RE .\" indent \\n[an-margin] .\" old: \\n[rst2man-indent\\n[rst2man-indent-level]] .nr rst2man-indent-level -1 .\" new: \\n[rst2man-indent\\n[rst2man-indent-level]] .in \\n[rst2man-indent\\n[rst2man-indent-level]]u .. .SH SYNOPSIS .INDENT 0.0 .TP .B \-h\fP,\fB \-\-help show this help message and exit .TP .B \-\-critical work on log level CRITICAL .TP .B \-\-error work on log level ERROR .TP .B \-\-warning work on log level WARNING (default) .TP .B \-\-info\fP,\fB \-v\fP,\fB \-\-verbose work on log level INFO .TP .B \-\-debug enable debug output, work on log level DEBUG .TP .BI \-\-debug\-topic \ TOPIC enable TOPIC debugging (can be specified multiple times). The logger path is borg.debug. if TOPIC is not fully qualified. .TP .B \-p\fP,\fB \-\-progress show progress information .TP .B \-\-log\-json Output one JSON object per log line instead of formatted text. .TP .BI \-\-lock\-wait \ SECONDS wait at most SECONDS for acquiring a repository/cache lock (default: 1). .TP .B \-\-bypass\-lock Bypass locking mechanism .TP .B \-\-show\-version show/log the borg version .TP .B \-\-show\-rc show/log the return code (rc) .TP .BI \-\-umask \ M set umask to M (local and remote, default: 0077) .TP .BI \-\-remote\-path \ PATH use PATH as borg executable on the remote (default: "borg") .TP .BI \-\-remote\-ratelimit \ RATE set remote network upload rate limit in kiByte/s (default: 0=unlimited) .TP .B \-\-consider\-part\-files treat part files like normal files (e.g. to list/extract them) .TP .BI \-\-debug\-profile \ FILE Write execution profile in Borg format into FILE. For local use a Python\-compatible file can be generated by suffixing FILE with ".pyprof". .TP .BI \-\-rsh \ RSH Use this command to connect to the \(aqborg serve\(aq process (default: \(aqssh\(aq) .UNINDENT .SH SEE ALSO .sp \fIborg\-common(1)\fP .SH AUTHOR The Borg Collective .\" Generated by docutils manpage writer. . borgbackup-1.1.15/docs/man/borg-extract.10000644000175000017500000000751413771325506020102 0ustar useruser00000000000000.\" Man page generated from reStructuredText. . .TH BORG-EXTRACT 1 "2020-12-24" "" "borg backup tool" .SH NAME borg-extract \- Extract archive contents . .nr rst2man-indent-level 0 . .de1 rstReportMargin \\$1 \\n[an-margin] level \\n[rst2man-indent-level] level margin: \\n[rst2man-indent\\n[rst2man-indent-level]] - \\n[rst2man-indent0] \\n[rst2man-indent1] \\n[rst2man-indent2] .. .de1 INDENT .\" .rstReportMargin pre: . RS \\$1 . nr rst2man-indent\\n[rst2man-indent-level] \\n[an-margin] . nr rst2man-indent-level +1 .\" .rstReportMargin post: .. .de UNINDENT . RE .\" indent \\n[an-margin] .\" old: \\n[rst2man-indent\\n[rst2man-indent-level]] .nr rst2man-indent-level -1 .\" new: \\n[rst2man-indent\\n[rst2man-indent-level]] .in \\n[rst2man-indent\\n[rst2man-indent-level]]u .. .SH SYNOPSIS .sp borg [common options] extract [options] ARCHIVE [PATH...] .SH DESCRIPTION .sp This command extracts the contents of an archive. By default the entire archive is extracted but a subset of files and directories can be selected by passing a list of \fBPATHs\fP as arguments. The file selection can further be restricted by using the \fB\-\-exclude\fP option. .sp See the output of the "borg help patterns" command for more help on exclude patterns. .sp By using \fB\-\-dry\-run\fP, you can do all extraction steps except actually writing the output data: reading metadata and data chunks from the repo, checking the hash/hmac, decrypting, decompressing. 
.sp \fB\-\-progress\fP can be slower than no progress display, since it makes one additional pass over the archive metadata. .sp \fBNOTE:\fP .INDENT 0.0 .INDENT 3.5 Currently, extract always writes into the current working directory ("."), so make sure you \fBcd\fP to the right place before calling \fBborg extract\fP\&. .UNINDENT .UNINDENT .SH OPTIONS .sp See \fIborg\-common(1)\fP for common options of Borg commands. .SS arguments .INDENT 0.0 .TP .B ARCHIVE archive to extract .TP .B PATH paths to extract; patterns are supported .UNINDENT .SS optional arguments .INDENT 0.0 .TP .B \-\-list output verbose list of items (files, dirs, ...) .TP .B \-n\fP,\fB \-\-dry\-run do not actually change any files .TP .B \-\-numeric\-owner only obey numeric user and group identifiers .TP .B \-\-nobsdflags do not extract/set bsdflags (e.g. NODUMP, IMMUTABLE) .TP .B \-\-stdout write all extracted data to stdout .TP .B \-\-sparse create holes in output sparse file from all\-zero chunks .UNINDENT .SS Exclusion options .INDENT 0.0 .TP .BI \-e \ PATTERN\fR,\fB \ \-\-exclude \ PATTERN exclude paths matching PATTERN .TP .BI \-\-exclude\-from \ EXCLUDEFILE read exclude patterns from EXCLUDEFILE, one per line .TP .BI \-\-pattern \ PATTERN experimental: include/exclude paths matching PATTERN .TP .BI \-\-patterns\-from \ PATTERNFILE experimental: read include/exclude patterns from PATTERNFILE, one per line .TP .BI \-\-strip\-components \ NUMBER Remove the specified number of leading path elements. Paths with fewer elements will be silently skipped. .UNINDENT .SH EXAMPLES .INDENT 0.0 .INDENT 3.5 .sp .nf .ft C # Extract entire archive $ borg extract /path/to/repo::my\-files # Extract entire archive and list files while processing $ borg extract \-\-list /path/to/repo::my\-files # Verify whether an archive could be successfully extracted, but do not write files to disk $ borg extract \-\-dry\-run /path/to/repo::my\-files # Extract the "src" directory $ borg extract /path/to/repo::my\-files home/USERNAME/src # Extract the "src" directory but exclude object files $ borg extract /path/to/repo::my\-files home/USERNAME/src \-\-exclude \(aq*.o\(aq # Restore a raw device (must not be active/in use/mounted at that time) $ borg extract \-\-stdout /path/to/repo::my\-sdx | dd of=/dev/sdx bs=10M .ft P .fi .UNINDENT .UNINDENT .SH SEE ALSO .sp \fIborg\-common(1)\fP, \fIborg\-mount(1)\fP .SH AUTHOR The Borg Collective .\" Generated by docutils manpage writer. . borgbackup-1.1.15/docs/man/borg-placeholders.10000644000175000017500000000570213771325506021072 0ustar useruser00000000000000.\" Man page generated from reStructuredText. . .TH BORG-PLACEHOLDERS 1 "2020-12-24" "" "borg backup tool" .SH NAME borg-placeholders \- Details regarding placeholders . .nr rst2man-indent-level 0 . .de1 rstReportMargin \\$1 \\n[an-margin] level \\n[rst2man-indent-level] level margin: \\n[rst2man-indent\\n[rst2man-indent-level]] - \\n[rst2man-indent0] \\n[rst2man-indent1] \\n[rst2man-indent2] .. .de1 INDENT .\" .rstReportMargin pre: . RS \\$1 . nr rst2man-indent\\n[rst2man-indent-level] \\n[an-margin] . nr rst2man-indent-level +1 .\" .rstReportMargin post: .. .de UNINDENT . RE .\" indent \\n[an-margin] .\" old: \\n[rst2man-indent\\n[rst2man-indent-level]] .nr rst2man-indent-level -1 .\" new: \\n[rst2man-indent\\n[rst2man-indent-level]] .in \\n[rst2man-indent\\n[rst2man-indent-level]]u .. 
.SH DESCRIPTION .sp Repository (or Archive) URLs, \fB\-\-prefix\fP, \fB\-\-glob\-archives\fP, \fB\-\-comment\fP and \fB\-\-remote\-path\fP values support these placeholders: .INDENT 0.0 .TP .B {hostname} The (short) hostname of the machine. .TP .B {fqdn} The full name of the machine. .TP .B {reverse\-fqdn} The full name of the machine in reverse domain name notation. .TP .B {now} The current local date and time, by default in ISO\-8601 format. You can also supply your own \fI\%format string\fP, e.g. {now:%Y\-%m\-%d_%H:%M:%S} .TP .B {utcnow} The current UTC date and time, by default in ISO\-8601 format. You can also supply your own \fI\%format string\fP, e.g. {utcnow:%Y\-%m\-%d_%H:%M:%S} .TP .B {user} The user name (or UID, if no name is available) of the user running borg. .TP .B {pid} The current process ID. .TP .B {borgversion} The version of borg, e.g.: 1.0.8rc1 .TP .B {borgmajor} The version of borg, only the major version, e.g.: 1 .TP .B {borgminor} The version of borg, only major and minor version, e.g.: 1.0 .TP .B {borgpatch} The version of borg, only major, minor and patch version, e.g.: 1.0.8 .UNINDENT .sp If literal curly braces need to be used, double them for escaping: .INDENT 0.0 .INDENT 3.5 .sp .nf .ft C borg create /path/to/repo::{{literal_text}} .ft P .fi .UNINDENT .UNINDENT .sp Examples: .INDENT 0.0 .INDENT 3.5 .sp .nf .ft C borg create /path/to/repo::{hostname}\-{user}\-{utcnow} ... borg create /path/to/repo::{hostname}\-{now:%Y\-%m\-%d_%H:%M:%S} ... borg prune \-\-prefix \(aq{hostname}\-\(aq ... .ft P .fi .UNINDENT .UNINDENT .sp \fBNOTE:\fP .INDENT 0.0 .INDENT 3.5 systemd uses a difficult, non\-standard syntax for command lines in unit files (refer to the \fIsystemd.unit(5)\fP manual page). .sp When invoking borg from unit files, pay particular attention to escaping, especially when using the now/utcnow placeholders, since systemd performs its own %\-based variable replacement even in quoted text. To avoid interference from systemd, double all percent signs (\fB{hostname}\-{now:%Y\-%m\-%d_%H:%M:%S}\fP becomes \fB{hostname}\-{now:%%Y\-%%m\-%%d_%%H:%%M:%%S}\fP). .UNINDENT .UNINDENT .SH AUTHOR The Borg Collective .\" Generated by docutils manpage writer. . borgbackup-1.1.15/docs/man/borg-mount.10000644000175000017500000001074513771325506017572 0ustar useruser00000000000000.\" Man page generated from reStructuredText. . .TH BORG-MOUNT 1 "2020-12-24" "" "borg backup tool" .SH NAME borg-mount \- Mount archive or an entire repository as a FUSE filesystem . .nr rst2man-indent-level 0 . .de1 rstReportMargin \\$1 \\n[an-margin] level \\n[rst2man-indent-level] level margin: \\n[rst2man-indent\\n[rst2man-indent-level]] - \\n[rst2man-indent0] \\n[rst2man-indent1] \\n[rst2man-indent2] .. .de1 INDENT .\" .rstReportMargin pre: . RS \\$1 . nr rst2man-indent\\n[rst2man-indent-level] \\n[an-margin] . nr rst2man-indent-level +1 .\" .rstReportMargin post: .. .de UNINDENT . RE .\" indent \\n[an-margin] .\" old: \\n[rst2man-indent\\n[rst2man-indent-level]] .nr rst2man-indent-level -1 .\" new: \\n[rst2man-indent\\n[rst2man-indent-level]] .in \\n[rst2man-indent\\n[rst2man-indent-level]]u .. .SH SYNOPSIS .sp borg [common options] mount [options] REPOSITORY_OR_ARCHIVE MOUNTPOINT [PATH...] .SH DESCRIPTION .sp This command mounts an archive as a FUSE filesystem. This can be useful for browsing an archive or restoring individual files. Unless the \fB\-\-foreground\fP option is given the command will run in the background until the filesystem is \fBumounted\fP\&. 
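.sp
A typical interactive session might look like the following (a minimal sketch; \fB/tmp/mymountpoint\fP is only an example path), using \fIborg\-umount(1)\fP to unmount again when done:
.INDENT 0.0
.INDENT 3.5
.sp
.nf
.ft C
$ borg mount /path/to/repo::my\-files /tmp/mymountpoint
$ ls /tmp/mymountpoint
$ borg umount /tmp/mymountpoint
.ft P
.fi
.UNINDENT
.UNINDENT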
.sp The command \fBborgfs\fP provides a wrapper for \fBborg mount\fP\&. This can also be used in fstab entries: \fB/path/to/repo /mnt/point fuse.borgfs defaults,noauto 0 0\fP .sp To allow a regular user to use fstab entries, add the \fBuser\fP option: \fB/path/to/repo /mnt/point fuse.borgfs defaults,noauto,user 0 0\fP .sp For FUSE configuration and mount options, see the mount.fuse(8) manual page. .sp Additional mount options supported by borg: .INDENT 0.0 .IP \(bu 2 versions: when used with a repository mount, this gives a merged, versioned view of the files in the archives. EXPERIMENTAL, layout may change in future. .IP \(bu 2 allow_damaged_files: by default damaged files (where missing chunks were replaced with runs of zeros by borg check \fB\-\-repair\fP) are not readable and return EIO (I/O error). Set this option to read such files. .IP \(bu 2 ignore_permissions: for security reasons the "default_permissions" mount option is internally enforced by borg. "ignore_permissions" can be given to not enforce "default_permissions". .UNINDENT .sp The BORG_MOUNT_DATA_CACHE_ENTRIES environment variable is meant for advanced users to tweak the performance. It sets the number of cached data chunks; additional memory usage can be up to ~8 MiB times this number. The default is the number of CPU cores. .sp When the daemonized process receives a signal or crashes, it does not unmount. Unmounting in these cases could cause an active rsync or similar process to unintentionally delete data. .sp When running in the foreground ^C/SIGINT unmounts cleanly, but other signals or crashes do not. .SH OPTIONS .sp See \fIborg\-common(1)\fP for common options of Borg commands. .SS arguments .INDENT 0.0 .TP .B REPOSITORY_OR_ARCHIVE repository or archive to mount .TP .B MOUNTPOINT where to mount filesystem .TP .B PATH paths to extract; patterns are supported .UNINDENT .SS optional arguments .INDENT 0.0 .TP .B \-f\fP,\fB \-\-foreground stay in foreground, do not daemonize .TP .B \-o Extra mount options .UNINDENT .SS Archive filters .INDENT 0.0 .TP .BI \-P \ PREFIX\fR,\fB \ \-\-prefix \ PREFIX only consider archive names starting with this prefix. .TP .BI \-a \ GLOB\fR,\fB \ \-\-glob\-archives \ GLOB only consider archive names matching the glob. sh: rules apply, see "borg help patterns". \fB\-\-prefix\fP and \fB\-\-glob\-archives\fP are mutually exclusive. .TP .BI \-\-sort\-by \ KEYS Comma\-separated list of sorting keys; valid keys are: timestamp, name, id; default is: timestamp .TP .BI \-\-first \ N consider first N archives after other filters were applied .TP .BI \-\-last \ N consider last N archives after other filters were applied .UNINDENT .SS Exclusion options .INDENT 0.0 .TP .BI \-e \ PATTERN\fR,\fB \ \-\-exclude \ PATTERN exclude paths matching PATTERN .TP .BI \-\-exclude\-from \ EXCLUDEFILE read exclude patterns from EXCLUDEFILE, one per line .TP .BI \-\-pattern \ PATTERN experimental: include/exclude paths matching PATTERN .TP .BI \-\-patterns\-from \ PATTERNFILE experimental: read include/exclude patterns from PATTERNFILE, one per line .TP .BI \-\-strip\-components \ NUMBER Remove the specified number of leading path elements. Paths with fewer elements will be silently skipped. .UNINDENT .SH SEE ALSO .sp \fIborg\-common(1)\fP, \fIborg\-umount(1)\fP, \fIborg\-extract(1)\fP .SH AUTHOR The Borg Collective .\" Generated by docutils manpage writer. . 
borgbackup-1.1.15/docs/man/borg-key-import.10000644000175000017500000000316413771325506020525 0ustar useruser00000000000000.\" Man page generated from reStructuredText. . .TH BORG-KEY-IMPORT 1 "2020-12-24" "" "borg backup tool" .SH NAME borg-key-import \- Import the repository key from backup . .nr rst2man-indent-level 0 . .de1 rstReportMargin \\$1 \\n[an-margin] level \\n[rst2man-indent-level] level margin: \\n[rst2man-indent\\n[rst2man-indent-level]] - \\n[rst2man-indent0] \\n[rst2man-indent1] \\n[rst2man-indent2] .. .de1 INDENT .\" .rstReportMargin pre: . RS \\$1 . nr rst2man-indent\\n[rst2man-indent-level] \\n[an-margin] . nr rst2man-indent-level +1 .\" .rstReportMargin post: .. .de UNINDENT . RE .\" indent \\n[an-margin] .\" old: \\n[rst2man-indent\\n[rst2man-indent-level]] .nr rst2man-indent-level -1 .\" new: \\n[rst2man-indent\\n[rst2man-indent-level]] .in \\n[rst2man-indent\\n[rst2man-indent-level]]u .. .SH SYNOPSIS .sp borg [common options] key import [options] [REPOSITORY] [PATH] .SH DESCRIPTION .sp This command allows to restore a key previously backed up with the export command. .sp If the \fB\-\-paper\fP option is given, the import will be an interactive process in which each line is checked for plausibility before proceeding to the next line. For this format PATH must not be given. .SH OPTIONS .sp See \fIborg\-common(1)\fP for common options of Borg commands. .SS arguments .sp REPOSITORY .INDENT 0.0 .TP .B PATH path to the backup (\(aq\-\(aq to read from stdin) .UNINDENT .SS optional arguments .INDENT 0.0 .TP .B \-\-paper interactively import from a backup done with \fB\-\-paper\fP .UNINDENT .SH SEE ALSO .sp \fIborg\-common(1)\fP, \fIborg\-key\-export(1)\fP .SH AUTHOR The Borg Collective .\" Generated by docutils manpage writer. . borgbackup-1.1.15/docs/man/borg-list.10000644000175000017500000001610213771325506017374 0ustar useruser00000000000000.\" Man page generated from reStructuredText. . .TH BORG-LIST 1 "2020-12-24" "" "borg backup tool" .SH NAME borg-list \- List archive or repository contents . .nr rst2man-indent-level 0 . .de1 rstReportMargin \\$1 \\n[an-margin] level \\n[rst2man-indent-level] level margin: \\n[rst2man-indent\\n[rst2man-indent-level]] - \\n[rst2man-indent0] \\n[rst2man-indent1] \\n[rst2man-indent2] .. .de1 INDENT .\" .rstReportMargin pre: . RS \\$1 . nr rst2man-indent\\n[rst2man-indent-level] \\n[an-margin] . nr rst2man-indent-level +1 .\" .rstReportMargin post: .. .de UNINDENT . RE .\" indent \\n[an-margin] .\" old: \\n[rst2man-indent\\n[rst2man-indent-level]] .nr rst2man-indent-level -1 .\" new: \\n[rst2man-indent\\n[rst2man-indent-level]] .in \\n[rst2man-indent\\n[rst2man-indent-level]]u .. .SH SYNOPSIS .sp borg [common options] list [options] [REPOSITORY_OR_ARCHIVE] [PATH...] .SH DESCRIPTION .sp This command lists the contents of a repository or an archive. .sp See the "borg help patterns" command for more help on exclude patterns. .SH OPTIONS .sp See \fIborg\-common(1)\fP for common options of Borg commands. .SS arguments .INDENT 0.0 .TP .B REPOSITORY_OR_ARCHIVE repository or archive to list contents of .TP .B PATH paths to list; patterns are supported .UNINDENT .SS optional arguments .INDENT 0.0 .TP .B \-\-short only print file/directory names, nothing else .TP .BI \-\-format \ FORMAT\fR,\fB \ \-\-list\-format \ FORMAT specify format for file listing (default: "{mode} {user:6} {group:6} {size:8d} {mtime} {path}{extra}{NL}") .TP .B \-\-json Only valid for listing repository contents. Format output as JSON. 
The form of \fB\-\-format\fP is ignored, but keys used in it are added to the JSON output. Some keys are always present. Note: JSON can only represent text. A "barchive" key is therefore not available. .TP .B \-\-json\-lines Only valid for listing archive contents. Format output as JSON Lines. The form of \fB\-\-format\fP is ignored, but keys used in it are added to the JSON output. Some keys are always present. Note: JSON can only represent text. A "bpath" key is therefore not available. .UNINDENT .SS Archive filters .INDENT 0.0 .TP .BI \-P \ PREFIX\fR,\fB \ \-\-prefix \ PREFIX only consider archive names starting with this prefix. .TP .BI \-a \ GLOB\fR,\fB \ \-\-glob\-archives \ GLOB only consider archive names matching the glob. sh: rules apply, see "borg help patterns". \fB\-\-prefix\fP and \fB\-\-glob\-archives\fP are mutually exclusive. .TP .BI \-\-sort\-by \ KEYS Comma\-separated list of sorting keys; valid keys are: timestamp, name, id; default is: timestamp .TP .BI \-\-first \ N consider first N archives after other filters were applied .TP .BI \-\-last \ N consider last N archives after other filters were applied .UNINDENT .SS Exclusion options .INDENT 0.0 .TP .BI \-e \ PATTERN\fR,\fB \ \-\-exclude \ PATTERN exclude paths matching PATTERN .TP .BI \-\-exclude\-from \ EXCLUDEFILE read exclude patterns from EXCLUDEFILE, one per line .TP .BI \-\-pattern \ PATTERN experimental: include/exclude paths matching PATTERN .TP .BI \-\-patterns\-from \ PATTERNFILE experimental: read include/exclude patterns from PATTERNFILE, one per line .UNINDENT .SH EXAMPLES .INDENT 0.0 .INDENT 3.5 .sp .nf .ft C $ borg list /path/to/repo Monday Mon, 2016\-02\-15 19:15:11 repo Mon, 2016\-02\-15 19:26:54 root\-2016\-02\-15 Mon, 2016\-02\-15 19:36:29 newname Mon, 2016\-02\-15 19:50:19 \&... $ borg list /path/to/repo::root\-2016\-02\-15 drwxr\-xr\-x root root 0 Mon, 2016\-02\-15 17:44:27 . drwxrwxr\-x root root 0 Mon, 2016\-02\-15 19:04:49 bin \-rwxr\-xr\-x root root 1029624 Thu, 2014\-11\-13 00:08:51 bin/bash lrwxrwxrwx root root 0 Fri, 2015\-03\-27 20:24:26 bin/bzcmp \-> bzdiff \-rwxr\-xr\-x root root 2140 Fri, 2015\-03\-27 20:24:22 bin/bzdiff \&... $ borg list /path/to/repo::root\-2016\-02\-15 \-\-pattern "\- bin/ba*" drwxr\-xr\-x root root 0 Mon, 2016\-02\-15 17:44:27 . drwxrwxr\-x root root 0 Mon, 2016\-02\-15 19:04:49 bin lrwxrwxrwx root root 0 Fri, 2015\-03\-27 20:24:26 bin/bzcmp \-> bzdiff \-rwxr\-xr\-x root root 2140 Fri, 2015\-03\-27 20:24:22 bin/bzdiff \&... $ borg list /path/to/repo::archiveA \-\-format="{mode} {user:6} {group:6} {size:8d} {isomtime} {path}{extra}{NEWLINE}" drwxrwxr\-x user user 0 Sun, 2015\-02\-01 11:00:00 . drwxrwxr\-x user user 0 Sun, 2015\-02\-01 11:00:00 code drwxrwxr\-x user user 0 Sun, 2015\-02\-01 11:00:00 code/myproject \-rw\-rw\-r\-\- user user 1416192 Sun, 2015\-02\-01 11:00:00 code/myproject/file.ext \&... 
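# \-\-json\-lines (described above) prints one JSON object per archive item
# instead of the formatted listing (output omitted here)
$ borg list \-\-json\-lines /path/to/repo::archiveA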
.ft P .fi .UNINDENT .UNINDENT .SH NOTES .sp The following keys are available for \fB\-\-format\fP: .INDENT 0.0 .IP \(bu 2 NEWLINE: OS dependent line separator .IP \(bu 2 NL: alias of NEWLINE .IP \(bu 2 NUL: NUL character for creating print0 / xargs \-0 like output, see barchive/bpath .IP \(bu 2 SPACE .IP \(bu 2 TAB .IP \(bu 2 CR .IP \(bu 2 LF .UNINDENT .sp Keys for listing repository archives: .INDENT 0.0 .IP \(bu 2 archive: archive name interpreted as text (might be missing non\-text characters, see barchive) .IP \(bu 2 name: alias of "archive" .IP \(bu 2 barchive: verbatim archive name, can contain any character except NUL .IP \(bu 2 comment: archive comment interpreted as text (might be missing non\-text characters, see bcomment) .IP \(bu 2 bcomment: verbatim archive comment, can contain any character except NUL .IP \(bu 2 id: internal ID of the archive .IP \(bu 2 start: time (start) of creation of the archive .IP \(bu 2 time: alias of "start" .IP \(bu 2 end: time (end) of creation of the archive .IP \(bu 2 hostname: hostname of host on which this archive was created .IP \(bu 2 username: username of user who created this archive .UNINDENT .sp Keys for listing archive files: .INDENT 0.0 .IP \(bu 2 type .IP \(bu 2 mode .IP \(bu 2 uid .IP \(bu 2 gid .IP \(bu 2 user .IP \(bu 2 group .IP \(bu 2 path: path interpreted as text (might be missing non\-text characters, see bpath) .IP \(bu 2 bpath: verbatim POSIX path, can contain any character except NUL .IP \(bu 2 source: link target for links (identical to linktarget) .IP \(bu 2 linktarget .IP \(bu 2 flags .IP \(bu 2 size .IP \(bu 2 csize: compressed size .IP \(bu 2 dsize: deduplicated size .IP \(bu 2 dcsize: deduplicated compressed size .IP \(bu 2 num_chunks: number of chunks in this file .IP \(bu 2 unique_chunks: number of unique chunks in this file .IP \(bu 2 mtime .IP \(bu 2 ctime .IP \(bu 2 atime .IP \(bu 2 isomtime .IP \(bu 2 isoctime .IP \(bu 2 isoatime .IP \(bu 2 md5 .IP \(bu 2 sha1 .IP \(bu 2 sha224 .IP \(bu 2 sha256 .IP \(bu 2 sha384 .IP \(bu 2 sha512 .IP \(bu 2 archiveid .IP \(bu 2 archivename .IP \(bu 2 extra: prepends {source} with " \-> " for soft links and " link to " for hard links .IP \(bu 2 health: either "healthy" (file ok) or "broken" (if file has all\-zero replacement chunks) .UNINDENT .SH SEE ALSO .sp \fIborg\-common(1)\fP, \fIborg\-info(1)\fP, \fIborg\-diff(1)\fP, \fIborg\-prune(1)\fP, \fIborg\-patterns(1)\fP .SH AUTHOR The Borg Collective .\" Generated by docutils manpage writer. . borgbackup-1.1.15/docs/man/borg-serve.10000644000175000017500000001467713771325506017564 0ustar useruser00000000000000.\" Man page generated from reStructuredText. . .TH BORG-SERVE 1 "2020-12-24" "" "borg backup tool" .SH NAME borg-serve \- Start in server mode. This command is usually not used manually. . .nr rst2man-indent-level 0 . .de1 rstReportMargin \\$1 \\n[an-margin] level \\n[rst2man-indent-level] level margin: \\n[rst2man-indent\\n[rst2man-indent-level]] - \\n[rst2man-indent0] \\n[rst2man-indent1] \\n[rst2man-indent2] .. .de1 INDENT .\" .rstReportMargin pre: . RS \\$1 . nr rst2man-indent\\n[rst2man-indent-level] \\n[an-margin] . nr rst2man-indent-level +1 .\" .rstReportMargin post: .. .de UNINDENT . RE .\" indent \\n[an-margin] .\" old: \\n[rst2man-indent\\n[rst2man-indent-level]] .nr rst2man-indent-level -1 .\" new: \\n[rst2man-indent\\n[rst2man-indent-level]] .in \\n[rst2man-indent\\n[rst2man-indent-level]]u .. 
.SH SYNOPSIS .sp borg [common options] serve [options] .SH DESCRIPTION .sp This command starts a repository server process. This command is usually not used manually. .SH OPTIONS .sp See \fIborg\-common(1)\fP for common options of Borg commands. .SS optional arguments .INDENT 0.0 .TP .BI \-\-restrict\-to\-path \ PATH restrict repository access to PATH. Can be specified multiple times to allow the client access to several directories. Access to all sub\-directories is granted implicitly; PATH doesn\(aqt need to directly point to a repository. .TP .BI \-\-restrict\-to\-repository \ PATH restrict repository access. Only the repository located at PATH (no sub\-directories are considered) is accessible. Can be specified multiple times to allow the client access to several repositories. Unlike \fB\-\-restrict\-to\-path\fP sub\-directories are not accessible; PATH needs to directly point at a repository location. PATH may be an empty directory or the last element of PATH may not exist, in which case the client may initialize a repository there. .TP .B \-\-append\-only only allow appending to repository segment files .TP .BI \-\-storage\-quota \ QUOTA Override storage quota of the repository (e.g. 5G, 1.5T). When a new repository is initialized, sets the storage quota on the new repository as well. Default: no quota. .UNINDENT .SH EXAMPLES .sp borg serve has special support for ssh forced commands (see \fBauthorized_keys\fP example below): it will detect that you use such a forced command and extract the value of the \fB\-\-restrict\-to\-path\fP option(s). .sp It will then parse the original command that came from the client, makes sure that it is also \fBborg serve\fP and enforce path restriction(s) as given by the forced command. That way, other options given by the client (like \fB\-\-info\fP or \fB\-\-umask\fP) are preserved (and are not fixed by the forced command). .sp Environment variables (such as BORG_HOSTNAME_IS_UNIQUE) contained in the original command sent by the client are \fInot\fP interpreted, but ignored. If BORG_XXX environment variables should be set on the \fBborg serve\fP side, then these must be set in system\-specific locations like \fB/etc/environment\fP or in the forced command itself (example below). .INDENT 0.0 .INDENT 3.5 .sp .nf .ft C # Allow an SSH keypair to only run borg, and only have access to /path/to/repo. # Use key options to disable unneeded and potentially dangerous SSH functionality. # This will help to secure an automated remote backup system. $ cat ~/.ssh/authorized_keys command="borg serve \-\-restrict\-to\-path /path/to/repo",restrict ssh\-rsa AAAAB3[...] # Set a BORG_XXX environment variable on the "borg serve" side $ cat ~/.ssh/authorized_keys command="export BORG_XXX=value; borg serve [...]",restrict ssh\-rsa [...] .ft P .fi .UNINDENT .UNINDENT .sp \fBNOTE:\fP .INDENT 0.0 .INDENT 3.5 The examples above use the \fBrestrict\fP directive. This does automatically block potential dangerous ssh features, even when they are added in a future update. Thus, this option should be preferred. .sp If you\(aqre using openssh\-server < 7.2, however, you have to explicitly specify the ssh features to restrict and cannot simply use the restrict option as it has been introduced in v7.2. We recommend to use \fBno\-port\-forwarding,no\-X11\-forwarding,no\-pty,no\-agent\-forwarding,no\-user\-rc\fP in this case. 
.UNINDENT .UNINDENT .SS SSH Configuration .sp \fBborg serve\fP\(aqs pipes (\fBstdin\fP/\fBstdout\fP/\fBstderr\fP) are connected to the \fBsshd\fP process on the server side. In the event that the SSH connection between \fBborg serve\fP and the client is disconnected or stuck abnormally (for example, due to a network outage), it can take a long time for \fBsshd\fP to notice the client is disconnected. In the meantime, \fBsshd\fP continues running, and as a result so does the \fBborg serve\fP process holding the lock on the repository. This can cause subsequent \fBborg\fP operations on the remote repository to fail with the error: \fBFailed to create/acquire the lock\fP\&. .sp In order to avoid this, it is recommended to perform the following additional SSH configuration: .sp Either in the client side\(aqs \fB~/.ssh/config\fP file, or in the client\(aqs \fB/etc/ssh/ssh_config\fP file: .INDENT 0.0 .INDENT 3.5 .sp .nf .ft C Host backupserver ServerAliveInterval 10 ServerAliveCountMax 30 .ft P .fi .UNINDENT .UNINDENT .sp Replacing \fBbackupserver\fP with the hostname, FQDN or IP address of the borg server. .sp This will cause the client to send a keepalive to the server every 10 seconds. If 30 consecutive keepalives are sent without a response (a time of 300 seconds), the ssh client process will be terminated, causing the borg process to terminate gracefully. .sp On the server side\(aqs \fBsshd\fP configuration file (typically \fB/etc/ssh/sshd_config\fP): .INDENT 0.0 .INDENT 3.5 .sp .nf .ft C ClientAliveInterval 10 ClientAliveCountMax 30 .ft P .fi .UNINDENT .UNINDENT .sp This will cause the server to send a keep alive to the client every 10 seconds. If 30 consecutive keepalives are sent without a response (a time of 300 seconds), the server\(aqs sshd process will be terminated, causing the \fBborg serve\fP process to terminate gracefully and release the lock on the repository. .sp If you then run borg commands with \fB\-\-lock\-wait 600\fP, this gives sufficient time for the borg serve processes to terminate after the SSH connection is torn down after the 300 second wait for the keepalives to fail. .sp You may, of course, modify the timeout values demonstrated above to values that suit your environment and use case. .SH SEE ALSO .sp \fIborg\-common(1)\fP .SH AUTHOR The Borg Collective .\" Generated by docutils manpage writer. . borgbackup-1.1.15/docs/man/borg-key.10000644000175000017500000000233513771325506017214 0ustar useruser00000000000000.\" Man page generated from reStructuredText. . .TH BORG-KEY 1 "2020-12-24" "" "borg backup tool" .SH NAME borg-key \- Manage a keyfile or repokey of a repository . .nr rst2man-indent-level 0 . .de1 rstReportMargin \\$1 \\n[an-margin] level \\n[rst2man-indent-level] level margin: \\n[rst2man-indent\\n[rst2man-indent-level]] - \\n[rst2man-indent0] \\n[rst2man-indent1] \\n[rst2man-indent2] .. .de1 INDENT .\" .rstReportMargin pre: . RS \\$1 . nr rst2man-indent\\n[rst2man-indent-level] \\n[an-margin] . nr rst2man-indent-level +1 .\" .rstReportMargin post: .. .de UNINDENT . RE .\" indent \\n[an-margin] .\" old: \\n[rst2man-indent\\n[rst2man-indent-level]] .nr rst2man-indent-level -1 .\" new: \\n[rst2man-indent\\n[rst2man-indent-level]] .in \\n[rst2man-indent\\n[rst2man-indent-level]]u .. .SH SYNOPSIS .nf borg [common options] key export ... borg [common options] key import ... borg [common options] key change\-passphrase ... borg [common options] key migrate\-to\-repokey ... 
.fi .sp .SH SEE ALSO .sp \fIborg\-common(1)\fP, \fIborg\-key\-export(1)\fP, \fIborg\-key\-import(1)\fP, \fIborg\-key\-change\-passphrase(1)\fP, \fIborg\-key\-migrate\-to\-repokey(1)\fP .SH AUTHOR The Borg Collective .\" Generated by docutils manpage writer. . borgbackup-1.1.15/docs/man/borg-key-migrate-to-repokey.10000644000175000017500000000364113771325506022737 0ustar useruser00000000000000.\" Man page generated from reStructuredText. . .TH BORG-KEY-MIGRATE-TO-REPOKEY 1 "2020-12-24" "" "borg backup tool" .SH NAME borg-key-migrate-to-repokey \- Migrate passphrase -> repokey . .nr rst2man-indent-level 0 . .de1 rstReportMargin \\$1 \\n[an-margin] level \\n[rst2man-indent-level] level margin: \\n[rst2man-indent\\n[rst2man-indent-level]] - \\n[rst2man-indent0] \\n[rst2man-indent1] \\n[rst2man-indent2] .. .de1 INDENT .\" .rstReportMargin pre: . RS \\$1 . nr rst2man-indent\\n[rst2man-indent-level] \\n[an-margin] . nr rst2man-indent-level +1 .\" .rstReportMargin post: .. .de UNINDENT . RE .\" indent \\n[an-margin] .\" old: \\n[rst2man-indent\\n[rst2man-indent-level]] .nr rst2man-indent-level -1 .\" new: \\n[rst2man-indent\\n[rst2man-indent-level]] .in \\n[rst2man-indent\\n[rst2man-indent-level]]u .. .SH SYNOPSIS .sp borg [common options] key migrate\-to\-repokey [options] [REPOSITORY] .SH DESCRIPTION .sp This command migrates a repository from passphrase mode (removed in Borg 1.0) to repokey mode. .sp You will be first asked for the repository passphrase (to open it in passphrase mode). This is the same passphrase as you used to use for this repo before 1.0. .sp It will then derive the different secrets from this passphrase. .sp Then you will be asked for a new passphrase (twice, for safety). This passphrase will be used to protect the repokey (which contains these same secrets in encrypted form). You may use the same passphrase as you used to use, but you may also use a different one. .sp After migrating to repokey mode, you can change the passphrase at any time. But please note: the secrets will always stay the same and they could always be derived from your (old) passphrase\-mode passphrase. .SH OPTIONS .sp See \fIborg\-common(1)\fP for common options of Borg commands. .SS arguments .sp REPOSITORY .SH SEE ALSO .sp \fIborg\-common(1)\fP .SH AUTHOR The Borg Collective .\" Generated by docutils manpage writer. . borgbackup-1.1.15/docs/man/borg-export-tar.10000644000175000017500000001010713771325506020525 0ustar useruser00000000000000.\" Man page generated from reStructuredText. . .TH BORG-EXPORT-TAR 1 "2020-12-24" "" "borg backup tool" .SH NAME borg-export-tar \- Export archive contents as a tarball . .nr rst2man-indent-level 0 . .de1 rstReportMargin \\$1 \\n[an-margin] level \\n[rst2man-indent-level] level margin: \\n[rst2man-indent\\n[rst2man-indent-level]] - \\n[rst2man-indent0] \\n[rst2man-indent1] \\n[rst2man-indent2] .. .de1 INDENT .\" .rstReportMargin pre: . RS \\$1 . nr rst2man-indent\\n[rst2man-indent-level] \\n[an-margin] . nr rst2man-indent-level +1 .\" .rstReportMargin post: .. .de UNINDENT . RE .\" indent \\n[an-margin] .\" old: \\n[rst2man-indent\\n[rst2man-indent-level]] .nr rst2man-indent-level -1 .\" new: \\n[rst2man-indent\\n[rst2man-indent-level]] .in \\n[rst2man-indent\\n[rst2man-indent-level]]u .. .SH SYNOPSIS .sp borg [common options] export\-tar [options] ARCHIVE FILE [PATH...] .SH DESCRIPTION .sp This command creates a tarball from an archive. .sp When giving \(aq\-\(aq as the output FILE, Borg will write a tar stream to standard output. 
.sp By default (\fB\-\-tar\-filter=auto\fP) Borg will detect whether the FILE should be compressed based on its file extension and pipe the tarball through an appropriate filter before writing it to FILE: .INDENT 0.0 .IP \(bu 2 \&.tar.gz: gzip .IP \(bu 2 \&.tar.bz2: bzip2 .IP \(bu 2 \&.tar.xz: xz .UNINDENT .sp Alternatively a \fB\-\-tar\-filter\fP program may be explicitly specified. It should read the uncompressed tar stream from stdin and write a compressed/filtered tar stream to stdout. .sp The generated tarball uses the GNU tar format. .sp export\-tar is a lossy conversion: BSD flags, ACLs, extended attributes (xattrs), atime and ctime are not exported. Timestamp resolution is limited to whole seconds, not the nanosecond resolution otherwise supported by Borg. .sp A \fB\-\-sparse\fP option (as found in borg extract) is not supported. .sp By default the entire archive is extracted but a subset of files and directories can be selected by passing a list of \fBPATHs\fP as arguments. The file selection can further be restricted by using the \fB\-\-exclude\fP option. .sp See the output of the "borg help patterns" command for more help on exclude patterns. .sp \fB\-\-progress\fP can be slower than no progress display, since it makes one additional pass over the archive metadata. .SH OPTIONS .sp See \fIborg\-common(1)\fP for common options of Borg commands. .SS arguments .INDENT 0.0 .TP .B ARCHIVE archive to export .TP .B FILE output tar file. "\-" to write to stdout instead. .TP .B PATH paths to extract; patterns are supported .UNINDENT .SS optional arguments .INDENT 0.0 .TP .B \-\-tar\-filter filter program to pipe data through .TP .B \-\-list output verbose list of items (files, dirs, ...) .UNINDENT .SS Exclusion options .INDENT 0.0 .TP .BI \-e \ PATTERN\fR,\fB \ \-\-exclude \ PATTERN exclude paths matching PATTERN .TP .BI \-\-exclude\-from \ EXCLUDEFILE read exclude patterns from EXCLUDEFILE, one per line .TP .BI \-\-pattern \ PATTERN experimental: include/exclude paths matching PATTERN .TP .BI \-\-patterns\-from \ PATTERNFILE experimental: read include/exclude patterns from PATTERNFILE, one per line .TP .BI \-\-strip\-components \ NUMBER Remove the specified number of leading path elements. Paths with fewer elements will be silently skipped. .UNINDENT .SH EXAMPLES .INDENT 0.0 .INDENT 3.5 .sp .nf .ft C # export as uncompressed tar $ borg export\-tar /path/to/repo::Monday Monday.tar # exclude some types, compress using gzip $ borg export\-tar /path/to/repo::Monday Monday.tar.gz \-\-exclude \(aq*.so\(aq # use higher compression level with gzip $ borg export\-tar \-\-tar\-filter="gzip \-9" testrepo::linux Monday.tar.gz # export a tar, but instead of storing it on disk, # upload it to a remote site using curl. $ borg export\-tar /path/to/repo::Monday \- | curl \-\-data\-binary @\- https://somewhere/to/POST # remote extraction via "tarpipe" $ borg export\-tar /path/to/repo::Monday \- | ssh somewhere "cd extracted; tar x" .ft P .fi .UNINDENT .UNINDENT .SH SEE ALSO .sp \fIborg\-common(1)\fP .SH AUTHOR The Borg Collective .\" Generated by docutils manpage writer. . borgbackup-1.1.15/docs/man/borg-rename.10000644000175000017500000000313513771325506017672 0ustar useruser00000000000000.\" Man page generated from reStructuredText. . .TH BORG-RENAME 1 "2020-12-24" "" "borg backup tool" .SH NAME borg-rename \- Rename an existing archive . .nr rst2man-indent-level 0 . 
.de1 rstReportMargin \\$1 \\n[an-margin] level \\n[rst2man-indent-level] level margin: \\n[rst2man-indent\\n[rst2man-indent-level]] - \\n[rst2man-indent0] \\n[rst2man-indent1] \\n[rst2man-indent2] .. .de1 INDENT .\" .rstReportMargin pre: . RS \\$1 . nr rst2man-indent\\n[rst2man-indent-level] \\n[an-margin] . nr rst2man-indent-level +1 .\" .rstReportMargin post: .. .de UNINDENT . RE .\" indent \\n[an-margin] .\" old: \\n[rst2man-indent\\n[rst2man-indent-level]] .nr rst2man-indent-level -1 .\" new: \\n[rst2man-indent\\n[rst2man-indent-level]] .in \\n[rst2man-indent\\n[rst2man-indent-level]]u .. .SH SYNOPSIS .sp borg [common options] rename [options] ARCHIVE NEWNAME .SH DESCRIPTION .sp This command renames an archive in the repository. .sp This results in a different archive ID. .SH OPTIONS .sp See \fIborg\-common(1)\fP for common options of Borg commands. .SS arguments .INDENT 0.0 .TP .B ARCHIVE archive to rename .TP .B NEWNAME the new archive name to use .UNINDENT .SH EXAMPLES .INDENT 0.0 .INDENT 3.5 .sp .nf .ft C $ borg create /path/to/repo::archivename ~ $ borg list /path/to/repo archivename Mon, 2016\-02\-15 19:50:19 $ borg rename /path/to/repo::archivename newname $ borg list /path/to/repo newname Mon, 2016\-02\-15 19:50:19 .ft P .fi .UNINDENT .UNINDENT .SH SEE ALSO .sp \fIborg\-common(1)\fP .SH AUTHOR The Borg Collective .\" Generated by docutils manpage writer. . borgbackup-1.1.15/docs/man/borg-info.10000644000175000017500000001271213771325506017357 0ustar useruser00000000000000.\" Man page generated from reStructuredText. . .TH BORG-INFO 1 "2020-12-24" "" "borg backup tool" .SH NAME borg-info \- Show archive details such as disk space used . .nr rst2man-indent-level 0 . .de1 rstReportMargin \\$1 \\n[an-margin] level \\n[rst2man-indent-level] level margin: \\n[rst2man-indent\\n[rst2man-indent-level]] - \\n[rst2man-indent0] \\n[rst2man-indent1] \\n[rst2man-indent2] .. .de1 INDENT .\" .rstReportMargin pre: . RS \\$1 . nr rst2man-indent\\n[rst2man-indent-level] \\n[an-margin] . nr rst2man-indent-level +1 .\" .rstReportMargin post: .. .de UNINDENT . RE .\" indent \\n[an-margin] .\" old: \\n[rst2man-indent\\n[rst2man-indent-level]] .nr rst2man-indent-level -1 .\" new: \\n[rst2man-indent\\n[rst2man-indent-level]] .in \\n[rst2man-indent\\n[rst2man-indent-level]]u .. .SH SYNOPSIS .sp borg [common options] info [options] [REPOSITORY_OR_ARCHIVE] .SH DESCRIPTION .sp This command displays detailed information about the specified archive or repository. .sp Please note that the deduplicated sizes of the individual archives do not add up to the deduplicated size of the repository ("all archives"), because the two are meaning different things: .sp This archive / deduplicated size = amount of data stored ONLY for this archive = unique chunks of this archive. All archives / deduplicated size = amount of data stored in the repo = all chunks in the repository. .sp Borg archives can only contain a limited amount of file metadata. The size of an archive relative to this limit depends on a number of factors, mainly the number of files, the lengths of paths and other metadata stored for files. This is shown as \fIutilization of maximum supported archive size\fP\&. .SH OPTIONS .sp See \fIborg\-common(1)\fP for common options of Borg commands. 
.SS arguments .INDENT 0.0 .TP .B REPOSITORY_OR_ARCHIVE repository or archive to display information about .UNINDENT .SS optional arguments .INDENT 0.0 .TP .B \-\-json format output as JSON .UNINDENT .SS Archive filters .INDENT 0.0 .TP .BI \-P \ PREFIX\fR,\fB \ \-\-prefix \ PREFIX only consider archive names starting with this prefix. .TP .BI \-a \ GLOB\fR,\fB \ \-\-glob\-archives \ GLOB only consider archive names matching the glob. sh: rules apply, see "borg help patterns". \fB\-\-prefix\fP and \fB\-\-glob\-archives\fP are mutually exclusive. .TP .BI \-\-sort\-by \ KEYS Comma\-separated list of sorting keys; valid keys are: timestamp, name, id; default is: timestamp .TP .BI \-\-first \ N consider first N archives after other filters were applied .TP .BI \-\-last \ N consider last N archives after other filters were applied .UNINDENT .SH EXAMPLES .INDENT 0.0 .INDENT 3.5 .sp .nf .ft C $ borg info /path/to/repo::2017\-06\-29T11:00\-srv Archive name: 2017\-06\-29T11:00\-srv Archive fingerprint: b2f1beac2bd553b34e06358afa45a3c1689320d39163890c5bbbd49125f00fe5 Comment: Hostname: myhostname Username: root Time (start): Thu, 2017\-06\-29 11:03:07 Time (end): Thu, 2017\-06\-29 11:03:13 Duration: 5.66 seconds Number of files: 17037 Command line: /usr/sbin/borg create /path/to/repo::2017\-06\-29T11:00\-srv /srv Utilization of max. archive size: 0% \-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\- Original size Compressed size Deduplicated size This archive: 12.53 GB 12.49 GB 1.62 kB All archives: 121.82 TB 112.41 TB 215.42 GB Unique chunks Total chunks Chunk index: 1015213 626934122 $ borg info /path/to/repo \-\-last 1 Archive name: 2017\-06\-29T11:00\-srv Archive fingerprint: b2f1beac2bd553b34e06358afa45a3c1689320d39163890c5bbbd49125f00fe5 Comment: Hostname: myhostname Username: root Time (start): Thu, 2017\-06\-29 11:03:07 Time (end): Thu, 2017\-06\-29 11:03:13 Duration: 5.66 seconds Number of files: 17037 Command line: /usr/sbin/borg create /path/to/repo::2017\-06\-29T11:00\-srv /srv Utilization of max. archive size: 0% \-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\- Original size Compressed size Deduplicated size This archive: 12.53 GB 12.49 GB 1.62 kB All archives: 121.82 TB 112.41 TB 215.42 GB Unique chunks Total chunks Chunk index: 1015213 626934122 $ borg info /path/to/repo Repository ID: d857ce5788c51272c61535062e89eac4e8ef5a884ffbe976e0af9d8765dedfa5 Location: /path/to/repo Encrypted: Yes (repokey) Cache: /root/.cache/borg/d857ce5788c51272c61535062e89eac4e8ef5a884ffbe976e0af9d8765dedfa5 Security dir: /root/.config/borg/security/d857ce5788c51272c61535062e89eac4e8ef5a884ffbe976e0af9d8765dedfa5 \-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\- Original size Compressed size Deduplicated size All archives: 121.82 TB 112.41 TB 215.42 GB Unique chunks Total chunks Chunk index: 1015213 626934122 .ft P .fi .UNINDENT .UNINDENT .SH SEE ALSO .sp \fIborg\-common(1)\fP, \fIborg\-list(1)\fP, \fIborg\-diff(1)\fP .SH AUTHOR The Borg Collective .\" Generated by docutils manpage writer. . borgbackup-1.1.15/docs/man/borg-diff.10000644000175000017500000000673413771325506017343 0ustar useruser00000000000000.\" Man page generated from reStructuredText. . 
.TH BORG-DIFF 1 "2020-12-24" "" "borg backup tool" .SH NAME borg-diff \- Diff contents of two archives . .nr rst2man-indent-level 0 . .de1 rstReportMargin \\$1 \\n[an-margin] level \\n[rst2man-indent-level] level margin: \\n[rst2man-indent\\n[rst2man-indent-level]] - \\n[rst2man-indent0] \\n[rst2man-indent1] \\n[rst2man-indent2] .. .de1 INDENT .\" .rstReportMargin pre: . RS \\$1 . nr rst2man-indent\\n[rst2man-indent-level] \\n[an-margin] . nr rst2man-indent-level +1 .\" .rstReportMargin post: .. .de UNINDENT . RE .\" indent \\n[an-margin] .\" old: \\n[rst2man-indent\\n[rst2man-indent-level]] .nr rst2man-indent-level -1 .\" new: \\n[rst2man-indent\\n[rst2man-indent-level]] .in \\n[rst2man-indent\\n[rst2man-indent-level]]u .. .SH SYNOPSIS .sp borg [common options] diff [options] REPO::ARCHIVE1 ARCHIVE2 [PATH...] .SH DESCRIPTION .sp This command finds differences (file contents, user/group/mode) between archives. .sp A repository location and an archive name must be specified for REPO::ARCHIVE1. ARCHIVE2 is just another archive name in same repository (no repository location allowed). .sp For archives created with Borg 1.1 or newer diff automatically detects whether the archives are created with the same chunker params. If so, only chunk IDs are compared, which is very fast. .sp For archives prior to Borg 1.1 chunk contents are compared by default. If you did not create the archives with different chunker params, pass \fB\-\-same\-chunker\-params\fP\&. Note that the chunker params changed from Borg 0.xx to 1.0. .sp See the output of the "borg help patterns" command for more help on exclude patterns. .SH OPTIONS .sp See \fIborg\-common(1)\fP for common options of Borg commands. .SS arguments .INDENT 0.0 .TP .B REPO::ARCHIVE1 repository location and ARCHIVE1 name .TP .B ARCHIVE2 ARCHIVE2 name (no repository location allowed) .TP .B PATH paths of items inside the archives to compare; patterns are supported .UNINDENT .SS optional arguments .INDENT 0.0 .TP .B \-\-numeric\-owner only consider numeric user and group identifiers .TP .B \-\-same\-chunker\-params Override check of chunker parameters. .TP .B \-\-sort Sort the output lines by file path. .UNINDENT .SS Exclusion options .INDENT 0.0 .TP .BI \-e \ PATTERN\fR,\fB \ \-\-exclude \ PATTERN exclude paths matching PATTERN .TP .BI \-\-exclude\-from \ EXCLUDEFILE read exclude patterns from EXCLUDEFILE, one per line .TP .BI \-\-pattern \ PATTERN experimental: include/exclude paths matching PATTERN .TP .BI \-\-patterns\-from \ PATTERNFILE experimental: read include/exclude patterns from PATTERNFILE, one per line .UNINDENT .SH EXAMPLES .INDENT 0.0 .INDENT 3.5 .sp .nf .ft C $ borg init \-e=none testrepo $ mkdir testdir $ cd testdir $ echo asdf > file1 $ dd if=/dev/urandom bs=1M count=4 > file2 $ touch file3 $ borg create ../testrepo::archive1 . $ chmod a+x file1 $ echo "something" >> file2 $ borg create ../testrepo::archive2 . $ rm file3 $ touch file4 $ borg create ../testrepo::archive3 . $ cd .. $ borg diff testrepo::archive1 archive2 [\-rw\-r\-\-r\-\- \-> \-rwxr\-xr\-x] file1 +135 B \-252 B file2 $ borg diff testrepo::archive2 archive3 added 0 B file4 removed 0 B file3 $ borg diff testrepo::archive1 archive3 [\-rw\-r\-\-r\-\- \-> \-rwxr\-xr\-x] file1 +135 B \-252 B file2 added 0 B file4 removed 0 B file3 .ft P .fi .UNINDENT .UNINDENT .SH SEE ALSO .sp \fIborg\-common(1)\fP .SH AUTHOR The Borg Collective .\" Generated by docutils manpage writer. . 
borgbackup-1.1.15/docs/man/borg-prune.10000644000175000017500000001511513771325506017555 0ustar useruser00000000000000.\" Man page generated from reStructuredText. . .TH BORG-PRUNE 1 "2020-12-24" "" "borg backup tool" .SH NAME borg-prune \- Prune repository archives according to specified rules . .nr rst2man-indent-level 0 . .de1 rstReportMargin \\$1 \\n[an-margin] level \\n[rst2man-indent-level] level margin: \\n[rst2man-indent\\n[rst2man-indent-level]] - \\n[rst2man-indent0] \\n[rst2man-indent1] \\n[rst2man-indent2] .. .de1 INDENT .\" .rstReportMargin pre: . RS \\$1 . nr rst2man-indent\\n[rst2man-indent-level] \\n[an-margin] . nr rst2man-indent-level +1 .\" .rstReportMargin post: .. .de UNINDENT . RE .\" indent \\n[an-margin] .\" old: \\n[rst2man-indent\\n[rst2man-indent-level]] .nr rst2man-indent-level -1 .\" new: \\n[rst2man-indent\\n[rst2man-indent-level]] .in \\n[rst2man-indent\\n[rst2man-indent-level]]u .. .SH SYNOPSIS .sp borg [common options] prune [options] [REPOSITORY] .SH DESCRIPTION .sp The prune command prunes a repository by deleting all archives not matching any of the specified retention options. This command is normally used by automated backup scripts wanting to keep a certain number of historic backups. .sp Also, prune automatically removes checkpoint archives (incomplete archives left behind by interrupted backup runs) except if the checkpoint is the latest archive (and thus still needed). Checkpoint archives are not considered when comparing archive counts against the retention limits (\fB\-\-keep\-X\fP). .sp If a prefix is set with \-P, then only archives that start with the prefix are considered for deletion and only those archives count towards the totals specified by the rules. Otherwise, \fIall\fP archives in the repository are candidates for deletion! There is no automatic distinction between archives representing different contents. These need to be distinguished by specifying matching prefixes. .sp If you have multiple sequences of archives with different data sets (e.g. from different machines) in one shared repository, use one prune call per data set that matches only the respective archives using the \-P option. .sp The \fB\-\-keep\-within\fP option takes an argument of the form "<int><char>", where char is "H", "d", "w", "m", "y". For example, \fB\-\-keep\-within 2d\fP means to keep all archives that were created within the past 48 hours. "1m" is taken to mean "31d". The archives kept with this option do not count towards the totals specified by any other options. .sp A good procedure is to thin out your backups more and more the older they get. As an example, \fB\-\-keep\-daily 7\fP means to keep the latest backup on each day, up to 7 most recent days with backups (days without backups do not count). The rules are applied from secondly to yearly, and backups selected by previous rules do not count towards those of later rules. The time that each backup starts is used for pruning purposes. Dates and times are interpreted in the local timezone, and weeks go from Monday to Sunday. Specifying a negative number of archives to keep means that there is no limit. .sp The \fB\-\-keep\-last N\fP option does the same as \fB\-\-keep\-secondly N\fP (and it will keep the last N archives under the assumption that you do not create more than one backup archive in the same second).
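.sp
As an illustrative sketch only (the retention numbers and the repository path are arbitrary examples, not recommendations), the rules described above can be combined in a single call; \fB\-\-dry\-run\fP makes it a no\-op:
.INDENT 0.0
.INDENT 3.5
.sp
.nf
.ft C
# keep everything from the last 7 days, then thin out to 4 weekly
# and 6 monthly archives \- nothing is deleted while \-\-dry\-run is given
$ borg prune \-v \-\-list \-\-dry\-run \-\-keep\-within=7d \-\-keep\-weekly=4 \-\-keep\-monthly=6 /path/to/repo
.ft P
.fi
.UNINDENT
.UNINDENT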
.sp When using \fB\-\-stats\fP, you will get some statistics about how much data was deleted \- the "Deleted data" deduplicated size there is most interesting as that is how much your repository will shrink. Please note that the "All archives" stats refer to the state after pruning. .SH OPTIONS .sp See \fIborg\-common(1)\fP for common options of Borg commands. .SS arguments .INDENT 0.0 .TP .B REPOSITORY repository to prune .UNINDENT .SS optional arguments .INDENT 0.0 .TP .B \-n\fP,\fB \-\-dry\-run do not change repository .TP .B \-\-force force pruning of corrupted archives, use \fB\-\-force \-\-force\fP in case \fB\-\-force\fP does not work. .TP .B \-s\fP,\fB \-\-stats print statistics for the deleted archive .TP .B \-\-list output verbose list of archives it keeps/prunes .TP .BI \-\-keep\-within \ INTERVAL keep all archives within this time interval .TP .B \-\-keep\-last\fP,\fB \-\-keep\-secondly number of secondly archives to keep .TP .B \-\-keep\-minutely number of minutely archives to keep .TP .B \-H\fP,\fB \-\-keep\-hourly number of hourly archives to keep .TP .B \-d\fP,\fB \-\-keep\-daily number of daily archives to keep .TP .B \-w\fP,\fB \-\-keep\-weekly number of weekly archives to keep .TP .B \-m\fP,\fB \-\-keep\-monthly number of monthly archives to keep .TP .B \-y\fP,\fB \-\-keep\-yearly number of yearly archives to keep .TP .B \-\-save\-space work slower, but using less space .UNINDENT .SS Archive filters .INDENT 0.0 .TP .BI \-P \ PREFIX\fR,\fB \ \-\-prefix \ PREFIX only consider archive names starting with this prefix. .TP .BI \-a \ GLOB\fR,\fB \ \-\-glob\-archives \ GLOB only consider archive names matching the glob. sh: rules apply, see "borg help patterns". \fB\-\-prefix\fP and \fB\-\-glob\-archives\fP are mutually exclusive. .UNINDENT .SH EXAMPLES .sp Be careful, prune is a potentially dangerous command, it will remove backup archives. .sp The default of prune is to apply to \fBall archives in the repository\fP unless you restrict its operation to a subset of the archives using \fB\-\-prefix\fP\&. When using \fB\-\-prefix\fP, be careful to choose a good prefix \- e.g. do not use a prefix "foo" if you do not also want to match "foobar". .sp It is strongly recommended to always run \fBprune \-v \-\-list \-\-dry\-run ...\fP first so you will see what it would do without it actually doing anything. .INDENT 0.0 .INDENT 3.5 .sp .nf .ft C # Keep 7 end of day and 4 additional end of week archives. # Do a dry\-run without actually deleting anything. $ borg prune \-v \-\-list \-\-dry\-run \-\-keep\-daily=7 \-\-keep\-weekly=4 /path/to/repo # Same as above but only apply to archive names starting with the hostname # of the machine followed by a "\-" character: $ borg prune \-v \-\-list \-\-keep\-daily=7 \-\-keep\-weekly=4 \-\-prefix=\(aq{hostname}\-\(aq /path/to/repo # Keep 7 end of day, 4 additional end of week archives, # and an end of month archive for every month: $ borg prune \-v \-\-list \-\-keep\-daily=7 \-\-keep\-weekly=4 \-\-keep\-monthly=\-1 /path/to/repo # Keep all backups in the last 10 days, 4 additional end of week archives, # and an end of month archive for every month: $ borg prune \-v \-\-list \-\-keep\-within=10d \-\-keep\-weekly=4 \-\-keep\-monthly=\-1 /path/to/repo .ft P .fi .UNINDENT .UNINDENT .sp There is also a visualized prune example in \fBdocs/misc/prune\-example.txt\fP\&. .SH SEE ALSO .sp \fIborg\-common(1)\fP .SH AUTHOR The Borg Collective .\" Generated by docutils manpage writer. . 
borgbackup-1.1.15/docs/man/borg-config.10000644000175000017500000000530013771325506017664 0ustar useruser00000000000000.\" Man page generated from reStructuredText. . .TH BORG-CONFIG 1 "2020-12-24" "" "borg backup tool" .SH NAME borg-config \- get, set, and delete values in a repository or cache config file . .nr rst2man-indent-level 0 . .de1 rstReportMargin \\$1 \\n[an-margin] level \\n[rst2man-indent-level] level margin: \\n[rst2man-indent\\n[rst2man-indent-level]] - \\n[rst2man-indent0] \\n[rst2man-indent1] \\n[rst2man-indent2] .. .de1 INDENT .\" .rstReportMargin pre: . RS \\$1 . nr rst2man-indent\\n[rst2man-indent-level] \\n[an-margin] . nr rst2man-indent-level +1 .\" .rstReportMargin post: .. .de UNINDENT . RE .\" indent \\n[an-margin] .\" old: \\n[rst2man-indent\\n[rst2man-indent-level]] .nr rst2man-indent-level -1 .\" new: \\n[rst2man-indent\\n[rst2man-indent-level]] .in \\n[rst2man-indent\\n[rst2man-indent-level]]u .. .SH SYNOPSIS .sp borg [common options] config [options] [REPOSITORY] [NAME] [VALUE] .SH DESCRIPTION .sp This command gets and sets options in a local repository or cache config file. For security reasons, this command only works on local repositories. .sp To delete a config value entirely, use \fB\-\-delete\fP\&. To list the values of the configuration file or the default values, use \fB\-\-list\fP\&. To get an existing key, pass only the key name. To set a key, pass both the key name and the new value. Keys can be specified in the format "section.name" or simply "name"; the section will default to "repository" and "cache" for the repo and cache configs, respectively. .sp By default, borg config manipulates the repository config file. Using \fB\-\-cache\fP edits the repository cache\(aqs config file instead. .SH OPTIONS .sp See \fIborg\-common(1)\fP for common options of Borg commands. .SS arguments .INDENT 0.0 .TP .B REPOSITORY repository to configure .TP .B NAME name of config key .TP .B VALUE new value for key .UNINDENT .SS optional arguments .INDENT 0.0 .TP .B \-c\fP,\fB \-\-cache get and set values from the repo cache .TP .B \-d\fP,\fB \-\-delete delete the key from the config file .TP .B \-l\fP,\fB \-\-list list the configuration of the repo .UNINDENT .SH EXAMPLES .sp \fBNOTE:\fP .INDENT 0.0 .INDENT 3.5 The repository & cache config files are some of the only directly manipulable parts of a repository that aren\(aqt versioned or backed up, so be careful when making changes! .UNINDENT .UNINDENT .INDENT 0.0 .INDENT 3.5 .sp .nf .ft C # find cache directory $ cd ~/.cache/borg/$(borg config /path/to/repo id) # reserve some space $ borg config /path/to/repo additional_free_space 2G # make a repo append\-only $ borg config /path/to/repo append_only 1 .ft P .fi .UNINDENT .UNINDENT .SH SEE ALSO .sp \fIborg\-common(1)\fP .SH AUTHOR The Borg Collective .\" Generated by docutils manpage writer. . borgbackup-1.1.15/docs/man/borg.10000644000175000017500000007226113771325506016433 0ustar useruser00000000000000.\" Man page generated from reStructuredText. . .TH BORG 1 "2017-02-05" "" "borg backup tool" .SH NAME borg \- deduplicating and encrypting backup tool . .nr rst2man-indent-level 0 . .de1 rstReportMargin \\$1 \\n[an-margin] level \\n[rst2man-indent-level] level margin: \\n[rst2man-indent\\n[rst2man-indent-level]] - \\n[rst2man-indent0] \\n[rst2man-indent1] \\n[rst2man-indent2] .. .de1 INDENT .\" .rstReportMargin pre: . RS \\$1 . nr rst2man-indent\\n[rst2man-indent-level] \\n[an-margin] . nr rst2man-indent-level +1 .\" .rstReportMargin post: .. .de UNINDENT .
RE .\" indent \\n[an-margin] .\" old: \\n[rst2man-indent\\n[rst2man-indent-level]] .nr rst2man-indent-level -1 .\" new: \\n[rst2man-indent\\n[rst2man-indent-level]] .in \\n[rst2man-indent\\n[rst2man-indent-level]]u .. .SH SYNOPSIS .sp borg [common options] [options] [arguments] .SH DESCRIPTION .\" we don't include the README.rst here since we want to keep this terse. . .sp BorgBackup (short: Borg) is a deduplicating backup program. Optionally, it supports compression and authenticated encryption. .sp The main goal of Borg is to provide an efficient and secure way to backup data. The data deduplication technique used makes Borg suitable for daily backups since only changes are stored. The authenticated encryption technique makes it suitable for backups to not fully trusted targets. .sp Borg stores a set of files in an \fIarchive\fP\&. A \fIrepository\fP is a collection of \fIarchives\fP\&. The format of repositories is Borg\-specific. Borg does not distinguish archives from each other in any way other than their name, it does not matter when or where archives were created (e.g. different hosts). .SH EXAMPLES .SS A step\-by\-step example .INDENT 0.0 .IP 1. 3 Before a backup can be made a repository has to be initialized: .INDENT 3.0 .INDENT 3.5 .sp .nf .ft C $ borg init \-\-encryption=repokey /path/to/repo .ft P .fi .UNINDENT .UNINDENT .IP 2. 3 Backup the \fB~/src\fP and \fB~/Documents\fP directories into an archive called \fIMonday\fP: .INDENT 3.0 .INDENT 3.5 .sp .nf .ft C $ borg create /path/to/repo::Monday ~/src ~/Documents .ft P .fi .UNINDENT .UNINDENT .IP 3. 3 The next day create a new archive called \fITuesday\fP: .INDENT 3.0 .INDENT 3.5 .sp .nf .ft C $ borg create \-\-stats /path/to/repo::Tuesday ~/src ~/Documents .ft P .fi .UNINDENT .UNINDENT .sp This backup will be a lot quicker and a lot smaller since only new never before seen data is stored. The \fB\-\-stats\fP option causes Borg to output statistics about the newly created archive such as the amount of unique data (not shared with other archives): .INDENT 3.0 .INDENT 3.5 .sp .nf .ft C \-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\- Archive name: Tuesday Archive fingerprint: bd31004d58f51ea06ff735d2e5ac49376901b21d58035f8fb05dbf866566e3c2 Time (start): Tue, 2016\-02\-16 18:15:11 Time (end): Tue, 2016\-02\-16 18:15:11 Duration: 0.19 seconds Number of files: 127 \-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\- Original size Compressed size Deduplicated size This archive: 4.16 MB 4.17 MB 26.78 kB All archives: 8.33 MB 8.34 MB 4.19 MB Unique chunks Total chunks Chunk index: 132 261 \-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\- .ft P .fi .UNINDENT .UNINDENT .IP 4. 3 List all archives in the repository: .INDENT 3.0 .INDENT 3.5 .sp .nf .ft C $ borg list /path/to/repo Monday Mon, 2016\-02\-15 19:14:44 Tuesday Tue, 2016\-02\-16 19:15:11 .ft P .fi .UNINDENT .UNINDENT .IP 5. 3 List the contents of the \fIMonday\fP archive: .INDENT 3.0 .INDENT 3.5 .sp .nf .ft C $ borg list /path/to/repo::Monday drwxr\-xr\-x user group 0 Mon, 2016\-02\-15 18:22:30 home/user/Documents \-rw\-r\-\-r\-\- user group 7961 Mon, 2016\-02\-15 18:22:30 home/user/Documents/Important.doc \&... .ft P .fi .UNINDENT .UNINDENT .IP 6. 
3 Restore the \fIMonday\fP archive by extracting the files relative to the current directory: .INDENT 3.0 .INDENT 3.5 .sp .nf .ft C $ borg extract /path/to/repo::Monday .ft P .fi .UNINDENT .UNINDENT .IP 7. 3 Recover disk space by manually deleting the \fIMonday\fP archive: .INDENT 3.0 .INDENT 3.5 .sp .nf .ft C $ borg delete /path/to/repo::Monday .ft P .fi .UNINDENT .UNINDENT .UNINDENT .sp \fBNOTE:\fP .INDENT 0.0 .INDENT 3.5 Borg is quiet by default (it works on WARNING log level). You can use options like \fB\-\-progress\fP or \fB\-\-list\fP to get specific reports during command execution. You can also add the \fB\-v\fP (or \fB\-\-verbose\fP or \fB\-\-info\fP) option to adjust the log level to INFO to get other informational messages. .UNINDENT .UNINDENT .SH NOTES .SS Positional Arguments and Options: Order matters .sp Borg only supports taking options (\fB\-s\fP and \fB\-\-progress\fP in the example) to the left or right of all positional arguments (\fBrepo::archive\fP and \fBpath\fP in the example), but not in between them: .INDENT 0.0 .INDENT 3.5 .sp .nf .ft C borg create \-s \-\-progress repo::archive path # good and preferred borg create repo::archive path \-s \-\-progress # also works borg create \-s repo::archive path \-\-progress # works, but ugly borg create repo::archive \-s \-\-progress path # BAD .ft P .fi .UNINDENT .UNINDENT .sp This is due to a problem in the argparse module: \fI\%https://bugs.python.org/issue15112\fP .SS Repository URLs .sp \fBLocal filesystem\fP (or locally mounted network filesystem): .sp \fB/path/to/repo\fP \- filesystem path to repo directory, absolute path .sp \fBpath/to/repo\fP \- filesystem path to repo directory, relative path .sp Also, stuff like \fB~/path/to/repo\fP or \fB~other/path/to/repo\fP works (this is expanded by your shell). .sp Note: you may also prepend a \fBfile://\fP to a filesystem path to get URL style. .sp \fBRemote repositories\fP accessed via ssh \fI\%user@host\fP: .sp \fBuser@host:/path/to/repo\fP \- remote repo, absolute path .sp \fBssh://user@host:port/path/to/repo\fP \- same, alternative syntax, port can be given .sp \fBRemote repositories with relative paths\fP can be given using this syntax: .sp \fBuser@host:path/to/repo\fP \- path relative to current directory .sp \fBuser@host:~/path/to/repo\fP \- path relative to user\(aqs home directory .sp \fBuser@host:~other/path/to/repo\fP \- path relative to other\(aqs home directory .sp Note: giving \fBuser@host:/./path/to/repo\fP or \fBuser@host:/~/path/to/repo\fP or \fBuser@host:/~other/path/to/repo\fP is also supported, but not required here. .sp \fBRemote repositories with relative paths, alternative syntax with port\fP: .sp \fBssh://user@host:port/./path/to/repo\fP \- path relative to current directory .sp \fBssh://user@host:port/~/path/to/repo\fP \- path relative to user\(aqs home directory .sp \fBssh://user@host:port/~other/path/to/repo\fP \- path relative to other\(aqs home directory .sp If you frequently need the same repo URL, it is a good idea to set the \fBBORG_REPO\fP environment variable to set a default for the repo URL: .INDENT 0.0 .INDENT 3.5 .sp .nf .ft C export BORG_REPO=\(aqssh://user@host:port/path/to/repo\(aq .ft P .fi .UNINDENT .UNINDENT .sp Then just leave away the repo URL if only a repo URL is needed and you want to use the default \- it will be read from BORG_REPO then. .sp Use \fB::\fP syntax to give the repo URL when syntax requires giving a positional argument for the repo (e.g. \fBborg mount :: /mnt\fP). 
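.sp
As an illustrative sketch (the repo URL, archive name and paths are placeholders), a session relying on the \fBBORG_REPO\fP default could look like this:
.INDENT 0.0
.INDENT 3.5
.sp
.nf
.ft C
$ export BORG_REPO=\(aqssh://user@host:port/path/to/repo\(aq
$ borg list                         # repo URL is read from BORG_REPO
$ borg create ::monday ~/Documents  # \(aq::archive\(aq abbreviation
$ borg mount :: /mnt                # \(aq::\(aq stands in for the repo URL
.ft P
.fi
.UNINDENT
.UNINDENT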
.SS Repository / Archive Locations .sp Many commands want either a repository (just give the repo URL, see above) or an archive location, which is a repo URL followed by \fB::archive_name\fP\&. .sp Archive names must not contain the \fB/\fP (slash) character. For simplicity, maybe also avoid blanks or other characters that have special meaning on the shell or in a filesystem (borg mount will use the archive name as directory name). .sp If you have set BORG_REPO (see above) and an archive location is needed, use \fB::archive_name\fP \- the repo URL part is then read from BORG_REPO. .SS Logging .sp Borg writes all log output to stderr by default. But please note that something showing up on stderr does \fInot\fP indicate an error condition just because it is on stderr. Please check the log levels of the messages and the return code of borg for determining error, warning or success conditions. .sp If you want to capture the log output to a file, just redirect it: .INDENT 0.0 .INDENT 3.5 .sp .nf .ft C borg create repo::archive myfiles 2>> logfile .ft P .fi .UNINDENT .UNINDENT .sp Custom logging configurations can be implemented via BORG_LOGGING_CONF. .sp The log level of the builtin logging configuration defaults to WARNING. This is because we want Borg to be mostly silent and only output warnings, errors and critical messages, unless output has been requested by supplying an option that implies output (e.g. \fB\-\-list\fP or \fB\-\-progress\fP). .sp Log levels: DEBUG < INFO < WARNING < ERROR < CRITICAL .sp Use \fB\-\-debug\fP to set DEBUG log level \- to get debug, info, warning, error and critical level output. .sp Use \fB\-\-info\fP (or \fB\-v\fP or \fB\-\-verbose\fP) to set INFO log level \- to get info, warning, error and critical level output. .sp Use \fB\-\-warning\fP (default) to set WARNING log level \- to get warning, error and critical level output. .sp Use \fB\-\-error\fP to set ERROR log level \- to get error and critical level output. .sp Use \fB\-\-critical\fP to set CRITICAL log level \- to get critical level output. .sp While you can set misc. log levels, do not expect that every command will give different output on different log levels \- it\(aqs just a possibility. .sp \fBWARNING:\fP .INDENT 0.0 .INDENT 3.5 Options \fB\-\-critical\fP and \fB\-\-error\fP are provided for completeness, their usage is not recommended as you might miss important information. .UNINDENT .UNINDENT .SS Return codes .sp Borg can exit with the following return codes (rc): .TS center; |l|l|. _ T{ Return code T} T{ Meaning T} _ T{ 0 T} T{ success (logged as INFO) T} _ T{ 1 T} T{ warning (operation reached its normal end, but there were warnings \-\- you should check the log, logged as WARNING) T} _ T{ 2 T} T{ error (like a fatal error, a local or remote exception, the operation did not reach its normal end, logged as ERROR) T} _ T{ 128+N T} T{ killed by signal N (e.g. 137 == kill \-9) T} _ .TE .sp If you use \fB\-\-show\-rc\fP, the return code is also logged at the indicated level as the last log entry. .SS Environment Variables .sp Borg uses some environment variables for automation: .INDENT 0.0 .TP .B General: .INDENT 7.0 .TP .B BORG_REPO When set, use the value to give the default repository location. If a command needs an archive parameter, you can abbreviate as \fB::archive\fP\&. If a command needs a repository parameter, you can either leave it away or abbreviate as \fB::\fP, if a positional parameter is required. 
.TP .B BORG_PASSPHRASE When set, use the value to answer the passphrase question for encrypted repositories. It is used when a passphrase is needed to access an encrypted repo as well as when a new passphrase should be initially set when initializing an encrypted repo. See also BORG_NEW_PASSPHRASE. .TP .B BORG_PASSCOMMAND When set, use the standard output of the command (trailing newlines are stripped) to answer the passphrase question for encrypted repositories. It is used when a passphrase is needed to access an encrypted repo as well as when a new passphrase should be initially set when initializing an encrypted repo. Note that the command is executed without a shell. So variables, like \fB$HOME\fP will work, but \fB~\fP won\(aqt. If BORG_PASSPHRASE is also set, it takes precedence. See also BORG_NEW_PASSPHRASE. .TP .B BORG_PASSPHRASE_FD When set, specifies a file descriptor to read a passphrase from. Programs starting borg may choose to open an anonymous pipe and use it to pass a passphrase. This is safer than passing via BORG_PASSPHRASE, because on some systems (e.g. Linux) environment can be examined by other processes. If BORG_PASSPHRASE or BORG_PASSCOMMAND are also set, they take precedence. .TP .B BORG_NEW_PASSPHRASE When set, use the value to answer the passphrase question when a \fBnew\fP passphrase is asked for. This variable is checked first. If it is not set, BORG_PASSPHRASE and BORG_PASSCOMMAND will also be checked. Main usecase for this is to fully automate \fBborg change\-passphrase\fP\&. .TP .B BORG_DISPLAY_PASSPHRASE When set, use the value to answer the "display the passphrase for verification" question when defining a new passphrase for encrypted repositories. .TP .B BORG_HOSTNAME_IS_UNIQUE=no Borg assumes that it can derive a unique hostname / identity (see \fBborg debug info\fP). If this is not the case or you do not want Borg to automatically remove stale locks, set this to \fIno\fP\&. .TP .B BORG_HOST_ID Borg usually computes a host id from the FQDN plus the results of \fBuuid.getnode()\fP (which usually returns a unique id based on the MAC address of the network interface. Except if that MAC happens to be all\-zero \- in that case it returns a random value, which is not what we want (because it kills automatic stale lock removal). So, if you have a all\-zero MAC address or other reasons to better externally control the host id, just set this environment variable to a unique value. If all your FQDNs are unique, you can just use the FQDN. If not, use \fI\%fqdn@uniqueid\fP\&. .TP .B BORG_LOGGING_CONF When set, use the given filename as \fI\%INI\fP\-style logging configuration. A basic example conf can be found at \fBdocs/misc/logging.conf\fP\&. .TP .B BORG_RSH When set, use this command instead of \fBssh\fP\&. This can be used to specify ssh options, such as a custom identity file \fBssh \-i /path/to/private/key\fP\&. See \fBman ssh\fP for other options. Using the \fB\-\-rsh CMD\fP commandline option overrides the environment variable. .TP .B BORG_REMOTE_PATH When set, use the given path as borg executable on the remote (defaults to "borg" if unset). Using \fB\-\-remote\-path PATH\fP commandline option overrides the environment variable. .TP .B BORG_FILES_CACHE_SUFFIX When set to a value at least one character long, instructs borg to use a specifically named (based on the suffix) alternative files cache. This can be used to avoid loading and saving cache entries for backup sources other than the current sources. 
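.sp
A hypothetical sketch (the suffix values, repository path and source paths are made up) of two backup jobs keeping their files caches separate:
.sp
.nf
.ft C
BORG_FILES_CACHE_SUFFIX=home borg create /path/to/repo::home\-{now} ~
BORG_FILES_CACHE_SUFFIX=etc  borg create /path/to/repo::etc\-{now} /etc
.ft P
.fi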
.TP .B BORG_FILES_CACHE_TTL When set to a numeric value, this determines the maximum "time to live" for the files cache entries (default: 20). The files cache is used to quickly determine whether a file is unchanged. The FAQ explains this more detailed in: \fIalways_chunking\fP .TP .B BORG_SHOW_SYSINFO When set to no (default: yes), system information (like OS, Python version, ...) in exceptions is not shown. Please only use for good reasons as it makes issues harder to analyze. .TP .B BORG_WORKAROUNDS A list of comma separated strings that trigger workarounds in borg, e.g. to work around bugs in other software. .sp Currently known strings are: .INDENT 7.0 .TP .B basesyncfile Use the more simple BaseSyncFile code to avoid issues with sync_file_range. You might need this to run borg on WSL (Windows Subsystem for Linux) or in systemd.nspawn containers on some architectures (e.g. ARM). Using this does not affect data safety, but might result in a more bursty write to disk behaviour (not continuously streaming to disk). .UNINDENT .UNINDENT .TP .B Some automatic "answerers" (if set, they automatically answer confirmation questions): .INDENT 7.0 .TP .B BORG_UNKNOWN_UNENCRYPTED_REPO_ACCESS_IS_OK=no (or =yes) For "Warning: Attempting to access a previously unknown unencrypted repository" .TP .B BORG_RELOCATED_REPO_ACCESS_IS_OK=no (or =yes) For "Warning: The repository at location ... was previously located at ..." .TP .B BORG_CHECK_I_KNOW_WHAT_I_AM_DOING=NO (or =YES) For "This is a potentially dangerous function..." (check \-\-repair) .TP .B BORG_DELETE_I_KNOW_WHAT_I_AM_DOING=NO (or =YES) For "You requested to completely DELETE the repository \fIincluding\fP all archives it contains:" .UNINDENT .sp Note: answers are case sensitive. setting an invalid answer value might either give the default answer or ask you interactively, depending on whether retries are allowed (they by default are allowed). So please test your scripts interactively before making them a non\-interactive script. .UNINDENT .INDENT 0.0 .TP .B Directories and files: .INDENT 7.0 .TP .B BORG_BASE_DIR Defaults to \fB$HOME\fP or \fB~$USER\fP or \fB~\fP (in that order). If you want to move all borg\-specific folders to a custom path at once, all you need to do is to modify \fBBORG_BASE_DIR\fP: the other paths for cache, config etc. will adapt accordingly (assuming you didn\(aqt set them to a different custom value). .TP .B BORG_CACHE_DIR Defaults to \fB$BORG_BASE_DIR/.cache/borg\fP\&. If \fBBORG_BASE_DIR\fP is not explicitly set while \fI\%XDG env var\fP \fBXDG_CACHE_HOME\fP is set, then \fB$XDG_CACHE_HOME/borg\fP is being used instead. This directory contains the local cache and might need a lot of space for dealing with big repositories. Make sure you\(aqre aware of the associated security aspects of the cache location: \fIcache_security\fP .TP .B BORG_CONFIG_DIR Defaults to \fB$BORG_BASE_DIR/.config/borg\fP\&. If \fBBORG_BASE_DIR\fP is not explicitly set while \fI\%XDG env var\fP \fBXDG_CONFIG_HOME\fP is set, then \fB$XDG_CONFIG_HOME/borg\fP is being used instead. This directory contains all borg configuration directories, see the FAQ for a security advisory about the data in this directory: \fIhome_config_borg\fP .TP .B BORG_SECURITY_DIR Defaults to \fB$BORG_CONFIG_DIR/security\fP\&. This directory contains information borg uses to track its usage of NONCES ("numbers used once" \- usually in encryption context) and other security relevant data. .TP .B BORG_KEYS_DIR Defaults to \fB$BORG_CONFIG_DIR/keys\fP\&. 
This directory contains keys for encrypted repositories. .TP .B BORG_KEY_FILE When set, use the given filename as repository key file. .TP .B TMPDIR This is where temporary files are stored (might need a lot of temporary space for some operations), see \fI\%tempfile\fP for details. .UNINDENT .TP .B Building: .INDENT 7.0 .TP .B BORG_OPENSSL_PREFIX Adds given OpenSSL header file directory to the default locations (setup.py). .TP .B BORG_LIBLZ4_PREFIX Adds given prefix directory to the default locations. If a \(aqinclude/lz4.h\(aq is found Borg will be linked against the system liblz4 instead of a bundled implementation. (setup.py) .TP .B BORG_LIBB2_PREFIX Adds given prefix directory to the default locations. If a \(aqinclude/blake2.h\(aq is found Borg will be linked against the system libb2 instead of a bundled implementation. (setup.py) .TP .B BORG_LIBZSTD_PREFIX Adds given prefix directory to the default locations. If a \(aqinclude/zstd.h\(aq is found Borg will be linked against the system libzstd instead of a bundled implementation. (setup.py) .UNINDENT .UNINDENT .sp Please note: .INDENT 0.0 .IP \(bu 2 Be very careful when using the "yes" sayers, the warnings with prompt exist for your / your data\(aqs security/safety. .IP \(bu 2 Also be very careful when putting your passphrase into a script, make sure it has appropriate file permissions (e.g. mode 600, root:root). .UNINDENT .SS File systems .sp We strongly recommend against using Borg (or any other database\-like software) on non\-journaling file systems like FAT, since it is not possible to assume any consistency in case of power failures (or a sudden disconnect of an external drive or similar failures). .sp While Borg uses a data store that is resilient against these failures when used on journaling file systems, it is not possible to guarantee this with some hardware \-\- independent of the software used. We don\(aqt know a list of affected hardware. .sp If you are suspicious whether your Borg repository is still consistent and readable after one of the failures mentioned above occurred, run \fBborg check \-\-verify\-data\fP to make sure it is consistent. Requirements for Borg repository file systems.INDENT 0.0 .IP \(bu 2 Long file names .IP \(bu 2 At least three directory levels with short names .IP \(bu 2 Typically, file sizes up to a few hundred MB. Large repositories may require large files (>2 GB). .IP \(bu 2 Up to 1000 files per directory (10000 for repositories initialized with Borg 1.0) .IP \(bu 2 mkdir(2) should be atomic, since it is used for locking .IP \(bu 2 Hardlinks are needed for \fIborg_upgrade\fP (if \fB\-\-inplace\fP option is not used). Also hardlinks are used for more safe and secure file updating (e.g. of the repo config file), but the code tries to work also if hardlinks are not supported. .UNINDENT .SS Units .sp To display quantities, Borg takes care of respecting the usual conventions of scale. Disk sizes are displayed in \fI\%decimal\fP, using powers of ten (so \fBkB\fP means 1000 bytes). For memory usage, \fI\%binary prefixes\fP are used, and are indicated using the \fI\%IEC binary prefixes\fP, using powers of two (so \fBKiB\fP means 1024 bytes). .SS Date and Time .sp We format date and time conforming to ISO\-8601, that is: YYYY\-MM\-DD and HH:MM:SS (24h clock). .sp For more information about that, see: \fI\%https://xkcd.com/1179/\fP .sp Unless otherwise noted, we display local date and time. Internally, we store and process date and time as UTC. 
.SS Resource Usage .sp Borg might use a lot of resources depending on the size of the data set it is dealing with. .sp If one uses Borg in a client/server way (with a ssh: repository), the resource usage occurs in part on the client and in another part on the server. .sp If one uses Borg as a single process (with a filesystem repo), all the resource usage occurs in that one process, so just add up client + server to get the approximate resource usage. .INDENT 0.0 .TP .B CPU client: .INDENT 7.0 .IP \(bu 2 \fBborg create:\fP does chunking, hashing, compression, crypto (high CPU usage) .IP \(bu 2 \fBchunks cache sync:\fP quite heavy on CPU, doing lots of hashtable operations. .IP \(bu 2 \fBborg extract:\fP crypto, decompression (medium to high CPU usage) .IP \(bu 2 \fBborg check:\fP similar to extract, but depends on options given. .IP \(bu 2 \fBborg prune / borg delete archive:\fP low to medium CPU usage .IP \(bu 2 \fBborg delete repo:\fP done on the server .UNINDENT .sp It won\(aqt go beyond 100% of 1 core as the code is currently single\-threaded. Especially higher zlib and lzma compression levels use significant amounts of CPU cycles. Crypto might be cheap on the CPU (if hardware accelerated) or expensive (if not). .TP .B CPU server: It usually doesn\(aqt need much CPU, it just deals with the key/value store (repository) and uses the repository index for that. .sp borg check: the repository check computes the checksums of all chunks (medium CPU usage) borg delete repo: low CPU usage .TP .B CPU (only for client/server operation): When using borg in a client/server way with a \fI\%ssh:\-type\fP repo, the ssh processes used for the transport layer will need some CPU on the client and on the server due to the crypto they are doing \- esp. if you are pumping big amounts of data. .TP .B Memory (RAM) client: The chunks index and the files index are read into memory for performance reasons. Might need big amounts of memory (see below). Compression, esp. lzma compression with high levels might need substantial amounts of memory. .TP .B Memory (RAM) server: The server process will load the repository index into memory. Might need considerable amounts of memory, but less than on the client (see below). .TP .B Chunks index (client only): Proportional to the amount of data chunks in your repo. Lots of chunks in your repo imply a big chunks index. It is possible to tweak the chunker params (see create options). .TP .B Files index (client only): Proportional to the amount of files in your last backups. Can be switched off (see create options), but next backup might be much slower if you do. The speed benefit of using the files cache is proportional to file size. .TP .B Repository index (server only): Proportional to the amount of data chunks in your repo. Lots of chunks in your repo imply a big repository index. It is possible to tweak the chunker params (see create options) to influence the amount of chunks being created. .TP .B Temporary files (client): Reading data and metadata from a FUSE mounted repository will consume up to the size of all deduplicated, small chunks in the repository. Big chunks won\(aqt be locally cached. .TP .B Temporary files (server): A non\-trivial amount of data will be stored on the remote temp directory for each client that connects to it. For some remotes, this can fill the default temporary directory at /tmp. This can be remediated by ensuring the $TMPDIR, $TEMP, or $TMP environment variable is properly set for the sshd process. 
For some OSes, this can be done just by setting the correct value in the \&.bashrc (or equivalent login config file for other shells), however in other cases it may be necessary to first enable \fBPermitUserEnvironment yes\fP in your \fBsshd_config\fP file, then add \fBenvironment="TMPDIR=/my/big/tmpdir"\fP at the start of the public key to be used in the \fBauthorized_hosts\fP file. .TP .B Cache files (client only): Contains the chunks index and files index (plus a collection of single\- archive chunk indexes which might need huge amounts of disk space, depending on archive count and size \- see FAQ about how to reduce). .TP .B Network (only for client/server operation): If your repository is remote, all deduplicated (and optionally compressed/ encrypted) data of course has to go over the connection (\fBssh://\fP repo url). If you use a locally mounted network filesystem, additionally some copy operations used for transaction support also go over the connection. If you backup multiple sources to one target repository, additional traffic happens for cache resynchronization. .UNINDENT .SS Support for file metadata .sp Besides regular file and directory structures, Borg can preserve .INDENT 0.0 .IP \(bu 2 symlinks (stored as symlink, the symlink is not followed) .IP \(bu 2 special files: .INDENT 2.0 .IP \(bu 2 character and block device files (restored via mknod) .IP \(bu 2 FIFOs ("named pipes") .IP \(bu 2 special file \fIcontents\fP can be backed up in \fB\-\-read\-special\fP mode. By default the metadata to create them with mknod(2), mkfifo(2) etc. is stored. .UNINDENT .IP \(bu 2 hardlinked regular files, devices, FIFOs (considering all items in the same archive) .IP \(bu 2 timestamps in nanosecond precision: mtime, atime, ctime .IP \(bu 2 other timestamps: birthtime (on platforms supporting it) .IP \(bu 2 permissions: .INDENT 2.0 .IP \(bu 2 IDs of owning user and owning group .IP \(bu 2 names of owning user and owning group (if the IDs can be resolved) .IP \(bu 2 Unix Mode/Permissions (u/g/o permissions, suid, sgid, sticky) .UNINDENT .UNINDENT .sp On some platforms additional features are supported: .\" Yes/No's are grouped by reason/mechanism/reference. . .TS center; |l|l|l|l|. _ T{ Platform T} T{ ACLs [5] T} T{ xattr [6] T} T{ Flags [7] T} _ T{ Linux T} T{ Yes T} T{ Yes T} T{ Yes [1] T} _ T{ Mac OS X T} T{ Yes T} T{ Yes T} T{ Yes (all) T} _ T{ FreeBSD T} T{ Yes T} T{ Yes T} T{ Yes (all) T} _ T{ OpenBSD T} T{ n/a T} T{ n/a T} T{ Yes (all) T} _ T{ NetBSD T} T{ n/a T} T{ No [2] T} T{ Yes (all) T} _ T{ Solaris and derivatives T} T{ No [3] T} T{ No [3] T} T{ n/a T} _ T{ Windows (cygwin) T} T{ No [4] T} T{ No T} T{ No T} _ .TE .sp Other Unix\-like operating systems may work as well, but have not been tested at all. .sp Note that most of the platform\-dependent features also depend on the file system. For example, ntfs\-3g on Linux isn\(aqt able to convey NTFS ACLs. .IP [1] 5 Only "nodump", "immutable", "compressed" and "append" are supported. Feature request #618 for more flags. .IP [2] 5 Feature request #1332 .IP [3] 5 Feature request #1337 .IP [4] 5 Cygwin tries to map NTFS ACLs to permissions with varying degrees of success. .IP [5] 5 The native access control list mechanism of the OS. This normally limits access to non\-native ACLs. For example, NTFS ACLs aren\(aqt completely accessible on Linux with ntfs\-3g. .IP [6] 5 extended attributes; key\-value pairs attached to a file, mainly used by the OS. This includes resource forks on Mac OS X. .IP [7] 5 aka \fIBSD flags\fP\&. 
The Linux set of flags [1] is portable across platforms. The BSDs define additional flags. .SH SEE ALSO .sp \fIborg\-common(1)\fP for common command line options .sp \fIborg\-init(1)\fP, \fIborg\-create(1)\fP, \fIborg\-mount(1)\fP, \fIborg\-extract(1)\fP, \fIborg\-list(1)\fP, \fIborg\-info(1)\fP, \fIborg\-delete(1)\fP, \fIborg\-prune(1)\fP, \fIborg\-recreate(1)\fP .sp \fIborg\-compression(1)\fP, \fIborg\-patterns(1)\fP, \fIborg\-placeholders(1)\fP .INDENT 0.0 .IP \(bu 2 Main web site \fI\%https://www.borgbackup.org/\fP .IP \(bu 2 Releases \fI\%https://github.com/borgbackup/borg/releases\fP .IP \(bu 2 Changelog \fI\%https://github.com/borgbackup/borg/blob/master/docs/changes.rst\fP .IP \(bu 2 GitHub \fI\%https://github.com/borgbackup/borg\fP .IP \(bu 2 Security contact \fI\%https://borgbackup.readthedocs.io/en/latest/support.html#security\-contact\fP .UNINDENT .SH AUTHOR The Borg Collective .\" Generated by docutils manpage writer. . borgbackup-1.1.15/docs/man/borg-patterns.10000644000175000017500000002373713771325506020275 0ustar useruser00000000000000.\" Man page generated from reStructuredText. . .TH BORG-PATTERNS 1 "2020-12-24" "" "borg backup tool" .SH NAME borg-patterns \- Details regarding patterns . .nr rst2man-indent-level 0 . .de1 rstReportMargin \\$1 \\n[an-margin] level \\n[rst2man-indent-level] level margin: \\n[rst2man-indent\\n[rst2man-indent-level]] - \\n[rst2man-indent0] \\n[rst2man-indent1] \\n[rst2man-indent2] .. .de1 INDENT .\" .rstReportMargin pre: . RS \\$1 . nr rst2man-indent\\n[rst2man-indent-level] \\n[an-margin] . nr rst2man-indent-level +1 .\" .rstReportMargin post: .. .de UNINDENT . RE .\" indent \\n[an-margin] .\" old: \\n[rst2man-indent\\n[rst2man-indent-level]] .nr rst2man-indent-level -1 .\" new: \\n[rst2man-indent\\n[rst2man-indent-level]] .in \\n[rst2man-indent\\n[rst2man-indent-level]]u .. .SH DESCRIPTION .sp The path/filenames used as input for the pattern matching start from the currently active recursion root. You usually give the recursion root(s) when invoking borg and these can be either relative or absolute paths. .sp So, when you give \fIrelative/\fP as root, the paths going into the matcher will look like \fIrelative/.../file.ext\fP\&. When you give \fI/absolute/\fP as root, they will look like \fI/absolute/.../file.ext\fP\&. This is meant when we talk about "full path" below. .sp File paths in Borg archives are always stored normalized and relative. This means that e.g. \fBborg create /path/to/repo ../some/path\fP will store all files as \fIsome/path/.../file.ext\fP and \fBborg create /path/to/repo /home/user\fP will store all files as \fIhome/user/.../file.ext\fP\&. Therefore, always use relative paths in your patterns when matching archive content in commands like \fBextract\fP or \fBmount\fP\&. Starting with Borg 1.2 this behaviour will be changed to accept both absolute and relative paths. .sp File patterns support these styles: fnmatch, shell, regular expressions, path prefixes and path full\-matches. By default, fnmatch is used for \fB\-\-exclude\fP patterns and shell\-style is used for the experimental \fB\-\-pattern\fP option. .sp If followed by a colon (\(aq:\(aq) the first two characters of a pattern are used as a style selector. Explicit style selection is necessary when a non\-default style is desired or when the desired pattern starts with two alphanumeric characters followed by a colon (i.e. \fIaa:something/*\fP). 
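.sp
For instance (an illustrative sketch \- the paths themselves are made up), an explicit selector is simply prepended to the pattern; the available styles are described below:
.INDENT 0.0
.INDENT 3.5
.sp
.nf
.ft C
sh:home/*/.thumbnails    # shell\-style pattern
re:\e.tmp$                # regular expression
pp:home/user/cache       # path prefix
.ft P
.fi
.UNINDENT
.UNINDENT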
.INDENT 0.0 .TP .B \fI\%Fnmatch\fP, selector \fIfm:\fP This is the default style for \fB\-\-exclude\fP and \fB\-\-exclude\-from\fP\&. These patterns use a variant of shell pattern syntax, with \(aq*\(aq matching any number of characters, \(aq?\(aq matching any single character, \(aq[...]\(aq matching any single character specified, including ranges, and \(aq[!...]\(aq matching any character not specified. For the purpose of these patterns, the path separator (backslash for Windows and \(aq/\(aq on other systems) is not treated specially. Wrap meta\-characters in brackets for a literal match (i.e. \fI[?]\fP to match the literal character \fI?\fP). For a path to match a pattern, the full path must match, or it must match from the start of the full path to just before a path separator. Except for the root path, paths will never end in the path separator when matching is attempted. Thus, if a given pattern ends in a path separator, a \(aq*\(aq is appended before matching is attempted. .TP .B Shell\-style patterns, selector \fIsh:\fP This is the default style for \fB\-\-pattern\fP and \fB\-\-patterns\-from\fP\&. Like fnmatch patterns these are similar to shell patterns. The difference is that the pattern may include \fI**/\fP for matching zero or more directory levels, \fI*\fP for matching zero or more arbitrary characters with the exception of any path separator. .TP .B Regular expressions, selector \fIre:\fP Regular expressions similar to those found in Perl are supported. Unlike shell patterns regular expressions are not required to match the full path and any substring match is sufficient. It is strongly recommended to anchor patterns to the start (\(aq^\(aq), to the end (\(aq$\(aq) or both. Path separators (backslash for Windows and \(aq/\(aq on other systems) in paths are always normalized to a forward slash (\(aq/\(aq) before applying a pattern. The regular expression syntax is described in the \fI\%Python documentation for the re module\fP\&. .TP .B Path prefix, selector \fIpp:\fP This pattern style is useful to match whole sub\-directories. The pattern \fIpp:root/somedir\fP matches \fIroot/somedir\fP and everything therein. .TP .B Path full\-match, selector \fIpf:\fP This pattern style is (only) useful to match full paths. This is kind of a pseudo pattern as it can not have any variable or unspecified parts \- the full path must be given. \fIpf:root/file.ext\fP matches \fIroot/file.ext\fP only. .sp Implementation note: this is implemented via very time\-efficient O(1) hashtable lookups (this means you can have huge amounts of such patterns without impacting performance much). Due to that, this kind of pattern does not respect any context or order. If you use such a pattern to include a file, it will always be included (if the directory recursion encounters it). Other include/exclude patterns that would normally match will be ignored. Same logic applies for exclude. .UNINDENT .sp \fBNOTE:\fP .INDENT 0.0 .INDENT 3.5 \fIre:\fP, \fIsh:\fP and \fIfm:\fP patterns are all implemented on top of the Python SRE engine. It is very easy to formulate patterns for each of these types which requires an inordinate amount of time to match paths. If untrusted users are able to supply patterns, ensure they cannot supply \fIre:\fP patterns. Further, ensure that \fIsh:\fP and \fIfm:\fP patterns only contain a handful of wildcards at most. .UNINDENT .UNINDENT .sp Exclusions can be passed via the command line option \fB\-\-exclude\fP\&. 
When used from within a shell the patterns should be quoted to protect them from expansion. .sp The \fB\-\-exclude\-from\fP option permits loading exclusion patterns from a text file with one pattern per line. Lines empty or starting with the number sign (\(aq#\(aq) after removing whitespace on both ends are ignored. The optional style selector prefix is also supported for patterns loaded from a file. Due to whitespace removal paths with whitespace at the beginning or end can only be excluded using regular expressions. .sp To test your exclusion patterns without performing an actual backup you can run \fBborg create \-\-list \-\-dry\-run ...\fP\&. .sp Examples: .INDENT 0.0 .INDENT 3.5 .sp .nf .ft C # Exclude \(aq/home/user/file.o\(aq but not \(aq/home/user/file.odt\(aq: $ borg create \-e \(aq*.o\(aq backup / # Exclude \(aq/home/user/junk\(aq and \(aq/home/user/subdir/junk\(aq but # not \(aq/home/user/importantjunk\(aq or \(aq/etc/junk\(aq: $ borg create \-e \(aq/home/*/junk\(aq backup / # Exclude the contents of \(aq/home/user/cache\(aq but not the directory itself: $ borg create \-e /home/user/cache/ backup / # The file \(aq/home/user/cache/important\(aq is *not* backed up: $ borg create \-e /home/user/cache/ backup / /home/user/cache/important # The contents of directories in \(aq/home\(aq are not backed up when their name # ends in \(aq.tmp\(aq $ borg create \-\-exclude \(aqre:^/home/[^/]+\e.tmp/\(aq backup / # Load exclusions from file $ cat >exclude.txt < [options] [arguments] DESCRIPTION ----------- .. we don't include the README.rst here since we want to keep this terse. BorgBackup (short: Borg) is a deduplicating backup program. Optionally, it supports compression and authenticated encryption. The main goal of Borg is to provide an efficient and secure way to backup data. The data deduplication technique used makes Borg suitable for daily backups since only changes are stored. The authenticated encryption technique makes it suitable for backups to not fully trusted targets. Borg stores a set of files in an *archive*. A *repository* is a collection of *archives*. The format of repositories is Borg-specific. Borg does not distinguish archives from each other in any way other than their name, it does not matter when or where archives were created (e.g. different hosts). EXAMPLES -------- A step-by-step example ~~~~~~~~~~~~~~~~~~~~~~ .. include:: quickstart_example.rst.inc NOTES ----- .. include:: usage_general.rst.inc SEE ALSO -------- `borg-common(1)` for common command line options `borg-init(1)`, `borg-create(1)`, `borg-mount(1)`, `borg-extract(1)`, `borg-list(1)`, `borg-info(1)`, `borg-delete(1)`, `borg-prune(1)`, `borg-recreate(1)` `borg-compression(1)`, `borg-patterns(1)`, `borg-placeholders(1)` * Main web site https://www.borgbackup.org/ * Releases https://github.com/borgbackup/borg/releases * Changelog https://github.com/borgbackup/borg/blob/master/docs/changes.rst * GitHub https://github.com/borgbackup/borg * Security contact https://borgbackup.readthedocs.io/en/latest/support.html#security-contact borgbackup-1.1.15/docs/development.rst0000644000175000017500000003014613771325506017715 0ustar useruser00000000000000.. include:: global.rst.inc .. highlight:: bash .. _development: Development =========== This chapter will get you started with |project_name| development. |project_name| is written in Python (with a little bit of Cython and C for the performance critical parts). Contributions ------------- ... are welcome! 
Some guidance for contributors: - Discuss changes on the GitHub issue tracker, on IRC or on the mailing list. - Make your PRs on the ``master`` branch (see `Branching Model`_ for details). - Do clean changesets: - Focus on some topic, resist changing anything else. - Do not do style changes mixed with functional changes. - Try to avoid refactorings mixed with functional changes. - If you need to fix something after commit/push: - If there are ongoing reviews: do a fixup commit you can squash into the bad commit later. - If there are no ongoing reviews or you did not push the bad commit yet: amend the commit to include your fix or merge the fixup commit before pushing. - Have a nice, clear, typo-free commit comment. - If you fixed an issue, refer to it in your commit comment. - Follow the style guide (see below). - If you write new code, please add tests and docs for it. - Run the tests, fix any issues that come up. - Make a pull request on GitHub. - Wait for review by other developers. Branching model --------------- Borg development happens on the ``master`` branch and uses GitHub pull requests (if you don't have GitHub or don't want to use it you can send smaller patches via the borgbackup mailing list to the maintainers). Stable releases are maintained on maintenance branches named ``x.y-maint``, eg. the maintenance branch of the 1.0.x series is ``1.0-maint``. Most PRs should be filed against the ``master`` branch. Only if an issue affects **only** a particular maintenance branch a PR should be filed against it directly. While discussing / reviewing a PR it will be decided whether the change should be applied to maintenance branches. Each maintenance branch has a corresponding *backport/x.y-maint* label, which will then be applied. Changes that are typically considered for backporting: - Data loss, corruption and inaccessibility fixes. - Security fixes. - Forward-compatibility improvements. - Documentation corrections. .. rubric:: Maintainer part From time to time a maintainer will backport the changes for a maintenance branch, typically before a release or if enough changes were collected: 1. Notify others that you're doing this to avoid duplicate work. 2. Branch a backporting branch off the maintenance branch. 3. Cherry pick and backport the changes from each labelled PR, remove the label for each PR you've backported. To preserve authorship metadata, do not follow the ``git cherry-pick`` instructions to use ``git commit`` after resolving conflicts. Instead, stage conflict resolutions and run ``git cherry-pick --continue``, much like using ``git rebase``. To avoid merge issues (a cherry pick is a form of merge), use these options (similar to the ``git merge`` options used previously, the ``-x`` option adds a reference to the original commit):: git cherry-pick --strategy recursive -X rename-threshold=5% -x 4. Make a PR of the backporting branch against the maintenance branch for backport review. Mention the backported PRs in this PR, e.g.: Includes changes from #2055 #2057 #2381 This way GitHub will automatically show in these PRs where they were backported. .. rubric:: Historic model Previously (until release 1.0.10) Borg used a `"merge upwards" `_ model where most minor changes and fixes where committed to a maintenance branch (eg. 1.0-maint), and the maintenance branch(es) were regularly merged back into the main development branch. This became more and more troublesome due to merges growing more conflict-heavy and error-prone. 
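To recap the maintainer's backporting steps above as shell commands (a sketch only; the backporting branch name and the commit hash are placeholders)::

    git checkout 1.0-maint
    git checkout -b backport-some-fixes
    git cherry-pick --strategy recursive -X rename-threshold=5% -x <commit>
    # if there are conflicts: resolve them, `git add` the resolved files, then
    git cherry-pick --continue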
Code and issues --------------- Code is stored on GitHub, in the `Borgbackup organization `_. `Issues `_ and `pull requests `_ should be sent there as well. See also the :ref:`support` section for more details. Style guide ----------- We generally follow `pep8 `_, with 120 columns instead of 79. We do *not* use form-feed (``^L``) characters to separate sections either. Compliance is tested automatically when you run the tests. Continuous Integration ---------------------- All pull requests go through `GitHub Actions`_, which runs the tests on Linux and Mac OS X as well as the flake8 style checker. Windows builds run on AppVeyor_, while additional Unix-like platforms are tested on Golem_. .. _AppVeyor: https://ci.appveyor.com/project/borgbackup/borg/ .. _Golem: https://golem.enkore.de/view/Borg/ .. _GitHub Actions: https://github.com/borgbackup/borg/actions Output and Logging ------------------ When writing logger calls, always use correct log level (debug only for debugging, info for informative messages, warning for warnings, error for errors, critical for critical errors/states). When directly talking to the user (e.g. Y/N questions), do not use logging, but directly output to stderr (not: stdout, it could be connected to a pipe). To control the amount and kinds of messages output emitted at info level, use flags like ``--stats`` or ``--list``, then create a topic logger for messages controlled by that flag. See ``_setup_implied_logging()`` in ``borg/archiver.py`` for the entry point to topic logging. Building a development environment ---------------------------------- First, just install borg into a virtual env as described before. To install some additional packages needed for running the tests, activate your virtual env and run:: pip install -r requirements.d/development.txt Running the tests ----------------- The tests are in the borg/testsuite package. To run all the tests, you need to have fakeroot installed. If you do not have fakeroot, you still will be able to run most tests, just leave away the `fakeroot -u` from the given command lines. To run the test suite use the following command:: fakeroot -u tox # run all tests Some more advanced examples:: # verify a changed tox.ini (run this after any change to tox.ini): fakeroot -u tox --recreate fakeroot -u tox -e py37 # run all tests, but only on python 3.7 fakeroot -u tox borg.testsuite.locking # only run 1 test module fakeroot -u tox borg.testsuite.locking -- -k '"not Timer"' # exclude some tests fakeroot -u tox borg.testsuite -- -v # verbose py.test Important notes: - When using ``--`` to give options to py.test, you MUST also give ``borg.testsuite[.module]``. Running more checks using coala ------------------------------- First install coala and some checkers ("bears"): :: pip install -r requirements.d/coala.txt You can now run coala from the toplevel directory; it will read its settings from ``.coafile`` there: :: coala Some bears have additional requirements and they usually tell you about them in case they are missing. Adding a compression algorithm ------------------------------ If you want to add a new compression algorithm, please refer to :issue:`1633` and leave a post there in order to discuss about the proposal. Documentation ------------- Generated files ~~~~~~~~~~~~~~~ Usage documentation (found in ``docs/usage/``) and man pages (``docs/man/``) are generated automatically from the command line parsers declared in the program and their documentation, which is embedded in the program (see archiver.py). 
These are committed to git for easier use by packagers downstream. When a command is added, a command line flag changed, added or removed, the usage docs need to be rebuilt as well:: python setup.py build_usage python setup.py build_man However, we prefer to do this as part of our :ref:`releasing` preparations, so it is generally not necessary to update these when submitting patches that change something about the command line. Building the docs with Sphinx ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The documentation (in reStructuredText format, .rst) is in docs/. To build the html version of it, you need to have Sphinx installed (in your Borg virtualenv with Python 3):: pip install -r requirements.d/docs.txt Now run:: cd docs/ make html Then point a web browser at docs/_build/html/index.html. The website is updated automatically by ReadTheDocs through GitHub web hooks on the main repository. Using Vagrant ------------- We use Vagrant for the automated creation of testing environments and borgbackup standalone binaries for various platforms. For better security, there is no automatic sync in the VM to host direction. The plugin `vagrant-scp` is useful to copy stuff from the VMs to the host. The "windows10" box requires the `reload` plugin (``vagrant plugin install vagrant-reload``). Usage:: # To create and provision the VM: vagrant up OS # same, but use 6 VM cpus and 12 workers for pytest: VMCPUS=6 XDISTN=12 vagrant up OS # To create an ssh session to the VM: vagrant ssh OS # To execute a command via ssh in the VM: vagrant ssh OS -c "command args" # To shut down the VM: vagrant halt OS # To shut down and destroy the VM: vagrant destroy OS # To copy files from the VM (in this case, the generated binary): vagrant scp OS:/vagrant/borg/borg.exe . Creating standalone binaries ---------------------------- Make sure you have everything built and installed (including llfuse and fuse). When using the Vagrant VMs, pyinstaller will already be installed. With virtual env activated:: pip install pyinstaller # or git checkout master pyinstaller -F -n borg-PLATFORM borg/__main__.py for file in dist/borg-*; do gpg --armor --detach-sign $file; done If you encounter issues, see also our `Vagrantfile` for details. .. note:: Standalone binaries built with pyinstaller are supposed to work on same OS, same architecture (x86 32bit, amd64 64bit) without external dependencies. .. _releasing: Creating a new release ---------------------- Checklist: - Make sure all issues for this milestone are closed or moved to the next milestone. - Check if there are any pending fixes for security issues. - Find and fix any low hanging fruit left on the issue tracker. - Check that GitHub Actions CI is happy. - Update ``CHANGES.rst``, based on ``git log $PREVIOUS_RELEASE..``. - Check version number of upcoming release in ``CHANGES.rst``. - Render ``CHANGES.rst`` via ``make html`` and check for markup errors. - Verify that ``MANIFEST.in`` and ``setup.py`` are complete. - ``python setup.py build_usage ; python setup.py build_man`` and commit (be sure to build with Python 3.5 as Python 3.6 added `more guaranteed hashing algorithms `_). - Tag the release:: git tag -s -m "tagged/signed release X.Y.Z" X.Y.Z - Create a clean repo and use it for the following steps:: git clone borg borg-clean This makes sure no uncommitted files get into the release archive. It will also reveal uncommitted required files. Moreover, it makes sure the vagrant machines only get committed files and do a fresh start based on that. 
- Run tox and/or binary builds on all supported platforms via vagrant, check for test failures. - Create sdist, sign it, upload release to (test) PyPi: :: scripts/sdist-sign X.Y.Z scripts/upload-pypi X.Y.Z test scripts/upload-pypi X.Y.Z - Put binaries into dist/borg-OSNAME and sign them: :: scripts/sign-binaries 201912312359 - Close the release milestone on GitHub. - `Update borgbackup.org `_ with the new version number and release date. - Announce on: - Mailing list. - Twitter. - IRC channel (change ``/topic``). - Create a GitHub release, include: * Standalone binaries (see above for how to create them). + For OS X, document the OS X Fuse version in the README of the binaries. OS X FUSE uses a kernel extension that needs to be compatible with the code contained in the binary. * A link to ``CHANGES.rst``. borgbackup-1.1.15/docs/quickstart_example.rst.inc0000644000175000017500000000505213771325506022046 0ustar useruser000000000000001. Before a backup can be made a repository has to be initialized:: $ borg init --encryption=repokey /path/to/repo 2. Backup the ``~/src`` and ``~/Documents`` directories into an archive called *Monday*:: $ borg create /path/to/repo::Monday ~/src ~/Documents 3. The next day create a new archive called *Tuesday*:: $ borg create --stats /path/to/repo::Tuesday ~/src ~/Documents This backup will be a lot quicker and a lot smaller since only new never before seen data is stored. The ``--stats`` option causes Borg to output statistics about the newly created archive such as the amount of unique data (not shared with other archives):: ------------------------------------------------------------------------------ Archive name: Tuesday Archive fingerprint: bd31004d58f51ea06ff735d2e5ac49376901b21d58035f8fb05dbf866566e3c2 Time (start): Tue, 2016-02-16 18:15:11 Time (end): Tue, 2016-02-16 18:15:11 Duration: 0.19 seconds Number of files: 127 ------------------------------------------------------------------------------ Original size Compressed size Deduplicated size This archive: 4.16 MB 4.17 MB 26.78 kB All archives: 8.33 MB 8.34 MB 4.19 MB Unique chunks Total chunks Chunk index: 132 261 ------------------------------------------------------------------------------ 4. List all archives in the repository:: $ borg list /path/to/repo Monday Mon, 2016-02-15 19:14:44 Tuesday Tue, 2016-02-16 19:15:11 5. List the contents of the *Monday* archive:: $ borg list /path/to/repo::Monday drwxr-xr-x user group 0 Mon, 2016-02-15 18:22:30 home/user/Documents -rw-r--r-- user group 7961 Mon, 2016-02-15 18:22:30 home/user/Documents/Important.doc ... 6. Restore the *Monday* archive by extracting the files relative to the current directory:: $ borg extract /path/to/repo::Monday 7. Recover disk space by manually deleting the *Monday* archive:: $ borg delete /path/to/repo::Monday .. Note:: Borg is quiet by default (it works on WARNING log level). You can use options like ``--progress`` or ``--list`` to get specific reports during command execution. You can also add the ``-v`` (or ``--verbose`` or ``--info``) option to adjust the log level to INFO to get other informational messages. borgbackup-1.1.15/docs/support.rst0000644000175000017500000000374413771325506017113 0ustar useruser00000000000000.. _support: Support ======= Support and Services -------------------- Please see https://www.borgbackup.org/ for free and paid support and service options. .. 
_security-contact: Security -------- In case you discover a security issue, please use this contact for reporting it privately and please, if possible, use encrypted E-Mail: Thomas Waldmann GPG Key Fingerprint: 6D5B EF9A DD20 7580 5747 B70F 9F88 FB52 FAF7 B393 The public key can be fetched from any GPG keyserver, but be careful: you must use the **full fingerprint** to check that you got the correct key. Verifying signed releases ------------------------- `Releases `_ are signed with the same GPG key and a .asc file is provided for each binary. To verify a signature, the public key needs to be known to GPG. It can be imported into the local keystore from a keyserver with the fingerprint:: gpg --recv-keys "6D5B EF9A DD20 7580 5747 B70F 9F88 FB52 FAF7 B393" If GPG successfully imported the key, the output should be (among other things): 'Total number processed: 1'. To verify for example the signature of the borg-linux64 binary:: gpg --verify borg-linux64.asc GPG outputs if it finds a good signature. The output should look similar to this:: gpg: Signature made Sat 30 Dec 2017 01:07:36 PM CET using RSA key ID 51F78E01 gpg: Good signature from "Thomas Waldmann " gpg: aka "Thomas Waldmann " gpg: aka "Thomas Waldmann " gpg: aka "Thomas Waldmann " gpg: WARNING: This key is not certified with a trusted signature! gpg: There is no indication that the signature belongs to the owner. Primary key fingerprint: 6D5B EF9A DD20 7580 5747 B70F 9F88 FB52 FAF7 B393 Subkey fingerprint: 2F81 AFFB AB04 E11F E8EE 65D4 243A CFA9 51F7 8E01 If you want to make absolutely sure that you have the right key, you need to verify it via another channel and assign a trust-level to it. borgbackup-1.1.15/docs/Makefile0000644000175000017500000001074613771325506016305 0ustar useruser00000000000000# Makefile for Sphinx documentation # # You can set these variables from the command line. SPHINXOPTS = SPHINXBUILD = sphinx-build PAPER = BUILDDIR = _build # Internal variables. PAPEROPT_a4 = -D latex_paper_size=a4 PAPEROPT_letter = -D latex_paper_size=letter ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . .PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest help: @echo "Please use \`make ' where is one of" @echo " html to make standalone HTML files" @echo " dirhtml to make HTML files named index.html in directories" @echo " singlehtml to make a single large HTML file" @echo " pickle to make pickle files" @echo " json to make JSON files" @echo " htmlhelp to make HTML files and a HTML help project" @echo " qthelp to make HTML files and a qthelp project" @echo " devhelp to make HTML files and a Devhelp project" @echo " epub to make an epub" @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter" @echo " latexpdf to make LaTeX files and run them through pdflatex" @echo " text to make text files" @echo " man to make manual pages" @echo " changes to make an overview of all changed/added/deprecated items" @echo " linkcheck to check all external links for integrity" @echo " doctest to run all doctests embedded in the documentation (if enabled)" clean: -rm -rf $(BUILDDIR)/* html: $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html @echo @echo "Build finished. The HTML pages are in $(BUILDDIR)/html." dirhtml: $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml @echo @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml." 
singlehtml: $(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml @echo @echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml." pickle: $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle @echo @echo "Build finished; now you can process the pickle files." json: $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json @echo @echo "Build finished; now you can process the JSON files." htmlhelp: $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp @echo @echo "Build finished; now you can run HTML Help Workshop with the" \ ".hhp project file in $(BUILDDIR)/htmlhelp." qthelp: $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp @echo @echo "Build finished; now you can run "qcollectiongenerator" with the" \ ".qhcp project file in $(BUILDDIR)/qthelp, like this:" @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/borg.qhcp" @echo "To view the help file:" @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/borg.qhc" devhelp: $(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp @echo @echo "Build finished." @echo "To view the help file:" @echo "# mkdir -p $$HOME/.local/share/devhelp/borg" @echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/borg" @echo "# devhelp" epub: $(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub @echo @echo "Build finished. The epub file is in $(BUILDDIR)/epub." latex: $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex @echo @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex." @echo "Run \`make' in that directory to run these through (pdf)latex" \ "(use \`make latexpdf' here to do that automatically)." latexpdf: $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex @echo "Running LaTeX files through pdflatex..." make -C $(BUILDDIR)/latex all-pdf @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." text: $(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text @echo @echo "Build finished. The text files are in $(BUILDDIR)/text." man: $(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man @echo @echo "Build finished. The manual pages are in $(BUILDDIR)/man." changes: $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes @echo @echo "The overview file is in $(BUILDDIR)/changes." linkcheck: $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck @echo @echo "Link check complete; look for any errors in the above output " \ "or in $(BUILDDIR)/linkcheck/output.txt." doctest: $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest @echo "Testing of doctests in the sources finished, look at the " \ "results in $(BUILDDIR)/doctest/output.txt." borgbackup-1.1.15/docs/internals.rst0000644000175000017500000000310013771325506017360 0ustar useruser00000000000000.. include:: global.rst.inc .. _internals: Internals ========= The internals chapter describes and analyses most of the inner workings of Borg. Borg uses a low-level, key-value store, the :ref:`repository`, and implements a more complex data structure on top of it, which is made up of the :ref:`manifest `, :ref:`archives `, :ref:`items ` and data :ref:`chunks`. Each repository can hold multiple :ref:`archives `, which represent individual backups that contain a full archive of the files specified when the backup was performed. Deduplication is performed globally across all data in the repository (multiple backups and even multiple hosts), both on data and file metadata, using :ref:`chunks` created by the chunker using the Buzhash_ algorithm. 
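The chunker's behaviour can be influenced per archive on the ``borg create`` command line. As an illustration only, the following invocation simply spells out what are believed to be the 1.1 default chunker parameters (CHUNK_MIN_EXP, CHUNK_MAX_EXP, HASH_MASK_BITS, HASH_WINDOW_SIZE)::

    # illustrative sketch: explicitly passing the chunker parameters
    borg create --chunker-params 19,23,21,4095 /path/to/repo::archive ~/data
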
To actually perform the repository-wide deduplication, a hash of each chunk is checked against the :ref:`chunks cache `, which is a hash-table of all chunks that already exist. .. figure:: internals/structure.png :figwidth: 100% :width: 100% Layers in Borg. On the very top commands are implemented, using a data access layer provided by the Archive and Item classes. The "key" object provides both compression and authenticated encryption used by the data access layer. The "key" object represents the sole trust boundary in Borg. The lowest layer is the repository, either accessed directly (Repository) or remotely (RemoteRepository). .. toctree:: :caption: Internals contents internals/security internals/data-structures internals/frontends borgbackup-1.1.15/docs/deployment/0000755000175000017500000000000013771325773017023 5ustar useruser00000000000000borgbackup-1.1.15/docs/deployment/automated-local.rst0000644000175000017500000002063013771325506022623 0ustar useruser00000000000000.. include:: ../global.rst.inc .. highlight:: none Automated backups to a local hard drive ======================================= This guide shows how to automate backups to a hard drive directly connected to your computer. If a backup hard drive is connected, backups are automatically started, and the drive shut-down and disconnected when they are done. This guide is written for a Linux-based operating system and makes use of systemd and udev. Overview -------- An udev rule is created to trigger on the addition of block devices. The rule contains a tag that triggers systemd to start a oneshot service. The oneshot service executes a script in the standard systemd service environment, which automatically captures stdout/stderr and logs it to the journal. The script mounts the added block device, if it is a registered backup drive, and creates backups on it. When done, it optionally unmounts the file system and spins the drive down, so that it may be physically disconnected. Configuring the system ---------------------- First, create the ``/etc/backups`` directory (as root). All configuration goes into this directory. Then, create ``etc/backups/40-backup.rules`` with the following content (all on one line):: ACTION=="add", SUBSYSTEM=="bdi", DEVPATH=="/devices/virtual/bdi/*", TAG+="systemd", ENV{SYSTEMD_WANTS}="automatic-backup.service" .. topic:: Finding a more precise udev rule If you always connect the drive(s) to the same physical hardware path, e.g. the same eSATA port, then you can make a more precise udev rule. Execute ``udevadm monitor`` and connect a drive to the port you intend to use. You should see a flurry of events, find those regarding the `block` subsystem. Pick the event whose device path ends in something similar to a device file name, typically`sdX/sdXY`. Use the event's device path and replace `sdX/sdXY` after the `/block/` part in the path with a star (\*). For example: `DEVPATH=="/devices/pci0000:00/0000:00:11.0/ata3/host2/target2:0:0/2:0:0:0/block/*"`. Reboot a few times to ensure that the hardware path does not change: on some motherboards components of it can be random. In these cases you cannot use a more accurate rule, or need to insert additional stars for matching the path. The "systemd" tag in conjunction with the SYSTEMD_WANTS environment variable has systemd launch the "automatic-backup" service, which we will create next, as the ``/etc/backups/automatic-backup.service`` file: .. 
code-block:: ini [Service] Type=oneshot ExecStart=/etc/backups/run.sh Now, create the main backup script, ``/etc/backups/run.sh``. Below is a template, modify it to suit your needs (e.g. more backup sets, dumping databases etc.). .. code-block:: bash #!/bin/bash -ue # The udev rule is not terribly accurate and may trigger our service before # the kernel has finished probing partitions. Sleep for a bit to ensure # the kernel is done. # # This can be avoided by using a more precise udev rule, e.g. matching # a specific hardware path and partition. sleep 5 # # Script configuration # # The backup partition is mounted there MOUNTPOINT=/mnt/backup # This is the location of the Borg repository TARGET=$MOUNTPOINT/borg-backups/backup.borg # Archive name schema DATE=$(date --iso-8601)-$(hostname) # This is the file that will later contain UUIDs of registered backup drives DISKS=/etc/backups/backup.disks # Find whether the connected block device is a backup drive for uuid in $(lsblk --noheadings --list --output uuid) do if grep --quiet --fixed-strings $uuid $DISKS; then break fi uuid= done if [ ! $uuid ]; then echo "No backup disk found, exiting" exit 0 fi echo "Disk $uuid is a backup disk" partition_path=/dev/disk/by-uuid/$uuid # Mount file system if not already done. This assumes that if something is already # mounted at $MOUNTPOINT, it is the backup drive. It won't find the drive if # it was mounted somewhere else. (mount | grep $MOUNTPOINT) || mount $partition_path $MOUNTPOINT drive=$(lsblk --inverse --noheadings --list --paths --output name $partition_path | head --lines 1) echo "Drive path: $drive" # # Create backups # # Options for borg create BORG_OPTS="--stats --one-file-system --compression lz4 --checkpoint-interval 86400" # Set BORG_PASSPHRASE or BORG_PASSCOMMAND somewhere around here, using export, # if encryption is used. # No one can answer if Borg asks these questions, it is better to just fail quickly # instead of hanging. export BORG_RELOCATED_REPO_ACCESS_IS_OK=no export BORG_UNKNOWN_UNENCRYPTED_REPO_ACCESS_IS_OK=no # Log Borg version borg --version echo "Starting backup for $DATE" # This is just an example, change it however you see fit borg create $BORG_OPTS \ --exclude /root/.cache \ --exclude /var/cache \ --exclude /var/lib/docker/devicemapper \ $TARGET::$DATE-$$-system \ / /boot # /home is often a separate partition / file system. # Even if it isn't (add --exclude /home above), it probably makes sense # to have /home in a separate archive. borg create $BORG_OPTS \ --exclude 'sh:/home/*/.cache' \ $TARGET::$DATE-$$-home \ /home/ echo "Completed backup for $DATE" # Just to be completely paranoid sync if [ -f /etc/backups/autoeject ]; then umount $MOUNTPOINT hdparm -Y $drive fi if [ -f /etc/backups/backup-suspend ]; then systemctl suspend fi Create the ``/etc/backups/autoeject`` file to have the script automatically eject the drive after creating the backup. Rename the file to something else (e.g. ``/etc/backup/autoeject-no``) when you want to do something with the drive after creating backups (e.g running check). Create the ``/etc/backups/backup-suspend`` file if the machine should suspend after completing the backup. Don't forget to physically disconnect the device before resuming, otherwise you'll enter a cycle. You can also add an option to power down instead. Create an empty ``/etc/backups/backup.disks`` file, you'll register your backup drives there. The last part is to actually enable the udev rules and services: .. 
code-block:: bash ln -s /etc/backups/40-backup.rules /etc/udev/rules.d/40-backup.rules ln -s /etc/backups/automatic-backup.service /etc/systemd/system/automatic-backup.service systemctl daemon-reload udevadm control --reload Adding backup hard drives ------------------------- Connect your backup hard drive. Format it, if not done already. Find the UUID of the file system that backups should be stored on:: lsblk -o+uuid,label Note the UUID into the ``/etc/backup/backup.disks`` file. Mount the drive to /mnt/backup. Initialize a Borg repository at the location indicated by ``TARGET``:: borg init --encryption ... /mnt/backup/borg-backups/backup.borg Unmount and reconnect the drive, or manually start the ``automatic-backup`` service to start the first backup:: systemctl start --no-block automatic-backup See backup logs using journalctl:: journalctl -fu automatic-backup [-n number-of-lines] Security considerations ----------------------- The script as shown above will mount any file system with an UUID listed in ``/etc/backup/backup.disks``. The UUID check is a safety / annoyance-reduction mechanism to keep the script from blowing up whenever a random USB thumb drive is connected. It is not meant as a security mechanism. Mounting file systems and reading repository data exposes additional attack surfaces (kernel file system drivers, possibly user space services and Borg itself). On the other hand, someone standing right next to your computer can attempt a lot of attacks, most of which are easier to do than e.g. exploiting file systems (installing a physical key logger, DMA attacks, stealing the machine, ...). Borg ensures that backups are not created on random drives that "just happen" to contain a Borg repository. If an unknown unencrypted repository is encountered, then the script aborts (BORG_UNKNOWN_UNENCRYPTED_REPO_ACCESS_IS_OK=no). Backups are only created on hard drives that contain a Borg repository that is either known (by ID) to your machine or you are using encryption and the passphrase of the repository has to match the passphrase supplied to Borg. borgbackup-1.1.15/docs/deployment/hosting-repositories.rst0000644000175000017500000000652213771325506023754 0ustar useruser00000000000000.. include:: ../global.rst.inc .. highlight:: none .. _hosting_repositories: Hosting repositories ==================== This sections shows how to securely provide repository storage for users. Optionally, each user can have a storage quota. Repositories are accessed through SSH. Each user of the service should have her own login which is only able to access the user's files. Technically it would be possible to have multiple users share one login, however, separating them is better. Separate logins increase isolation and are thus an additional layer of security and safety for both the provider and the users. For example, if a user manages to breach ``borg serve`` then she can only damage her own data (assuming that the system does not have further vulnerabilities). Use the standard directory structure of the operating system. Each user is assigned a home directory and repositories of the user reside in her home directory. The following ``~user/.ssh/authorized_keys`` file is the most important piece for a correct deployment. It allows the user to login via their public key (which must be provided by the user), and restricts SSH access to safe operations only. :: command="borg serve --restrict-to-repository /home//repository",restrict .. note:: The text shown above needs to be written on a **single** line! 
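For illustration only: with a hypothetical user ``alice`` and her (shortened) public key appended, such an entry could look like this::

    command="borg serve --restrict-to-repository /home/alice/repository",restrict ssh-ed25519 AAAAC3Nz...rest_of_key alice@workstation
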
.. warning:: If this file should be automatically updated (e.g. by a web console), pay **utmost attention** to sanitizing user input. Strip all whitespace around the user-supplied key, ensure that it **only** contains ASCII with no control characters and that it consists of three parts separated by a single space. Ensure that no newlines are contained within the key. The ``restrict`` keyword enables all restrictions, i.e. disables port, agent and X11 forwarding, as well as disabling PTY allocation and execution of ~/.ssh/rc. If any future restriction capabilities are added to authorized_keys files they will be included in this set. The ``command`` keyword forces execution of the specified command line upon login. This must be ``borg serve``. The ``--restrict-to-repository`` option permits access to exactly **one** repository. It can be given multiple times to permit access to more than one repository. The repository may not exist yet; it can be initialized by the user, which allows for encryption. **Storage quotas** can be enabled by adding the ``--storage-quota`` option to the ``borg serve`` command line:: restrict,command="borg serve --storage-quota 20G ..." ... The storage quotas of repositories are completely independent. If a client is able to access multiple repositories, each repository can be filled to the specified quota. If storage quotas are used, ensure that all deployed Borg releases support storage quotas. Refer to :ref:`internals_storage_quota` for more details on storage quotas. **Specificities: Append-only repositories** Running ``borg init`` via a ``borg serve --append-only`` server will **not** create a repository that is configured to be append-only by its repository config. But, ``--append-only`` arguments in ``authorized_keys`` will override the repository config, therefore append-only mode can be enabled on a key by key basis. Refer to the `sshd(8) `_ man page for more details on SSH options. borgbackup-1.1.15/docs/deployment/pull-backup.rst0000644000175000017500000002607713771325506022002 0ustar useruser00000000000000.. include:: ../global.rst.inc .. highlight:: none Backing up in pull mode ======================= Assuming you have a pull backup system set up with borg, where a backup server pulls the data from the target via SSHFS. In this mode, the backup client's file system is mounted remotely on the backup server. Pull mode is even possible if the SSH connection must be established by the client via a remote tunnel. Other network file systems like NFS or SMB could be used as well, but SSHFS is very simple to set up and probably the most secure one. There are some restrictions caused by SSHFS. For example, unless you define UID and GID mappings when mounting via ``sshfs``, owners and groups of the mounted file system will probably change, and you may not have access to those files if BorgBackup is not run with root privileges. SSHFS is a FUSE file system and uses the SFTP protocol, so there may be also other unsupported features that the actual implementations of ssfs, libfuse and sftp on the backup server do not support, like file name encodings, ACLs, xattrs or bsdflags. So there is no guarantee that you are able to restore a system completely in every aspect from such a backup. .. warning:: To mount the client's root file system you will need root access to the client. This contradicts to the usual threat model of BorgBackup, where clients don't need to trust the backup server (data is encrypted). 
In pull mode the server (when logged in as root) could cause unlimited damage to the client. Therefore, pull mode should be used only from servers you do fully trust! Creating a backup ----------------- Generally, in a pull backup situation there is no direct way for borg to know the client's original UID:GID name mapping of files, because Borg would use ``/etc/passwd`` and ``/etc/group`` of the backup server to map the names. To derive the right names, Borg needs to have access to the client's passwd and group files and use them in the backup process. The solution to this problem is chrooting into an sshfs mounted directory. In this example the whole client root file system is mounted. We use the stand-alone BorgBackup executable and copy it into the mounted file system to make Borg available after entering chroot; this can be skipped if Borg is already installed on the client. :: # Mount client root file system. mkdir /tmp/sshfs sshfs root@host:/ /tmp/sshfs # Mount BorgBackup repository inside it. mkdir /tmp/sshfs/borgrepo mount --bind /path/to/repo /tmp/sshfs/borgrepo # Make borg executable available. cp /usr/local/bin/borg /tmp/sshfs/usr/local/bin/borg # Mount important system directories and enter chroot. cd /tmp/sshfs for i in dev proc sys; do mount --bind /$i $i; done chroot /tmp/sshfs Now we are on the backup system but inside a chroot with the client's root file system. We have a copy of Borg binary in ``/usr/local/bin`` and the repository in ``/borgrepo``. Borg will back up the client's user/group names, and we can create the backup, retaining the original paths, excluding the repository: :: borg create --exclude /borgrepo --files-cache ctime,size /borgrepo::archive / For the sake of simplicity only ``/borgrepo`` is excluded here. You may want to set up an exclude file with additional files and folders to be excluded. Also note that we have to modify Borg's file change detection behaviour – SSHFS cannot guarantee stable inode numbers, so we have to supply the ``--files-cache`` option. Finally, we need to exit chroot, unmount all the stuff and clean up: :: exit # exit chroot rm /tmp/sshfs/usr/local/bin/borg cd /tmp/sshfs for i in dev proc sys borgrepo; do umount ./$i; done rmdir borgrepo cd ~ umount /tmp/sshfs rmdir /tmp/sshfs Thanks to secuser on IRC for this how-to! Restore methods --------------- The counterpart of a pull backup is a push restore. Depending on the type of restore – full restore or partial restore – there are different methods to make sure the correct IDs are restored. Partial restore ~~~~~~~~~~~~~~~ In case of a partial restore, using the archived UIDs/GIDs might lead to wrong results if the name-to-ID mapping on the target system has changed compared to backup time (might be the case e.g. for a fresh OS install). The workaround again is chrooting into an sshfs mounted directory, so Borg is able to map the user/group names of the backup files to the actual IDs on the client. This example is similar to the backup above – only the Borg command is different: :: # Mount client root file system. mkdir /tmp/sshfs sshfs root@host:/ /tmp/sshfs # Mount BorgBackup repository inside it. mkdir /tmp/sshfs/borgrepo mount --bind /path/to/repo /tmp/sshfs/borgrepo # Make borg executable available. cp /usr/local/bin/borg /tmp/sshfs/usr/local/bin/borg # Mount important system directories and enter chroot. cd /tmp/sshfs for i in dev proc sys; do mount --bind /$i $i; done chroot /tmp/sshfs Now we can run :: borg extract /borgrepo::archive PATH to partially restore whatever we like. 
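For example, to restore just one user's documents (user name and path are made up for this example)::

    borg extract /borgrepo::archive home/johndoe/Documents
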
Finally, do the clean-up: :: exit # exit chroot rm /tmp/sshfs/usr/local/bin/borg cd /tmp/sshfs for i in dev proc sys borgrepo; do umount ./$i; done rmdir borgrepo cd ~ umount /tmp/sshfs rmdir /tmp/sshfs Full restore ~~~~~~~~~~~~ When doing a full restore, we restore all files (including the ones containing the ID-to-name mapping, ``/etc/passwd`` and ``/etc/group``). Everything will be consistent automatically if we restore the numeric IDs stored in the archive. So there is no need for a chroot environment; we just mount the client file system and extract a backup, utilizing the ``--numeric-owner`` option: :: sshfs root@host:/ /mnt/sshfs cd /mnt/sshfs borg extract --numeric-owner /path/to/repo::archive cd ~ umount /mnt/sshfs Simple (lossy) full restore ~~~~~~~~~~~~~~~~~~~~~~~~~~~ Using ``borg export-tar`` it is possible to stream a backup to the client and directly extract it without the need of mounting with SSHFS: :: borg export-tar /path/to/repo::archive - | ssh root@host 'tar -C / -x' Note that in this scenario the tar format is the limiting factor – it cannot restore all the advanced features that BorgBackup supports. See :ref:`borg_export-tar` for limitations. ssh-agent ========= In this scenario *borg-server* initiates an SSH connection to *borg-client* and forwards the authentication agent connection. After that, it works similar to the push mode: *borg-client* initiates another SSH connection back to *borg-server* using the forwarded authentication agent connection to authenticate itself, starts ``borg serve`` and communicates with it. Using this method requires ssh access of user *borgs* to *borgc@borg-client*, where: * *borgs* is the user on the server side with read/write access to local borg repository. * *borgc* is the user on the client side with read access to files meant to be backed up. Applying this method for automated backup operations ---------------------------------------------------- Assume that the borg-client host is untrusted. Therefore we do some effort to prevent a hostile user on the borg-client side to do something harmful. In case of a fully trusted borg-client the method could be simplified. Preparing the server side ~~~~~~~~~~~~~~~~~~~~~~~~~ Do this once for each client on *borg-server* to allow *borgs* to connect itself on *borg-server* using a dedicated ssh key: :: borgs@borg-server$ install -m 700 -d ~/.ssh/ borgs@borg-server$ ssh-keygen -N '' -t rsa -f ~/.ssh/borg-client_key borgs@borg-server$ { echo -n 'command="borg serve --append-only --restrict-to-repo ~/repo",restrict '; cat ~/.ssh/borg-client_key.pub; } >> ~/.ssh/authorized_keys borgs@borg-server$ chmod 600 ~/.ssh/authorized_keys ``install -m 700 -d ~/.ssh/`` Create directory ~/.ssh with correct permissions if it does not exist yet. ``ssh-keygen -N '' -t rsa -f ~/.ssh/borg-client_key`` Create an ssh key dedicated to communication with borg-client. .. note:: Another more complex approach is using a unique ssh key for each pull operation. This is more secure as it guarantees that the key will not be used for other purposes. ``{ echo -n 'command="borg serve --append-only --restrict-to-repo ~/repo",restrict '; cat ~/.ssh/borg-client_key.pub; } >> ~/.ssh/authorized_keys`` Add borg-client's ssh public key to ~/.ssh/authorized_keys with forced command and restricted mode. The borg client is restricted to use one repo at the specified path and to append-only operation. 
Commands like *delete*, *prune* and *compact* have to be executed another way, for example directly on *borg-server* side or from a privileged, less restricted client (using another authorized_keys entry). ``chmod 600 ~/.ssh/authorized_keys`` Fix permissions of ~/.ssh/authorized_keys. Pull operation ~~~~~~~~~~~~~~ Initiating borg command execution from *borg-server* (e.g. init):: borgs@borg-server$ ( eval $(ssh-agent) > /dev/null ssh-add -q ~/.ssh/borg-client_key echo 'your secure borg key passphrase' | \ ssh -A -o StrictHostKeyChecking=no borgc@borg-client "BORG_PASSPHRASE=\$(cat) borg --rsh 'ssh -o StrictHostKeyChecking=no' init --encryption repokey ssh://borgs@borg-server/~/repo" kill "${SSH_AGENT_PID}" ) Parentheses around commands are needed to avoid interference with a possibly already running ssh-agent. Parentheses are not needed when using a dedicated bash process. ``eval $(ssh-agent) > /dev/null`` Run the SSH agent in the background and export related environment variables to the current bash session. ``ssh-add -q ~/.ssh/borg-client_key`` Load the SSH private key dedicated to communication with the borg-client into the SSH agent. Look at ``man 1 ssh-add`` for a more detailed explanation. .. note:: Care needs to be taken when loading keys into the SSH agent. Users on the *borg-client* having read/write permissions to the agent's UNIX-domain socket (at least borgc and root in our case) can access the agent on *borg-server* through the forwarded connection and can authenticate using any of the identities loaded into the agent (look at ``man 1 ssh`` for more detailed explanation). Therefore there are some security considerations: * Private keys loaded into the agent must not be used to enable access anywhere else. * The keys meant to be loaded into the agent must be specified explicitly, not from default locations. * The *borg-client*'s entry in *borgs@borg-server:~/.ssh/authorized_keys* must be as restrictive as possible. ``echo 'your secure borg key passphrase' | ssh -A -o StrictHostKeyChecking=no borgc@borg-client "BORG_PASSPHRASE=\$(cat) borg --rsh 'ssh -o StrictHostKeyChecking=no' init --encryption repokey ssh://borgs@borg-server/~/repo"`` Run the *borg init* command on *borg-client*. *ssh://borgs@borg-server/~/repo* refers to the repository *repo* within borgs's home directory on *borg-server*. *StrictHostKeyChecking=no* is used to automatically add host keys to *~/.ssh/known_hosts* without user intervention. ``kill "${SSH_AGENT_PID}"`` Kill ssh-agent with loaded keys when it is not needed anymore. borgbackup-1.1.15/docs/deployment/image-backup.rst0000644000175000017500000001346613771325506022106 0ustar useruser00000000000000.. include:: ../global.rst.inc .. highlight:: none Backing up entire disk images ============================= Backing up disk images can still be efficient with Borg because its `deduplication`_ technique makes sure only the modified parts of the file are stored. Borg also has optional simple sparse file support for extract. Decreasing the size of image backups ------------------------------------ Disk images are as large as the full disk when uncompressed and might not get much smaller post-deduplication after heavy use because virtually all file systems don't actually delete file data on disk but instead delete the filesystem entries referencing the data. Therefore, if a disk nears capacity and files are deleted again, the change will barely decrease the space it takes up when compressed and deduplicated. 
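As a baseline for comparison (the device name is only a placeholder), a plain image backup reads the whole block device via ``--read-special``::

    # back up the block device contents instead of the device node itself
    borg create --read-special repo::hostname-disk /dev/sdX
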
Depending on the filesystem, there are several ways to decrease the size of a disk image: Using ntfsclone (NTFS, i.e. Windows VMs) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ ``ntfsclone`` can only operate on filesystems with the journal cleared (i.e. turned-off machines), which somewhat limits its utility in the case of VM snapshots. However, when it can be used, its special image format is even more efficient than just zeroing and deduplicating. For backup, save the disk header and the contents of each partition:: HEADER_SIZE=$(sfdisk -lo Start $DISK | grep -A1 -P 'Start$' | tail -n1 | xargs echo) PARTITIONS=$(sfdisk -lo Device,Type $DISK | sed -e '1,/Device\s*Type/d') dd if=$DISK count=$HEADER_SIZE | borg create repo::hostname-partinfo - echo "$PARTITIONS" | grep NTFS | cut -d' ' -f1 | while read x; do PARTNUM=$(echo $x | grep -Eo "[0-9]+$") ntfsclone -so - $x | borg create repo::hostname-part$PARTNUM - done # to backup non-NTFS partitions as well: echo "$PARTITIONS" | grep -v NTFS | cut -d' ' -f1 | while read x; do PARTNUM=$(echo $x | grep -Eo "[0-9]+$") borg create --read-special repo::hostname-part$PARTNUM $x done Restoration is a similar process:: borg extract --stdout repo::hostname-partinfo | dd of=$DISK && partprobe PARTITIONS=$(sfdisk -lo Device,Type $DISK | sed -e '1,/Device\s*Type/d') borg list --format {archive}{NL} repo | grep 'part[0-9]*$' | while read x; do PARTNUM=$(echo $x | grep -Eo "[0-9]+$") PARTITION=$(echo "$PARTITIONS" | grep -E "$DISKp?$PARTNUM" | head -n1) if echo "$PARTITION" | cut -d' ' -f2- | grep -q NTFS; then borg extract --stdout repo::$x | ntfsclone -rO $(echo "$PARTITION" | cut -d' ' -f1) - else borg extract --stdout repo::$x | dd of=$(echo "$PARTITION" | cut -d' ' -f1) fi done .. note:: When backing up a disk image (as opposed to a real block device), mount it as a loopback image to use the above snippets:: DISK=$(losetup -Pf --show /path/to/disk/image) # do backup as shown above losetup -d $DISK Using zerofree (ext2, ext3, ext4) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ ``zerofree`` works similarly to ntfsclone in that it zeros out unused chunks of the FS, except it works in place, zeroing the original partition. This makes the backup process a bit simpler:: sfdisk -lo Device,Type $DISK | sed -e '1,/Device\s*Type/d' | grep Linux | cut -d' ' -f1 | xargs -n1 zerofree borg create --read-special repo::hostname-disk $DISK Because the partitions were zeroed in place, restoration is only one command:: borg extract --stdout repo::hostname-disk | dd of=$DISK .. note:: The "traditional" way to zero out space on a partition, especially one already mounted, is to simply ``dd`` from ``/dev/zero`` to a temporary file and delete it. This is ill-advised for the reasons mentioned in the ``zerofree`` man page: - it is slow - it makes the disk image (temporarily) grow to its maximal extent - it (temporarily) uses all free space on the disk, so other concurrent write actions may fail. Virtual machines ---------------- If you use non-snapshotting backup tools like Borg to back up virtual machines, then the VMs should be turned off for the duration of the backup. Backing up live VMs can (and will) result in corrupted or inconsistent backup contents: a VM image is just a regular file to Borg with the same issues as regular files when it comes to concurrent reading and writing from the same file. For backing up live VMs use filesystem snapshots on the VM host, which establishes crash-consistency for the VM images. 
This means that with most file systems (that are journaling) the FS will always be fine in the backup (but may need a journal replay to become accessible). Usually this does not mean that file *contents* on the VM are consistent, since file contents are normally not journaled. Notable exceptions are ext4 in data=journal mode, ZFS and btrfs (unless nodatacow is used). Applications designed with crash-consistency in mind (most relational databases like PostgreSQL, SQLite etc. but also for example Borg repositories) should always be able to recover to a consistent state from a backup created with crash-consistent snapshots (even on ext4 with data=writeback or XFS). Other applications may require a lot of work to reach application-consistency; it's a broad and complex issue that cannot be explained in entirety here. Hypervisor snapshots capturing most of the VM's state can also be used for backups and can be a better alternative to pure file system based snapshots of the VM's disk, since no state is lost. Depending on the application this can be the easiest and most reliable way to create application-consistent backups. Borg doesn't intend to address these issues due to their huge complexity and platform/software dependency. Combining Borg with the mechanisms provided by the platform (snapshots, hypervisor features) will be the best approach to start tackling them.borgbackup-1.1.15/docs/deployment/central-backup-server.rst0000644000175000017500000001704313771325506023753 0ustar useruser00000000000000.. include:: ../global.rst.inc .. highlight:: none Central repository server with Ansible or Salt ============================================== This section will give an example how to setup a borg repository server for multiple clients. Machines -------- There are multiple machines used in this section and will further be named by their respective fully qualified domain name (fqdn). * The backup server: `backup01.srv.local` * The clients: - John Doe's desktop: `johndoe.clnt.local` - Webserver 01: `web01.srv.local` - Application server 01: `app01.srv.local` User and group -------------- The repository server needs to have only one UNIX user for all the clients. Recommended user and group with additional settings: * User: `backup` * Group: `backup` * Shell: `/bin/bash` (or other capable to run the `borg serve` command) * Home: `/home/backup` Most clients shall initiate a backup from the root user to catch all users, groups and permissions (e.g. when backing up `/home`). Folders ------- The following folder tree layout is suggested on the repository server: * User home directory, /home/backup * Repositories path (storage pool): /home/backup/repos * Clients restricted paths (`/home/backup/repos/`): - johndoe.clnt.local: `/home/backup/repos/johndoe.clnt.local` - web01.srv.local: `/home/backup/repos/web01.srv.local` - app01.srv.local: `/home/backup/repos/app01.srv.local` Restrictions ------------ Borg is instructed to restrict clients into their own paths: ``borg serve --restrict-to-path /home/backup/repos/`` The client will be able to access any file or subdirectory inside of ``/home/backup/repos/`` but no other directories. You can allow a client to access several separate directories by passing multiple ``--restrict-to-path`` flags, for instance: ``borg serve --restrict-to-path /home/backup/repos/ --restrict-to-path /home/backup/repos/``, which could make sense if multiple machines belong to one person which should then have access to all the backups of their machines. 
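For example, restricting John Doe's desktop to its own storage path from the layout shown above would look like this::

    borg serve --restrict-to-path /home/backup/repos/johndoe.clnt.local
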
There is only one ssh key per client allowed. Keys are added for ``johndoe.clnt.local``, ``web01.srv.local`` and ``app01.srv.local``. But they will access the backup under only one UNIX user account as: ``backup@backup01.srv.local``. Every key in ``$HOME/.ssh/authorized_keys`` has a forced command and restrictions applied as shown below: :: command="cd /home/backup/repos/; borg serve --restrict-to-path /home/backup/repos/", restrict .. note:: The text shown above needs to be written on a single line! The options which are added to the key will perform the following: 1. Change working directory 2. Run ``borg serve`` restricted to the client base path 3. Restrict ssh and do not allow stuff which imposes a security risk Due to the ``cd`` command we use, the server automatically changes the current working directory. Then client doesn't need to have knowledge of the absolute or relative remote repository path and can directly access the repositories at ``@:``. .. note:: The setup above ignores all client given commandline parameters which are normally appended to the `borg serve` command. Client ------ The client needs to initialize the `pictures` repository like this: :: borg init backup@backup01.srv.local:pictures Or with the full path (should actually never be used, as only for demonstrational purposes). The server should automatically change the current working directory to the `` folder. :: borg init backup@backup01.srv.local:/home/backup/repos/johndoe.clnt.local/pictures When `johndoe.clnt.local` tries to access a not restricted path the following error is raised. John Doe tries to backup into the Web 01 path: :: borg init backup@backup01.srv.local:/home/backup/repos/web01.srv.local/pictures :: ~~~ SNIP ~~~ Remote: borg.remote.PathNotAllowed: /home/backup/repos/web01.srv.local/pictures ~~~ SNIP ~~~ Repository path not allowed Ansible ------- Ansible takes care of all the system-specific commands to add the user, create the folder, install and configure software. :: - hosts: backup01.srv.local vars: user: backup group: backup home: /home/backup pool: "{{ home }}/repos" auth_users: - host: johndoe.clnt.local key: "{{ lookup('file', '/path/to/keys/johndoe.clnt.local.pub') }}" - host: web01.clnt.local key: "{{ lookup('file', '/path/to/keys/web01.clnt.local.pub') }}" - host: app01.clnt.local key: "{{ lookup('file', '/path/to/keys/app01.clnt.local.pub') }}" tasks: - package: name=borg state=present - group: name="{{ group }}" state=present - user: name="{{ user }}" shell=/bin/bash home="{{ home }}" createhome=yes group="{{ group }}" groups= state=present - file: path="{{ home }}" owner="{{ user }}" group="{{ group }}" mode=0700 state=directory - file: path="{{ home }}/.ssh" owner="{{ user }}" group="{{ group }}" mode=0700 state=directory - file: path="{{ pool }}" owner="{{ user }}" group="{{ group }}" mode=0700 state=directory - authorized_key: user="{{ user }}" key="{{ item.key }}" key_options='command="cd {{ pool }}/{{ item.host }};borg serve --restrict-to-path {{ pool }}/{{ item.host }}",restrict' with_items: "{{ auth_users }}" - file: path="{{ home }}/.ssh/authorized_keys" owner="{{ user }}" group="{{ group }}" mode=0600 state=file - file: path="{{ pool }}/{{ item.host }}" owner="{{ user }}" group="{{ group }}" mode=0700 state=directory with_items: "{{ auth_users }}" Salt ---- This is a configuration similar to the one above, configured to be deployed with Salt running on a Debian system. 
:: Install borg backup from pip: pkg.installed: - pkgs: - python3 - python3-dev - python3-pip - python-virtualenv - libssl-dev - openssl - libacl1-dev - libacl1 - build-essential - libfuse-dev - fuse - pkg-config pip.installed: - pkgs: ["borgbackup"] - bin_env: /usr/bin/pip3 Setup backup user: user.present: - name: backup - fullname: Backup User - home: /home/backup - shell: /bin/bash # CAUTION! # If you change the ssh command= option below, it won't necessarily get pushed to the backup # server correctly unless you delete the ~/.ssh/authorized_keys file and re-create it! {% for host in backupclients %} Give backup access to {{host}}: ssh_auth.present: - user: backup - source: salt://conf/ssh-pubkeys/{{host}}-backup.id_ecdsa.pub - options: - command="cd /home/backup/repos/{{host}}; borg serve --restrict-to-path /home/backup/repos/{{host}}" - restrict {% endfor %} Enhancements ------------ As this section only describes a simple and effective setup it could be further enhanced when supporting (a limited set) of client supplied commands. A wrapper for starting `borg serve` could be written. Or borg itself could be enhanced to autodetect it runs under SSH by checking the `SSH_ORIGINAL_COMMAND` environment variable. This is left open for future improvements. When extending ssh autodetection in borg no external wrapper script is necessary and no other interpreter or application has to be deployed. See also -------- * `SSH Daemon manpage `_ * `Ansible `_ * `Salt `_ borgbackup-1.1.15/docs/installation.rst0000644000175000017500000003302513771325506020073 0ustar useruser00000000000000.. include:: global.rst.inc .. highlight:: bash .. _installation: Installation ============ There are different ways to install |project_name|: - :ref:`distribution-package` - easy and fast if a package is available from your distribution. - :ref:`pyinstaller-binary` - easy and fast, we provide a ready-to-use binary file that comes bundled with all dependencies. - :ref:`source-install`, either: - :ref:`pip-installation` - installing a source package with pip needs more installation steps and requires all dependencies with development headers and a compiler. - :ref:`git-installation` - for developers and power users who want to have the latest code or use revision control (each release is tagged). .. _distribution-package: Distribution Package -------------------- Some distributions might offer a ready-to-use ``borgbackup`` package which can be installed with the package manager. .. important:: Those packages may not be up to date with the latest |project_name| releases. Before submitting a bug report, check the package version and compare that to our latest release then review :doc:`changes` to see if the bug has been fixed. Report bugs to the package maintainer rather than directly to |project_name| if the package is out of date in the distribution. .. 
keep this list in alphabetical order ============ ============================================= ======= Distribution Source Command ============ ============================================= ======= Alpine Linux `Alpine repository`_ ``apk add borgbackup`` Arch Linux `[community]`_ ``pacman -S borg`` Debian `Debian packages`_ ``apt install borgbackup`` Gentoo `ebuild`_ ``emerge borgbackup`` GNU Guix `GNU Guix`_ ``guix package --install borg`` Fedora/RHEL `Fedora official repository`_ ``dnf install borgbackup`` FreeBSD `FreeBSD ports`_ ``cd /usr/ports/archivers/py-borgbackup && make install clean`` macOS `Homebrew`_ ``brew install borgbackup`` Mageia `cauldron`_ ``urpmi borgbackup`` NetBSD `pkgsrc`_ ``pkg_add py-borgbackup`` NixOS `.nix file`_ N/A OpenBSD `OpenBSD ports`_ ``pkg_add borgbackup`` OpenIndiana `OpenIndiana hipster repository`_ ``pkg install borg`` openSUSE `openSUSE official repository`_ ``zypper in borgbackup`` Raspbian `Raspbian testing`_ ``apt install borgbackup`` Ubuntu `Ubuntu packages`_, `Ubuntu PPA`_ ``apt install borgbackup`` ============ ============================================= ======= .. _Alpine repository: https://pkgs.alpinelinux.org/packages?name=borgbackup .. _[community]: https://www.archlinux.org/packages/?name=borg .. _Debian packages: https://packages.debian.org/search?keywords=borgbackup&searchon=names&exact=1&suite=all§ion=all .. _Fedora official repository: https://apps.fedoraproject.org/packages/borgbackup .. _FreeBSD ports: https://www.freshports.org/archivers/py-borgbackup/ .. _ebuild: https://packages.gentoo.org/packages/app-backup/borgbackup .. _GNU Guix: https://www.gnu.org/software/guix/package-list.html#borg .. _pkgsrc: http://pkgsrc.se/sysutils/py-borgbackup .. _cauldron: http://madb.mageia.org/package/show/application/0/release/cauldron/name/borgbackup .. _.nix file: https://github.com/NixOS/nixpkgs/blob/master/pkgs/tools/backup/borg/default.nix .. _OpenBSD ports: https://cvsweb.openbsd.org/cgi-bin/cvsweb/ports/sysutils/borgbackup/ .. _OpenIndiana hipster repository: https://pkg.openindiana.org/hipster/en/search.shtml?token=borg&action=Search .. _openSUSE official repository: https://software.opensuse.org/package/borgbackup .. _Homebrew: https://formulae.brew.sh/formula/borgbackup .. _Raspbian testing: https://archive.raspbian.org/raspbian/pool/main/b/borgbackup/ .. _Ubuntu packages: https://packages.ubuntu.com/xenial/borgbackup .. _Ubuntu PPA: https://launchpad.net/~costamagnagianfranco/+archive/ubuntu/borgbackup Please ask package maintainers to build a package or, if you can package / submit it yourself, please help us with that! See :issue:`105` on github to followup on packaging efforts. .. _pyinstaller-binary: Standalone Binary ----------------- .. note:: Releases are signed with an OpenPGP key, see :ref:`security-contact` for more instructions. |project_name| x86/x64 amd/intel compatible binaries (generated with `pyinstaller`_) are available on the releases_ page for the following platforms: * **Linux**: glibc >= 2.19 (ok for most supported Linux releases). Older glibc releases are untested and may not work. 
* **Mac OS X**: 10.12 (unknown whether it works for older releases) * **FreeBSD**: 10.3 (unknown whether it works for older releases) ARM binaries are built by Johann Bauer, see: https://borg.bauerj.eu/ To install such a binary, just drop it into a directory in your ``PATH``, make borg readable and executable for its users and then you can run ``borg``:: sudo cp borg-linux64 /usr/local/bin/borg sudo chown root:root /usr/local/bin/borg sudo chmod 755 /usr/local/bin/borg Optionally you can create a symlink to have ``borgfs`` available, which is an alias for ``borg mount``:: ln -s /usr/local/bin/borg /usr/local/bin/borgfs Note that the binary uses /tmp to unpack |project_name| with all dependencies. It will fail if /tmp has not enough free space or is mounted with the ``noexec`` option. You can change the temporary directory by setting the ``TEMP`` environment variable before running |project_name|. If a new version is released, you will have to manually download it and replace the old version using the same steps as shown above. .. _pyinstaller: http://www.pyinstaller.org .. _releases: https://github.com/borgbackup/borg/releases .. _source-install: From Source ----------- .. note:: Some older Linux systems (like RHEL/CentOS 5) and Python interpreter binaries compiled to be able to run on such systems (like Python installed via Anaconda) might miss functions required by Borg. This issue will be detected early and Borg will abort with a fatal error. Dependencies ~~~~~~~~~~~~ To install |project_name| from a source package (including pip), you have to install the following dependencies first: * `Python 3`_ >= 3.5.0, plus development headers. Even though Python 3 is not the default Python version on most systems, it is usually available as an optional install. * OpenSSL_ >= 1.0.0, plus development headers. * libacl_ (which depends on libattr_), both plus development headers. * We have bundled code of the following packages, but borg by default (see setup.py if you want to change that) prefers a shared library if it can be found on the system (lib + dev headers) at build time: - liblz4_ >= 1.7.0 (r129) - libzstd_ >= 1.3.0 - libb2_ * some Python dependencies, pip will automatically install them for you * optionally, the llfuse_ Python package is required if you wish to mount an archive as a FUSE filesystem. See setup.py about the version requirements. If you have troubles finding the right package names, have a look at the distribution specific sections below or the Vagrantfile in the git repository, which contains installation scripts for a number of operating systems. In the following, the steps needed to install the dependencies are listed for a selection of platforms. If your distribution is not covered by these instructions, try to use your package manager to install the dependencies. On FreeBSD, you may need to get a recent enough OpenSSL version from FreeBSD ports. After you have installed the dependencies, you can proceed with steps outlined under :ref:`pip-installation`. Debian / Ubuntu +++++++++++++++ Install the dependencies with development headers:: sudo apt-get install python3 python3-dev python3-pip python-virtualenv \ libssl-dev openssl \ libacl1-dev libacl1 \ build-essential sudo apt-get install libfuse-dev fuse pkg-config # optional, for FUSE support In case you get complaints about permission denied on ``/etc/fuse.conf``: on Ubuntu this means your user is not in the ``fuse`` group. Add yourself to that group, log out and log in again. 
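One possible way to do that (``fuse`` being the group named in the error message; your distribution may offer its own tooling)::

    sudo usermod -a -G fuse $USER
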
Fedora / Korora +++++++++++++++ Install the dependencies with development headers:: sudo dnf install python3 python3-devel python3-pip python3-virtualenv sudo dnf install openssl-devel openssl sudo dnf install libacl-devel libacl sudo dnf install gcc gcc-c++ sudo dnf install redhat-rpm-config # not needed in Korora sudo dnf install fuse-devel fuse pkgconfig # optional, for FUSE support openSUSE Tumbleweed / Leap ++++++++++++++++++++++++++ Install the dependencies automatically using zypper:: sudo zypper source-install --build-deps-only borgbackup Alternatively, you can enumerate all build dependencies in the command line:: sudo zypper install python3 python3-devel \ libacl-devel openssl-devel \ python3-Cython python3-Sphinx python3-msgpack-python \ python3-pytest python3-setuptools python3-setuptools_scm \ python3-sphinx_rtd_theme python3-llfuse gcc gcc-c++ Mac OS X ++++++++ When installing via Homebrew_, dependencies are installed automatically. To install dependencies manually:: brew install python3 openssl brew install pkg-config # optional, for FUSE support pip3 install virtualenv For FUSE support to mount the backup archives, you need at least version 3.0 of FUSE for OS X, which is available via `Github `__, or Homebrew:: brew cask install osxfuse For OS X Catalina and later, be aware that you must authorize full disk access. It is no longer sufficient to run borg backups as root. If you have not yet granted full disk access, and you run Borg backup from cron, you will see messages such as:: /Users/you/Pictures/Photos Library.photoslibrary: scandir: [Errno 1] Operation not permitted: To fix this problem, you should grant full disk acccess to cron, and to your Terminal application. More information `can be found here `__. FreeBSD ++++++++ Listed below are packages you will need to install |project_name|, its dependencies, and commands to make FUSE work for using the mount command. :: pkg install -y python3 openssl fusefs-libs pkgconf pkg install -y git python3 -m ensurepip # to install pip for Python3 To use the mount command: echo 'fuse_load="YES"' >> /boot/loader.conf echo 'vfs.usermount=1' >> /etc/sysctl.conf kldload fuse sysctl vfs.usermount=1 Windows 10's Linux Subsystem ++++++++++++++++++++++++++++ .. note:: Running under Windows 10's Linux Subsystem is experimental and has not been tested much yet. Just follow the Ubuntu Linux installation steps. You can omit the FUSE stuff, it won't work anyway. Cygwin ++++++ .. note:: Running under Cygwin is experimental and has not been tested much yet. Use the Cygwin installer to install the dependencies:: python3 python3-devel python3-setuptools binutils gcc-g++ libopenssl openssl-devel git make openssh You can then install ``pip`` and ``virtualenv``:: easy_install-3.7 pip pip install virtualenv .. _pip-installation: Using pip ~~~~~~~~~ Virtualenv_ can be used to build and install |project_name| without affecting the system Python or requiring root access. Using a virtual environment is optional, but recommended except for the most simple use cases. .. note:: If you install into a virtual environment, you need to **activate** it first (``source borg-env/bin/activate``), before running ``borg``. Alternatively, symlink ``borg-env/bin/borg`` into some directory that is in your ``PATH`` so you can just run ``borg``. 
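A possible way to set up such a symlink, assuming ``~/.local/bin`` is in your ``PATH`` and the virtual env lives in ``~/borg-env``::

    ln -s ~/borg-env/bin/borg ~/.local/bin/borg
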
This will use ``pip`` to install the latest release from PyPi:: virtualenv --python=python3 borg-env source borg-env/bin/activate # might be required if your tools are outdated pip install -U pip setuptools wheel # install Borg + Python dependencies into virtualenv pip install borgbackup # or alternatively (if you want FUSE support): pip install borgbackup[fuse] To upgrade |project_name| to a new version later, run the following after activating your virtual environment:: pip install -U borgbackup # or ... borgbackup[fuse] .. _git-installation: Using git ~~~~~~~~~ This uses latest, unreleased development code from git. While we try not to break master, there are no guarantees on anything. :: # get borg from github git clone https://github.com/borgbackup/borg.git virtualenv --python=python3 borg-env source borg-env/bin/activate # always before using! # install borg + dependencies into virtualenv cd borg pip install -r requirements.d/development.txt pip install -r requirements.d/docs.txt # optional, to build the docs pip install -r requirements.d/fuse.txt # optional, for FUSE support pip install -e . # in-place editable mode # optional: run all the tests, on all supported Python versions # requires fakeroot, available through your package manager fakeroot -u tox .. note:: As a developer or power user, you always want to use a virtual environment. borgbackup-1.1.15/docs/_templates/0000755000175000017500000000000013771325773017000 5ustar useruser00000000000000borgbackup-1.1.15/docs/_templates/logo-text.html0000644000175000017500000000024013771325506021576 0ustar useruser00000000000000 borgbackup-1.1.15/docs/_templates/globaltoc.html0000644000175000017500000000117113771325506021626 0ustar useruser00000000000000 borgbackup-1.1.15/docs/usage_general.rst.inc0000644000175000017500000000103313771325506020735 0ustar useruser00000000000000.. include:: usage/general/positional-arguments.rst.inc .. include:: usage/general/repository-urls.rst.inc .. include:: usage/general/repository-locations.rst.inc .. include:: usage/general/logging.rst.inc .. include:: usage/general/return-codes.rst.inc .. include:: usage/general/environment.rst.inc .. include:: usage/general/file-systems.rst.inc .. include:: usage/general/units.rst.inc .. include:: usage/general/date-time.rst.inc .. include:: usage/general/resources.rst.inc .. include:: usage/general/file-metadata.rst.inc borgbackup-1.1.15/docs/internals/0000755000175000017500000000000013771325773016642 5ustar useruser00000000000000borgbackup-1.1.15/docs/internals/object-graph.vsd0000644000175000017500000044700013771325506021724 0ustar useruser00000000000000ࡱ>  Root EntryRoot EntryF@̣pVisioDocument 4SummaryInformation( DocumentSummaryInformation8  !"#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\]^_`abcdefghijklmnopqrstuvwxyz{|}~VisioInformation" ՜.+,D՜.+,0 `ht    Zeichenblatt-1SZeichenblatt-2SZeiger (1-D)2SByte oder VariableK Rechteck VaFunktion / Subroutinemm Umgekehrte geschweifte KlammerDesigneffekte.1Dynamischer Verbinder KTextanmerkungrb ZeichenbltterMaster-Shapes0|_PID_LINKBASE_VPID_ALTERNATENAMES _TemplateIDATC010498411031Oh+'0 X`lx  Microsoft Visio@`p      !"#$%Visio (TM) Drawing 4LZ3cR}@}XAwH}@}lAwH}A<u+ !fffMMM333Iya͋V׻ę5@@@FFFG}oƒUJ:DT5I[1hXT@. .Ub##/am0U\?dK[&/~&b%2~ ! oLh $c)P?#~)~,,J & "& ,i 4&?C"/.?L?\.?o A  },,,'p/%O6$X6Q (}j?k)?"   
borgbackup-1.1.15/docs/internals/frontends.rst0000644000175000017500000005501513771325506021376 0ustar useruser00000000000000.. include:: ../global.rst.inc .. highlight:: none .. _json_output: All about JSON: How to develop frontends ======================================== Borg does not have a public API on the Python level. That does not keep you from writing :code:`import borg`, but does mean that there are no release-to-release guarantees on what you might find in that package, not even for point releases (1.1.x), and there is no documentation beyond the code and the internals documents. Borg does, on the other hand, provide an API on a command-line level. In other words, to (for example) create a backup archive, a frontend should just invoke :ref:`borg_create`, give command-line parameters/options as needed and parse the JSON output from borg. Important: JSON output is expected to be UTF-8, but currently borg depends on the locale being configured for that (must be a UTF-8 locale and *not* "C" or "ascii"), so that Python will choose to encode to UTF-8. The same applies to any inputs read by borg; they are expected to be UTF-8 encoded as well. We consider this a bug (see :issue:`2273`) and might fix it later, so that borg will use UTF-8 independent of the locale. On POSIX systems, you can usually set environment vars to choose a UTF-8 locale: :: export LANG=en_US.UTF-8 export LC_CTYPE=en_US.UTF-8 Logging ------- Especially for graphical frontends it is important to be able to convey and reformat progress information in meaningful ways. The ``--log-json`` option turns the stderr stream of Borg into a stream of JSON lines, where each line is a JSON object. The *type* key of the object determines its other contents. .. warning:: JSON logging requires successful argument parsing. Even with ``--log-json`` specified, a parsing error will be printed in plain text, because logging set-up happens after all arguments are parsed. Since JSON can only encode text, any string representing a file system path may miss non-text parts. The following types are in use. Progress information is governed by the usual rules for progress information; it is not produced unless ``--progress`` is specified. archive_progress Output during operations creating archives (:ref:`borg_create` and :ref:`borg_recreate`). The following keys exist; each represents the current progress. original_size Original size of data processed so far (before compression and deduplication) compressed_size Compressed size deduplicated_size Deduplicated size nfiles Number of (regular) files processed so far path Current path time Unix timestamp (float) progress_message Message-based progress information with no concrete progress values, just a message saying what is currently being worked on.
operation unique, opaque integer ID of the operation :ref:`msgid ` Message ID of the operation (may be *null*) finished boolean indicating whether the operation has finished, only the last object for an *operation* can have this property set to *true*. message current progress message (may be empty/absent) time Unix timestamp (float) progress_percent Absolute progress information with defined end/total and current value. operation unique, opaque integer ID of the operation :ref:`msgid ` Message ID of the operation (may be *null*) finished boolean indicating whether the operation has finished, only the last object for an *operation* can have this property set to *true*. message A formatted progress message, this will include the percentage and perhaps other information current Current value (always less-or-equal to *total*) info Array that describes the current item, may be *null*, contents depend on *msgid* total Total value time Unix timestamp (float) file_status This is only output by :ref:`borg_create` and :ref:`borg_recreate` if ``--list`` is specified. The usual rules for the file listing applies, including the ``--filter`` option. status Single-character status as for regular list output path Path of the file system object log_message Any regular log output invokes this type. Regular log options and filtering applies to these as well. time Unix timestamp (float) levelname Upper-case log level name (also called severity). Defined levels are: DEBUG, INFO, WARNING, ERROR, CRITICAL name Name of the emitting entity message Formatted log message :ref:`msgid ` Message ID, may be *null* or absent See Prompts_ for the types used by prompts. .. rubric:: Examples (reformatted, each object would be on exactly one line) .. highlight:: json :ref:`borg_extract` progress:: {"message": "100.0% Extracting: src/borgbackup.egg-info/entry_points.txt", "current": 13000228, "total": 13004993, "info": ["src/borgbackup.egg-info/entry_points.txt"], "operation": 1, "msgid": "extract", "type": "progress_percent", "finished": false} {"message": "100.0% Extracting: src/borgbackup.egg-info/SOURCES.txt", "current": 13004993, "total": 13004993, "info": ["src/borgbackup.egg-info/SOURCES.txt"], "operation": 1, "msgid": "extract", "type": "progress_percent", "finished": false} {"operation": 1, "msgid": "extract", "type": "progress_percent", "finished": true} :ref:`borg_create` file listing with progress:: {"original_size": 0, "compressed_size": 0, "deduplicated_size": 0, "nfiles": 0, "type": "archive_progress", "path": "src"} {"type": "file_status", "status": "U", "path": "src/borgbackup.egg-info/entry_points.txt"} {"type": "file_status", "status": "U", "path": "src/borgbackup.egg-info/SOURCES.txt"} {"type": "file_status", "status": "d", "path": "src/borgbackup.egg-info"} {"type": "file_status", "status": "d", "path": "src"} {"original_size": 13176040, "compressed_size": 11386863, "deduplicated_size": 503, "nfiles": 277, "type": "archive_progress", "path": ""} Internal transaction progress:: {"message": "Saving files cache", "operation": 2, "msgid": "cache.commit", "type": "progress_message", "finished": false} {"message": "Saving cache config", "operation": 2, "msgid": "cache.commit", "type": "progress_message", "finished": false} {"message": "Saving chunks cache", "operation": 2, "msgid": "cache.commit", "type": "progress_message", "finished": false} {"operation": 2, "msgid": "cache.commit", "type": "progress_message", "finished": true} A debug log message:: {"message": "35 self tests completed in 0.08 seconds", 
"type": "log_message", "created": 1488278449.5575905, "levelname": "DEBUG", "name": "borg.archiver"} Prompts ------- Prompts assume a JSON form as well when the ``--log-json`` option is specified. Responses are still read verbatim from *stdin*, while prompts are JSON messages printed to *stderr*, just like log messages. Prompts use the *question_prompt* and *question_prompt_retry* types for the prompt itself, and *question_invalid_answer*, *question_accepted_default*, *question_accepted_true*, *question_accepted_false* and *question_env_answer* types for information about prompt processing. The *message* property contains the same string displayed regularly in the same situation, while the *msgid* property may contain a msgid_, typically the name of the environment variable that can be used to override the prompt. It is the same for all JSON messages pertaining to the same prompt. .. rubric:: Examples (reformatted, each object would be on exactly one line) .. highlight:: none Providing an invalid answer:: {"type": "question_prompt", "msgid": "BORG_CHECK_I_KNOW_WHAT_I_AM_DOING", "message": "... Type 'YES' if you understand this and want to continue: "} incorrect answer # input on stdin {"type": "question_invalid_answer", "msgid": "BORG_CHECK_I_KNOW_WHAT_I_AM_DOING", "is_prompt": false, "message": "Invalid answer, aborting."} Providing a false (negative) answer:: {"type": "question_prompt", "msgid": "BORG_CHECK_I_KNOW_WHAT_I_AM_DOING", "message": "... Type 'YES' if you understand this and want to continue: "} NO # input on stdin {"type": "question_accepted_false", "msgid": "BORG_CHECK_I_KNOW_WHAT_I_AM_DOING", "message": "Aborting.", "is_prompt": false} Providing a true (affirmative) answer:: {"type": "question_prompt", "msgid": "BORG_CHECK_I_KNOW_WHAT_I_AM_DOING", "message": "... Type 'YES' if you understand this and want to continue: "} YES # input on stdin # no further output, just like the prompt without --log-json Passphrase prompts ------------------ Passphrase prompts should be handled differently. Use the environment variables *BORG_PASSPHRASE* and *BORG_NEW_PASSPHRASE* (see :ref:`env_vars` for reference) to pass passphrases to Borg, don't use the interactive passphrase prompts. When setting a new passphrase (:ref:`borg_init`, :ref:`borg_key_change-passphrase`) normally Borg prompts whether it should display the passphrase. This can be suppressed by setting the environment variable *BORG_DISPLAY_PASSPHRASE* to *no*. When "confronted" with an unknown repository, where the application does not know whether the repository is encrypted, the following algorithm can be followed to detect encryption: 1. Set *BORG_PASSPHRASE* to gibberish (for example a freshly generated UUID4, which cannot possibly be the passphrase) 2. Invoke ``borg list repository ...`` 3. If this fails, due the repository being encrypted and the passphrase obviously being wrong, you'll get an error with the *PassphraseWrong* msgid. The repository is encrypted, for further access the application will need the passphrase. 4. If this does not fail, then the repository is not encrypted. Standard output --------------- *stdout* is different and more command-dependent than logging. Commands like :ref:`borg_info`, :ref:`borg_create` and :ref:`borg_list` implement a ``--json`` option which turns their regular output into a single JSON object. Dates are formatted according to ISO 8601 in local time. No explicit time zone is specified *at this time* (subject to change). 
The equivalent strftime format string is '%Y-%m-%dT%H:%M:%S.%f', e.g. ``2017-08-07T12:27:20.123456``. The root object at least contains a *repository* key with an object containing: id The ID of the repository, normally 64 hex characters location Canonicalized repository path, thus this may be different from what is specified on the command line last_modified Date when the repository was last modified by the Borg client The *encryption* key, if present, contains: mode Textual encryption mode name (same as :ref:`borg_init` ``--encryption`` names) keyfile Path to the local key file used for access. Depending on *mode* this key may be absent. The *cache* key, if present, contains: path Path to the local repository cache stats Object containing cache stats: total_chunks Number of chunks total_unique_chunks Number of unique chunks total_size Total uncompressed size of all chunks multiplied with their reference counts total_csize Total compressed and encrypted size of all chunks multiplied with their reference counts unique_size Uncompressed size of all chunks unique_csize Compressed and encrypted size of all chunks .. highlight: json Example *borg info* output:: { "cache": { "path": "/home/user/.cache/borg/0cbe6166b46627fd26b97f8831e2ca97584280a46714ef84d2b668daf8271a23", "stats": { "total_chunks": 511533, "total_csize": 17948017540, "total_size": 22635749792, "total_unique_chunks": 54892, "unique_csize": 1920405405, "unique_size": 2449675468 } }, "encryption": { "mode": "repokey" }, "repository": { "id": "0cbe6166b46627fd26b97f8831e2ca97584280a46714ef84d2b668daf8271a23", "last_modified": "2017-08-07T12:27:20.789123", "location": "/home/user/testrepo" }, "security_dir": "/home/user/.config/borg/security/0cbe6166b46627fd26b97f8831e2ca97584280a46714ef84d2b668daf8271a23", "archives": [] } Archive formats +++++++++++++++ :ref:`borg_info` uses an extended format for archives, which is more expensive to retrieve, while :ref:`borg_list` uses a simpler format that is faster to retrieve. Either return archives in an array under the *archives* key, while :ref:`borg_create` returns a single archive object under the *archive* key. Both formats contain a *name* key with the archive name, the *id* key with the hexadecimal archive ID, and the *start* key with the start timestamp. *borg info* and *borg create* further have: end End timestamp duration Duration in seconds between start and end in seconds (float) stats Archive statistics (freshly calculated, this is what makes "info" more expensive) original_size Size of files and metadata before compression compressed_size Size after compression deduplicated_size Deduplicated size (against the current repository, not when the archive was created) nfiles Number of regular files in the archive limits Object describing the utilization of Borg limits max_archive_size Float between 0 and 1 describing how large this archive is relative to the maximum size allowed by Borg command_line Array of strings of the command line that created the archive The note about paths from above applies here as well. chunker_params The chunker parameters the archive has been created with. :ref:`borg_info` further has: hostname Hostname of the creating host username Name of the creating user comment Archive comment, if any Some keys/values are more expensive to compute than others (e.g. because it requires opening the archive, not just the manifest). To optimize for speed, `borg list repo` does not determine these values except when they are requested. 
The `--format` option is used for that (for normal mode as well as for `--json` mode), so, to have the comment included in the json output, you will need: :: borg list repo --format "{name}{comment}" --json` Example of a simple archive listing (``borg list --last 1 --json``):: { "archives": [ { "id": "80cd07219ad725b3c5f665c1dcf119435c4dee1647a560ecac30f8d40221a46a", "name": "host-system-backup-2017-02-27", "start": "2017-08-07T12:27:20.789123" } ], "encryption": { "mode": "repokey" }, "repository": { "id": "0cbe6166b46627fd26b97f8831e2ca97584280a46714ef84d2b668daf8271a23", "last_modified": "2017-08-07T12:27:20.789123", "location": "/home/user/repository" } } The same archive with more information (``borg info --last 1 --json``):: { "archives": [ { "chunker_params": [ 13, 23, 16, 4095 ], "command_line": [ "/home/user/.local/bin/borg", "create", "/home/user/repository", "..." ], "comment": "", "duration": 5.641542, "end": "2017-02-27T12:27:20.789123", "hostname": "host", "id": "80cd07219ad725b3c5f665c1dcf119435c4dee1647a560ecac30f8d40221a46a", "limits": { "max_archive_size": 0.0001330855110409714 }, "name": "host-system-backup-2017-02-27", "start": "2017-02-27T12:27:20.789123", "stats": { "compressed_size": 1880961894, "deduplicated_size": 2791, "nfiles": 53669, "original_size": 2400471280 }, "username": "user" } ], "cache": { "path": "/home/user/.cache/borg/0cbe6166b46627fd26b97f8831e2ca97584280a46714ef84d2b668daf8271a23", "stats": { "total_chunks": 511533, "total_csize": 17948017540, "total_size": 22635749792, "total_unique_chunks": 54892, "unique_csize": 1920405405, "unique_size": 2449675468 } }, "encryption": { "mode": "repokey" }, "repository": { "id": "0cbe6166b46627fd26b97f8831e2ca97584280a46714ef84d2b668daf8271a23", "last_modified": "2017-08-07T12:27:20.789123", "location": "/home/user/repository" } } File listings +++++++++++++ Listing the contents of an archive can produce *a lot* of JSON. Since many JSON implementations don't support a streaming mode of operation, which is pretty much required to deal with this amount of JSON, output is generated in the `JSON lines `_ format, which is simply a number of JSON objects separated by new lines. Each item (file, directory, ...) is described by one object in the :ref:`borg_list` output. Refer to the *borg list* documentation for the available keys and their meaning. Example (excerpt) of ``borg list --json-lines``:: {"type": "d", "mode": "drwxr-xr-x", "user": "user", "group": "user", "uid": 1000, "gid": 1000, "path": "linux", "healthy": true, "source": "", "linktarget": "", "flags": null, "mtime": "2017-02-27T12:27:20.023407", "size": 0} {"type": "d", "mode": "drwxr-xr-x", "user": "user", "group": "user", "uid": 1000, "gid": 1000, "path": "linux/baz", "healthy": true, "source": "", "linktarget": "", "flags": null, "mtime": "2017-02-27T12:27:20.585407", "size": 0} .. _msgid: Message IDs ----------- Message IDs are strings that essentially give a log message or operation a name, without actually using the full text, since texts change more frequently. Message IDs are unambiguous and reduce the need to parse log messages. Assigned message IDs are: .. See scripts/errorlist.py; this is slightly edited. Errors Archive.AlreadyExists Archive {} already exists Archive.DoesNotExist Archive {} does not exist Archive.IncompatibleFilesystemEncodingError Failed to encode filename "{}" into file system encoding "{}". Consider configuring the LANG environment variable. 
Cache.CacheInitAbortedError Cache initialization aborted Cache.EncryptionMethodMismatch Repository encryption method changed since last access, refusing to continue Cache.RepositoryAccessAborted Repository access aborted Cache.RepositoryIDNotUnique Cache is newer than repository - do you have multiple, independently updated repos with same ID? Cache.RepositoryReplay Cache is newer than repository - this is either an attack or unsafe (multiple repos with same ID) Buffer.MemoryLimitExceeded Requested buffer size {} is above the limit of {}. ExtensionModuleError The Borg binary extension modules do not seem to be properly installed IntegrityError Data integrity error: {} NoManifestError Repository has no manifest. PlaceholderError Formatting Error: "{}".format({}): {}({}) KeyfileInvalidError Invalid key file for repository {} found in {}. KeyfileMismatchError Mismatch between repository {} and key file {}. KeyfileNotFoundError No key file for repository {} found in {}. PassphraseWrong passphrase supplied in BORG_PASSPHRASE is incorrect PasswordRetriesExceeded exceeded the maximum password retries RepoKeyNotFoundError No key entry found in the config of repository {}. UnsupportedManifestError Unsupported manifest envelope. A newer version is required to access this repository. UnsupportedPayloadError Unsupported payload type {}. A newer version is required to access this repository. NotABorgKeyFile This file is not a borg key backup, aborting. RepoIdMismatch This key backup seems to be for a different backup repository, aborting. UnencryptedRepo Keymanagement not available for unencrypted repositories. UnknownKeyType Keytype {0} is unknown. LockError Failed to acquire the lock {}. LockErrorT Failed to acquire the lock {}. ConnectionClosed Connection closed by remote host InvalidRPCMethod RPC method {} is not valid PathNotAllowed Repository path not allowed RemoteRepository.RPCServerOutdated Borg server is too old for {}. Required version {} UnexpectedRPCDataFormatFromClient Borg {}: Got unexpected RPC data format from client. UnexpectedRPCDataFormatFromServer Got unexpected RPC data format from server: {} Repository.AlreadyExists Repository {} already exists. Repository.CheckNeeded Inconsistency detected. Please run "borg check {}". Repository.DoesNotExist Repository {} does not exist. Repository.InsufficientFreeSpaceError Insufficient free space to complete transaction (required: {}, available: {}). Repository.InvalidRepository {} is not a valid repository. Check repo config. Repository.AtticRepository Attic repository detected. Please run "borg upgrade {}". Repository.ObjectNotFound Object with key {} not found in repository {}. Operations - cache.begin_transaction - cache.download_chunks, appears with ``borg create --no-cache-sync`` - cache.commit - cache.sync *info* is one string element, the name of the archive currently synced. - repository.compact_segments - repository.replay_segments - repository.check - check.verify_data - check.rebuild_manifest - extract *info* is one string element, the name of the path currently extracted. - extract.permissions - archive.delete - archive.calc_stats - prune - upgrade.convert_segments Prompts BORG_UNKNOWN_UNENCRYPTED_REPO_ACCESS_IS_OK For "Warning: Attempting to access a previously unknown unencrypted repository" BORG_RELOCATED_REPO_ACCESS_IS_OK For "Warning: The repository at location ... was previously located at ..." BORG_CHECK_I_KNOW_WHAT_I_AM_DOING For "This is a potentially dangerous function..." 
(check --repair) BORG_DELETE_I_KNOW_WHAT_I_AM_DOING For "You requested to completely DELETE the repository *including* all archives it contains:" borgbackup-1.1.15/docs/internals/object-graph.png0000644000175000017500000116124113771325506021715 0ustar useruser00000000000000
0 0 0@c@(W(```` 庱 0r)j 0 0 0{m 0 0 0 0 c@(7^z c@(^z 0 0 0 k````H2 놲 0]) 0 0 0^nqk`0 0 0 0 0|za\ 0Przi\% 0 0 0PYP````` ɀP*0 ,o@(wr 0 0 0{HپY 0 0 0 0@>rq 0@9rq 0 0 0BfAa````$Bn({ 0kʩ2 0 0 e f^g 0 0 0 0ͧƅ^0-ƥ^2 0 0 ```` 庡 0r)j 0 0 0{m 0 0 0 0 c@(7^z c@(^z 0 0 0 k````H2 놲 0]) 0 0 0^nqk`0 0 0 0 0|za\ 0Przi\% 0 0 0PYP````` ɀP*0 ,o@(wr 0 0 0{HپY 0 0 0 0@>rq 0@9rq 0 0 0BfAa````$Bn({ 0kʩ2 0 0 e f^g 0 0 0 0ͧƅ^0-ƥ^2 0 0 ```` 庡 0r)j 0 0 0{m 0 0 0 0 c@(7^z c@(^z 0 0 0 k````H2 놲 0]) 0 0 0^nqk`0 0 0 0 0|za\ 0Przi\% 0 0 0PYP````` ɀP*0 ,o@(wr 0 0 0{HپY 0 0 0 0@>rq 0@9rq 0 0 0BfAa````$Bn({ 0kʩ2 0 0 e f^g 0 0 0 0ͧƅ^0-ƥ^2 0 0 ```` 庡 0r)j 0 0 0{m 0 0 0 0 c@(7^z c@(^z 0 0 0 k````H2 놲 0]) 0 0 0^nqk`0 0 0 0 0|za\ 0Przi\% 0 0 0PYP````` ɀP*0 ,o@(wr 0 0 0{HپY 0 0 0 0@>rq 0@9rq 0 0 0BfAa````$Bn({ 0kʩ2 0 0 e f^g 0 0 0 0ͧƅ^0-ƥ^2 0 0 ```` 庡 0r)j 0 0 0{m 0 0 0 0 c@(7^z c@(^z 0 0 0 k````H2 놲 0]) 0 0 0^nqk`0 0 0 0 0|za\ 0Przi\% 0 0 0PYP````` ɀP*0 ,o@(wr 0 0 0{HپY 0 0 0 0@>rq 0@9rq 0 0 0BfAa````$Bn({ 0kʩ2 0 0 e f^g 0 0 0 0ͧƅ^0-ƥ^2 0 0 ```` 庡 0r)j 0 0 0{m 0 0 0 0 c@(7^z c@(^z/_Ss޾}ǵ-ka] 0 0@Nr͂ 0 0 0 0IrmJ1 r)v_~] _s]g&]g:vlwPkf``N3 0 0 0 0@mnں9 0@]rKxaoV(ufuc2[`` 'N3 0 0 0 0@\7sm[xdR \ewfʽutرݱC՚`m 8 r" 0 0 0 0Iri:3uʭƷ~/aׯ_ L\bLXc+ 0 dIa````H2 vN7m 0P\Kl?s(ۧ8Vk`` /I75-~0 0 0 0 a@(=Y'w 0P\K7n75;WM``5  0 0 0 0 $uz hg6 ;\#Ԉ5& ``| $l|7zG}5jK \,``πPn}=7X߀P5XK3 ti 0 0P\AC[o%/zDm? 0 0 ׀Pq{g 0|{cMrզb`=z4Çӯ9=߃ߣ9c{``r6 g>m 0pTBծϮP~^`` %x)?gn~S~lHW3իWyt߿?؟xߏ?xz)3f^#=b```Bԙguf2 [Wo^€P.GK8 ``u d(i3rkjnW0"O !5{ܾkDv|Q0f{㶾; 0 0@r.ٷ}2 ^r;N``  2ճoFaXΣGÇG |u-]jjr6c%f٩kd_ }a`ՀP.ڴ]l2 {d}Bԝwug`H1 +@R1ch.fm~"P64ׯ{CG |JyQ{V( И8kW a$``r=_/ j@(\mڮ|m Fo``2@n/^P-7B}G |v&En>^[+ꤐ``R rk8aW/ԋ5\8``׀@ffL%Eo",aJ1+n; ){lgB2NRCR8׌'[`` r_{= m@(Y~yƄrq 0 0@9rr@y);ff=jsn}hrzӃz^s5Ga[9IKd`рP.9M\2 2|tö{Bלs5g`H5 I 3a^Ws,]Vbܘ>kGQͥYLMkAz 0 0rYIu0rkŕZ1\cX``׀PP.8|~bvQK!G9rߗhلmDKo``4BLf090Xr/K`` df4t]%7º[_Oۜ"PF]h:ʍtmc1wNvPnrej(Re>ttqzg`2 ˅c 0r)jZ\K7ng`#e`G?cg(wK/]؏):6Ԙ{(ѣG52%w 7%۷๙݉+ 0 0 1 ? 0m@(06k@(f```;;2Az~Agٳgo> Nglp/lor˔Pn__!qrPnfֻ1Q՝``5& ` ._SNմtBn1 0G6 Shlr䞳qr#(}}1!#rPn8n)RݱqcZ% 0 0ͷ7ƍ05 {wz\g1 0rr@W4n^5B>$o[_3uG n]1\G ~s\}Kk,S/``f@(wZxS7`Krp``\fƚzf`` d||;9׺+t68cƆ={lLi7jpPuI ~w3[4Snm4^[``g6 kl 5 X3^  0 0\\v4pPnlgj3 yoɜJKJ(7@nC(ww'Rj 0 0kek-[ l@(irՍ7uc`€@掁-lyHʝ3; nݫ.CGe9˫Pn# 0 0)r9Iq50 厫_Pq`0 0 k@(W(  b㏿. KΩ˥P/ FK>x no>1 0 0zb  0p|Bq[enks 0 0n@ 3!T:(W+O_(7@n)@ccz 0 01 {^Wz ǀPqze\U.rYŢ``\\v4:[߿xA,SiΌB>KS{4Sno+qi 0 0 L3 ;n 0%B|8F00րP.3cx=3 0 0v2w >Bk_w R aVS} +[Bo1\}%rs},G 0 0 00ƀP./cx-/ 0@ܴ:N i@(x0``5 +Bpٳg(rʍ=IB}c3::Jo``B>r[enksw|sBq 0 0@2w dX>x5Gr]3 G4Y^NaKwu3zwPnja9 0 0 0pTBծfr6 g>m[>r/ 0 0a@(W( lg2mׇܜTm5{~%l{Ϻ82 0 0כNx```9BՒKŀP.X3 0 d<"ۼ_ ExÇ< |5еnhk冫CgϞ*qPnT cǝ/{VOd``OBƃ 0kʩn@(ҍ?``rr@_x25H(C-lKoo_%{Pnߘto$``f@(٣2 rz1/By``s;2 H-w)1۵L1v̻9%u[.:NB 3ǶOosg`` \g3 b@(\,ڎX=N+b`3 +bҾM-751'ʍu0=x`i>f}Jc^Wɛ9 0 0rӟueTk=BՖ[e`k@ s@zd߿ip ҡܨ1nW95{]fr ?}A9OAܾqh2Sz3 0 0wBƅq 0kʩn@(ҍ?``rr@7[gϞ} hr i/շPnzjRy_Ρܾ@n˘zzD``(рP.%O\3r۠ϠPzf 0 0P \=R|,``̀P.󵙷믧9ż^_ͻ?Əh@(]&.``? 
dfA) zj0Dۘ3B]n޿j BG)7Rpٳg5 \vգPn31fǫ5C<14.S{; 0 0etoK&TޓӏPcP(}2)'B<Ѷ 0 ՀPP.C6onbo{#w0i^ڧƍ_ 6~^V2g[.Sf!V(7\ u 0 0%a=CMgmͷ7ƍj@(\m.6``3 fH5(80GpoF1kcC7>׵ l0:6IJbr} 1Z0Pnl_ kr2 0 0 2{4ٸ_D/G6Ҕ}Y>i~xrÝu۝Pg 0 0PTT25Ç3 ^zm CA47r@4ZKr>I?̵g S;w>}zˤ`ce2 0 a@(W^/}5|]ZflSڦ_zs]__:nCegvy}Ǭ,j ör7 0 00ƀ@f1Mڲ}7̜P?s 󸹟Z؏xXs>>r+𬯖C)O=t؆1˛;kr3K:˘m[n} ߼yS8g4``3 [}k0e\_oyM=εg?/5w7)~o)k6x}ǧ{,oɾ:rש+ 0 0n/Sapj nZ?)ׄ Æ>=W/n7Ǜ|m20ehȐ3 0 0}ur#l/^ Mq9LͬAqu1+$-T#4_/}ʍ0U-Γ ΡL(1$1|p丏Y|fKczǾ{i.oSkuB5CF 6VA,7!ܧO.2B7n8]]]^|yͷƚ0m 5+k C 0 0 a@(|w}B<\bڳrʍk(i,eyO[B,Ϻ鯆P9Fj|(^7'>whKеϝKm=g 1V}u_~Η}]wϟ?zϏOSիW],/ګ}'rՍ7u@mܸ::Myܼ&>Yx[J](cx_`1FAZn;<密|}5۔e|S½Krc7־fϱʭ2׉sA5al |댷cN^{SZcY3 ,i`j(W8%ZO 0 0}r˷q)۞b]kYr(;,O]i*B,foZ(5=l3^Z`cǖ5kuonݕH ~ƶ?zhr$c)|tLݧڅr8O1=1s|^ϔ@i>N߲66hNoߞ|}` YέhW``xr׳R(7\#Hе&+ȘC`yzrP. }9JvDps]ϕk͚nB k|cvJe`h)}v:`l3>x3F`R(wJ }=~(h _ɱ{u0 cڿoߞnܸx w˗/¹ di`PpnS?{``TB[ v`;evK"G?p"xvْR\xXlSn=B{|Ka cy>y}λـPp(75}3xpĔ`Ǭm MX~1ׄ)םX w]w:#;}c~<߷د#.sMu}m}2b} kr'XG= ,x 0 0Pr{یۡPn+5%XvXv1kL}1Ik:RݼmϯǬs;6u5Sd6>}t}j3<~xsrz{X++?``Bo\r}]ԙȆ]HKr/͊G ծ׵q޾&Zڶ/, /SƎPPg]OΣ2Ğ}ory/"+8@V ޺c`o@(=)ܮׄԇ!%17r HѨKx8zL(=6>5ea:c=Sf! m BzsYH5 N(7%gOyw,q=e{bϔmz3|j-x~. et]Q׭CB4=)uwvt)t8 w/_uL)ckƍھ^Y@.5ܸ7$R'QzzU(W8Sz 0 0=rf S>Hnr}Lسژh8 rlGW+PPh:RҺS:-94.^(9~N?Br#ܞ{{>N$7ԐR|(u.>HǏXW^N z/s^w^԰c#qޟu>7M?j8Tx]1,w̶vw9ϡ}BÎ}lqM2RNf) :ű>OD?ؾcՔcǥϕfS?v.^o;8=uw\3Dfan8ϹY\@y gqAO``4 [PnA,u>]z='7kl{+Ju}ȱaخ}6¢6x)9Pn`ju."~מ+ƃ.M"Бrl[w,ylKAc׺)ծq/^sPe|Wr v==7>s,cK,7/c5L C}"^J1]}&0<6Cs y_[ߘ/sҔOuU(7sJ 8ek Png \]l/c8t~is}c/  ==a.u\Yc-uιVdn֓zBsNCy})^\\㨤qd_xf`3 ^msqm?8jl}p|]Ѯ-ʍens3T=1.jσ}3GCj>[,k%}#US =]M5Rc,αW蚽?[3/5?7~f E]RsޡsEf7mFs.ˮeߌ)ǻvk#T؞"n)85]_Kc3%;fK.C?c|;S\Ck8U@}eYsŃO{]{bmZP>_n_Q-Rmz ̭ gA3.1e /߾j @.\\d``ˀPn.Ƅr锇*]T d8јj/rjfv7R;ـPQw5ӧqB®c! ۶Ln\mo7 0 0@r7:& 2^]k cߟL}!A7O$Pn׶ =Ϯ]~HT'^_(wo˗/;ys'C].q,I Nktm|9h:M 嶯9f'߇ޥP]gHct-̜3"NOC7>%L \G)rߢ}ڞ:uQ).lKv}}~]˻n*^ \8@mo7G@\e>?# 0 0 oxL(7< 2f雕*^CaC1R!C3FS .c{ej_}S=oN_.~Ǜ<9_oj\ݮ:ޏ?ݻgjk]`CSw{~^ vٿ9ӥl_H9^2Eܴ|*N9'zO5ok_Enfi~ ]ס)x}JrL@x;p ܔ@nԮ:5mh|Yhc1y)|=>/'u6/LC޳W,H^s/E(r?j{yfoc;>}ɍ?l_1z 0 0Pݏ BS@u= l=dfq`ܮ?Q;aqK_۳5cyBnׯ9E(7&'i駟:CKzBCr2 yokyG)wl68vۮ٩c^,)u,f_s`Pn̢u5kz_kSgwWƨPn13gy,B3M}NDmn15Qlno`n:,xlׯcC1a]WS(ww=nb2vk/l/}2gxQek S3mh!l~Ԁ6:.cb6ܿUo6;a`(ՀPnr')j+84JA99-5tMNcC>a:d.ZrWGۯC}a¸fPns^_|9uWeP }h썗fszhq>&un5V(ckyY{ӸiflϨ{ȧuc޵}VZ>:,};F}싱6狟C '_yc”]vor}W|celלhoW8oc</7kz )wW] 5nݺ mlG6ǣ%ok-K8;82 0 NQM t7N$b׽?yvob_Tӷ?{~=&naj 3vCYc}_dϟ;Y2#[15ؿv(7o}-8{? [wc^u:)wrSf9] qnE[)#T;';?gulfN=So:gl8>Ocݩ: @3[BMߖ<C4ڎb S;vDSPOߏ ]Oݖ׍N=g;aʗ>G A_o/<ω425)I%ݻwYܹ hߌe3{(Ww^7j 0 0pDB廝 ]Lķ47fc*5!q~8c_;d2]z~;7塱÷,?:k{N&EuV8=y_s+W{} ]?>g ;~Ύ r/֌)^/מu { MS7gJիW;zm~0YH"ngl˫cg}-б}ھE7~vi 4kP"5,X7Bf@T&7;xZj P01W꘳_l3 0 厫}M ͸7PM SC1K)X֖>cC)\O ُv hslڳTquW]kkp7)>\˥~^r  {e`ӀPn}YrL v=4/}֓0/ֳD-SE>M ؇t+3 ,ayoYr=jTBr~hf1rSÔCO_!qLlJ=מ.c;.vc; Sv؞յSnQڐ5(@nvJ2Y{N;KbOmxɔg`hR3g?XwM=˭c x )wwͦ7/f? ^ 0pn P0.M 0 0 t-ŜPnlQ]ȤIAKAvE3WϜ`vˮ奆Oؖ9zg ,y6 uCخ7.G}8uC1C3vR8/ozr~9&:%X {~-;A^ǵM;5)✷/p^3'%ij݋&H?y~W9ϱZ6}m̳:d@(wPn;cS™:s^۵1!)־u{_/hQ{6ڡ^^׫}Ek(LbyM; :ZmzR_;5TC 7qj2B}o^7r!ڔڵ{'m~!P]kS}s3 k, <|pP۷on6xm4W(WLO 0 0M 冷a!nPJyxd ƿ}(8'D:]czN(vmf1ǺYSfO/MF91[(82*BrB|糱.KnEj9&l655d;l:ۜk͠W)v^Cv/%9^Y׶=\'ȍlE;2LjA@eJOSR=v{{S7u~]_ ۟%}͹w,{u5-,y90.ӵC¸Ɲq 0 0B{Y,4Ǧrfujz>VY缷k .-sO2~Pzl -ZN6;1[(82*BˇrB|cSS|Fp)ϟ?eVϡmn_oD@.4mySž[ro)Ƽ&PnP(2@Pn=zumfꌻS)c^zm;k9ka5c9u?rt?u?uk|Y:ʀPr1?l:[O޽}9 c`P0n>=7``Hr:7'6zgt@+S] b?Rf(z:]3'K(k䩽8ᐡmjyh~cPzfr E).RHqxMO91)cxkr̟;:F4e0qы1w.)?CV5SnSد1ק}_Xn=&c]vդ}k}?_F#mMfC_nh9bpw\fe˗/of`'O:?|`]  0 0rwD(aOz: kƜ1Nyl%ePn13-q.[)Ǘ29u/cPn>0.(rCgn=KCa8m 2Fة E+9&(׷}XgxM:NsB],z΃Bc;ˈ ہ{]N k;gWH=֛}ׂQ؎'{j]ǀ[b.Kr_z՛Ihs)5 ?K}io9 ߞ! 
rl OW]kr3~/cx}3c|,]ƺ Cw~~TY2ZD۽ӧO;w]_'X:+lxWO`` ?8I}6eAƮXKr~ؐA{`94N.)(=alö{mͷz`@(7-j^74cvRC]CLُlCן g F@k{W1V\޶f)_kz׏Ͷ.u.u;d)׍c֕NGKr/]/ rf!NQJ/ڵI?з S/mhFǎ1}[}]K{lo/+ =4+p__XJw^|?Bf@̘.̽~˗;5`~ͷRq'뗪R\a\|) 0 0=,M}r>M =M}.#R.1;yMTĶY~\a9%y}_zo(T=t ǡ}rAX)nS^\='Y2s*mwƍ7oj@D8ݻws~g _q1{~RSl㟟)BK!qnxv K[*p7q^5͟ zƾ9pĿEmR[Xvlo{ͺPϚj06zM].Sﱟ]M{W?!s6jy_::7e;b)ǀ [:_> z>v|\ սc~֮ׄ1}4%v޽ӛ7o~w>j ]:!l?XMKr6¸Ƹ1 0 0rwT(7<5aP65͟ڏ9a9sBKl˜wϥ{b]M!5(y8<٧8=_z7ft 5y:P+DL-V{=5rYϏce1{ofl.zm2۷oO?).4kwLX>}F<ƝcMǚǡzضiBƛl@(׸f4kt}n_^-GW  2P?Ǘy]*z@.[IB׃K3txSꤎP0nFSx 0 0[ʭZ\7m)]2){լ{K~Y6mʶ_ˊ}e}O p-wήz\%%L]jϢMϻ[14Dlӹźc:-}/*^_(w%Du_T+6ף,ΜkTλ?]2 d}׌cC]Gi}3kăxp<slc+~iv(Wױ|mg```MB|˲bZ _}=ݾPqvG3`MffIc>BB l/f&60 w̍k׾5ڵ?sdM'YoE41;t,s,aWP0n}'e5?7 0 0p\Bqw 0@rq7e3WkY,l֔Q@;[3^o^| d4h{/'? DlNmx4?G3Byo5Kn+>|xzr``ҀPн Y~10 {>O\sh[~|gDmm;9+z_DO45wز 6u83|k*;eѣG'1%wk?7͏ 0 0@rٿ}2 Z,\[Bָ۶p*fٌg|1 x,G@1zflrͺ@6`E>`Y?rǟYrqzko  0 07 {z g@(7'z\Fs7je` dȬ}d(oxS}>LrŋB<Y|``ס 0\F=#mBL8.0 0 k(8wG H(wlW.͔7Kr 1 0 0p|Bq 0@~rq'e4wQ`ـ@NAKrcFFhqƌ?K-{_;{ާmL O(wcP/^/``883` B8ң 2I$ 0 0\\ 6t(7?̝ʍeLXvŋCb!xm߾s,ܷ?S%j?崃ұ:/*5흺x_]د^zչPg:_7S!9u^' 0 0 00ǀP.?sx/? 0@\c``\fƚzf`` d4HzPng_tl "؄;f߽oMt:}5OY^{W>E35AnL9/]Sk?T:v)/,lS7{f|Bs)μ``r5 f6m 0pdBٯmǯP>u]``R  2PBcCѡzƺf(7f\}BC:}j(7sAؔ5]}:wM}XGك#p<(y5:c`` G# b@(R,ۏ, nWk՚```̂c1x}yC\ ̩c£CM ^ {:RBrb{ifۗ]J(7j227?CzUr0R(7%ۄ;驞2 0 0}Ͽ3e-ƫi@(5}Y6_ 0 0<BB l`PnhR4<:ilw~):&L}afա_c<{) Rg rbbYg/ohFKXZqi^3Cԏ``2 WU_`} [r=[- 0 b@ @f-g{Pn_ 2C}ͤ k~7M /ڧx&C5±mX":(x|)7Ks{ިag~~``g@(w\R/`B8N  0 0\\ 6d(7¤]@mL}վx}_7fY;F#©sfnF(wͶsfcfH{7z 0 0@^r 0@r[mKo 0 00΀@fLa`(^Kr|3vPd:g[ ""rcqPnW`xjkSקS[u e``n@(׸0.`7 |M9U 2^q8 0 ـPP.X*q).>=?ΙwPhW5j5CeP;ԯ^]>LݾKܔٙk1 0 0rkŕZ1 e%Պױe`` _2 |Vʍ\O_x:c}F(wJ 76z[~׬c ƿOS؛N9v2 0 0r``yBהS5-݀P.3 0  2Pj/w}?i ÷ jWxvNm((F(wjxćҡk1K]{d```@(#82 2{4wB8``>2 |/]nt dz OY l:rcLSC} ln```irՍ7ucd@(efg`΀PP.X#ߏRv=c}onسP](7}[^ 0 0 0\pg1@r/ݸ[޸P5TM``2 9"\T-VzjT oCG(t-jٞx(W߾~tѣUө``g@(w\R/`B8N  0 0\\ 60'ۄ0cVׯ_ 6CSsg-yܮzvjɉG'z7 0 0 ۀPg 0|8u,w] Φz ƿ N yy"JkL<1 0 04 {̾o m@(7?\.stid`0 +@A+;M=@O {vZZޘiccg o_z}^s 0 0 \6 k# 0]ir/ݸc`l@ @ae9rf ~oax084ܰgɡܵCcz>OcoQ``ÀPn}6րP[K0 q  0 0P\\ 6C(ѣG³Csؘ{=K>x`wzf{1 0 0r``yBהS5-݀P.3 0 d<2L۾́5P;C1>2±(ZPe[1p~i̺v:# 0 0uʭƳ>3ݶ|w r9.}`J5 +b&ט5BAd^z5㽩B]AϘE7u__׷ׯ_.on3rcjRwc%Sx=sXA```BƂ 0kʩn@(ҍ?``27dm_`v2c)3r ;uߍܚDR#Pn_DŽv8*OSk"ԛ:P2Ǜ>g``>Bl8>0 ,o@(wretq`8\\667 iׁ/ȔO.֮0r /CaϘ1o1 {ʍvmӥg}kϗۧ:'q 0 0 08Bŗz1 $ʼnprn@(``| dn4 [Pn3£@/~G}Mmwn3PyM7?4cmi_ћXN'-xwcqn 0 0e6y€P.g[8 OS?``  2ZCMr(`{L1 aC_>jl06jtbb{z6OCNfS?``΀PvZ`z kZ2 R,%```y2!^jD(7f(M`cL)˟AulXFJsÞ9rccf15M5Z_ͶŲRS:``o@(w|8S3`!B {Frp\``׀PP. ulF+\0exϜ5Sӱ1nE7m>iײ=a```}Bטc5f3 [_ύs=k@(!```=2a^jÇ j_@2B]1ٳS~#?sc֛f"$ۆISS7zYK//Ŭ}Z.Qǔ~5!؎ؿئnN{>>3 0 0f@(7N< 0B⵼\ ``5 +"XF !s :{a``XڀP.SK<`5\8``\fƚzf`` dn{;9ֺkܘQoѮYr#٦m3 0 0ʭƵ^3ݮ\u)rY.Ų``J4 +Ff}Yoȶ=Knx߿/QJ<''2 0 0 0s 24א3 ݀Pqa\00րP.3cx=3 0 0v2@jW< v͒? n+B}Z3 0 02 {~_ ÀP1d==>~2 0 0@/7޿ nDիW/B={nj~}ܿz 0 0 0 a@(X0`7 |M9U r3NAY``Zxb```` 2)c8]5= ql_^o,0 0 `@(W"```` )^Mq1 f@(LڞM޻w/E?ut阓1G`q9f````$BnY^ 0)r9Iq5xqցܸnr]g&]g:91 0@i9f````$Bnv090X׳ ƶ{N(ufuϱ?_ 0 pD```` puSZ`efg& |ݻwrrub`j6tw````0 k, 0]) 0 0 0^r͂ 0 0 0 0IrF%do``@ڊb````\¸`XހP5TM```2 k````H2 F^7=(ـP.%o|3 0 0@mnV@ 0 0 0 0rp`€P.g[8```m ```` )wn3 0Pܺm|7 0 0 0P@```` ŀP.')N``qK```r6 k````H2 fw7m 0pTBծf`` g0 0 0 0 0P. 0]) 0 0 0^r͂ 0 0 0 0IrF%do``@ڊb````\¸`XހP5TM```2 k````H2 F^7=(ـP.%o|3 0 0@mnV@ 0 0 0 0rp`€P.g[8```m ```` )wn3 0Pܺm|7 0 0 0P@```` ŀP.')N``qK```r6 k````H2 fw7m 0pTBծf`` g0 0 0 0 0P. 0]) 0 0 0^r͂ 0 0 0 0IrF%do``@ڊb````\¸`XހP5TM```2 k````H2 F^7=(ـP.%o|3 0 0@mnV@ 0 0 0 0rp`€P.g[8```m ```` )wn3 0Pܺm|7 0 0 0P@```` ŀP.')N``qK```r6 k````H2 fw7m 0pTBծf`` g0 0 0 0 0P. 
0]) 0 0 0^r͂ 0 0 0 0IrF%do``@ڊb````ŻwN_Ǐ_~ָOo߾~MO 1>}Bz+s)= 0 0 j@(6_ 0 0 0 0@>rq͛sΩgoݺuoO?I=pӧO/_?}=F_#P?ݻwu7n޽{zI}:~ wS?O```r  2 0 0 0 0@(۷{}=|P/c/roF5}uueLPK~S߃cr/K```.HD 0 0 0 0 0 {<1YqSC1Kg8o~5^k3Ck,G_kqS(8J' 0 0 0PL o`=0_```` GBr!%}ˈo&}/_"$doce}w|Ǭ7o\ׯ_;r}_jN```R fLJ#ƀg```XۀPqK6ϗu-Ν+?}t޸qCsϫ5]}>Ϋ 0 0 0ܝn8(1 0 0 0 =krF0ӧO~_駟Vk8߾}5K|5#r}Bԝwug``XÀP7he:00 0 0 0 le@(7k[7w9}Eo{̯_$ǏuFz@n5;}ݠS>r|WW```NBހ1  0 0 0 0ܼ}Fn_cɓ'B~+c7V%|vջw}c}מ|ÅrMe``` OBX?O/ 0 0 0 08B굵~웿>|ip/_ܧO}zuj_O_```: fx`s0껾3 0 0 0@r5]aB~+gYr|󍾮/_כ7obԶ}B~f 0 0 7 M58TC5d```2 W?GcgGh4£>?~[_/+o1 0 0]U_e```1 O݇ϻ…6޽{{_o޼)3cˏ١c Տ9^d``]AeP1 0 0 0 j@(7O>=6|"}xco޼C>K}]s?r|WW```NBt1 0 0 0 0pByZYjso}Vo}}QQGǏg^lPn1 0 0]&)TOd```2 W?q֭,B~›w^}{Օf|H(w1x 0 0 0{````fc ײ>0v\```rr````` @a2 \~3zzz 0 0 0@nCR7````5pƍ,dw^}}䉐_ 3rz' 0 0 0  0 0 0 0 ;wo~ZoAG7},ǏeǸz' 0 0 0n0 0 0 0 06?! ׮]͛7`__|{O? W\````1f. 0 0 0 0 0 O }v޸qC_o{_#;]z 0 0 0av7]`````)fsVǏ `֭[J_ 3 0 0 0zl_U[e````ތ?e]K}]ӧOwW_Wc@5US```#u````8ׯ_޽k~+sd˗/n>E;|m^Ϧڪ- 0 0 0 پR8TC5d````@;{ƍ}p۷oVϼlf]]kׯ_]qZw 2 0 0 j@( ````` ¸?ӧO_gݵk6`a7Kٮk;wucB̖{M~osWq) !7 0 0 0  0 0 0 0 0Ka?f!EMq7_~,p}֭S6lLog/Ջ```R ٞ&z 0 0 0 0@RøsמUUps{o[-竏kz)n!e```.BB 0 0 0 0 0@fqoi=Dxj7nD^uU8wާk```cp=б~ 0 0 0 0qϷɓ'ܹsޒlY˗/۷ug1m ߼y }cO8wܘt S/```87 6Ҁd````%ø޼ysܠOe5o߾=ݽ{wv_ÇO>} D޽{?pnqW```8 n44_ 0 0 0 0KX+{}_|9ş΍;#$87ܷT,c/fWo&sf=X}P0'͎{il P_```ӀPP. 0 0 0 0 0-¸]7c?>ݹs7w֭F1 0 0 077_````4Woc{u/;6_C!X]s|}v .;Vԓ``` r3,k`~2 0 0 0e-WYyuu53wޣ¹\:c``݀PP. 0 0 0 0 0a\ zH VsYʚ 0 0 0pntͷR 0 0 0 0 c@w^WZqLs# 0 0 0  2 0 0 0 0 = x岸g\g1 0 0 `nL 0 0 0 0>q;uer!A```=  2 0 0 0 0 㺱Ǎu 岙d粸3``` nOٞKm  0 0 0 0ݾ朏Pz~sׯ1j 0 0 0rr`````D¸`!Biui: n_s՜``` ٞxPZ3 0 0 0 f@LmP.CFp.{~```0 + 0 0 0 0 uc~[,S(--\Vpd1 0 0 bv\`````a\7s?u;rjg 2=e````In{ 0 0 0 0 ¸n/y~e 岼9ew````on{ 0 0 0 0u~K_(Mm<\6g}1 0 0 ,aK2 F````i@}3ކ&;\#Q#cx'```0 k````΀0?$e4¹Lf0 0 0 i Y_```` ŀ0.')NJxP.%8\Kmf``0 k````(ހ0n7=HPnz:fs7M```J7Ph````7 k|:>rٯžp.X3 0 00 k````(΀01nP{^r׫-yV87Ͼ/ 0 0 0Pn@ 0 0 0 0qBBcA8Zo```< ````o@7n|za\ }λ3 0 0 n7kowc```ـ0.5BƆ\cX`````KBfAa````u#}G\P1rDknp1/```1p 0 0 0 0@qq?BW=έƵ^3 0 0 a@(,( 0 0 0 0 0a\7~u 3GŶ #[8``` dP깞3 0 0 0 4qYppn=6```ˀPYP``````a\7=ˀPn^0>pn}=7```L~@@ 0 0 0 m@_(77uF8~ 0 0 0 ) 0 0 0 0 㺹CrS_sq 0 0 0@FlN( 0 0 0 0a@ǂc=F$[_ύs=g``2 k````4 [׍c o8,¹e_```^7= 0 0 0 00.4 {̾oM8z 0 0 m@(,( 0 0 0 0 07¸ev c㸬 OS?```z x  0 0 0 0 ˀ0n7=((Be¹ڸk```Lx 0 0 0 0ׄq˼~}ʭ}-ƣ>2 0 0````*6 [MaPn}6pn=6``(ˀ.?p1 0 0 0 00ƀ0./cxgƙ]2 ˇc 0 0 01  2 0 0 0 0Pacܸu]eh!Ӑpn}1^```1KE\ |```׀0n7Pn}7pn=6``8\\```` 6 {nRrYZʒmI87? 0 0 g h=s```h xPܺozn9 0 0 0\\```` 2 X7ekBmm0'G}```5KA\ z= 0 0 0 28\6 [wg``3 + 0 0 0 0 wsՍmـP.9mݮ\5 0 0 0@\ b```׀0n7>ŀP.')N'¹ڸk````_BB 0 0 0 0 Ȁ07TVe(VmVs7 0 0 g=p1@z 0 0 0 0e1ryk"[_ύs=g``1 + 0 0 0 0@qQꆴ:f@(L۟uL SW^Օ``׀.?p10z 0 0 0 0 ˀs 3ǏG8z 0 0 c@(W(```` s# fùP.󵙷˘]<# 0 0 0P\2zb ;^````@4 Ӓ,>O¹8s````BB 0 0 0 0 h@wn#5 kL ,a@8%YG 0 0 0P\v|R4 0 0 0 Հ01aL0\eέƹ3 0 0  2 0 0 0 0ai72V7W/ԋ4¹iuI```z xށz 0 0 0¸ 80\޶f]yέƹ3 0 0irr````Xр0nڍJ7tՉe .[O>Փn¹Ɔ 0 0 0W6````^¸޸ rCPCzzm\5 0 0 0pـPP. 0 0 0 0 uSMir0 amC}sq 0 0 0_ x`````z {^s4 e.mS=.sq 0 0 0P 2 0 0 0 0bqtvә rKTKzn9 0 0 n`;0 0 0 0uƭƷ~̀P.G3k{6+[v_e``4 kV````Fu f`@(#89έƹ3 0 0@m¹8s``8\30 0 0 0 m@ڣܬ.i@(%=YOe3WM``5P녭z 0 0 0¸ 80P\ko/[_ύs=g``\  ````(¸nz3v\޶f]b@8\,``@Q\@= 0 0 0 285j6 ; 0 0 k@(L8 0 0 0 06 F7x՟< E_րp[```ӵC?p@```׀0n7aB5Hǀpn=6``6 k&````CuSu rzӭ 29c``  s=g```ƀ0.  0n@(7V\έƹ3 0 0Vr̈́ 0 0 0 0a\7KYj=d@(<2 Z,-``50녩z 0 0 0¸ 80tBkǝ1Pzn9 0 0 e@(L8 0 0 0 0Yu3tVMryɻ}})¹,-erXb``^Y=p^z 0 0 0 0 ˀ ,g@(wZr g@8z 0 0K5 0 0 0 0q\frΝ_CՐ``j3ڊm`````?  g@(wr g@8z 0 0S  `````S¸nfN}0n@(7V\ e%Պױ 0 0@6}Z^````@΀PvZπpn}=7``R  `````U¸nVެ:VX΀PrR-`πp. 
0 0 0@\```׀0n7 @ǀpn=6``  `````Q¸nJݔ{FX߀P5X`m@8 ````.@ 0 0 0 k@z@~rq' c@8^z 0 0mBfa```e@MG7` ?B83 [_ύs=g``Y\```׀0n7 #Hǀpn=6``r̈́ 0 0 0 00ʀ0n,3@rqG g@8z 0 0@}F=p> z 0 0 0 0ec@(82 s\``ǀPp````.ƭfz@9rq k@8z 0 0@}z0n~Pna)N #H``(߀nn1 0 0 0-fz@4[Yr4آ e@8~ 0 0 xVC7XO````I¸<-ɲxb`G-,0.՝f``yrr````Z?>G$;Qn2 a\i1E_(@{-gܕ?X``z T z3 0 0 x{ڵSLx4¸ ͬe4ϖkܴ>N 0¹Eׯ޽{w\}..``1 k6$```@<ʽyaf]憘y\7& =_k}aXoUԌ``{v&f```3xp<=l¸XRK@%}>Foޣf 0 0 0\h 0 0 0 0Px`{7rq\ㆠer@r-,y18R8|ϟ 溷Qս}v0 0pb```1jہcf\nB w3,=fKr-,:f1@mϒ\]~ݻl錙:ƌ>3 0 0@j 0 0 0u/{SNaܺiϺc /9͖kܼl[57Knswݯu80 0 0\!1 0 0 0@"h{ϖ+ 4780lfy-7Kuծ_d7 0 0{(۞ŵn```< ă١@n~rqa h r͒kl86l lo p,{ϟ?c9``v3ۊ]0oj 0 0 0Px r\a\cƱhg`g5Kqf 0P=ù)6tׯ_?{N0WC``] Ғ/F훛- 0 0 0 c 15gdžq 0p{Ζk\cc6r>玙%})>GLj; 0 0@r````H6Ď 5[05߀30^%w^߸W?V1_]]] Q/( 0 ـP" 0 0 0 0Px;%Yj\a\7|ж@-,y85ùSf=|`{E8v,c` 8u 0 0 0SK̖+Fc73 g`r͒^/e Ν:Kns]wӻws(G泒`nɧ ```(@KTCj3 0 0p\E=tN```0Z@rq]¯ep l5[Yr9t,b207;w«+\_H````d 7q```.Krc9C  le`r͒V520%,ׇϟ?````uEu] 0 0 0{Kr/͖+^έ=6lfɭۗ3Ƅs%~ݻwB2 0 0 j`Յ}fn0 0 0 0@uPl¸90{Xs\䲽m揁 sט%V})>O˜~7 0 0r````j0]}7onݺk y`kܺo 0%qʗ/9̺ 0 oI ```8\C[ˍI=bZ L-,G-~@r%#d;vf5&ksalf=F5}b 9Ns]xS| 0 02 ぱ uT#Yh>D.8rmc 0 0>}t/m}>g}f5& 17[Yr9kya5 aë+tLr$W 0 0@ye.``IG޽{ >``z m<5[%k-'ck\25ǔK8,xd,˸b``\z=ݻwn{ō='02 liov9n^+[.h% ` Gf\S_@l8e``  1@qB;~zy00 0x[h^rcܪ= 0W%0>`\ u1&]\i;m``4P\<鋾li/{uuF=``L |t֭rfuu 4% ` G% 5O'" 0 0#3}i;h&#H`XǏOPݻ~ƃy^8L;Z}~XzhyooI>J?c1˨k:ksK;ڟk>mCYm 0tBN`8B?| 03ws>z m>G/ٻ3>f0f_|]4Հr0o posGO8N`8B>leb9w~Ԁ@0uq{oB@rykǒ㗚3@͛7~ ;s{```Ņ@Y z$W`sx`=wY# 00̀P zP]fsgct-¸BװJO8:qg 0 ˀPYR`8B r_ 0 3~ 02yP]f_ݦ[{v [f_vd}10πkyO```@qa{ cX`ӀƃݿkV,1 N]̾b`:ŗz1 0XBfIe 0a0 ـٯmBeZ̾rܱ|`@(Lak $nI\\g3 0@ Xdd3mͷz3 04?a@(LTWm}e8rӫ15l}ck``uq 0 m@(, 0Pܲ?/ 051aL0r2du S׽-{~gθz^ 0 00@qaz STO`7a|0r2du S׽-{~gθz^ 0 00րPYR`8B> ~z=3 0G6&Gk-ŀPn=.w5-ӹk2Z1>[:<پ 0x96 lk@(wz 0 k}Ͽ3@-Ӂeq[_8j̸r [f_Z߀k{` 0e5K* g@(.'& 0_ xiL 0{8̾a2Ya`Buoaޮ+p3^| 0 5P\olޠa<B8S` xi| 0{8̾a2Ya`Buoaޮ+p3^| 0 5 kT(΀Pñ^  0 xv~K1 [ek)g rt̾pLd%y/<3 h0^EM?ݶ|7 0@s 0rtAu}uWc@(83\Ö17Zgz 0@r͒-ˉ2 0Wh o@(w1<.kXLVXǀP:u۫k2+犁q\댫_ 0c (πPny=5N`~h o@(w1<.kXLVXǀP:u۫k2+犁q\댫_ 0c %3 p쇡3 0pdh{dR iك2Zq~Y2-5#%psI 09(.cmr7 0<ܷ? 2xP]f_=Nƌ+װeue ٿƁ0 0P\2@qrrb 0 ՀƄ1ݿkV,1 N]̾b`:ŗz1 0XŅ 3 [^OS=e`߀ƇݿkV,1 N]̾b`:ŗz1 0XBfIe 0a0 ـٯmBeZ̾rܱ|`@(Lak $nI\\g3 0@ Xdd3mͷz3 04?a@(LTWm}e8rӫ15l}ck``uq 0 m@(, 0Pܲ?/ 051aL0r2du S׽-{~gθz^ 0 00@qaz STO`7a|0r2du S׽-{~gθz^ 0 00րPYR`8B> ~z=3 0G6&Gk-ŀPn=.w5-ӹk2Z1>[:<پ;uO;{16F10|nπP|mKc2뚓1uۈ#m 0S ƛޫ31P2?ƭ&k`Lh2ݾKeu +>1 Ln̺nb ̀NZ{`` ՀPUR`8G ?56߽{m}l+BVr>7|ôkbqϑH^+8Ӿ 00ĀPnNLTY!}kԞ< Q1luۉ㄁e ,۾j_`<>a !_߬ZMV0k[m[nL~}c"Z}0le@(ŀPnMTY\/  Y=Z,ـGgX@qa56tN6pPn V!9V2:綦|VʥV oSޛc(79l Vm0vޗuD}goLh27 Duu-X-ՀPnaˬk!ku.նb` BVIe 1 {nd|N=N.؏~~BUcͭ&987 lkϿg0 [ey[]؏*_ÖY^0پ0 0Px V}w#r}J䐶{M3N\}}nɱb5B+ڴ_l0K允e .Ӯ[{5Q]f]ve\10܀Pړ+c2'AN_ 0 ,k@(* 0P#rcEfx?3is _s.{#$l|1ofq``iBe3Q]f]>|nπP|mKc2뚓1uۈ#m 0S ƛޫ31P#rcҹWoX}P!~eKB˶e`&4ig3 3 [e\]؏*_ÖY^0پ0 0P V:UٝJ}QCZn5ma\Wu^e)B˶}gLhGڈ6 [1eu熁 ז94-9/1oX8F 0 00ŀPP. 
gਡܸ4sUq~OoĿvo\i^㏃>~8}jb_cǶi6>?vSBq vm==mۈok:FJ>;sƾnH(7ک~͚꾇f;p8N6)>6yUw/>Ѷz4V3s\}dh=|2!5hO+b\BeDuuu.VWc@(w?JWưe5ŀ2u~ 0 m0eU_bȡfh6BsCڬz2U(pj(7 A ;*p3==#lط>kǴmm# A 6X%> }6C\D}WuϘ!fyh pG : iF{3[1!-ロstWK7f_\&4U/K(ӀPnu5Q]f]ՕOR1luM1 0ck 0eʵJ* gȡW6ClF:Ɔr3mܮZs?$mnl۶q.۶h&r#:$֩u>mRڠ%nJrǴi{=\=CPtD?JͶZ⪫ߎD Fͪ:U_ 12'A ͼ0 [fMTYWeue`?rS~e [f]S x- loXgj 0@ [6XU!ʍ0g= li3V_sL(7€@Z{?m+6WuJmy6U1hgEuU[}~ۂǪ8cWm[6‚mv&,?K rSp乕mj]h0gʱ~ڶm+,Zկ}bno5!׆x}\(7j:% m/} JR+5 [fmMTYWbue`?rS~e [f]S x- loXgj 0@r8z(| r6/Vfm߻Šmyv*h{w׃q] o F"vcsέW˪vU\(-lzaf[wm'>\ȶmEӔX gՏGڬ3lm4OmVԡ+8vG[{un>RùmW^|vKIi3`Є>?e-&ˬ2BUJ2-)7`} 5``lŅ-Gփg\Mcus즆rl[oSZuxM3ض*lڂնlE/B}!3a!6%e*޵C pm}_ۂ] o[vHy9)9>RH[iad{~&4S+J(׀Pn5Q]f]ՕOR1luM1 0ck 0e(.l`W}=}\m-`lf-ț~m~ސP`OmS(;wl)16src{P_;TbnaPnj{ޮǬPi  W~|u);yyvYޣܶ 0rt`̺:o+1 Z+c2bk``{:@?P`6 kT(΀P/\۹Gk[X.% .LKٯ9nlRB:,c-5(d2%>kCM ={[[ۧ|!?)Fh фf^?ԃc-&ˬ2BUJ2-)7`} 5``lŅ-Bvjseڮ YJujx3)5PkRB@_L{>)ϭ^)u?,]1Im꘦}]S\7;M~/'T+=hm9̄f^?ԃc-&ˬ2BUJ2-)7`} 5``lBVIe rj#vao^߶.%pvR%SkM#g3K }nl/#jv,D(7/Bm\y誯C8 sw_RcSDW[kr- D[t{s#c~m#c{̻>0p BeDuuu^VWc@(w?JWưe5ŀ2u~ 0 m0eU_b@('`ph03%p ;Vcynٔ:g(yf޵ps!ZF{SeʍnxLٿPn}{ 湩a">5ڷ#SEkIm+˄f5ҏԈ Yce9Y]؏*_ÖY^0پ0 0P\2@qrp5*>n͠[[E8K [ (ESbαmef;T8V ePnrUm))xϹUdCpWj_j;Gƶ}W(w)#c{3w}arˬ2꼬 ǀP~jүaˬke l_@ `(ۀPUR`8B_^/e34~K 5×["ed6;)f;DPnϞ[ڇLk3`;d%)cYKmS WX1w>262ox;&4U/K(ӀPnu5Q]f]ՕOR1luM1 0ck 0e(.l`W}Uk-#Yrj}ϭZ\wN9%l@nl7%8&Y?f-rc;C?j(7VvmnkHwJm/'ͺ²}!!mzPn|n< YŹ#c~m#}mweBs5Ԍ W&ˬs2BUJ2-)7`} 5``lBVIe  Ŋc)Xٲs㑲_o<4^N.cy4Wjmt;S\/ʭcjs~^Jz]H)@hB3Gj@rˬ2Ꜭ ǀP~jүaˬke l_@ `(@qa<`;ĀPƬ8Uwܥa"eꆚ!ġ^l0jgoaPnFCalm+:$dҬGs^͡:uc,ҡ)ֺi>2}^PK^.~V k@(ښ.~ V)̺Zހ5Ԁ` Z%3 ~jjlE?5p;Vܔm3=4FoPt3+sPԕmLP!}ڏY{CGSi^_;Sضo #)vAN m@(.~ V)̺Zހ5Ԁ` l꫾C ;iV-mf܆1^F }o,-,~`i?m 6q]!+7mhmo>5)w`ܔ}j iժFa\xl0tPn48ؗ+wZm>ad9seBs?үԊr Y[eչX]؏*_ÖY^0پ0 0P\2@qr/\m?^M Y[2B]؟U蝻!i;6p7|iJیP\s{ڎ%ڰ/ Wfǿx: ͶcBU{Kl F]_a3̍mtr;&lz-J+t]u gV{*;&~]睶jF]Ȑ5W/bLBeDuuuVWc@(w?JWưe5ŀ2u~ 0 m0eU_b@(I3ڷjΚ+^Cp^'-ʍl;l JY_g_(wwm޷mgq=[}~k;޶n[(U=gVy xW}O=4תxne!ׅ>>2ٵKIi3ǽ }IR'6 [f}MTYWcue`?rS~e [f]S x- loXgj 0@rp V ]zص 6VW[iʬ}`xcWp8jٔPt6)_h[-]+vV3r F; 9|F[t\gl?̤IDATd;dybJ@~H(e>2)#C34ZWj@rˬ2\ ǀP~jүaˬke l_@ `(@qa<`;B!mS7VJm!ٝ V+1ۈvMsuss۶*lW=m^}Uf9뫬~~ [ɳ o{=#@^w??D;vs͐ϊ2۶>7փx݇^ڧ%ض?^K~[&4W3L(πPny5~j̺:+1 Z+c2bk``{:@?P`6 kT(B9%[GXp{sX2>#ld9ψ [|nl#eVڱ<}Sj%zvݘϏϊE2;՞cJk]ՖUͦ~ސo&14:xR}$FRy Mh]G}82l̺:/+1 Z+c2bk``{:@?P`6P\ز1pP` ,2BSj2Q]f]Sx= lg@(w_ҽ1lu]Ҍf ߦjS`ʵJ* g@(ׅ 0G2`B#ywj@(L&ˬk6 [f0-a ^z1 0?Ņ B5S 25) 0@LhOQ iDuu=yq[2ÖY9#0H+ 0[ʵJ* g@(u mr 0mevcBeeչK]؏*_ÖY^0پ0 0Px V}w\N8N`J1`BR,;l@(L&ˬ5ɣ-Ӽ1lu=rw: ݰg` wBVIe ~2 i&OszY<10΀Pvݛ2뚻;-?ÖYW.ue`_uU/K`؟xr۔c` g&4٧}(r˴n̺8-Pnaˬk  ~$w`- Z%3 낺6cʀ Mg122ܥ ǀP~jүaˬke l_@ `(@qa<`;ĀP.'Cx ' 00r){6 [_eu i̺x9޻cn3 0\2@qr]|s?F`4`B9=,g@(w\Duuݝ㎁_ aˬs2/:^ 0 @qa<PlnBLm1 0|ӾyBeZ7Q]f]r^r`@(Lưeֵsc`Hux?w; 0ruAj1 le&{[ٳ]ŀPnDuuuRWc@(w?JWưe5ŀ2u~ 0 m0eU_b@(!N`R dˎ=-ӯ2s}ghr4o [f]v~r݀{7lf݀PUR`8B._| 0s0Ӝ|O 3 ;rf̺q/rưeչK]ؗc}KR/`g0Cfj6\6bـ M>si<2.G9/9N~K0 [cc2Z91y$:ɻc`` BVIe 庠nqAM`2`B.{ b@(`̺:w+1 Z+c2bk``{:@?P`6P\ز1 '^  0@)Lh\e Duuƾ3y4Be7-G;?9^nX 3 n@(* 0P\/Q`9 iNO>'n{3Q]f]swgcBec2ܥ ˀξ 03P\!T35ۀP.SsyL1 l&9l۷~{ݻwzj 2Z97k\oK)[f͍aˬkhcX^Kmv 05u"k?XdXÀ Mp66",T72&-bL*[Duu㚖gy77cG(LưeuL=ưnt=ŏ 0@"&>HɀP.GXyg0^Mh?+ L(s2뺗f~rs6vYBe̺x~6eD1]o7 0@&M8pAGb\&`8zyꧢNӄ7|c^!Y xm6Qn]߽{kl=Yss Frٗ5߶;W.׶[5]q=Ʈ?㺻=lv}{pQ>]e2 0@f  z3 ݀ u@[Ф <_&4觾O%r˴cz5G9ǒKZTL2î[ׯ[[nv]CuXgvQ;2 0@\+0@qr]0 0p$&4?Nh* Xո&Or˴cz5567<\B]/Ҝr,2îW׮̫k>Xj ^ڷ>p5a` I_  0G2`Bs=oLhcBĴPzPnvLTWs+Gkpi9'PnW=z}iNW9|Pnva׫7|sv X]\Xj ^ڷ>p5a`r=E=&bp')wu|1v|KOP0~mqձrYlBs6}+ U<+? 
^?xm-ӎuڷ}8oG.o5%TN_**[c/îgJvWGXyg@qa-6u^2 W=?"~"[egx?>P#M|yǥʄ:5h[!gg?7U#VJ#}qerukD:ukg5Je:PnOԳ~qQUO˹G(waשk|ps}|v~p0?`r=!~ivʷ/TԶ_|iPVv]xuɣ}`DŅJ,cra ̀PnZ{K(W(4SG(W(w!jBs]"ۜ촒 Menm,[fMT/[׮؍krn?]j5>>iv~׿V_~i_,=m7[ 0 ـPUR`8BǾ  +O]4l&,끠t_cem nK7Ql]۾R_ mj%ݥj]^]Be6]Bs%\2ưکX#w3 i0ޚg[:+yͳ.k\ܵe;BB{j?_Mho!~ڳ]z%8W9ñ_/';LCg$dTo?:勇cz\~GzH뛯VJNmg ݏsۋ}iY[x3~}]\9ֵ-z^SNQ?v+֯qNrg֯S)꘢klע~]_;✁g/Qݚn/;B˞f l]۾z۾ N1d|c 1l{jKSS :}.# 0 g@(* 0P.{ ,+/5ׄuh 4'b?h4U 'gccmcmo[˙k <6W\7mV9t23&&R>\/sn+<V;!as8zuv|]a6mj9sr;oiDrum;u^\9>I5G s׻8 p/pr4SVXn1~ǐj .wҌ1rum;u1eX2|1+No܏km-9mXgsqNu/ 0v &ms1Pz(?[#XU7uM}] bk\; 6Q߱Tзs}+(~a烾ur5`Bs)+ VkЌs{܄f\>+Kk]7B]U׍fv}8uYVmyOvމ{{Vq|՟{Զ\~ .s.޺&k53CΗtrS}Ͼ@nu}<ۮ՗Vf<)ܨQ|m<rnE˲5͐mn}}B˜1rum;u=^_a<~5y^#< ta=lXgso՟` Z%3pPndOsE!ٔϬ~CvM|$uR&vIio8Cc~B193Ky׶]}YNJ }m w'=S6ejտs,?8؄v ߴ]B4}6ٵ[} b 9ua0l|^[ȶZxϝۇC7匔/ᤆrڎ}wsfL/^Q^(ws֞LT/S׶ع/w}1"5,ZԵ`[ĬB(})ps?܃]gڶ}^Pnחsַkط2`Wx1ھ=6ԜGxP2.S׶UL+K1/5s[tX1;7<,s.C3 0xG;k -`'|f7޵rM|n5!Ut|ۤU_2jk !P[l*]մy}FKH8jexm֢ZsАv*0ګkuQPnx__n3[w}놬~5z_YJx>Ki2Q!]a!.C|r.u*|3\Qnj(7ڽoI!ŪDŽry׮kB/4y]\׵kw 52"ͮscߵ,:泞C rI 1Y߮>+]_zO~)'}xĿʝ<#ce6Nh#RMSCIư_]I5bcخ{D@߽M络Xgsq5oj 0r8Z(+ɡi{t]xKέo x_t}PnDɐm[5܃ ; ]1(v*Oj;jK=msӶR͐[rﮕ> =\mEݾɁhj E[s2t2uɾMhkNח¼MhLd ,/{/[ CC}̄fWvL })svmǔ?58r?R5Q=]?}sa0gm_4s=5Gjw9w7dl g yfRP_lj~ߗ WT>׵zs?W(wpake)ccv=z67mkSct9/}0Y\kz3 0xHk -[)s j<}ynrߐ1ؕrSۡiH8@mo]K۶@Gd㐕r듦>MePnuLQtըo{H(wl 0xDm[?wjs; }Ƚd0`Bs~Wm!縮 mB7S!QB]?fB}ukIȱƆz;r?Dum&BWq{,&9lv﻾v]Rߗ۝cBs}spLw=0z=LIʝ<cv:N%Va{1l׸ m$z3u3֙\\  0 Z%3pP l޹c32AqnVm[s16&\S3h1tmNR{k3$l]m\hrjv۟-CCA!{ߪ}ܶB_pW>m>=W6 4~ݶ69{U{59q/=:w˔wƆra50idb VCmikR#F(wsq~LT[ױ+w?Rn\(˱lvc_sZ}hfL(7}cgb {ʼn1um;=<ňs9` Ssu!חư].Wcܕ˹/0֙\[3 0x@m Jk 12y>fסum>97܏1ECCxc{Sڼl6_f`dDVsc6C}z[r)x.;vUؗds9i3@z>j Ogo39)+ I+ulRLO #]As+ğVۄPnW86eb+tXX6/vur=RWuJa\*Cetju9Wzm!PnR)m/d=&W~}}ʝ<c: ws|1e ]#a:z8^2s`.a38e` BVIe -;4xP]x~N8/CCCycnVʝȍhvoE[(95[c>eǼLynckзRls)s&)!mO gR==Ƹ1Q/`Bs^mIȾB%³]%c)!)2u15r]f=Z|G(wsq.LT[1ɺpG#ϛ81 lhKI_:+ގߡ/Cy';y8ưֵRF#1髽abu=Ɏce`a )k'T󾧆2eݶ^3p6H u6qehԿo~[}h> q75Pɽ6mȦ^ C1ռ yM #ŽcW["(T?LDlz7e_&ַ+ԷZ__Bs1dzvD_M\_|r8K׉)+Nٗ)+5XյU^V!;_[9cyvK}&v1_(r2h ;-v/? krގμkmԚ`m Z%3 {|"|dZ<ki>zh75zmEjm4P͔: ͇s<`O!禧PP6 ݄|m>ת+]^ߣ?-1K߹P[ruy_۷9ݓoKw%P|✜s3ΣR s_mWsx+}U(񥖾6ŪpOxz=m{;^(wpNNak׵Km@ci-ư]0:v0VܮcXյ2kh{RqP{r;v:_|1kkR3o2vlTXgsqF# 0 L3 kT(΀P|ܟ:79rcRy @(嶵KJȲgJsΛqn_RrܨQ 7`k@(wڍ}g] ԧ+䓺^_Zo 1_H=749-h{s[A^(wsqn6Q=O]}.]_.IIB) vxfc}=箱}Ⱶ/}Bssb ;_]ۮ*sפPc 9]3w/1֙\S]틺2 0xpK-b+Br#U]?ל%v,)16O׷]WʵRnrmSVj>qnZ(׽V{ۮ yJ CcZkULhvw ͮIsm j']3+04|";r]v}y8e<3wF(+S]Vk搟n_3w<}H#[cyڵxE^cR;Smj Kwk_8vP::sR_`7 A g@(ť-`vAn[]oBch0t>Q34 4C:K_Xk w쵴}C yݯ;:Ҏ 0 0e03Py/mH<,sCC͟JJ}xwD͐ ĜM mϑM_c.m?)5Hߔi>N\ܶjC.g%e>nC(=X{{{ ֻVZb23>3%V(7:zw{f6|Owr;Vޗ~^Mo=83Q=m𥮯VS%~1B y.4W-|u2ۻ|=ֱm'Wưu/5eIm/ca`1l@Ǟk{Wcy{}V{`1 kT(΀P HXdؕa !7}!oYkR*m}ڦ8͔ksRC})5K [)8n< m1gj6W>Sj9ƿ3οngPLLhvwPnDcۄu=Χ6ʝ=sizz]Z]6M]Ks/l 6C] gwrBbNkWr/<$g/sݻg1Vy*uK7a``x@L_ ʍmmqղa]DCHJpH8x!!k&,XpZ܆G6 @mmVfʍn^rxl8WXuwߛМnmR*&E{T ! 
+sM"V!֖mmuzc}iܴڋ/ڵ:`85d{t}u(eS`:_srs5e ;m_V:6:~a 5MY>sUe3\\ 0 Z%3pP`n|@kbl(7%(`t]￰uM 嶅XB 6C)^!KxRe+86q686 ]V(wm&s FYEz0mrrϭCU^_[МVokpȵ+2]۵I1)1yfМǚMyPn5JgZuޟgV2T);C( }ٮmǺ/k9N;jvz]aO5]>0۞XgxO 0 A>"ms3oOzum{phVs spB_06SMtvH(7&]-9jv9lۧ[V =|kun}mRծk <מV8w-ڶ9mN tE>g<-nۮBp _LhN=)\bB~zrm;JmϸیujFsۦ#uxA[L;'~&sUc&UvDzB]_Bm+1=j;˴~y[(71촺֥/ }M2/v/@a3\|t?` 6:V/MQB1 0i Jm!`h~gַogj(-7&q.!jڽm2(li>}ܶ炎Ѷhڌ ov=l!ڰy [ [!f~oGEݫm5W?OYOǺCMhN3v3t>|!f -V;K(맳T[[YűkBoQvʝކ9:4Q=]_JR^Bjm95FWP8u֔v躮OF:*t}\ڵW Rթ5aa7Oư:d%&!r7ư]񹟷L~L;َcg`arr`8OO׆a'ǜ_ ƄCת]}̮ j{h"XJ04ڹ&s?m?5K]ܪUt{QW࢚XZiN շDJ:\mV^_ൾ>3]?v4kvr F"W9cnz->3jsO^ko549X C)~uR]4cCC՗#έ4w* ͮPXLX#t R}^3\1rm!5X5&׵k%!+Բ-I> =6]_3n`}9mϓŚBCJ{PnuSÎkuf2N]Ϩm6;\k u/):yԡN^`دx0ک\׭/n Fu=jZrZ F/5 涅q㿝J Y4&]\Tsn}s2;v kSùԚ8V[%r'j(W8a~SEmP{-r㭯PRP^%u?׸`Րv_Ɵ< Q9kez|]P:wm}ƹI TUgn_%{Ful3M/;B9[2_׮ٗ9v=ŗIecBkajF|ty`5>G-`ӀPnf\yv.uٮ.CfPOenvW( [y6B;"q3cЯzLY$&΅b0!ang"\΍EJ5^[?f`&)ḆC'^W3tACRTX&Ԭ^f훜S_ ;?}U?>/祮DWz߭c}z o|5|7Pf 8Jls9|.2w*PnW(~SG]بeʽf=UӦܻG(w8g&յ+y7Tw|.4!e~2=ܹ2M+Ƣ#Uʝ~^ca׵u΋s_f  sܗ{,џrLciԓ` ӱXjmMʭDILvÉ3tă3wMO {aL81?f=cWh1ݬAqso> znhն=N){Ô$)Tu6﫟s?y>zܱ0ռo{?p79Z CK]7f/4W`m{TஉsP9reKY]P}ѩŘHoNSsw.ν&յk%9mfr&5/U8:? }S(w详L;f=N}{~\rrǝm ;q6X>k{}AйIg]"/y8#a~9t.5ޝf0¤}!!U_)m׎syn!j{M_:B=v\]Ǭ>C/;}Mj;Cư%j%ڹ"%po3\{]ퟺ2 0\2@qrȸW*KO25Q3`B3]KpWPDq^2AA!3-xnqU"BJC&5Qw}H?Nk=3Q^/.7]Aʮ1פsdž 3`B3ݚAЙ;0m5R%BJCB($XJyvo7sKU[nQ8!uV;S̕VM?& ZfPXPfe?n)7 Sڳ8l[-ulPsqu5Q=[\*Ky}n9۱Ͷt\wt}>󦼷Pqk꾱ީGski#;<cuMG>~r[:p:h ;֖cخu74J,c4 `1P\ehW'BZjܔUG`t&4ی3ms-Ӕ2:wy0vKưeuKSuی3m 0)ru!Lz-/ 0{7`B `@(L&ˬk Q i̺8-ŀ˥Xv,3 j0^ mX@X47zk[ښ` ̫z0pLBeDuuuVWc@(w?JWưe5ŀ2u~ 0 m@(W3Pz(ׅ / 0@̤zf߀PmSe5[-N{d [f]rxt:`6P\o:%ͿF1 00%ڒ-ӎ2:{ o@(w6_ù1lu]Îm|ukK.% 0 ʵJ* g@(M 0G2`B#ywj@(L&ˬk6 [f0-a ^z1 0?Ņ B5S 25) 0@LhOQ iDuu=yq[2ÖY9#0H+ 0[ʵJ* g@(u mr 0mevcBeeչK]؏*_ÖY^0پ0 0Px V}w\N8N`J1`BR,;l@(L&ˬ5ɣ-Ӽ1lu=rw: ݰg` wBVIe ~2 i&OszY<10΀Pvݛ2뚻;-?ÖYW.ue`_uU/K`؟xr۔c` g&4٧}(r˴n̺8-Pnaˬk  ~$w`- Z%3ÇӻwtNwanc7 00m` DuuuVWc@(w?JWưe5ŀ2u~ 0 m 0peS_=gয়~:=|0f3ĀǏ\\Μk`8ɻc=WBe4Q]f]s=/21luuSWeXg_ҿԋ`'îfɓ'7jB3BͿ֩5z5e`ۀ MC` n_%2뺄 ]]j [f]ve\1fX'^ 0 0j@(W(1P ̱B.Cg``LhgR rjRm;.b@(Lưeu/6`/ 0 0lx l݃Gj;wi'^ڗ` ̫z0pLBeDuuuVWc@(w?JWưe5ŀ2u~ 0 m@(71C!w{t֭/_urb`3Lhn{k ` DuuuVWc@(w?JWưe5ŀ2u~ 0 m䶀 dex";"dܲ/\cLx 0%0wɾ{1 [Ueu/rưe9K]ؗc}KR/`g 0@fj\.stidXʀ Msb`m'W&ˬ Wn@(>` [f]~r\̀{3ke؛\2@qr]v12 L1`B)~1 ;O;Duu͙ጁnBec2\ ˀξ 03P\!T35ۀPn~oo~?|͟mܱ>~_cᄏi81|7Z)_` [f]~r\̀{3ke؛\2@qr˿ ._Pn~oG7ߞJ Fskr? ׌ qƛvc9 Duu,FXրP_c2뺕'剁quƵoڍ`(.7N'a\Bֶ극~Zj^5Խg~%~7=sYsLhK iDuu=rgBe5-{>w&hX#w3 i@(* 0P/r˱'Zq-ebOcB=yj@(L&ˬk!krtm [f]K<9&VK6`wɾ 09(.Cm nk5([=\5eQ9cZdik _]j(?.z2?Bِ~f [f]kԞ| S B-`(ӀPUR`8G {!¯7ߜ?<1O @l/>Vl;7 z?{Pna}]?1ʭhֲ1^}~ck:I +3Ys:TSLZmΆ}BU$>뵫g{lo豞{]V-s3=| M~>rZ2.%-ӳ1lu-X=cΏ1r 0x[6m a(E$ṡf^ 6wߍ Eܱv^+mvxc#Z)ܾvESCVRPjOAaj-m?|]>CCA> tF]}0< qW},;T4؇ck_^ǽBiu0Ui{4 [[euQ i̺>a Vb~xdBe7Q]f]|rL̀Pnfaˬ/ã0> 0xK7)@ܶY"amao;ċl3@Wmk;1!=dU؏p^7S_1u>=_w6":heZvȪasr ժO}jKrvm!զ/u^H WsgV\y: 9v\4:%X>9go{Lh2Q Duu=91[2-ÖYR;ϣ0a(' 0[ʵJ* gPn=,as׶oZJ(zd[8` ط[8: 6W̬Bі?Z F~LY]AدfpHpbcpKs 6>B}f vk[8BY ^iy߷m+W\c8FY[0\5cvb{maޮk۷hW7  VլѹX2OZU6x[{ O 妄kӬEWm Vh Z)́Ϙ~dv/V'%-ӷ2Zȱ1[2MÖY? 
0^q8 0 mݠS3_נzdʯ͕ur~3z.ܧXvhiZ9Sk[1\5C/5L96IYL 4=54hԶsW/m@S*ȐצRkU߇fJrڬu6rːy&?LрPnMTY#3{5 []c2u?}> 0Zr駟NO>=ŀo[qʝ7lkmѡߩsƚrmA֮cP4mN ʶXs])wNtԶLՔp CuLhݾKeu +>1 Ln̺nb ̀NZ{`` ՀPUR``V?~U 7&'?~Z쨡E/„?)a)A)!θMǔPnj`rHp0\-5Y϶UGm$R3CxH2M~mklkh(7/ԫsq^nH5k?&Z93 6`BM<H7 f{pf̺}d?-/ÖYW-ue`_uU/K`؟Yx>|ʽH GMٯ#r#4f`g΅ZCCc15x֐߹ 6_Wo 6m9w,)=rrюCS 97.q^RQ}AWN}x dk)[>-V{re̺ɠ}erưe+ 07Y`ʵJ* n }V(wFkH:;4DpPn ߽{54|)T}:=z:{֭Ӌ/V FK_)7U'#Aʶ?)s6mKZ)OoNc m縯6RσC)ܡΔrZ0w(7V ?ۂ}9}׽no 0kD?0Q]f]3Ya`B˴^aˬ֮l+ 뤵_ڋ`R θrej{2J6C}2 fVƚAĭWʝ~lӡ<50;dUI Ngj-qױ ŦlkhsSj~B>/6RkU?6"ҎCk^u^]Mh_0rDuu]Šde .Ӯ[{5-[}H3`^|i/`H5 +(9KVUnȉ?%8%x 2v;%95xCyh; mkn#%:eToC۸]">4rPUZۏ>=5 SԾ5ֶ별r 85&4զbHrn̺X޻2 ÖY׽o?G3`;^`6P\o= jJ15$Y)A))x~7Bgmw}s!α瘩X*7ë,ogdq^ʝҷYI<>%C7ʡf&4}r&ˬW=-ӫ1luӹž2+c@?``e fY0WPN]aѩr+gF 61=W(7?5i {WlKK_(!+D6WcuRܶt_V?j8&z^h;7W>V!af 7l[~z{% j W>+ 妵^|.{g?cPn̺:g+2`z_ 0 Z%3Pr(d F  Rn[8\3^Ȝ;۶bssJ dlE4]7`ma߮wymڌt"#|=n^_rPn[ / U}jBÔ՗6rus~m+) rr71=}eB[7 } &ˬV|&+ ,c@(wvګ1luڕs@cK{1 0@x : (9l U9}-z3xq,pqSy[0W}Vcsf \;w|=ݪ[e-﯇y7k﫷qv VAf"L w.ʍnۧALW5 ~7kp n|fPrmAx_sUv[_h;gXS>qy4Be7Q]f]v~rـPn~aˬ5#03 0kʵJ* gPn\ A*O[pn k+XcjoXʍ 4Ws.P9}WȴVm ˶6}lkjfmӶZܵlW6)gȊKr£ͺ;[=^9ZPmoTеss}yK]yabo؄r ͱ-gp4B/W&ˬko ڀPn̺:+2`z_ 0 ƃpL6Pz(7jPhBd3hJq,̀m}? -=0mSnu,x_ۊڡwxd:ٿc`` BVIe ![έ8jEUvb|mۆ^"Vd=w5j@r̺qum_~/ 0 Z%3 \BVU[09v 002-.s{]pl [f]d,10ހN1 0Ņ m@(ǮP?Lh lo@(w,LTY%LVXƀP2Wc2뺵+犁4:iŗb`TBVIe ^ ~?frS+JXπ ښkm]r˴a̺:+1 Z+c2bk``{:@?P`6P\ز1 '^O'Bn ,k˯e!rtb̺^ a@(7:aˬN|' ,kXgW2 0PUR`8B.nnp5 [nm[e`ێ;ms-Ӓ2:W9|0x ưeu K o; 0 1P\oA{@rˮ{~77?'nǯ 0`BހP5X.KX0eukưeukWiuڋ/ 0ry`8B.Cg``LhgR iDuu-8<2ÖY##%0$ώg` ˱rmo7 00m` DuuuVWc@(w?JWưe5ŀ2u~ 0 m@(* 0Pܲ/\nLԗ`/ ' ހP5X.KX0eukưeukWiuڋ/ 0 64 3S5@sja]uByem 0Q d('9-ӧ2ľ1rưeK]ؗc}KR/`g@(7PδTr>~xuV͟gJo`L8/0rDuu]Šde .Ӯ[{5-[}H3`^|i/`H5 +^|E lO0 0ffZ{`` Be2Q]f]8LVXƀP2Wc2뺵+犁4:iŗb`TTu 0kk/^'nf`H3`B3^ 02].K|&+ ,c@(wvګ1luڕs@cK{1 0@a*:5O?Y5xΝSԓ _^^fkmA``\&4˭~ ǀP~jүLTY^ݶo [f]sya`:˴+ڕ`*aǏ 2 0 H+-Ӧ2y~ /aˬs2/:^ 0 πPUR`8BP3`09v 002-.s{]pl [f]d,10ހN1 0Ņ m@( 0Lh lo@(w,LTY%LVXƀP2Wc2뺵+犁4:iŗb`TBVIe ^ `=0w~K1 [eeֵrtn [f]pNrdX<;`r4P\/FO:?] 0 lkϿg0 [ey[]؏*_ÖY^0پ0 0P\2@qr˾p1Q_`4`BS'``{B`~`̺.ag2ri׭ÖY׭]>W 0Ik/ 0 (.^0Pj) 0Lh lo@(w,LTY%LVXƀP2Wc2뺵+犁4:iŗb`TBVIe ^ `=0w~K1 [eeֵrtn [f]pNrdX<;`r4P\/FO:?] 0 lkϿg0 [ey[]؏*_ÖY^0پ0 0P\2@qr˾p1Q_`4`Bs>۷oOxÇ7={vz8>|8y溍?~|ݾ޽{w6fl7U2χ&ˬkڸvV׸V07X0\}/3]6~{O>F(w\wưe5WosWb<[a1|~cƖ6c8LnPɊrWڊ`09_ -ahF 0{7 ; md@(#yw3 Мn 0em2.-BXMh$f"+;M iCeuk&ˬ֮~mjV뾇0۷G=f|pָPvKM޽{79\oiP\rk-ݚ.[kq-q|Xa˗ajω߿_- Fs8y6-Ykv`f@(* 0PaJ 0@Lh1S~/4`g̬>gt˵Prm[euKSSܥabީNJC&B{n5](1lu]ܰĶ V׋X/밿ݱiX; 0 SaNکdB|۱ 04`B3D9Tn/^8dbrᗘ@!ص ߯^Bd.s3>i/euk&ˬ֮nǫ9ׇ*wY}hE(wX;=l>c2뺕ۍ1c{6Sujb`༁x 3P. 0 ɀ ޗ9ʶю4 ~>G:XrPjMTY=͛7EYT"mS3r<ÖY=^_cnu}6h8me؋oPSf`?rS+J`n6Pɚ14,Q#7I(7ϺLu`̺Nu߿5(X%UkBj-JLZ6!0_u_d1m̿sVr˼o&㧅e>G5^d_fѾq 6z{us2>ưǾM=/~cXpϨ_Rrvi#m 022`૮^v ^ `=xu6a\¯+&@mJWO k;=.` x  0XiE{yp5US`R d clnޗ/_r &!V3 0ϟ?m+t,J])7+6{Vwv9v ニ/-y}Vi3Pּu/"BN+OTd駟\c 2u zMNcC vtg`lB=aK7sߗ撟+,g 0 h)u"u??L`n,:5ox_b:&w@.c۷oy}͵9```MBB 0pm VoM>|ɓ]>ʽsm'8y-7 0 0ptY.JIKh[w 0~ |ӳgϮ]ݺu+k\K=P~mq1YX&n9O2 0 d trsxRC]t`@|1j{8꼅/t^cH504?RXuwmc0>khzX g=}``u {C(Oss3`` n[LշYzr6~ MƷcc nC~״X/K_>/o={X@b~N+! 
0 0@y:ASl-hcm 0Wr->G}# 0[yδk"$e.Rm7soؗ;G26_z^uk:ư 0 0P\\(ǏOnJCxȮ0{èj%_u#Fc21&/^vXܶ{-uxӧ⯱[_7Ư j0s n`ӧO^ 0 0UT.y >[Ga/_f̍P/xub`u >_fUh//N1snڙ4kpUh(-WK|?]^^~,3dܴ~꼦` @MTҋ9"^Sz=3 0 0 ZA5+CD0r_KR/`i Bj۽{Vq}UXuc lmk-BCGX'6 c9ҾXX<αKo}Q[ 0 0j0^jCx 0 0 a +w޽Xf@}Ѫnvb+V{U֚ԌFx%Vٳ/•їo' lg`h7+"@!wx֮ڡѸZ^|~?LSg[=w–m[X}qMksm 0 a@(* 0 0 0EL}jJ"P_=wա,Ճׯ_ʍ:VaOpX)_( 0 0 Z````bԌ0쐟^2{rS^xH%Qso"]_ӧ\V:Q1m/V=c{a`]׸L =P{[GyqF9\1Pq_skmz 0 0 e`7O=Փ``R W+ׯ_b%֡m?WWW,C )E:VI/mׂASMy?C g@wZ[Wb_cݏ?N}Hk=CK[Vϝ֟`1208?, 0 0?B 0 0 0x  "<})B?؍_燗<>;B@?Ѷs!X Dxirkxjf%k@\KXq-_pnBIZKu_@6u3|P}}E.+[=wZK1sܮmq 0 0@^x#/ 0 0 0DO>_E={vc/V5^ӿm92 27f 00x§}6e`؋'Se``1j68jO>zf޽{ǡ`gg ހQ rVǏbqWF(7[Ümp 0 i  0 0xݻ7!ȇb./9׸Vlu mߖiۘcm@ݻw۷oGfVM՘`4n ```V5>x&V4Ok+V4u*GWWW:WzǗm+˘@ X2BƐƐƐ 0 05-v 0 0 3?xqqqƽs˗&K}6G} Pssvn lg@w^݀s޻aGro߾}/|}c`` d;8@ ````>}:=y&Sr)gώV?A.kl6H5 Lgfr2ĉp TKj``ʵ" 0 0 0"b>·nݺ >}TE-P+BWWW7up-m3b@/^\\|Kg!K\@v Ծ\w```-n 8ߟ^xqkxlE`(@c޹s&+FpH˯}5rsLƟXG\a\7:/? 0nÇ _O``Î#ɓ'O[_ַ]70 ƵԑS X=ǜ<g,qo_S? 0 0g``6z`0&bEF8y=n޿*><f]]yծ 0r3liC̍_YXz[>g``7`; 0 0 0pp1[nݬS?}p,իW?j^VU[z. @~hX D7¸1A"a]xm,jc X=1n- |zH׈XҢm 0 c$Jl@Z;kg``xݻ7Ab)7vOpIl<uޝb V-:{{o߾XX``p \x0u0 0 00wN1)]m8 ZׯXɓ'X nmls3 dn& oH?RxK;2 0 0P |``?J ޹sKAƄ dh B%Ϟ=Y3\Kf`;Vݮ t2ʍϟ=p,ίs 0 0\Lg`` 6aXiZ7&bu@,`,g)&m=ztch\.۶˶/ڗҀs } įo,.7wǹ`X߀w7 0 0 0@b:·nݺ =}(\O1͛Sn]NeZ68cP+^ʒ9MbLv|6 0 0 lk{:նJk`` xǏ&|<رXٳg7AX:w 80{1`\Vb~i5~ %B1 FeXUW``0`Bnr:N 0 0uݻwoV|𡟺73/@=zt߿zpnau>4Aj`q},9o|2pLV=fwu{so߾53̆``P [haPھ 0 0 g y< O`hkm?~|a{e-26`\}A_``M^^^``(ԀZ5a 0 0= ޹sK:y1?wnݺ ԫ+瀃k6ԑfڏ?@xei6Fڈ```oLԙa``vf SO< DNLx(wqNcXA[Ӿ%.%vL\3Ӝ|OM1f`n |0 0 0P;| j 0 0C p}U̘y73g@LVq~n/cnu_ՕXπskkQ ij]^^{ 0 0PE-G:Ng``dŋ;wܬa?13g b֭[gq=FtXf X=ǜ<ڗ}{x-~XmߵՏ``̀;w 0 0 0@޼ys{MÇX pǏoF-ˌnYtNWO``_z_ꕣWNѧ}`f{ƓpOí 0 0 {/u1w} Ĺ썕=zd> 3RzQO_ވ5Ê99m ˀsUo[4qyyYhsZY- 0 0@yr3 0 00Us˗&h2#=9BM5-nݺm<- Y#'#{ 04Vϝ~i?1>qGxg` |7 0 0 d` ~ɓ'_v(4D%ߺ<0UXmO[ym9H3 ,a\-gU2ߪ+ 0 0p\&3|s}yW^Y5`0+ӧOO~0O1ֻ9'|Jz.[Oeۗ_ 0rKxς˓O]B-``0Ԣx r[f`(/Nwܹ ]\\>~8XU 9(~*5{ŀgϞԙ W)^q c#ڱ4ŸϞ4+͉qc`g`I0C7jŶ| 0 L3͛Sl|uS]~ 1^|:-J[1 0p~@{co߾5ʕ``(ĀBRHx D0mm1 0p$޽;=x>+&^#ǚO1&U˚ kUa؛27wϟ?~Ntyyie^v`` YH! ]@=pp``"vqqJ/_to5-|؍0j,ٳb>F `€s9p.8xf}ٳ"ϊ<{e`("PDԗ} ?~Ep8gn0 0@O>}1_~2~{yTF}ׯ_=og``VhG>c◔"ۢ]lϦ 0 00΀ u!nnn̲:~ 0 k &R#|{֭P["B{ko/f>xy%Q76Kg 0zno&@w_sVٳׯO1nMkw`7`kQ_޾}k|䙭g 0 0;5p;-ukuTG`` O޽;ϯW{"^yK]ԅ D7ΙWWWǏ|YVsejh{`V o1qTZ]+Q``.BB 0 0 0@xqqq(s˗&A}d`hnp?9FpӧOΫ> 0z d! |ztS(0 0 #}E:2 0 0ؓ'OnBc1sK ׯO5+>+V{n5ڶMj_`j깬 uX~۷MK:`Xƀ87 0 0 0pX1*(aOZQ8le/s_F~޽ӻwL2;:2 0@F{1Ac_|1Q]^^/et~SBA 0 ,j V_|M+&1?n"Ekz=/^t[n9o;g;g3 0@k q^'&հf`jÌf.ȱ"]QW^ :1 00mBPtguYp~ݓIhxf(рs.tuuu=.іa6 0 0ifWh/uJn3!G9g7+,ֿ tqq{n>|xzק>f26B߿O>zP>bm`ǀsiGǏCk"1d ``a ˉ)xGiA3?'=37`8g͛7XI߈[r0սE 9I8*P6!ood``k'S\z# 0 01 k X@GTX 0@޽{w&'޽{溿K_b|8V[/vV_Uѝik) 0Kz.WK:g^]]]b+``J0Jr :< ϯvV|wa``/Y{ilO1f@7+տc̸&>czjsm 01 X=uս52 n 0 0k4ڞ 0 0 |鋕cB"cR. y9{)V˭V})V_[ÂmeA=ԃ`z~k߭[ꋉo߾5Ё| 0 0 (d`V{ko`` ?1Q[n>}zzW/5Q݇6ĵ FH׮ uy 0;4`}܃}\^^Wwy`o 7 0 0 0/^|)~@p@Pi;~1 Ço¹qڪ2 0p<V=^s/FbJ1 0 lm uP``3͛ݻwY9j]M/wN޽{7jUX_z%๓/*ZnUè_:Jp``w:o߾5_0``rݴ3 0 0@V"0WM4dC"kz܁ګXU7~Eٳg7n>```{Vݾ5V= 0 09YM + 0 05ÇA/_h8@N?n=5%V;V͍/{Xm99>'`1`mڝǏ۷o>yer 0 0PcW Ԁ`XONO< NM02i@wss)׿Xi=V˭¹qJ9>ǡ/0 0Q X=#گ~Y۷4xn 0 0ɸ8GL:fQ`` ޺u&,St98V>XEٳg7כعsa``?ZWjuuuu ҳ3sr 0 0Pc`:m` 0 0/^|r)~Oڨp9[هN="H2%鳙f`3`ښm:E(7h_>Tmݵ; 0 0@\\``X@ݻwoQ>o5}dCuۛ_;~~-UΣ> 0eznY?g|01.3 0 0&````q& b޽{(9PTuۀ.SsyOnݺYi*Vq: 0 0Ucz=0B9=6d`XK`@iLƟ`7狋qܹsz hp? Ak:u}ɂO𾡭 0@u c ۷oˆ%c7``J3`"D a &aDy|Ỵ㆚cM7& 3& Q8u=NKӯ{5|)eӇ``WF㘽``J4 $Exi5+xvLnH`r6[ӧOsqϙs[cp]p6#S> %[mw 0 0lL8%]^^x 0 0@%â.3@ծeݻwʍ^vW}` x_ ??NyI=cX'V~g EVsjd DXI.E@ct,lip.? 
+Pŵ֭[wQKkس `րsmϷKbryyY~`` (Hf14g`ֆ j?`>>|gܹs3b`]7ew["ʮ> 0 0P7`\r;'4ʍ/ ?L2 0  2 0 0 HzxCϟ{ ?G|/}p-mX1߿s-|_iqtd`zWNc|a˜\``?M!h`` Ov ӧ]9M/^^*q}%F'``x{ϯ$9?΅ 0 0 Ag``N:sMŠ8gh@,5\Aݻ'+T[/}Im```)Vek)[}gvϟ? 6y`8n```W޼yspQ2ÇOl|zV|kek¼b aO=ztk+ 0 0p@V525Xٛӕ`f>iu``d*g=Xt޽StK>fǦOoa Fpy£m.BU[n={f*<{e`z<*xuuu=/jFڈ``` alI`؏Xfe;w\©Z[+|kߨ"|a*`S_nlng>)`eǏϟ?{g_`` A60 0xsG-ZS U>|x/LLe`ط~9x޾}k1yNo 0 0 huc 0 0 DPo絫>}zvjMpiw޵ zVgϞc 0 00ߦGvzuuu=渼48s#wΥ 0 0 7 0 0 /]\\r/ rCO/T~u`_> 0^7>lay? 0 0`1 gL&^s`޼ys{M((g4ԶVՆeST\ |*Þy> 0 \z˔˃o߾5pNq]a`؀l\)+58g`]LÍ6u^{3޽{w޽{jf嵙ny5O4wJn[schm-```V]K|uuu=Ƹ40/ 0 0ظ %R5iZ˗/=p.u=e4+]\\|sٮ[J_;6\{F2A[n]_#+cKYS/l9`_z~1_|zlqmWW0 0 ll@6.Ёt3 m Ʒg0ɓ'7׭xsj' h=zts͎} 0 0{v]R۷o)< 0 0;2Çyw;<O3?K-ϟ??ݺugh_xarݵ; 0 0kc۷ob<1gcZRwug`i 0 0\be'O܄qzt<\gl.cK}.Kl@ԿsS6p< 0 00΀sǵ۞Udzj 0MZ``26'bOkV`qqzX2ƍpp1~޽/<~={ 0 0`V~qR(aa1m 0{2&̍8 0 0sM&VȋӀþ;@-2X%?&cgi-ﺩ0 0@N[x~XVٵb_Xc`~1 iR'*``޼ys{MÇ@2q\[W^]\bL4ϩ*,(n,ONRnu.t{/ݽc` X=w6i&~m#e:SWue`  2 0 0o rK⍁nq :V֏6o``R X=,/k1F/O8ڲz2 0$lvg` |*LsΝ˗/=0wxM90s}y Ĥ۷'nݺuS-ڑ`f7AFh2 0]|N 0 0Wݓ'On¸ybN{>.;O;` gqo+aU_t\-9o|2 0]S"לc~/b}Hԏ`H7 k-` CȊUO 6f 0Ј{~>ݻw]vҷ}\>#hD_3~dΟe9``dqWH``  0Im``~y=Н}FFpˮ 0LIbÇI2{& s.b`9 X=7_GWWWס{ n=S 0 0-4 df b4WʍUx03}06j@y"rqqqsNU'պZQS\.p38b!^|yw@={SC5<2 04`ܼ{۷oO <̫ι 0 c@/0ΑOP @< o,ho[ 0@O>}q>)V іimYj{ rPm61%/FHJV 0 0ViǏ?5Ioz)k o 0 0`|?{,G^``b!·՗*!vt!]8Kk+־4ˎe4שj>]>|xPw_`-G`H3`ܴW"?WИm{^sƱv 0 0G 0 0poڪ 챽s{]Jruuuz@sp6\ ~{XA+U#\^q-cyX3 0\*;犸]޹\]hcm 0 k ```, ĊoO'<߽{fXi.B5jhok+4VzԵj? Υh>q 0@PGkqB||y|֖ڒ`؇,: 0 ,i ~.9޿?ss#dvmϽg7!,k?~=E`b6ߋUkvֵc` V=_Pn|Z 0 0@B=ԃ`6Ajgp7 y}ާONO< ><`J.muۣ[- i Ǟ={vsh^-~۷Oqfؖ 0 ,g깧◺W"r -Tm 0 ߀P``63߿nLDpBj c D?O[M?xYo{n}cǏWݟ5ݶ j 0 oƽ秿9_u=poo߾um^ay\3 0fe`` k]c%-ï dsTrR 8[n5…mr L7+փem_ /1ݒ6Ԇ 0 0j(c?Sk= 0 0Q l8J;N'``@rӧ?mMb%8u̳/GP?/V5w zWWWG4Ϛ0ykhn߾}zV }k0y;Ua`l/ſ}n\uɿ{s}> 0 L3 ke<`` p8Zn5#[m;Vڴ܃5ܼ1w}}22+c=z)"D b%g˧z 00P_z1:3 0 00 W(``nrf8{}~@9.//+bu%kAz#zqm~mđ6b۷oO1)_>o/U ʁC߰2 0RVύER~} 0k)1x=}Խ$ 0 0g h8zF3iGm";׺V_x))|oH8n)s ͹j !ZA߱gϞ%#f`J6s~zg%ql΋ 0 0S 07;0>l}GR\0G~+VנAb[=PJ[1 d۷_p/@E %ԐAw_[(Ny`b`/B[>bq:O2 0 1hby 0⧅j"o{mlիW*Jk~y,¹}޻w޼Vm^3 0y۷Cl{:`8W c[r-r✨ 0 0xBVch ^ B{/`?$ G4Yt0 0@.Z=7inR/ 0 0#=f`X@e#l߯÷mE8J1\\M`|qvyyysm}m{W^ ,6q*j 箱\g>}}j``H'a`X@p,z{s߾NS%L ޽{V>ބ~ 0kÃn?;޹s{gl9` znܫ um|sW``rks``U0ǃޡp> @Lw|5S3`1o޼Y>x!_`2j 0 0@%``f7!qz__beޣ :pd͡``Uxë`csժ9?~<3X]՟`=1f'^``Pz\{}p#wj 0^xZݻ2s; 0 02ӧ޺u2}DjW`8\+1 0 j ~x?{\ i) 0}"pU>}Z}c_{}`Xs?}pݻ>\¬s O뜟vf``}n83 0 fכ=H~. ?2 0 crXь|Z0 0@IůM ^\\60\BI}ޱ1 0@Ӏ&7 0 0bCgPX׀̠`~1Mq? ```έfc~qbGԎ`X, E[h[{3 0@>|[ne͛7he{)V׽{s}ra` D0vl ```6mqOg|>K``Hg iWWWxP#d6 n{1ŋMXmm6Wpg1 @ m+dv 0 0 \o {g``4ҀF2`Vԋ< ÙJZS<02sgp9`>}tsNAz>9 0 0gϞeq/Lǯ{`c`Ƙ`sN7{f ?;\x]7 0xIV!5e.H``` ]'ٵ_2>2 0 `@0`L (+7!B>uǽGa?U_C! 
0x}V1v})V'?jF 0 0wo޼~xH޽{ {d]`` g1n`f7MǏ{hr*}߾, {5H+:>`r5ӯ,``xT/"y/ǘZ1 0bX۷oOsF$ݻwOsj0Gگs.s1 00WW_~m"H??6d(@ׯ 4}{e``Pc6c~5}GA.//TwwzO=Ԏ@qw9Gdskb``޹s'{?ca^;1 0"غEkKs`4]ws-~_l7`(I ``z ăXaΠu뻄c` Gl܏q76%K2 0 _]/Wx>3 0 j` "_: 0 00p/<46yb 0xnm+706Ӗ 0 0pT{__OدHG}`kc}:: 0 lajaXӧO{}```#?ݻw7M O/^/͟VuGkC@{^}O}'ׯo./_ 0w򌸬g꩞ 0PܔԱ8@"Sm 0p$Ϟ=l|IUُk 0Se=; 02_?B1uﯞ1 4ӪsyuwnÛ ڂ ݿw'bU?'tf(Vr`h;C^/a޽{K[ JCQ۷o;o隻Dڕq/}1c87==qAį^k,=ݴ[n?gYjK?YoO$?_?"mnkym eq~'TM`B=)cpb=*{cw<8@㧟~:yɓ~s<ŌkLmfjZ(,?U(7&SƸlNX<7)Ȁ2kBeu=\sh_x iX̺:w=JzNZƗm_=a0ـP.9o|2@0A]NS3OBnH 6￿~h?]߿ukSe^rˬkJnrQgYf@(L:euo;MP|&R= # 0MP9!2n@(W7Pnx~STۓ'ON[=+ؾIg`-woBe5ww!r9k8YۀPnuʬhnBLm15\sXb  2u_"sٚb`Pnsb>~Tmcrό0v^ Ϋs2:ǖrҟme@(L:eչ\]n@(-2@)6q8)20ހPN1B͚}ӃYEBwy 2`zT2뺥)fj.B,e42=Y9bd Bm6b@(!NҀ &dNd 7 ;v ,g`Pn?>y޻wo[N,gEj5 .ӛPnu]`[ -e@(l\-ӏNu׽ rCa\.`  2u͂;-BMk?~<|S?93߿Z=ݻwt.\=0QykPnuҔm35\sXӀPnuʬ}g1\pg 1 '^  lit!["m'1ʝ~i?1{(Y?bǧj߫rbD2BeukWr9ÑhnBe2)sr[ۜ17ԀP.+Cx+ 0ӅLNovn@(wzr BM߿?]]]̭[N.';Ѧtm&4'[f]>?GKj W>r4dSf]wbkB nm2 ˆ 0\\``cBnrY4Pnmrr.Orˬs`@(; W:e9X]n@(-2@)6q8)20ހPN1B,Dj۵ .ӜPnu]`{-a@(%\L-ӐNu߽ 2AgˀP. 0@rr` YfӨP1뮿{LTiT(̺~> 1 '^r4gSf]>?GsejnS> 4% ,e@o0RN ǀP~j_Ց ~$u?MTV)J(̺Zr5 f6ױm Yc2|{7 r [[Vm`BB 0rTrS8ʲ,[V=O,ŀ2- YR;>r]_s5 [Mc2y~6ԀP.+Cx+kenms 8Z0(πPny5OմB1Duy5~*[f]յB1X(F?5)wB ݰ/װPnoՖJ1 + ll@(ME)7,BeSTR .ӲPnu-8S(?WBe4)GoC 2Ԋױ\6g{1@axz WSTMK0 q CyMTWBe9X]K0 q Cyr˫iSc2{7 r [[Vm`BB 0rTrS8ʲ,[V=O,ŀ2- YR;>r]_s5 [Mc2y~6ԀP.+Cx+kenms 8Z0(πPny5OմB1Duy5~*[f]յB1X(F?5)wB ݰ/װPnoՖJ1 + ll@(ME)7,BeSTR .ӲPnu-8S(?WBe4)GoC 2Ԋױ\6g{1@axz WSTMK0 q CyMTWBe9X]K0 q Cyr˫iSc2{7 r [[Vm`BB 0rTrS8ʲ,[V=O,ŀ2- YR;>r]_s5 [Mc2y~6ԀP.+Cx+kenms 8Z0(πPny5OմB1Duy5~*[f]յB1X(F?5)wB ݰ/װPnoՖJ1 + ll@(ME)7,BeSTR .ӲPnu-8S(?WBe4)GoC 2Ԋױ\6g{1@axz WSTMK0 q CyMTWBe9X]K0 q Cyr˫iSc2{7 r [[Vm`BB 0rTrS8ʲ,[V=O,ŀ2- YR;>r]_s5 [Mc2y~6ԀP.+Cx+kenms 8Z0(πPny5OմB1Duy5~*[f]յB1X(F?5)wB ݰ/װPnoՖJ1 + ll@(ME)7,BeSTR .ӲPnu-8S(?WBe4)GoC 2Ԋױ\6g{1@axz WSTMK0 Ҏ???ß6͵AmGGBe5'q_wr7ܼSBqOBe1)cRqO[B܌qz{Fr[{^``/rM3uӰy,BǪ7ͩU$k;=erˬ>{_]s~ߺ z"yL|%[c2:>|5%X;C_hǿ r7ďP.'Cx ' 0AAA`tBc0nD C}^C_{.>sRwmz5lBeus1B\__I%ggY(.O-ӛNu]ל{bZegjۡ9^so]-õPnuՑJ6 ke``cB_h|7_|36&㿕|1vln6s4 9U ?S~Ps9{{)t(*\i ٨D}5N>dr9;\aL Kڐ"K:}d;=0Z[?nΘ,"ʅ9vtڣ:ګtra+S ʅ `f1^e14"0Ei.M-Jp$(͗D^Nr]M|KQn}փ$Ts]뭾P{|$o@_n}(E:1חX~JLtK,A?K:WsÇ޷,>cӎ_5vus=ucuԦ1&nhvة1_yn,Dm9_;\߄; r @eVlk@Wc8.Qrn᱅Ϝv3szffvڊQ';r%MJ{*Z9v |Qn^Hmؖ1q|lfl-D}\(1S:},ENØr3ra|]Bcc[זNO.+<+0@WYW|F0(؉Z\*=?DqrKuDm>Dێ_E\ڬ[:G@_5DmZ6X;OrdN;~]pӧֿoR{ĘCG;5֦Î]<\"Ec E 0RI&|:cbx.:tlzM;!_ֶ/6VTMcQar"lTvVTSuK/Qn1'eLk5ޏ(Oc} vџ|Qn; ՘cmbr+D5s|` ƫ,Ƴ:0DGwZ;ZM(3' h//hB+~S?-mNu|]SBv{d^+:Br+Ǘ[?Fu mǯ6<}MJaQn;|mݧ|Qn*-irnbL9ugsvzhMCߒ~gnfMmcv'!mZu/^nsWIA ?$Op 00eQ.\@Q.Ieu;m3Y`(w}GQn~?p9'-\j-( JqR=lTvzh\ki[# @hQnZǨ^׿!9:КFmisRFJG;5KSn(J-%އ(~Jp;``MU\Mm0К(wʕN]׵b$:u_Dm(/6wtlTvZs۶ @h/L( ~jr 2&n/L(Ovך\o˜>^+viq(jcQ.,ᇴ0r @eZ媣ҩclsWome*D*r;6j;1lTDC?rS:jd4_O~wN߭-}NzKAyZ\}'Kry5c4rӡf۱ElT6YyDێ_[B{h*YǪrcXX{czyHԥڦxwtrrh2iǯr_Uuqk:(7Nxjq(z; [ѭ[~ŷ`ua`2DMIG?}OF9i?񏿖UCʯw|+=O|z #H+ʟ3Zhwe[+oC1{^qind [:|z86& DsY!-Lm۷ٮQQn;~ɷd(W=R_87fX}xuȷ[>FkqR{96`rی(Muk-QnjӺƢGXW}yB܆[qXk~hBsr%e9{Q~^ia`6*ji`>rEbaoKZ\`9uj\sgxt.[hQ ;ǞӢ10آƝ;IlatC1w=Ks֋jr9mD-QӒ\mIi;T"zlT寜vL mǯ[GCsC kN3+w[x|]B[ަkpZKiL|8Y hTvN;~]ko%k)El'Dl6Kڗ6e%ܱ]30[3(Q. 
Tf7Q:c9§I#h8s{y:$@ 3WE/["D$۟z9&΍\q 1w7k ;11]~DFsoNu6&+LjrkMs({|ɸZx~YB~[7(W~sCKG6&+uȢֺeےZ(\Z^ =0*r4ߡQ (ʕPІPA5)W`ޑ~:c߷\qPt)?ız@A]*O9ߞz96Y-rS_,ʝk ǧm0Fu~9rkMs(8rg uu Qv[r7(`ztԘ80[ 6&+u먢z6܊Z\pZ> %]006r @ez ^pաcb&%;q++,BS*Mt 4gXJ(|YX|(S\C6,<_ʓ)QRn1-o"mc"2(T㘘@N !瞍>;rkM%c%c}|ƽ?xyi/DrRKki]Ҽ8Wi94{D}iל5Vsi}7g V-1&.>gNB]|}Irlm;Fّz;Q/%@WYcy/ @\%~yb!.<'M8V:Yv.ZyeqpxuONEʒ+;<72E%[-nkrGFk?\W r5wlTۨEZ|KƣK6Uݞ< ז[-^(iDQy掹"c5^y@FdeN;~Qa]ZxLB[Ǝ[XkdνT|>רrLN(\D00PDKo_V .w6UDŽ9)%"Sd]K|rl"A顨<Aܱw劝mNgF6>T?_١mQnEBk==vX߳^ب۬Z|KƣK6U|{ZJ:S>(=$rS==C=ʂ(sv:(7º>|(-|}ɜ{(<{1o}DN ``TU х2Ы(׻d;'\":QaPd-νMeIhs6:fۥ[dD=$J0(~іoŒSck~iAmp>m٨ng9cD&lΘkxԻAx~,~̲(woO35&>4k[~ mm\z&砲w-E,zkKaW<^!]ní85_/w<|.Q./ E 0@(wƹ:56 HO5k]=/Au۱ȴsm.Fe?&@轹srۘl&--@8&k7,ug%63 rkM%Q(i$Qc7pm>_DmMVבDŭu%De&9Q.v\yDp&_`J0y Hܜh56С<-BZ8%b%\O}O.f_N42{uɢ\-K{<%sFN(wn 獊r(:q+&3566k3DuM1wwzܡ3'g <"m}Ii4ȡqʅ(sv(7pw-ZtNa]Zxl]B[֞kYks5fp&r`Mx7| `Q.\hܹΫƆ\EfԂg]4X:W/w@TX*;%O,Y4Tb$g<'O9)ƱK"=C%cߔ+6XOQݖr=(7)A1d<>gh>| Qn[vLm9HYcL|M[6)-6XOu Qn[mCmuCjs{sϭSc5x5_/KDr ;|n(bbN406r-NZc}.OkD?FI}Lpyl/wu|s_>YZ*!_-'jE9;6ZC 1Qt7aT==zFf-p(nqhhxԻAFcs]GB]1rh֞gcƘͥccs# ʌ( svYp%Ac%;-D%(w1q>Ζ>[ﭏri`_ \D00PDsHu56O ֺ(TtٹArs"?sB%F9yRԵtҔSN1AIƵk埍긾[=([2 ʍ[ʹOIՁQn\,3iǯr#/S-S喱<֚{KDrM|n(bbN406=r-\Uc=(}FYF,I(wN4kɿgiɢQnެSNѷ }}idS36׳mMnWX[Kƣޱޖ|%l;֩Grױki^3&Phi&z6&+uu±緎(7gߠy!ʵ٫&_9w+ܚ>ۧv'XW02(Q. TfGQ 1sn w(WkߩDyYh4s:ql5fIDATs"{nC QԞK?W?{Fu=ۯ=vZk3;59th:utQn11#ʍiSkdŻ|.㢜tr  ƫ,ƫ|M1Q{:Ŝ QnT;0&\ڛ.Y4DLJ$ԽSiK s(7F1/Do fl>Fum\urkM>o Qa῵WD9Y-UG\.r]T=(o5N\Oj)#R. +|0pDmb@<zD56glݥ<.֢\b̒E#;$Խ\ x!j%ߴW?q} mǯ6Gc%\RH> ʍcc$QMmi\?!˟ug1~znXe>ȭE2_o\~=)<ܐn`dQ.\@oCB#oɿusk smcS,&e>,>N7*r&g-voyDҺU"=7v%ڏ6<#mǯ6GcZeݺQn\+u ^K Qnfӎ_Gn.^eےZsX[)) OֺQ_ 0@!aClI{Li ><ͽ;{F!aj 1ee/Y4_ze?TZIrGC: b<߇Fu;UZ|KƣޱޡÆe[zp?(;ؓgs(7WMϥ-C'e-m_C?&޶pr%FVE5:TvQn[}hcA'xY'][̳QݖrvZkox;;&|+qCnuQn}ԃDǡyc9uQn ?uw r)35"FX_j7/[roKj׽sn+ekDp&_`J0(Q. TfQ)A5ZK bk'>QD?os-.{L#^h4g\!n͍U"1M{L؎@ ^=m4lTkD0]5G8V^l.cǞCۆG..Q'C\Z'!=vdɚt֭;r׵oI~kחxEv,]ra`-x/l b1^e1^)GhEP2yjmաccx;~1Z*O-,#ZD7F'MZhd 'O m_M{lUu?A{6~MZ|3׬7{z(w;/y4Q`g//鷫/r\3iǯr#oY{"vڦZ%+KDasvQ. -R E 0hQ!T >ױ I-sd,y,Ή}#M>i{s7'Jn Q).\[~QnEB9m鱺G}BQ?޾Qn;~Wsp`ܧ?iXMKܰ?o=6ڧDRjtӎjvi-6^mguh-^(|(Wuz;|F"k0;a `4*J;H@{ DjES?Mn;&Ի Ts}*߹J='Om!*26ΎA!!u(W<6 -ޛ e0dh>'jWr|~N:Dm'#r:8-ϩG;Sߤq,D܍>>R?6(WMrt8-\4$z6Y9bӎ_Gk5Fj]ZxLB[Ǝ[XkdeIܴOtlz;z}et0Dra`2D&"<pĔ JwJԜ!9atC:MS6%G[%F9zTTr<̥CƄeTQ)BCm|dm\|^6!vZktJy\h"|>>U;'BMZMk|>xbKΩ#{R|rhfӎ_Gs=e˺^z(w |(We%1^!C0k3(Q. Tf`KQ1g,mH{ekNa='Zh(2[Khfea%Wئ/:9s|w%F3@QnE9ufY}I@ڼ:FuZ Qn;~ePp\KhEF4(WuRsRՖ̭GkjQnm:udQ.n[d-gu%$m~}Co\՚/i_ٜz;.3rRVH+0@(`*3(W&Y;Am$i⬟USZg+U`0{|)Ok3}w͵P9Y=ߙ~7J;DEmʴ̍Re#wS{:kiĸҤ%lq-V;ڐumFKFu;mMDmUmvv9B O[uC}^ɾRxvYB~[:Ɨֱ|Fڟccb[jia|)660iǯjgk9}ZZ럵Ž$]^Cg(<՚/i_Oٝz[z}4p0xx` /4N0P\|]_fP-"k䰈(^s88ٚ6dQn~ݺ}{prj x'\-eQn 1ӯK;6ra6|1(6h`` :r @e2X>X 0zUl(wG-y-3Fu"ӯ-5&ra"2(O.׈my5 r ˖ ʅ-y[0a1^e1i@_ ˟Oˀ"Uk?(fØ6Qn~qB4d4&LD}r\On@ í3LeQn[^@(`*3(AE/ ʑ'?ɝEGVn8clj0Fu!ӯ5 K@ S}0UD}r\O6j2(jǷr6`3 TfQ.Wя~ rHD-(%eب_D}嶆dbQ.,@D%s> !OfaQ.XxYxْD%o| `< ƫ,84TvD}?s8%W_NڭA [w`}(OZ6@TfT6l"u+5~mD0:_[-Dra`2rT2y,(Ɇrg}r(O&QEۿE-s>bCaq\xM ʅͨl/؄H ƫ,ƣ2RaX |>('>(qcsبDۧ_@ =pLQn>U=eӧ_ik ʅ&2(_Ro- @/ ? TfQ.^cO~򓻯ʯ|M%r gmr YD06c< :}SI @ x$/8eQ.<& @tUEш ]p0>%c\da2`>Qn~2"DZ -:}uvV ʅX;feQ.Xyؚ6~36~v ߣ!@;c{"*j?p\qJܱYQ`O6ר7 r V 5:}uvZ ʅ⽰D0! 
E 0@Kgvg1_})M ʍ u 6r+m~Dpǔ?SS:}6\na/Èr- bb^@40gQvp``=g[Ŷ0gj"s(OFf\.ra%-@'oum߂5@ Wkp;rG`dQ.\\:5;z _^ŽΚ Q'_rm@ k[w`>yaӧ_-uga "r2" .\8- ƫ,Ƌ@ÀDv6`z}Qn~߄ ʅL>*>9bӧ_Ky5@ 5wDA0@(`*3(BQDcߣ3Fu"ӯw9 ʅNxNfQn1ӯ[|J3(J3`raK`b1^e1Z40r _\xwlT+KBۧ_- , DeQ.lFe|&>\O^@ í3LeQn[^@(`*3(AE/ ˈr'}(OPD;?*rdN~ڎ/xeQ.s5rank X@WYguSa?S)>Dpǔ?٨ϧr+m0~Dpǔ?SS:}6\na/Èr- \D00PD *zTPXFۗ?بeD}vr'ܱOQ@'ukv|[.ra%@ s[3``bbU=Eۧ_ik ʅ8 q(?2ӯup ~Fۯo`"ʅ ePˠr2|N"ٳg?X `iy>}2̒/بA{گ/_|ՖJѣyn^\\d{S=M}l#;_CD};KǸzeh<>asگ=yWc}և%mdNZÚWMW+'h =]}}{^Mh26}!>\'yODQ}.&ߗ~~vč!-&;曻c6?wiz#lT/!}ݯJ޻woVj#^s~pvBV;97NzNo5ftڵ=wl N"Y>%թSm.MCk#mb_5vXwn1ݻwz_5&VyɾYQ?@:w'vYQ_=R6D) /< /05@(`*3(@߄9~L?tl] C#[$2բ$vaC^g/Qg(~ƍvBS]˜X{t͍Z$іw #{QnZ[uۆrD''u>W4̝x+v{׫3QgۦcFlYuu xuvt{"sL<:הa/UQPz@ 7nH7k3(39BG.agrm(zlb2-k19+}rwopCNvDD:0=&F)VBOx,'.GڈrwJϥ4W4g.zcҾ}rlXOIΕ(֫J^cgD XϾ7~(.Q.t}00PDra`2rKwJ00(rᮓ= ]J@66z0\hI"7ب],[bLLm!jWD>t0p9rwJԵ hGMBg4\GqKa4#jnTu=oS J0Ӻ^n?9(w8]i]X4r_(w>׶r2 bb<*W \J.+<+[20(Wk,ܧ^v!RDr6ITzuj?ECڻ7Ȣ\1f;!Qז\[I|ӇxA1?'M^2;Z*RX bQn\<׹wjcVÌZ,|,egkku:R2- Q.uN# E 0@;vG@ Ge`DQ6#LYlrd78m%m9b|FԴݼys(QUZOޱ 7 sii$'X]D׎q᫩0|36rc[Fl5ֻj tkU;(^}m6nG;(zZN]؃e1^e1^xJ 2(_Rom (B̒|]yj+@s;%ZH]3e*BvyԍjZJ3(WUVdV[mNⶇhm$\tXzyJ.<7rsGmENY-^:E#r`u=zKj\Cwăaz`Q.\\= ((C&MBwxN疯spnY:CffkvwčjZK3(ƍژyYS6B]?y6^[;4#攭3ⷵvƛ_Ds,Li_N !,ND'ut3ΖjlXD5;x~{#rz-zM{iXzJ/6ۦQ::HZmxFiӖ3ºnXRiG2oUCu5Q.v)C!@!BX c3(wlSTFF -* ʗgk4OG쨺yezZK?(74+•߿= @6ǒ_D/V00>:\'""QkPwFecx`Qn.icmQ5^+/## @ E 0H;0Qx߿?ͭ&QF>>>/ĩvp=v+58(_yȻD"C"9Q=OG+[SDJ黾B_5_ U;#k)߯~!x?燶RCv<YZFADvg;%Q#¾6/(7diocS*i~}_<[uիݖQZu=z}~!iHuxT<OxxT~+ŷ Ļ1``2(ӧO&'60(1q[, ,m @m5&W~hDra`2Z@ @$twMuT{(}_F4/7o #Q_aF%bk .Zd$⻾˿{뭷BzĬWۨGGue99]۹ߣ}s:}]"V:)>xtP00002 d/^a#Iy:q#$VT/^Fg ڼ "w m'1C)2䭭~!/; {8;; ў޻wP"zAyУGBC,gڧ}Hگ D``XF@(Ŧ/ٳg![Mvn1...BBDT7Dؼ:Z$Tl'F\(t)y,b!zsֲ-WX+gѹTPz}0x1zC? bQn'ATp3zt҂p#\]lG{o]"ޏ.@ӧQ\~Da}hWml1xa"dSgumyȦ򤹌nZڏQ?:Kb(4mâkp<#i= `Q.\akO"oAa3kcLiWm4882ԧH>Rv Vd6"oں^m֭/ڗrQga` "t8:XWyT{Lݚ,ۈ(deu~|i={\" @-EgqFMVTADgabO|jrh*W=v${m\owQ9r`1-e >XW.Wu=:N"\1qʕ1/p/(Alu=zqX*NվOO%;Oؖ-l{ Ddϭ䷚)EΡ<H=y;OIU#NKѴ}(v~z3miM ƴk-Ν;Up`#H00@ͱ叿pS`s"\-Eͩیڪ9>L;|w00[1(da+'8@ F]IV#ZnkfRhiaaZQe...XqDo?z"}h^Y?R/' g@cC'Jn}߯QjDպkwӧ;? 
7p5|7l#j)J_S㻰0@(`````xP溢46!lkܴޚ5_`;ڜl |5i,nwԚ_oguD?y3pM0nb ^BXC-!=z6S߳||C p 0  0zq$Qe lyYAZ j!nN[ $/m̀1JC'F52JQem(߭ ߣ4Lb@BJMkmt.M [Nr8HK3W//^uAnX=rD-Jպpt=} ߸qc6\GD0F;0Z4C4V]ZSA"?}ZD좱ƺmj1AX,l6 p``````€~W|޽{GȝNЂ;݉~Z%ɯm򋽰W\+-Q|Y[Jz_oB56g/䣟>IJR^V{?K|&5״ίkuf5!h]On߆e+?>Z>01 Bk?TmkQ=FN =Gѕ|%6MDM\)/50pY;-% ÞpVkvF:~hyٱE|2kgm4Ӛ3:;J}Kibouwu4J]02r 2EU-kP$R $bĴyH=G|Me׿&BZ dCS-4j3D9.x:A;;ZXw r7 ; U׻t(bTʭkC^hd'Ea(=O?76i,lq^Tt@cmxjT'5;-M_|/MкBZ}ćZ_Hcz#Z[A7:@5,ZJ09ڽ1;lޛoӱeֈSۤި%ujϡZ['Ƅ 0-3 h=-xiyI 4ztHw*ͳGml;위f?$Bv6a/&{$A/b X[5 _Hk:-ogwh9"\sP!#Xw:x[޾  0000-2lMWwyAH /6Ŧ[2 w;RH[r[Qy%)l:0ܞ' 3"sPu}[G+QD;s6*@ $B<8m@Kd<HHmdDu"_EaҶ-/׵5žk20pPt}RD5Nްgߚb@UG% @u]+c$պh9A.0J=9~>ub0.4t00002ӵO191 -ůAnF"YIs~~°^iu6m66КMk>5ֆFSH+[) "&'O)W+'_m@@R<Xm`5"+`J7 _Ete *Ֆk%/N޳ضF[ZU}:$)0φ @iX X R;wm#]ڿn!ZD:|#Ҷh2tpVu3 @ BMjjoR`````<:s-d/ɼJ(ٞڽyxC[jwj>?ZmIL!vn[0r4m߀-PRG.[ѢΉpu.u)oJs`%?``r9m 00000D͛I8NkMuUZu}?Lџ|M}PO>b7/_ܥCD˂-pj,n+,ߙF˭q;GW=݀Zu6E?*6+``.30 @ k$g5]7H~XǼNggglT8ҝ[)JϱcahFz=zD}]08?ܯh\v>%{Ν;{1^:X.emy T9@ 00`aɺv}Hw000&r9E 00000l\Ox$}-Anj 7޽ǴQE=Z bdK,#b-H;| a2 xKKY2iܬ[T9*eJeu eѣGAۮ~Zo- @lPsC>r3v-a7o\٬%|;ܸMp...6K}ǏB['m3z G)S\Ɖ[?gl˿`$ܩWOy0)7hKhՐn7``g@U)jÇY:2u~A},AEQ4|7c< ȍ$$E̱O){X3dݸ!tB~=oCc\†0E~:VqL¤f~pmG]ttEmڞy%','98 0000`e@ U (2lW5&|>DIƵ}t56ڮĆpK>}P"`oKyHQ5͉p5[[;#,Gs2"˳Ofǻ `2guĞ0000;wJ^ hDjkY>R­b2_ mp hqPDZY^#F6o9jQk5.]8g@Nsv;H000-r90000IȨEϟHuom$lIX0vv:~An̶2I5eŔHsnx~L'o޼I=f 2o͢sC$am: PݻgYF(y>@ (hښ waw?}?€l:mn7-KDD!׿?ZvIhrof*Ƚq˗à%E"kzdS"+9܊Y?";a 7=z8;8iJp/1E%3mad[|,hߗG|00Ѝhǁ_kC (Ai4Z)+"ץ$2'Za|_7#m~&vvv9"D27ʀGi\x[@|kW hq 4b"jKr%b.=}>(e#Eɕ/v6/;grcS_.=Is]Oӧ^\{ 05ύ7[ 9"|ME{.h)Xóa`=`Y{3|000PĦ<=yՆy׼KHQ /}i_> r-ŋ o(T:@*_pC!m ش& XE=i^HOpu^r[vy-Os8|_t0YY``3('*D M) fN.3L,޽9d~ro/W myS2Wvn1乿:O4ES=G,Tή0-a0:1pاz+պԨ1,cx،&Ix6l lǀu \9Ir S43E{|/~g O>|ܹCV:컂}ilZ+s+kUƣ3)$i|xƍW3i,~ZY{"Zn~@j?Nh)C;01(```` 4?)H]Ky{2|~ ~| {SCҦwpUw?u){ZXǧNm{i_)G|p IῖH}n"ׯ#uy[Tgk;u_竴GB:`c `>g000%HB\FLIF$sޕh3'R | H{} ?fS$W6{;bP:`׾G000Dp6ORE yd#mhUM^E"!FWh|!Cprْ6[tĹ؃:00@(```` ,́;wt[nE X ?ItK|LF:}Yy#BzN0"g5@މ, h|hjO܇ ;< /s ĝj_sJ"\6?ݶg{g[ l;")X~Lh6Ѕˀ6 ҵDh; r%$ºώ-'qEZwkyMgj-v˗/w޽{{ BP5m(%|6'•0n9{Q7R\sx?J2p}ypU+O00PUXX@ڸf6> Pdq'IKQT8#29ODNY)bm%o_tk ;u_d@Ԏ06#U\EeqIsEv|Sç5H'j|o= 1J򜆝 @ ڼfp\R.OWr~@$^iaf_#A|e) ҶPq|߿;6V_xAߨ?ڞ^H43-^EUۛ}ҿ:$7ߞQRh2*ck X c 3~0޴%zPTNrXG!|A Egj}dc BƑ0Lu_:ƟMGj?wWpsF[4~LwR lE}-3p0`!4 hW Iڄ/')""j(b.67e{E?P=!x\"ȥFf%q =2~@>O>  Gt)z4V-w\ߏ&RܫQۖ-Ğ00%.(YExɫM 7>|݇ب=8]NTHxXމ : ^S}Dy -&RD?mSbCRSn7!¥>hZG~1Cgp7|00@Kd<j Hؑ l-ՋblŨ ȅ~R6L$Ǐ郘;T;E(wNiw#``` &_```b@w(R.Fi}T=%Vi!mW[ԫ*-7vohggg؜}'ʀp%!a67 a'pAt ,hϞ=_t|#ҏ g?u @ `````sRO-鿙<>yFmLf7 !.߃ǚ $AπEMk)l˂x|[lw6>@C\'00e -E6D7n)R" :nG=9֛}BZa@Z3k%000 0F30Q2OWFg(\ٳg0b  \re?m36(@v^cha````\ظ}.47ef( ۉJ=ژ$*4bݻ'=ׅhe{ ؞\nos8P$(|E1BWa//QեthMIEyAl?Q \N[ WO8Cq&Ayh#m r!l>\[j /../7Ǧ>Hr3g蟁]@ֿI֭ZcR[  IcO"\m"Z.*jT> 00b`u @lhKH[ndQ7۫ϟ?sGמi!ȅ:"QEb]r}s c-%mߊ[ŮyX nLS'he st[`})# # XÇ_ 8$(\^&!"Jk9)DD-#,Ƴ!x>\O]o k]6k-EgLG]5cO>}uu=Qu _`@ekLvʕsD:Ь9ē'OXCan.hyXwv҆Նo=4t00001 iqH2j)]f~ 73rai7*}{vn߾ o6HKǁ sHs[-y7ҿpj6$)86(;H cݿGI+sO$TES%O?5uFE 0iLȶsvJŃv "E˥/c1n)l-䗺0000F0e - Y#b$FS\_ОXFQD) 7ў r>|8-^{ @K<|o_u|ݱ{dtCpV#Jޖ; OB"NEܗ ۗ?smRyg1(x0000EK f$ m?7$p?ggg,~m_ri&@E_9L;mخg4T;9=QGe ]1dkYOW-}okRCD RWK $q iؖޯi|C:(•(_ny!.a#A;)u?ŀs @0pH05 a``` -)2l־xmꤗ]խ}_ A"f)V<5h|| u=̀ƫ)Z&Q`52-ϹHIĄ5Ϯ ;b p 8@Nh+``1(Q. @=z(PSt?E%EQKm;­DrisG1lZ> 0'jn_sEܧk~}>4LcI!v}-t j G8" Gi0```` H`6j$<6׏'O鿱mlJ NK?z\IhMׯ3$.YxW4S.ETLS-20'Fo8"hԳ֘%3>\7k3?G00! Vbsl00u~Gbhy9cp^*b"a+z@ Mס#ֻꠑy_=b{lIPC0c՜W5T\=lvUlfeh0cec&?9 fQ.\E (2āڴaSrIQTN[E kLFU/Eɕ/5w ?κؿ$/_ܥݣ`aCI6Gz}\-|(z!e׷oߦ.S1p3㳑-j=ٛ ΰ 0ihҴ#y? 
@ tut "ZnQ7ݻl2 9GK۷Fۧ &;tܦ튠톭ը Hk>zvkkOs*ոi*p/$ʡӟT_%E˕Խ輒: c00`0Ƥ+Dk8<@DlɸYE׵{ThZ/36%|(R!@)/hYpCy*ޝ;wv?Ua }ϖl2Q'`k~~F```0_0000.t]{n޼b-Ǚ8M"!c(Sg*F~Z?~́ Ɓf _LQw: :u(T[J/Qրݻ4xF}-'xIf߿OMaF````Q("ۊɟFiA~pWnrNkĘD!ئ.opu}K |r' >myM"\V߫:4![s]ov$=-3pƍ}'Z.1y_Gۇ3K]a23 m-0x}bZSSV5H2l|Y߉ xO+_bq;C1Sw5`> w5ks"ׯtU"ឲ97%vhZ;lε$-0K4E&l`` &QLa``` $ɓ'"s<3oWlvHK'd~'D_E?...'qpM_p /ⷵvEM"\5J>_"#O)0@\Xuic]{6u{a/5`EslP2Dex}ͱ5a5'rz"E"N9{)z:mߧ3>J*#GSdyORGa>D˭>~3" .aFeQnMQ4000ڄՉDF9/mk&=?;Cک;rUo*\thIq|[<;ǴW\{MWi\w)J]ٲ67s߶- mzd  yČSY?x7/ @-ha```N2+ PR-~1'D1_raw$ڪ:I|> xx뭷mpht=p/)V;00@K텍 H>ݻ 00# )#h#џ(Ƈ֭[/}cu(\{[<*xaiO PWia` /_]vmߞ(wK3#•\c{QZ"">t}˙Jr%Ş a <}=k&sEia``ra```2 _k)Me&v>EO<ҖڟONY淛~k1rvvF@?j?ü7N"!A"0DbP"^}ףz"Yež]"Z.Fgmh:Ln|cg 000, ƀ"'6`O*$Lr"xM"C0cz/\Zwc(r{/y.;S^reMIU<9Sjߡ@>ZUWfɷaZmB/Q{e-1@````5O0ڠbrɁNoE>cL"9 )bSuAn>hmwf_.7}1W:vEDiWu>G{֭:;"{lA]iMh>v덁t 00 (hEW&U3?Ams ש`zz4Ial ,{Ƴ%ϮϮ7׷5Ev'C=NYJT8,g^r4_ͥ `c$9??7|k͏`2y :Г_Dǹ0^e+YYؖnkoޑBfȌ7D8, b@WjlBWWMA KMcx|>x`?>)";)30DF" S̉p%02g)= s*a5 Z.|n/ܜ&  @ A0000gSt\pwr7qIf,0\8*HW#̊Ǐ# =s|Ob EIW"V;Ј.O?u un' ΰWM 5qKss _ A0000`')ۊI6JlἭ{A. E:EGSE"y>'s |rek\^t Ç;w! R# "h> *B5ck;6.4Z.W=4fR?~u 00 j3|00D(6NVmNFr 2f$@dn=HQ۟?Xkd2gk#ژBe޽˵Ա^=e-WQQL-8p0y ] yFN ``` D=#Q롼("\%W>MgggpF|h/1P73F:!f!ﭓs"\]"3q1[MSMZ9v^ư]d`-: kkƻcu匙cz``` mg 001Hb>m( ~y_"S<퇋b"b+20JT50deh?~oth {=GͰ 3 Qڙ7o2^itڵk{NpmÇ~q}yul >xgzi{Z%Znk<3p001(5 ׀"H}2QXm5_\<=XX֜WslEɕX+c4i~V1C-o?S1ynp66`b2N```d?OuS>bNd`/U̲ 9uŋČZl8a%3wݷq\'z{rqk2hLxw/;J]캥tIc-n%-9[}h} &&r=[޼y k˽Ik׮yA^t}]\\/nR_cO3=0!Ӻ'zC)jzr{ZPr+ύO{=Ooܸy9{001 h,E1Cr[njwK-ڂ% h[*’oߞo{"ph"YsU鐑G=6'•/a|7v_}QO~yh!|gDˍ >ي݆5b|a``0.at* a@ 2ׯ_J䊌"prviG}E5oQyB -W\Sc4Ay#9?'1WN9V V7D>7}6.Ef.پ/k7rCܸH߀Y00@KEPB\ t(w{M"1l~hC\*Jaky'7׵qkKĕƚKEؾ?' H`C\ ۯΩտpRWcMN~![i\ư]2n/j``` "fH 00H7|$PN+Z.,ў@k#uu W}4W/o,k2/my@yMڞ̽9P/pcV=]~}_쿽96@ rqqAߥ@KG 0003Wn~:=O^Dkbk)n"ȵ ٳgD_mh*!=DWhI |rUmÜpuㆄDKHGG64=\۟-WAhfiTҡhTFl "&00001Rvƍ,|)GrYf"DP,b2rup*U? -g_l-?~o88͗u~~Nc #m.'NXq ݢyn:|vy.u1ݶ"Ck>g6EJ [΀ZizY?``e@0003)S YO\&-;$Wtlxc(l&t#Wo,Rv8WcwW\i,4pd~)IwRnk_?߼y>?ꈿ`;l0pa```S!HZt '0W91jwz=ϊ*wxw 7?vF춫[l_߄5HaA=G갞jMùԹCuN\ꄑ5e?-7?amϧg8"NE8@O00c3 Q"iEuU\=v]nqaEFW/1"qYmSU3v( Cjl_iB$.ցNIsX"ܱ{{X?i3>[@bmDz?r @g hNH _,{+6O(]ۥ^ecKd ȵ +Eund̚n" ^Ezd@ĤZ"4GK]-qbÇՑҶ}ԯQ Z.: 멯(Pa``pРԩ  Ȁ"8 :ú3mj[1ym:@21x| #i#>o߾k!ng#)u .us!E-2#U_y`xMf3ߚ q Zn_P/ -R>|z00r @G 8==PTMrp &v[1Zl~:lwԮ 6f!.j%ߒ]w0RD@ Z# 5vVmHSn[ϳqnVѼݻq䖾!Z.lߪ[?x=`J `Jg Zw ? @ 8֭['#im$u]o|aA.u~#JjwEȕ1wF)c;l⫱|qR:!b-+P<ܸqխ-ӿpbSӧOՋ6Է R6he!p  ӶUd\mxjJfX\El[϶Ix|rV,Eڲtx|]c.lC[0Pu`m˫{sDk~:lYG ķ.߂s@;@'1FEi  [T*4>000.NO>-Bi3VM]{-y|#ǽUK_NUqH*ŀDGNj/'.sQ8_"bD~E;o/F)mZcS7}۷U?GW|W miyWzGhk2ȷcGܘ~R l$-N000"````1$lH*no8_?9V}* _| >$mߏ7oD۰&)$׻鿚m)4nGO0(d/ʻthY=7nx.G )Rƺk}/ gN㛴M001k5uD{eud:2ϓ QA#mCʟSHڬ)*b~o[2͛p雾iyI[oQf@s76߃wrz h\  Dra```!>|_HQ-0~@XR#' pնp#v}GYwORYn2nvh[qwc)Iƈcp={j8\aIp#sJ ~lb %~ 000pD8 p ``f@As<|&bOѢ%A.mWηkHb}ԳtU"cml ̉p%̕@W۩mtSXt{JIaG)Sxe 1nH\xDra```yrf`2X3m޺u 4ŷ$ŶDVPYo|RmO^h{(nm2:ؙ ݣbk<m{SkkR\s6߃wX۷o* bE@tHkÐ&3Ն,izl,eJѐU\EJ,l[lem^6m?Q:J]jL$D{ȧ;iXԵuEb{.j4>X€ڋ4n/V%y:ܼg/N00%@o@Vm &*',2ȄPY.oS. 
r40"wlԡGQs}񡕁9*J'nn^!O_xA{|$ׯ?R7oބiffH}!rˍVV߿x`/C0P^:_̀6St -N\m" :--)gsbCI>ENr)bn<O7(B6aS$",M|޻w6+: m@^}=S-n ;rׁ˯a Mo0@ e@Z0&%(O,BիWwԧd q QSk{(("#fK!{y׶u-4^{cZ "=d= GSjաM5(>A}1 Z~>g=00PD8```` ?~=H ;pn>ydfN&,&T=@7_1|n %" 8Dվ/p@~?'½~NuȢv ԡ/^0dܚTT8߶~߮rak-xo=tHV\c{uLP;~mT~MKkEmHmV ,xv: w;svMjDG$֡3ećP"?nݺ*kM{]p-1(Ϫ-囼R` Dˍ~(@JrŻ `@Ioƀ67о>Mg~?Hb #oߦ #^&|]lvșȟu|gE$:~k 7O5]kԆpl̖엢OmE7ƍ; U9"HeОĵ}Pby?umtr={Fmtf)v;9q`00-,2 Ȗ30~x)}4bN"rcІ6|S4ß3<ӎ%pAv$C:$Co#SF}6}[hO`1 c9&r{bCl00" "\``d@LU#:ا:z}m&A*x~{=~x~q]_w55;`?GAݼyULuTN#&=ĠčE>l|5;Ǿ/ a@k7rmد6,k,XE01Pd`>(/  @i;T GԽuݾ}{!+E^ _ІE5J>|H須6pF>hG` G+D=!}^L޻w6v ݾ=_@=}aۗb@"{e0U)K00\````2\yzE#- 1c'6Oϩ7yCb #İφvvv)}R,W 5N@A(2 ;mk4`{rgG+ە+Wsrا00PD86gk:o @;틩}=!m" ډU_r؝߽{wYv }*>|cQQ\%xƍzv""]3.mƺS[ϟ?eߏ_(rmV ³g'G#V .. Tbk0OO"QļVb-$:(j1w/[oo...?Ʃ9N}ā9l؎ sDnGG[ޯR[F-T5vRǎ~;b;l3@\5Ї?``N'T6*~O00SBN):iÓM% mEHƬl賡nr?i?;"J~ʩW̗OuX!yr/sEf.u]9~=|~=?}a8D˥mFH]ć00  @Q \-ՠW^Iu8m]bPA׮]۷_/^` Б@:ouvyo5QwV\""Hc_t xr ;7*ej$Zn@y\ l\&00001"M<>nriI~휄m1mL[5۷! }9^~ss/D ?k ,R7``MR[sWwhEq_{g `dgؐKf+ׁJ$I~7ERӂe= ]϶p;m%PK_Oe_|ۗog?D:8o [͛D'pM#SkLEE̍WM?g-Wc~/=#:HO<|0}2#ؐ q^`M 9Uђ{=#]϶c6]FQW\"W>a;r1=Pԅ׋Rz pm~(햕M;Vvx>3F? @ a0001X s6L.=]zU".l$'|v: reGѮv1*ǏtijʷH>M"\V]g~CR'qc|]9t3SZ3@ DmׇԿ} j c? lF6uX3vh-t+,yf&״R["'Zx7v+Zo9?m-nx+oi7{#s"\uJ ORC>^kշ*rs|/ @h}z:?o߾?0| g 0u@[xc? @D}͛7 Zǚy^I4OpM{vb6*\ڪZT߿OU H;j{:vD ް[$9["0Fh`` ʥÅH%OH&T3YcV9Zl@/2vGxw-n蓑_E ǽU"\mZKv}4!…c1?=hJ@تLm]`n+% m2nx`ڦw `c"*E{3 y$ƥMVb ]-p.\߱Hmc>yOmӖNW"r ,w4{CZxέ6Mth@ `a@oܸqjT @NHBO{&},J޽˦GQmL_SK_^׋96ܹSODy{œF:t">z􈶎[:[I`)Zx9TYή0-d !Y8[3 _00.`T* HLH Jrt{\.ts2cT&fo6{QW Ҧ&%;br8,?sĔ"FJПGY-aTqKqv`S2pKwa`}4Wպ'^ 6 d 4F {z''!LB<)Q ϳTR` ;`"(8.-5ޑ 5;n"t*E۷[ZowD*ҷڼ{ч0f* YEVaX[5_tH2K00pD8La```D9=N0Iݞ$ZA 0o y{QQIR{yW,NaM&o"cNڊtqqA4BڈTDwK9\.?NJz. aƍD˅0<.o{Oyggg%[+c1``>À6"`֍$XV 1m l@ an%̀>c׫4&h.O<٩-}4T˼~ZKV8Ms"T)x,bhT%޳>KϞ=Ookx00r;䠒Sa```}~sMHmLbcl|C@Kπ]jsHyG} .kEY;;>GI4߈pz6#nՙH뱷  Z|;N_"0,'\x$\_j BM$\ T5LdJNdx'EDvf?~%E]^%Hc*s%"mĜW{D}JwnKTdytCN%Z -bg+\[޾ $,PL00006)6F^x"ƥUxT+u)iW"|^_H6\A[S)gAKK߹"\ 9:7:y;woǾ. ehuK:nOZ{y } mvm04t0000d@JKמ3h}5 u(W 0""~B Юbڵk{ g0E8o|}:,Զvb(B bhpAggg9/~v@};|00Pm\~H%GRFQJ9ޱ# ܴتv [~ƖD]a5H=et[;G+q"<"mk#3^r P؛ڟK"7r#S\X3xMaZyČGƣ@hdha``` L7#N*2v&?رj$*߁ ::=_LS)X>z"M>m˹"\Eʕ_'W~z뭷}Aoyv"^$n-a`<Asxm\y-޽K=u y'0D8t0000`d -ZHܦw# "!m0޺u يG߄5EZxM{ՇO={FD $:OvV(mQ_pc~2YUQ<EI:'"I.,b@M^k /-  @_ là WƟ`g@"t.pnt_0ڞOl~aɓ'nLQg30O6cq~~qɩ7]=wzEaJbhzv^Zi:L݁VHD˅V-ϵk_ Dt0000ɀNQl>an*,e"A9ۛ4A迩 Ν;C`DSvڪt-_):"/}}p=16)hʕ+o+mv1@\mX~L;00p2E8[8oPa`` 6VR!6ojf韴?bXc2O[ז5~8k6jTVM}UD5""{:_:Pi :bC7Qw``5 1;Z~|P7CVvK00r @NR!:A^[ ZNH&~+y&LFc cEFK]Z[|Ev9%WuBc \$8^G@L/H9Z/ u@ä```f5NRE^ޢ ܱ+v]ʀ"S$j%D^:;w SVC>iצ !EqHs^`^:'Ux"S+#umcz:fEW[%Z BD@k< }# @:;=MuU2dj`iE6RH߾Ʃ{?ӂՂSUnK ~s8j1y-BN^x:OP+ަ6@??.('J"h?w:Hjs|"Ě K^"ϩgDKU/?c0bߖ@bRDf=.^ߧ9ѩڵk~ʺfDf[j;5f{hĸDʅ/iR4RjmĨ&_~Pƻ%F~T16&^A S[o5XpP~?9]rD:|#xVi9&; l`8h):G3{.ӭh:`<&~b;l0hBD!?``` pjCK#nq=Z:2E[jf`ֲM Hh^P=O;0A.u^%G1̗~ۏooߋwu@ ws?om7=2n(p/GLBc"uUKhsuOcsn%y%sḥc~Lbi؉rVk|À5Zĸ! 
,Yҿψ)⧱l޼9<~`F @%:QSޣi|,ӾbeHc(o2ǓG>lSOZ*b 3#޴Ç?_@;vb4 r,yGBt?i=ZMv'ע{m?0[u[m;^՟'8Ik;MnѢ{mZ[+kk~|˖-h־})|>LY.hJev=I;MmOWltzFяϟ?C/NGW<% S?-V7n֬Y~&d2{e8<`}OHꫯz U>? |Engh٣O7mp-d2v *UVd/~NQ/x3xwС2@u)Dc@Xv4/Sexf7ns=> ~Qݺuvk}.b{D 푃L?n>Ӕ#fV\\F'MLjy 0it P|z{|>nzypXuZ%JDGqDeZ7~k#O=a@Anհ֬I7apwϥJUvTR'Sr?wּ|5ky͍xXp֒ҲVɼO?="UŊ' ZyQ"E81jlKp@?)?h:+9qTq qZ .kʏ 4 )ǧYвN6|4/Q\5L&SQr*bSSTo6[#AbDr{[T, />߷_x!"so=RJ34]|e_={+T* J{ItJPC@pCZ{:AߕWmJ/1Lz`.J|ݶ=#YS%kr7Q1M?KOf`Z^ ֺ\oSk q"'܇5j8ݗ*Jwź%M&dʷx4 [P@uR5=ϊtGƽHz({8>x\ysHuu@8*M{ٳeڲNyuk^ TF=ܩ(~q@wV8F`⒣Wwh$IVXxک?+hE7g$. n&S^xrK.0rS|IoS\~vRhA(MM} .F"OQĩs"D}ʸ/rW, pѳwSJ$-M1/Rj4#p?Ӭ Vh=_rM&dwZpOK ]ve81U/ |*JzeޯJsN9lKs1,{91i)>jD5B 8׿調kR>ʳ{4R/"io8QNQ'I8 竕A]\rNNT7F~[|H*j 5kpMpY3kHSLwGE=i3e([4|(RY+^4bĈ))9܆gNQwHk2L&S"%q$|M? r8 )}|`t^WdO ymdך5&֬5Yf?4muҺ9##]BTlߚ8h6X}[\u#p?grZ d2K-X\=p&NRw7Nׯ_KoVSLi(/BUm_͚5+(JMEUE҃i92&%_ߤ*Pm8- cY"}j*GiQ-;~\OM֢8Pޠe;Rڅ$g߿y?/s#r %~ݷހ3fEp Zfdך5~M%3Twm{SLԴi*.zWqFQdVWA=W^ig r7xxn&dʗZ|y4gΜh˖-<|NiVQ)UBeAޢ~yE>DuOCr }D\;[6 .X`5DurT>f .-}LK496UN8ϔwNK$sUwF{j@jj ?.@=ezGJP_0xSo-U'bާP.ʯ@8`pԦ=(\. 5kpMpY3kJOQfŨ20O:9ZUXӯg7.W9RuR(֮]uC#d2LJL}yEU ߬䋍(F뮻R+ǫjTvm'`;@6l[ xtEF~t% Bb@UZ#弍q#H06RWAW~:uDSNeJ߾=D/RQ׈ ZZ(rFu5R%X?B^Xܷŋ`4 gώedך5&֬5eqI3Qu4cȡF4. o%7j n\efի*fg-yp/њL&)iʕ>B)bLPTw@hLS2Dt}~"(;;vB N[_E),Æ bb"xcJS2L@T@-gT,FDC>+eBL9v} k6FW e|㖴 ǯ_O?%On(t Te)7ʰ1hq(`6tP&֬5f)C;SA-jTJ?W_ttWtufoPn\(T&R)T Km5L&S0ѢE_~'[>ZU|C7eɓ}FIa@j붝mSI}*)Zhh]E{zت/F.e`/b?#ժ6 mۼys+q[R+W?3^8)ł/aߔ^`K,2pD Z3kdך\&3g>}~FC(YY  ,U?ܟ~)-\Aۍ7zk2L&SѸD.]4!UADCDR K':N\DzK,U>#f88̌J/*Z8^ qLq.^o7l\R3uL6!<Fd0yF/"njAI|+[ qw|H5jI3xAS$AǃpB.ξ uTgωt;V U)r2piGo\|d N6 &pArSJ{CGQoA E# .0h /k< Z3Ea_J)S(xgk"  u{Ȍ miHvY,eNAEZ|q癊 g1>_92Es8ҨQqT<g,Bp; A #jjUVn7cѣ#fqф Lda4F jAl"Iߚ;ޞ`),|52Ag6h{̬ף_qtG=קٯ=UP@'i,IcƟ\JP p,R&dڭ@|g=y @G-=:@mӦMKD.BQ>QGUsHd]HUUPN63t~D.aATi5>ӽn|H㐒"쏨f}yӬSV tHpv*3DCBxLyEZIhJ$ZN 7p1<xE 5{x=!:)Pcك~d;,pχ@:Ӊ\A_|qOEZ\MHUl:@GZR3(d`=3S>b vRa+|'<p}o h7ldܿܣT~8 v,^ΝXca`_?<ߚ;ŋmN=P3^t 2 6>0vȎ6g-d|yA^w apM&d]1OtvQ HD:]ԠmD]":%Swjt Ln_+N.)Pt\h8!jG58Ao`0TX;Sl#@ex7n.q`: 8㈥D^࠳=~#p\pP,#XF -o\_r;>ԍ;U ;Tp>,a&r>}A4/}\?RCo H|;%Y8gX~>!<ؖd@oo?_,˴+t)׭[. 
oCJ:QϠ0Ž31rG>g{ -y ̿/c )‰-^nX%^l< JIvʖ-T\!52-"–  Ǟ{@1eЖp?]8fY+F{2ŲZ(75hS w3۝H?O_2"g־ж,O)ڬctLtʹq=-~aq( -&!f `#a|(p5wojW;Ƞf b/>hsL#ày;RxykٱkP=|kթk߹{vL',psj*X:}kpkw 5Hta=}8_5os|p1?J`7sMj\\UQUukjs)x;.nI\to~k@pٰKET8vmo+{NvC)6ϻ_4W8q^@>?X6E?dggPBl`lU :fKrKm+;WO =,p) JNTG /Cmֵ݃K\&d2v 9ǀl*l p%JprҗjDYElsTȹ-#P^ۓTy$)* F19 tX,mp<3QS/RFG"р ŀ"[ҲtIQ]Np THڬ(0:l -RDC%K&{챇ӹ*h[e˖pz-{{M9\GubC:ޟ}t (N3\`HI_#2{7M 4a@Wi~ r_>GQ]n_L&jNQ{ ="rpޜP(6>R4 vxQ7a#XYYv`knMvGzHs%B{ VpǝxB{Eow1K7ԫs^s=wKm|0q/g#C41_+4f'agD*`"pX\Af?ձ7,/s=~ ?eAl4"wŀb6p Y3&2CC1_hq>?:G`3l,S_ir#^Hfd2L pI@S|MTDYraM8r֗(SM.a@%bʪÉxyhX9zM`"f':@XEv`}E^91")tGԭ[w瀜^q/;;Mw'~5˝^xܕtgV'GW@njq [MF$kwհHsz@9eJ꧜hX8 'O-@Ω2([Dtnn\g6=oN5}Cv"͚{|l΂pұgsdC"do=2![zIz b\0ﺗfJɖ-̝-08kPt/ua> O=ʝYr^DVtwPɔgLtT/⟟nuCJ,Q#vTõP$otg}~{8h?7st :kmxpqwglujgW;͍xKap~=kn .R^9gON#% }_I (PW;WJZ~u8AӗdOG![eWm'.P3*bF"3wޟ`w}5@JmM#.e#o*%Y0߅mvr3S\d2L;1'w"|ϩVK~8?#βF"!D{ r2UMkrD-0NE W@>YWu0n֭[$Hɘ ?A٣xim/ AI@g88 6U/\P4(EGɮ$ [F^S}|9&{!e**'[/PuzWod-`DӎE6& x &")cȍ4UqQ2}g~!M=W1Ew{(.f)t,02^Q XlgƁObe5q@ #+dn\h .s{WrWchϻ=@{Ws[yoh%WWMs{eIϛQgZhpuO1绿В6ᕉ+**{gjynA zA՚׷:ЈvG\ ֹ_[p=}!/p%cGZ V`fPbhD"`+4|]^ j{s$\yoO>l?믿u @Ia3+Hi&c%iOڋ/Ͳ˗/7kd2L&pVǍZ^lU$849y]+'/1G)t"m#"KiD> ZęR fLmc9b0!P/2[" 3qOt-S9b6d؛BD)h QD w>?/YttR\}ދ `947N·g x'ɉ^9 u[N*1SpȃdA>lXkݛ<@)^W-9bYgž8ާM-&wyH1CqÔv݋Ci\˳j+PI3AIsQIkbl/EZh^E)lYg; aG<61e6Dp&2PNƈB=}ǵДܒ䮥(#QJcQkLFYÉ3S{5vcد8)%Gm\rh~>?lXzS7= PyىՏ\Eg{>zw^n0 m)|%t>8wtE9 +0 r4,8+tG sUIIgYl#pyv}\ܴL.%!ۃ]]o}`czq*Px9vldf HVzW[cxF0{BσdI<6B4VYiTgy?0G^#Zq:e/[?Rގfc9&g;Gsx q< |\ܬ ;ݙ.`EM`0%K"X_rb#M^@.Qb9bVY_d?l`]>'իlX(@e0M64@`c̼b6pۑ0D;v\d2L;1Gz[w+R# :>g*DSz+cYMsFߡ;th-ߛ r*Ҫ4p1,;BP(Ru3 TXXx88 -s.Xy:V8LQHRs}tb"0T8f] @Q}=JP,>EU8o9sm>d9dWKî)invN"J7?RΣP<5Ɛ{rk_kzgkzVl6d&+iR p fp)@P'9QB54.Q_D!2)#`1G//}uN/Uz- V;2235j ˠ"Qd]He|)e~bqcNWW`;둮 !slӗ ЙiӵbpN򹧹9+_P-Ļ 3[CNp_*S_ \ReE\?T3 GԎ΂>WZχ)-ZGӲ]rISvOۚkd2L ʡBx汊&mq52) E'V,ٞ>}zbT 60LcH 'dtS\ApK 1ZR*`Bj0׉S>Z lqi&:QϞ= |ɎpݼyssCSp KDq}R;pt H#>S=Fa)2i" N "9>:IM/e^i"̃_H5.pS"ДG/H@X}C[wqǨ eS[طgߥ.)Zs_$RB,#M~Uֽ g if!0HaI ܝ&{ F[mc8$ 21F(W.3=;"caNLI.9nonxI؟)KEϒXϕ9hW3m,׬]E p˹ޝO E$ڊ\$2_U(^II pzVBm%G3\H /Su_@fVeyV0O%r O:ɟF,"~:*9}S>ӴЦ@u(YPWޜ7 <yP' p=;䙤E~+sq<xcʉe^j}[hpSK@9FST %V jx*5||2kp3 `Hr.a&@c RR,UMAKd_ޗ/{FyO@F4d`!\GD6t7̚Р%\7էa0밻\VO).vA피 Fſ.ҔT1O狚e=wJٲjT xEt+_a"R%]ZgO7YqyORX0(%ݖӀ_`>?fv F M[S@,hQRaͤA D<(rn?ld2L\Ɉk)6l8H:e!WTtSXcׅ--??5 ю>h F*a~yq$WPC7Ǭ d 4[LV ,TMQeKd/U SW2ώ(B9U8LЊ-Jdr=6pi܌ӭD(wJ,s4yC1:ݮ:ϕՖrᙃ9iʏ)H'z~?˧&J|~"T |Rm8ѭ{0V&>{*f8Gk-Q-Ͱmu{<p*"QWl0XEu EM~yӔ|Il'Gtx e-ehJu_M-N @0J( ]cG3"xd{Wώe(5a@w}ݚ/!p9 gIŋ 3Y]\o~3/Y{f{&rI>_m<>}`|wx1@93+\+ f b?-J|oDN\,m)6< 90`<BƜIu;tJPsfP K5bk5k2L&i'>Zu4AҟyHs|P48z0/Ot@A81u!_ 0 ,ο"ؐt L'`-ɁG~0%L4F(m (B”>@ Z)l Ȯ817Wx/ĭ͢xԲ @#j;:Y+(khZg߶R\M--p)w%-ΛV8`'r`8Yf;[R*>_߿RGdИ_.٥)Sx-yp̆Jwp!%S"N.urخb[ 5Ol-_^Eaa L$0".Th{焠Ư! \d'- *3F15 Y5q/xQiw) @ **˹+q{0B2/KFlN pЧWLKs p}!4XC:LK.ƾ{[Sz_=DcWщ{*\l`-A}& G)(DדF*?qS7ҵ/Kf~АUG E?`i/ǯ_B̓ ;#͖lkHR:3kd2L&Np}KDr+wCGx**'.W싉XZ*)*W7mZ ͞=*z"]b:8 G.J91r)kM~~5X0V9YȊ9?5@Ɏl< Ĵ4191^-*pZ p |ZAlP4i+K?a:eʑlL~)t:L֦B)?ܴZ ]/<_Owmr1ğ{h8ĵN~y@}g-m[RC޿/rY˧Eڒ%nxV`P\I&e 2PW~;d{G}ޥ+} jI٬> TvJGv c<;T$r'|Jرck:> s6kd2L&Np-JnYl}Jj( 02G`qF']l-"*B;Atq F9-T@& b#;"+a4w\8Ak58pS.}a3 K'Dqt`"D5ȡSm0A>d SǨ'V_?mJ Re& K?5Uy XHb:\7% 弪74C˧.O#%ydo:V+6w\+ƴn8.DvgDm")2?XC\ݻϻuIy͠q f vw fp>Cu:s4]PlOa@ +Knـ( p?j\@B2셽Wzl'i =BDZ6] (XYAٳTľZ|ZG,m3hQ<7 H>q_;7tmz nzM%4rǒ" -););C="f/ތ~l)YIog}ݸL;Zc**$z2q[t_9W>|š.[>FX PW۰qw}\nڴ)pes= Z ˃k ?E|_A\T,ؾ3e*2(6yd}Y-mTf 2XbMER@C>l㔍Q:p L&dI"mWੜX>/+iHy @Z#<d WZW@(%j4`N? 
\9R$"9xpEYT[*)Y]Q^LIVk5΅\c8v?Z(Q\,b іS1Ht08Ȝ+9p/_*}gXN 5䨣ց8D3mX۽"ClSQ+.%NW~ܡѩ 2mR;%7dQAOx4ܯ?䮨x_؅J]^UdOM -^ʸGYnܚU$ q/r*j>Z(9 8r/"@srkec ؗ@4/Ck*3D,t@FG@vE='`UH߫@:8r,gn"ƍ#A?{YWrW,̀U7Ͻ?c@rʗSpn1镉먴"f#-.QʠhB6 E̲!<ՇWvkˬWg~AYT2hΠ>1鵰Q MO1-t>v?p6UrSXX3ZR%}3 -k>#yuO9Wz~;Ѳe˸eѶ-x6)h"FQ[}~OH&d2v=FN35CHaȯҺ{2 G.pLaWn8QJ߀D -KNZ: d(uH@ENqݫc9C8/r| "K'9ȘFd jO "hQd9pm,;86ה(_A:w]ZTgt̻"pW#~قtCbo#^Ke@IG#-gMg9piya(V4~f\TO-uTҹ}^רD!ut}A߽tzI( `/Z6dO5c W_}0~m(HCxL =z1x> 0Z~20ԦW)pB+ 85fcec 6\*钞 7n4 dNp@zGk<Ƽd!#JsEӇ9Slh s~MIcв7ԫϭP3m1'Ys/?8[߸PPe5r0a'N[]{c}s/y-zC屓Nx icɂ! hi:6ߖ8ZnUt1-#Jnj,l"-Q  it/?6lق/5*Nz;Nz';6s+yKx}'`BN <z&@+@ Qn0,@爝:&Gfn!'nd_:sSX۬]̋tuӖEL^çC/9xJ/vh9fڮ prseCI-@PӵZ_Gw; b_ x=p衇rη !j_;a gǎ_3M`3T x~}-G@QJ YMC6f^5k2L&).y>?@?u'2No B~!`#3Sh(%@"3ZAwCߨg 2 W`3MsN4/9ytVċbc=%G'EsO"]x S545dGYQ&>'`g^HK5g]:\9`ׂo}$w4I V"+*R]\Eib:-t,k_3Up}. :fWa/vuCϩ5k~{mKK!t?)P.Eצ>_m4DaRBQ1prNJb{\F}D0EӮeG׾OnP4 ? 6$3ƽ4\p 4+CH" "2 BZ!->.^48@$ͬ'[͊0QDc4&/$~6]<m| c}MW 8YD1;.p 9ub ν04Aנɕn3J6M Et=u}uF3rWP O=ʍo_Tyr/|_dWρUs<Zνj*[ÜH֦#}a|Gx_ ʎ%ۇAȊ-j@pCn DE]}IPr>eS8KpM&d21p^d DfԀ Oq4)V>4BpC-MyAӉ6@3)#BN6TO~N&b( "};Pzea9MŻ0A cDs$SHߐ9Z ̙s23ܒe4λ"[R'y\r\$F-Q͜B̚8xn$N2_O>pGD_?\D~CZù{.78ny_t-n.WڶѭCk+\f\jfQp=Wq/3 LOD.^ Ro`["SSo;  .J 0E鼲"D :vÿS xbV!+H\q=s[F4zA燠\}71pid2L<B %qK7)7ִ*:ɉ{FPwΉТst| ,dڡrj&AMD)zj>} N.O")# R !SGsDaAL"h^u hﵯ XhaJׄB_efCw*kU$_a"t,cqoRVй}h-H ?ιH`MC1fM}lT˲,E3fcp>ɝ{I1S.iOvv\w\Dw~Jםy½YI@ʁ2殭\}Sٺ6:C x~2/\{ v*m,wbiO/jWH>Ԉ̝wѢEF O"Q7 `| RB'U+VT:^Uvw6$O*krkxL~STHK0w38N%8H@t?]QXkew6A*!Z45(sg+BHz.=Kv`Y:^҆ʮp/^|;igFئ(WپQ/פ Y݁4wܛ8+kܱ'#zYg{'H@Jw_ά|;qy{Ege 0geg5\_8p?w.JUOuޞ'Gng@_۶?#A5 1azZdk@ u_Yخh /g|Nt}~/"ELd;H:+dlP6HT)XۛS}򛔚aQl*->1 j.Qp L&dc#ƴV{M38|>D(r74i>@T@Ui"t/u(F,bJˈ: gh{Wۋ(VZyGPd9qK#hfΏH#3vZ(-*9Kx pf9O 9i94 SjF(*4- 'Ny+Ct?=~nE3hQ*4EyH&#|E9Ak׮e)ɾ=@xWzgr>>@9ww7O9dy} we@W!dza;Y]pqǝh7)gRr.Cx:^> g, H^@lp+偷!{[4o.vl;~cK ut T&0:Q"IML~ŖcW8# sZhRgnC\d%׫RxXHJ+*+}&pW@sN冝?Λ y|BoOڄ4kwrM^!vG\p{4{yn3Q5A,._ᾗZTaEpg ]-v3_eUI65X_{|~d` ֱ& Q'&CVpaݝ4!ػO5k2L&).SlCQ88LMn<8ZC@ЊHM([D üyfo8B h ib;Dz=n(hF!Pp?8&ryGm8v"@Xh9&R, rrV˲qނyB'rĵo"yM}n|g,xslH3DÄsgY )%uYn<- y-zo׶5cOmG\"Ա /`'Fׯ6oBcC]{ }~ "n "l \n>q10 `/y{ӲCt pl [Y.y1:_~%S{Zcؕn9ʶR"PSl;"S>yrs P4أpaw;4LbP{yl{ P_Oj;za ?cq yk~B$h qZLk@ҹS\d2 f 3:8Lu%ɓ'c9:@$ʁ}^>Ph,&hY <t:Dm1-  xqȁ4ޣ\'uj%1%la S*EDe*YlyZP:S\I88x~\"I\Ir㸉А3䐣sAgd>c99cQ8>\? 
gE;{pStkDc?ۖ% A:l6 )b?ߣU% &qj7U)[n0( ό-vef m"ЉsS;psa  _2 0O᷅[lu 𙰃b@@ߴ=f(qmw3e2# {qG=5ȏ"OݛCc\{E+'.>8>\&d2vEt%jјe D X T:AR o":pԁiAE:8ӀY3D@)dDM@NH:qqMy{)D:P~msth%/$[<ĩu&2 7w vt5k#jVve p)h"lĉ}l`6%+|mp9'D0H~&@tvlaf659+-4y؅ `% b,|& lǎ[hePr|u˸9Gw~5kd2L&NpM[Tr$xFe>ۙ."yZp52k)UM{g+O2\d2Lpw U|=(Fm[ExtT"L  pY3k2k͚\ӎ`Ժ@9Oo,-QaÆ \fQ\d2Lpw)eJc `vi6WhQ֬5fi"w;R*ۊ *UD'pBK'\&d2 "rQrȅvKnYr2k\kpMOVXbE['f8G(*wP=5L&dwWEۨ,O.U)EN4Q|j7֬5f)%tPv*wަNQ=-JЌe\&d2 Zfdך5\}.̷ի7S_&L 7nJ*91bDD4kd2L&\\k Zfה"Q7h`UW]Y{ѓO>y[j=a1cD+W4kd2L&\\k Zfהz嗣Aaq)ڬYM6N:Xu׮1.5k73Nhʔ)>MG-ڸqdVs'S3c&dd2kdך\.PgϞQnݢ~bǎnժEUV]ۨQԾ@[bڴiц gey*#u;Y]=-ҥKDG6(@]ٞD+ˡi@!+UTTB[jxҩcVXk-eEufV`iUXzNS{ieKkYcssX]ҬY3'Xs>J@$&[J^Jjժ4v:rʌ"^#x衇OgqR0,^qFW.lԩbwvRcdIΝlDPpa2AD<)$+2W- 6< AǨ׋5~9>m_;5/fm=:&{o^ȹSL'E\>9^Vc ׃zjo1;v,!!~lR7ˊRК2b%GKFj֪[f;l|đVhiv5jU_kuJN7)Bkmjhڮf&[cEv5[8f5}7a@EUVEgKBsLMoNhu=M /9O7ۑf Jn>)v MO} X>$*yrUJ`%K,鮽aԯh+Vr:^^-gG++ C08#q _пXQI $2ÚBinڵ$'`*\"E8)gG1 Y6* iWx ʕµ 彡Cz8 ^Uǐcclg`%*JΘ`tLc<`JpF.k}hѢu;0KXВ:˰m|yO@dLX>k`]阔 E2`کݴU` `+@=$fEXϞ=cM41)&hĤN)))ќ9suO\Ljy0`M?Xlpe%ҹJPs=ㄪ>W^5йq ͘α n~acjol2ȑ# Jq>FիW/QlW *qy9}P/%)*r-,,,|pS7Xwp{YE7>ЀQ~Dyň*oF ^)KrFd@++3U(ߊ dfFؚ];fD,b/\rub_Cv@-IRN`$POEA7 '@_ܠ,\ =.IxxZ+yB2>Hԏ[5mz-R*O .uqNM^?Qp7իVQ#DŊ}nK!nP'emQS?k֬OAo(bRv#/@YO/Jdl\K= x9l @oիrAsTb$6#[ Iз׃d3/Ń֡ b*L /{~_98oE\{z>m{; _P+V k9/p}yfq32M ?F)ZӅ"F)뷣xpc[M~zR>] &QoTL?[`pO ^%-.uN~-?Yh\yOsdg >-goa?H`/TlRti謓#azhcAֺx%OH>;4PV < |Tmz0{Le9O9>R-vYՃgNG}RhI/s[}8b=Ѝt|;cAOS7l&6x)+}6Ga \#Qcs<'eDZ7JU)x}{+Aஂ~꟧u,wAZ 0g? m}ٷ] )1ƲwRhQ]a.jss=5j=a4F=omVFЩsp`g='G @plaaaaaaaaaaaaaaaaE!L qI:-gSmE]Rc5{‚+zqs1?u#t_UY"q8`"YW@' "Ӹ*`ؼd\rWܻwCԩO+%UZ~L@vk/ u0NJ'#ܹs߲8vS0]{`"g!(6y8()[^>*y A2y:} Ԋ/5ueEӓ%[bQc]5,  DzX1T=/Pb'Ծ7gKT;' rm(|p N)|QIH\)au|ZK4 _]s!pneOfycjL{ϯ#|\8]Pe`!Abb e*fb:fJi;L Guxyȅ{:P2>d,ZXo;l 0ݜ)% FEjqIg끷X36bO0_P5;2(m`ZyEyj@Oέ,VT{$AQ'1~fS?GؾCkRTw'<Cg6R}3uIΦC|(+dy*UȮ*qBx6ޭ[uB!!ZLoʜ(Ug,49Fw C]_ xΓX'x:*0[؍j<P)!B?^׾gt \*q8 ][&ޟ.\د>M| Ziz/E}vuII}Y%rm:}}{ᯮXn`i=>LZ~WgJe`d $y'8:Z*ց4M(5.ohv%YU,XN, Oˢ&M 3=}wWP3pP_u Iط,0ujR>2D^h;[/dP?YS QId](R8߻˛p]%!޹u%(K!SP>\cXd~+uK0*W&]^-T^z '*O՜W: `GJI/Au>VBǰ 0}O_^JTJ)^񲥾xb%'[NmȺtRo pkY5 |sʳ*8m-ޏ/IP^l_8U*RMժUgP?n H%9p3 I"\ڧsV 4En貓= 6X*>(a5@pAw3z-#{*qTGx_]*.VĔ-F mɏG9 a  8l[ejlW^-~n(N \̵l P"yL5*lww/wa(j{-|y1<~\ k1>ԺxDRJ9w/[XXXXXXXXXXXXXXXXC%ӧ<Q)QS3fs)/oH1 vTRQl:޷@0`cHΖ^*)GQK$#Ɯ%.S*/ee, 2{B y!0 8pߖBf U ,>LX@X. uj,%@$8(MI!.WgFP W T  $,}V |y66 IJRI!\uy pyo ȗHOV~lj`;Z <>"@Ս1W0oW r $[>Lpu1~QBwPc=<\+uQ~spXY`窔ЭX `2,,,,,,,,,,,,,,,,,H p|2+\Fk>TـIA0 sG۶m$=F &޿AMLb\(e:Z)"7H/N.'Uqdy|Y=pvXdj?@NSُୀPMaŐj$޽uBPc_.ɫXw Zo?*{J[g̙~OQ}jT_}UbT fQPTU`宓د h%w(3 6x0 d>lU qR{_9&[>Oղ!P (mJ ~9UW$PQ 2C@j`8[ ٮc;7V {it\g)Q\c'Ad40b,ŧLK [= Z|KY X66nS<6͛7G5;)25\%Hĥ=PVTB_ Lc{Ç$R?pQ. ~''{cY?g$R_U z C5r]գ! EQ u($DS?U V D*UCّzjIu½H6 ~? ty'`S$=C*[\ `tcOT bVK{KHq W`TN_2g9vs DE]*ZIѱ#JUΊO9 \i]X;\|WI Xڕ±%7QޮE,ww \ T=|Yp:/%qE+1 BA^ȭws[guou:Fp-,,,,,,,,,,,,,,,( CL;>j/شiϨ(shnڵN~=7R>$hWGzN+O?|d+s@EC' `_OW$s"Xr +,SD{>)-x \m7F!L[;&unbwA]clv)3I:|/ #){Qer@$>@ܠV?PO'Lk~$ji`,!oth=λ]\kb_ L7c6x+Y^IEb3Y"<)( p\k_Rx"7[|^M@7c Nm DefX(ؒC$(*Ȇ@PTYMbPUj~˴\lDnp1WLO>EJ{z,rA3O&(MFJ:qsU;2ΠH|%wr}RxtTE8^Jm.]z o8u¦nO=RPGa'B[!v6kaaaaaaaaaaaaaaaJM` ąxqr2LѠMSR(\>W2?CJvWs/Zx Jջ,@- 6rs)X_[IiVnP8J('Pw 0$ZxU+ۈp \~b!/rCq-;T_1K{԰PQ;ʊb Sշ5Z U. # hBðb4=t׫T )iGYͱ1O|cձ^65k}T$2oC9,z9*܀  [o%hПkARW)b vbRߤR;,STondFC(`+W XZ L -3CM;&S˖- |(o)8%ChEj+ sŋk\ԭcJg!(WM|A+I{ebFaT6@+eg -L@"qX ЯR6 vR-o"?V ^JͪRJf&uǚ(϶UK -_}꯹; ,>'\b mq^dk%&LpN3)cx0˪6l/t8qhmreX^&^ԖѨy_Ps!82kaaaaaaaaaaaaaaaoP aT/&o)_x[Rvp6=`+mW$`{֭[mQ__vkL 5\`ᗼa= 0B@PCB2$uA Tа%x'_ خJ\)VЀ|RE^XeQa 2~14َ3uv_OJ +cu9u2\:GFIWU@'o-7XYFT/yUNjOq<5u̾ (ipoTp$S. 
>`[@UNa5B`1Q{rBS8|&:]16Pg 1Amgl^o:z)>Ob&='Qmج kM/:?|ɿw!:}~ld%Uh3]|kxt\=b/[<1 *'/[A|E؃S0(q] @ۖـA,K`m۶LGd dLW`.кk)Luy:UR.kp(ek)o},3Z/h^ M^ #1)giK幍((}?ŲlNS{, ZNe&P4xbQ7Kl$% T}`;BIRce `q-E-U}+)mu3z?&97MII~6kĉ> S`z l;ΩF@SҁWTL4Luuٷ0`R5wƖ@^'N/~~ iWRc2h kG9ux`B?6 /:h[9PrK|*xMӹ(;#-uo? STI뾥 Zg=Ǖ--0k9t.{9"E xǦL~Nе%K">IA98Yy ^*-%)qc\nCS!-M` AHUi|cY'8Em9">) PhC{A@`0jD@MuԆIn}@З^z'Jj`rG1V UwGjPv,+)QhL,#d-S~{kQ:@бM߀l &֬YQ@ ؎s%'jYIz-f;@/ZWnG=˲ˑ䋿Sz?jRAۋD+\eMu]reKG8o@m?We)zqR=_# ; hq\I8'HXƹ6QkEͲ(_Eǚje)]UJUo+ Pt Hlp*Q$}$ P/9sJIQ9;3aP5,}{=Դ7l6AE ,ִ|,ol:C>y󰊈>s.tW^P;#`:} <'ۡ{9䌺Џ#/WI2/C0T(V ڃܴIBNf[/q '@/>Jm*'k=y%P ~cYK_/,d9A=e{(g98˲?7YK_ufb/Y I)-/O36'nޟuR^N܌@۽@N`$A 5!JS`?R#uN~=De= Hc ?F 2V=@4^ X hf=ad=C |{1f Wc$ԹEb. R3xs6m<6ñm@! ha@c`OSe'@4cBaCI8Q(/,<POy81<PbnXù9Dz<[)t=pը/}Ts_;R'Bh;u%b/`,P2`l7 $qe|u5MNo)RY> \D1WԎ B d=e"իW{%=cC/^Q<>Se Pj&eb=@Kg;B|'=GdcY& c EMLd&f(oI~`+~ƨ=}@MPBp\gI}Xxq aFf%P6YM]xHU[XXX fHq`Z+VվVF+Vk %`apGRN>TE$*Mbkooaaaaa ` )}hC~<;Orx2S!@,y%dc+=ϑKoL“v|k/ԱmUo S-6^w=FFJpK{}b08H>YftE쓴6p;차s>ۥ) r+9U4rΓO:cF)ӧ[ɖ2c3gM@iS:1+Vrp͙d?}uʞ.3`3M+Vr<6m/b΋ 6-,߿?c%b m୔+X'ZXXXXp z~L֭եJl)CQ\pAԫWsTiZtiq]vi"; OYldZoO]610YL#+!C/@b-fޘ]M ,ҥիW`5\b-Zvޥ'7k@q7{ɒ%ic׫W5m&MaJNιzo-Wk0wa V&\CJ˻6G4q6b%]vM\]f;+{hVT<5hN+Vro5<6uy1Ytp5~Xc:xba'm'o7o։@P| SI菉u:uaÆzF7P P5- OF0}^rJ`Ao,O!C'b,Pגn&a@OVBTq /ğ  فjKynPbw.VH!rE)vX쥨[bl蔕NY~VF}p|'rSrzʢXɱGR.}8Y {5kܮr{:|nKRF]2jV2קFq#r $pk6\|7N:aoRznx5Hw[X1^juY=y%۠QFކN [bEjSAP7~_ +uij>˓4LW:5ߥ^ӯ_?Ek;뵜j/ _p+lW7-VGXIQNS>ڪnݺ }NOc@H&3n:rϞ=#y[Nj2D˔SRE'2+>4 }q9-t+9V-}rluѫIݽe]dwK]κ}K7jWVPY〸pG[o9⳰f~s:t W Y 8M;PgWY\2N:. >(, :e˖NPj``2)s <)zּysxw[hZjkyV3fU !܈ǿʖ-,\ (vܨ󙖿Cu pժU[xl1`{Mʕƍ.ZCb<|wp=m[ 6%E<Ӯ.7 ݿqKVOwo|+9V 7U۵{{]dgz޼+^\+Vrxp- ZXXXXXd5)=^*FV_m2d >`>} :AXF}KźUUzO3;^I!{V-?ڼI؎,|}O\@|WioTO?AXZ Px RN'>{T?[Ul߈u?7*aY<WV-J5"$d6lOF>3?-D&$ఠE}ڷoQGp Zb+p- ZXXXXX>}zFg *)qÂFNb;)Rą@A*YOd`J 6DSʗ/?{,vRF!/_h$/J1LⵣT'߉ldiӦMj\+V Zb\ RF@ 07n\Կ@f#'K8l{1znGlڴp'bAяz~]v }QƁj//x hѢ$<ۨdw . GrT_֮]FH]er--W?)hヒ>ppk?N ,+ Z5kŊ\ V0kaaaaawC>yގK t^=^-)rQ>H0|dWgϞYn07 ( $?M\%F)u=-TP$ˆRNA+YN[o5Ҳ-ߊK ?%So^{m${lx뭷zꫯ"FrJt׷[noɓ'ݗ^zg+6ka+p- Zb %WY\r"@Pz*>G3&lf[XN4*U8yζ zob n޾@jw'27bB>gj{H۔~m}#}Ν{nʅY^um%0lߏ?jy^uz WZe\+V Zb\ J:9r$P)zz*~tCr/Qo5loAՊ(Xu\,oR]%5-[7o^Or^_~7n … }ݮ _3fD~:eʔh< $Gu`B"3&D?_X6d._\V0kŊ\ {7P7ǎHt I-Z(z=(ǬW?>ڶm3N}SdlڶmuN8Ca`;eRz*O^_2 'O]f_'H+2ȷ?7ka+p- Zbž ,`*Mj>`JvW(U|e#H H as=[΃Y5-_맟~7 .eˢŋ' *]^W_}yԩ~df|^'u3? pިQ2qQ#vrnS_"'`MQǍ<H>|p=7K^{-Zb_ni#`۟}}qƂ7ǂsX1ݵ1+o}66/3Q(1k׊܋"ftk׮>+$@Z|_#%2r%'<1_|-uzm#~Pwmf23۸{ʕ+oY9:03p-,,,,,= :Ԭ ,VFdJ^`~X,ͫ"%gۡtEbYw~ydr(7CA,cN c:bĈYfnUx;Շt\rt2#>.F E-{ 1$:ǟ Q}oj6f5}  M]&bRqYXԦ<@ t/tIpL0@⛾ } 01A tv85knN\)iIѣN׽VZ{x2WLqmڵ9֫WRZ ZߔԿ2fҥѰa-u,%mq/v>uO z<إ^q/KQ{3zRx Z1kaajY(QvCJirx$EMLaD߾}pra MII}ddz3gByQ xyȢX &@(3C~pG?֍7opw=u&n 1l3ϮW= (S rϭ2ff~TB=vZOJhp- ZXXXXXXdSൖ7Q(]vłHzW`*l8@Qʩޠ3R UX[e;(pA+PƢYgL`QrTFR6zQ2=s<e` dd%zik\) ,7@fϞUULj0E 駟@\,(@^훡z>);[c6:9(|^)I5Cu3ڼp`;]t`^EA&RNq|J߹R>+M[9TWMCw/Cyn]w/˿CYcEcó pU?z{ }4[-ەN{e].͘,Щ^ulkUMJNs[p׽쒚ds}^>< 1mj͚5Ymb 6u7m_j󐌇f ۘCP qMކ5̈ƿ[} ff0 |wZľCYWuG?nw;̝ &>k!mM5qSJ2@eܝO3uOwS>tWyxbouta|;K \ ) l׮]XS8]B\ѢE]"EPr5 (XePV># ŋGC)PgN*ZN|i  v*Tp9c.Ͼ$5.j.m+J(bNujڴӀz2X_35q8A<ձ8t,'ڛ-?)ƀcUo*ӨQ?[w:Ŋs$>_ b@L?ܕƸnvҩ E';Xm]]5ܗ6z`zӴzP,H%uj{,IN&߶N-b/u?<_/[Ե{!~ɰW<4(ϿѾ|1 ;X% ا ʟ1e9J,c҅;ʇ{_<|e~ѮHB;m:չ{pM^g-*P0.v:b]b%Lm`-}3t p*(lk9k8!+hРA O|-l|:-W#zN縎}{/%?>!?ah Ь{ߓ Nky2ef %]ŃS`pxЉeggW-!=d3~"KCxLwzSuմmrI@~+ڝp2yˆruQVaie~qp>($FL<͌7>6\-sO7ck/Srmtg_;1kzޥ-KU4Jfp-,,,,,,Jڡ|HNa0zo)Yd6  Xa@ɴӠ:{czyu+V&hT)ժU> wd+Bjժ1HQ^Nۨ_jмFC ! 
V֑g)ɅЫ pLП'k'?~'hXQZǰ 5 p|bЕ}ʟE t$;ȍ\kw=ZivV(l@VR|ݮZ2~6kz aC]93J\w;֨]}+qAXbE9rtlN,3b^P^'WI [l /^Q) $Y )0Z`~ ojWUVU]&uk;畻Aŋ%ꝶ6}v_7T4û5{o[AVRi&Cg<Ь Lװ#QuիWt_-[v˳=*#'kkc9j-G}cQ$UTiLNgsNؿpo kFOaՄ>Xr!×}›@~Sk&#,/öb@;iym,|+'Xr-@jT#ۯE*jjv+S,Ľ]nٱInۂ:k\pm< qgǯۗ= |-3v'_|_7l?+sHM_?=G|Я %,"sR>cp-,,,,,,YxJGg˯㢘V@e7Lԭ|bTJJ` Ȕ}| NHy=-6;X>%E?jJi@x~l iоY*IW WKpQ Ķ`TZP)օmh볍'$+C4X!@&VaU'W΢MAUZ}ё$f%Y6I>MMz2_~൑zvg#%-ͪKM }qT'uۀ @^ &@r^%9PDZlDDcAmN`#TGw֢Wn9TMڇ&$nIA෴Gcok{@#6 ԏk+]+[.5(? K'.wnׅfzKOr/-]8]=lS1`NM/ٝvp/#Cy3oL,z{>o; ɶK,~}&˼tkٮ_ޝ|}0uL۶rHԫpQ6o[:u<뵮_uEtEEJZ2ܯLR=lH׼Y᤮%x`mՃȒE(Ac~xM3,a)!lZI)[ 4\mזf.{G?]Gh[{‰@\}X"`i&o}^q `=WA~/ns rpow^Wsl :r'`U`Ww1mh~׮l)L'E ėS+V a@IH@njZk5 +eJR)HN/Ea؇_⛋W, lBxWnP?V Ѭ>(Qۡ7.ĒA?|ftK|5bO_aFXKk>lqDA_=HԦMJg8q >\dmYo%A'<<8EWgp!)f|->w?ࢢůko寧≽xh+puĜ]Ӯ𞸩˗-WA 5^q]q?T([ g^nx́c˞?dJ,lmƧ4|w+Z}d 0TCr6S+]ڍ9W\m%2 pIE߷gwC\rk,\ W\ Dxd``\:`i]8+YO< )(<^TOx⃫NW Z$!C5AKQ2h0ےA`l Akrb@[,+˰M!YU:DlN1Y~Dgm^Lm$I=Ƥ 1} V?a] E1Ma9@[ BҜd=pG'+KXD7vhX߭Vr3>_b1#jwlO\ڇUB3IW6rEvvgo3R⁛B{hWI}O-RYmw, C\:[y(n [sm5k6ȵ8@l6fp.>i&+UfDq;=5`X 3}C>1?`n~,|xh{'VC-A2Px+H,Az|j} aP_'L v MZ$[/7'GwV~=rn\w pQPT~5ȸ~pǜ/Ƥ^KwԳb"7x}xW Z01%Aٍk1!wT튰bq\G+k& L=fpI>C0X׀sIRd̏Bԫoo)(MOE`h/V$b۔?>JLf?$ |n5_VuUC1[/@1@ 5ؽ5`$KnɄαDKs K$$ȡ>v'nP|5R}tvM>ؾ pQ_v\gݥXC38{|=A:^FU{ ྽bpiiEȗ/{|(hI~ƱLIM~6y k}{R̺#9= f7q㺗-k#ƨ X >XzY%(bE˺ܧ.P0= X!T_{+Lxx.pb6r!).wS.Wqe+{mfd&Į$`S*5@T;\԰7O cwOHS=p^=]#=Q~ O l}dtqpi>E,>74wӨ]>[hw]_S VfN d}zu p+Oݶ13}b_C[F*W >zI羪ks--֝ea4xO:.=$O\}w$A/`_;Kj((x{^bG$=\ԹUE}{GFP/O1.:?cSi'.4c{"Szw63}l2{[ؖ-["W}.8~6p} t^Y^[ۨ5S;Vu gd9ףG$_?q sj;3`ECM%g leG`ԇ'^"oШ݉ 1ԖT@]z=Y;|JZvVqLr49$Gp-, ZXXXXXX4 ~w>lrA@ *I$0hX|}oUV?b\!P*K2gTj2}O4QR_TQ# 6_+@UxI_Zʫd)ݢ-}~ Fס@H!]<iBx:whPj;UA P:mra ȟ:mnmZ BUvO\qFnC&ɾeG5aﲿm)<b_Ɖ+gK=o}YbW'K}}ı'IvU:wk\[#J 8D.??\oOJ2M4:a׬%Wg/{[h8ֿd\J=8?XijIJ?4#+7Lΰ@!g|qțY;w@+Wܠq/lxg0Yw̓\ {ত`2d_bE5l`jbp,>$K4}$9Lu5z}>dj$Y)"+7cDbA ԟ"Ex^ XTHvQ*i+>8%)H7q.6wWPomoGuN,X({ puOC{?Y6])A7( 7.m+3-jp'^> pv۞ oi P-{v pQŽGWDlD;\UofcIƶۼé3op-,,,,,,  JLN IXF N6 V4\2$'.H@0J5;/U?[ux\ \V=c2P6b$S}'2e?<#%O[%;Vۚ_L'U'5~\]G1fPM x x6nܸOW%=Z#yEWijW КkcRJ:1#O(pCjPH|Q {{BK^[nm |:ڿ ͨ};q~Iw +>i5spy^uvjZ pS2:t?~1A׮h֬Yє)S+pB$غ",vS.+!?{JXf۶m!'&|IܟO"E;- 5T\~R{n%qZCj:r,\iyam>.U+sy:`$g %\ { r~Šjd*hc~Ygʹ{c5eRN#uDىʴ]va~&v` exjN JSP) WSJGHFqc)ݱ~ >?~|tR0}n/Wyn**JJ1_6W3n;~ @%PVm(z77Rމ`# eJP=~վD F )1OZ~ H,e㽲5=\z\VQ Km(wv̈́zRr+ֵjv-RkФPפew|vp.});)Iw)珺|9] 5md'R~ wfcӘs (kנ>}Kkv@{qVt]zŻ L_g8{%L ޴dj>cG s<]9$!9? t5;()4 W 5 A1k\Wb,(**Q(H\3`HVb`jmf|9 ^;N^vd / LZvc̀߂. ER掆jpԲ%w m`{bwN8+[%y>Ź.W&n?kkYeYefHTve6L2f9Y+!Qb.:̉ƌCk @g ʐ=NMOFwa>LEl,MUl`5"Y1Xe񣥳:dȐ<՚CuJ9kK ѳ>(o rq ebj?')0.*Pfʔb"Τ6:@n:a@ķ չ/7!k p->#@Rwy kؑ x_.YWCb\bn2=N.{+ۤ. 
ĶھF|3,߱Y +KFoeW;&ܐ}ѧ]9N\2,˲,k A:u]4dK&;l^i$>X 4I&U!~ pձ'4 d}9٨dz(h&^ 0]w(WRc, cJG\=q>ɪn:,Lԓ큛 p,0lxչI;K^z@m˶8Y䬑5vJgٖ>ׂnUpn{_٣e6($dpRJ ^֣#2A xpt>~+$YJkJo2eJXO?41ǀOG*{Szoʒ_.`WS)2K-ꠏO:zL=@5಍ĝ/ uϕm;Bv6wůұ붹ݶ6@)5 m[ԌP,^#yopl>P8~^Lƾ WI^䔿fx@.* < EG EV_@i\n*-_sljOO@ؿp`hpuZn\plײ p-˲,ZL5Cǔ̧'ൔ:n ke5ݭ ٤IGF |iDN $[.١Qz/*BǕ#l Qq1o9V8V93Tk׮)ʭo}t;IJ!̫\Ef-Y_(abe:,b&!X<\ǚ_t2s\̱ecdbeAf[_WƧU,e26 da z7y >0pInrYf2oj*-Ex=옚QkwmWf|Qe/-Ngx}N'X>~7o|~5;QcI{4;?tS<'| X0tOwwl9'zwr'* <5ce;LֱOSe%s>.ٝXE0!ZQmjZ^Bdz\Ap @ĉٳgb1V4J(wdGF|ϨL+bN5 7G2tM.-AMxI=ހpWB?:;}R= Kr&QyyF/tcy׉Weyȭ!6^-e[N\lnT۴|ŸYח➂$aDsl?lr|) >`,0/dr۝%M>G3ͺgf Gdfݼ8;uu@SX&yeZeYn߾}N6QՄ&TZ^M2ZkȆS( 4yV+)B ?)8:o% &w p)@ 0Ru_d2y:㝙(L`t q{#e*cAk豇LX5e@8,mSғʔRFs ZJ.)cv~$;HAaG079~I4$(#;FPF[}v]Upv[:PG<}VG.xxC=͞{'~Kx&7/S}RiY1T\GoY!^Ӻr}m9oe\ˊ\e!-#rniVeO&wz787S?J&/lzjyMbߘ4 VX'g<{K<'x}j.@1`Ax% c+z݇6I&}<P2gMy@:{QId*+VJY\:je^S':`ે)"4qfgDLu--Ƚ#wrG @l7eYy _.OڣyY\ݴB|e+9+[n;_ޟx,7 /ά_iMٲ-{_f9#.p7vyq)kKxhRHnbpA]Cr\>95p6 p p-˲,7?E0MM%]BV_!ﳽCA҉÷3k YMe=uV('u\I FXH,$W 2v#l":-[LV(=i+7Ǖ q%ہ/[ҡsq nr- ٤\^!1 "U2ph w*؊s}c[9qݼyv 3lxtrx&IvC> ;o1Pum8lczD@ BՈ܁YW @_JeH^<ލ{V!~涛ق %`TVv$6-Cf,HeuKef{~d;K^|GS}JG][*CxMdV)w~ ŚT볁!Rʜӊ+U]խiƶ2]vhY[a @}W M+3!QQܺbfen y&7{/Fd߮J*u;>'o!p{#=)Hd2rOQ?}U{BAӥ^yu&Bɐ|SkZo eCQ.YL2petNr,lӽoUP"FP`1hР`~x%K"&=#)yO?^{o?LPPѻ.B|R>*m[mcY /o\cCJY]>">a6%w4&ŷrҳ똱Yhfh "c!8{DjU ;yǞyuZo,TMoktcy\v'.e:IC2 p-\˲,˲[;s$'t A,fizAUCSQFd)̖+uaUO`* % G&bdid 0!'hHk}:@b%dzN (eXg=Xv˶CbvtL++GWu[WCLGu' t}y38}(Kg h/p,s"]_d'7oa#G`:8>o8/OXt#ff08pWkx瘓Cou lyN~I\lVld fBg?`|%DbPPϺwʷ )K)X($m7/<=gᓨItmqsOU[kXgv S/w{=~{{'G;xގ-X Y?*w¯i[}3v 6%yϾ%s%rO(:STO(z2.'+IuYlO~܇2u&?Vcr4xĖ/[lQ<4(zQGYdD_q߿ju/^|L&s_'~E=.(ZzPuԞu;qOk߾:p3v @;_5=_he ,*}|sa]62ɤelC2sʇ seQadyCno42,]2Oob|GdM'(ˉc XIPMg+ cʁsfZeYe"-tvd.፛drX h#n W2}Ȋ8 V2aԩS#  2ۘ. .kkO꼆{xG}IݬsnSO=E|ɦ%ɷb`.FD}T~N~<#K[dr{iaJ+#4yqK,|HtÊQ7ܯu9˻$"!;wp͆@Q@n]ec$7[v}""l#IXK-I+8ovmr*06g9$+On#LsWyekYeYpIJ Oe"YNtV52V: P2`ҡLǙ f&#,2۠SL'^, DG[J0I,N:)t5}2kd۬հP&2l}΄ R.1'^2S H#ߓm|FG .M-BX2%F /Fp:q/uku.u- ԇC2/Zl%{ z>8X>#Rԑ|>syeH`lsy羔`8o=N=cqNJJ<%/< ޯs?Ywc} \.eYeY%pMg}:ft,f%36dr.OdwtjMd KF(Hp5Pf) :[f-C]t<.S&e27";IP&9rLMyy:O-: ){AaYRّW9ss XS21LV3825u۩+7K=k u7YmzvQ`$ܯxG xWTcˮ>ٱ3fz{e{[oM`Y999޳ p+)vv>K\AɺmkYeYֲb.*0AYFy&#)jgQ04 ĺ&\\e: p-+XiH*m3< ps4DDvZfw\˲,25-Do&ekℼ V]Ic/a0,\I06'N'k(:ͩW^>ſݮeYek[ov'ԩh}pؕ1ak:m~-<;z'X'}ɋcY \˲,25-!1h xj<M4I--X+0 p\k觟~O@#4H}kXsMOvc>yakYeYVq%SNaf0gnf /No E: p-ak"&Bf"NdŇzh ;cDӦM|2pO.^8}R-˲,ײ p p\0>Z`A4cƌ`&OF=a ڲb%O?=:tL,X{ 6 pcK) 7djYeYe:akQ={ ޓa }饗.]5o[e I''/>m۶#S_~-Zy⯿:Gϝ;sAܐyK, ,˲,\25u8 p-\*E^QΝ)SDzm&ѬY뮻[2pP\_+vgݐ+pK?̘:uj&+C'ݲ,˲ p-0 p\(=zt4rȨo߾Q="e[7PcR'x`͟1JuKo9䐸qkvUϟ?pd >2{oeYeZak:VꫯF>hxꪫ"eٞݢE)͛7_'pBIȚ, ܋.qݱx?G2!N2pySn…̲,˲ p-k%R m,\^詧'zOӃV6WW^ܬYywer2nIfoC|w>ѯoy,˲,k)kUh 7LW&n <|xd²;,[[&,[nܨQEج`mnݺu Zpd*я?7dಬeYeY}CO!FRusΙpߜ8F|eȠ:+Nʙ9 ˙ñ Bk~R|xs/>F쬕+^|Gf2ŒK/4ӄfg̘Q/zhɒ% nO?MolmMdd},˲uXx+W.ULH1^=hX#U*Tp\+ӯ u Nux=Ǎ9-* Tu+W2ZxujyhLnݳ90={ƽ{sNݻGC qgc=PU ]vѼy5m/mذa׭XN ѴiB&~nIX nPgxߒ-YeYjժUX1l Q c5͊M:\9VA4Xqe8k֬Z#G @nرlƒ `СCܗm]`Ynɺ% ؞)lŋd[eYֺ+":S# wܡeYeYeC[4hР~„ a~,YFG}t/R ʢѣG+him n V3gδeYe:S#8":]v%*]O<48J4R`TXgY>ñcR+СCjj4yd_VФIEĉ?8d.Z[A`69Y];BV63kYeYzO>9:묳=#*[OeYUb2,k}ԩu-{'W~+V+唁`ml%ʶ}^w_|t-OeYeYeYeYeYVI? Џ)>[}_,? 
% lYeYeYeYeYeȢ.Y$c̙.Ж>[j˲,˲,˲,˲,˲$-0lZ^_$ϟ"XK=|xb@˲,˲,˲,˲,˲) n7@YZ #pkhkYeYeYeYeYMkYeYeYeYeYeYeYeYeYeYeYeYeYeYeYeYeYeYeYeYeYeYeYeYeYeYeYeYeYeYeYeYeYeYeYeYeYeYeYeYeYeYeYeYeYeYeYeYeYeYeYeYeYeYeYeYeYeYeYeYeYeYeYeYeYeYeYeYeYeYeYeY֪VΝ#GF]vYN;ET'Ų,˲,˲,ZoZVSNW_};>)eYw}7СCK/E^ziTzu\˲,˲,˲,ko>/ѣGG'O,˲/u5jݺu,˲,˲,˲5A5jԈ>eY^sOGgLwO}\p8p8UVs#4hPµ,˲J_~e fVoFp8p8j2e7x \r G}#oYe_:ujxW^Ԯ]Fʕ{J>p8p8{nrŭׅ ⎼eY~7ވxhΜ9Qƍ+z3˲,˲,˲,k6l}+˿0'1,˲;M4)#GF{nۦRp8p8ժUKtMرcCFǏwG޲,Z4`讻5j(A3p-˲,˲,˲5B5jԈZno3fL4}t˲,˲,˲,˲,˲,˲,˲,˲,˲,˲,˲,˲,˲,˲,˲,˲,˲,˲,˲,˲,˲,˲,˲,˲,˲,˲,˲,˲,˲,˲,˲,˲,˲,˲,˲,˲,˲,˲,˲,˲,˲,˲,˲,˲,˲,˲,˲,˲,˲,˲,˲,˲,˲,˲,˲,˲,˲,˲,˲,˲,˲,˲,˲,˲,˲,˲,˲,˲,˲,˲5*1bDoDZhhΜ9G}-Z(5kOeYeYeYeYeY֪; >(^&M}t-˲,˲,˲,˲,˲{/z_=ݹs^xċ/]e?`m-x[zrǎ^?_,˲,˲,˲,˲,*iuu޷O??;,>#?< .޽{:DSL}ѐۭ[eYeYeYeYeYV g Wp{GC=4G7G  7K/4BkeߖzݻGӧO7'Բ,˲w1Df͚&laYeY*J$X_щ'"Q˖-]wu<'[lE8Ž/[&'JAn晧~z|I'G}t&x{GƇrH,XY#YK3/"iYe裏9sDcƌ믏h„ 7xc8N54'#ԩStGp^zin2ecYeYA_lvcOOٳgGmڴviTP!UVK I23g cǎj+7&3WKflx`$oM[˲,ZgnƍG 6 p( 4XO!N81ܬM4Ix=cg/),ñ h{G>`yEv}w@h=eGXQVñEǪ}ucMn :@H!dĈ=)>ի!+h}:sX;vl졇 }$+w…,X+6V_/ֶWٹod9?_~,˲ ap 7}>qE7˗db.ߠA0sOf ~2dȹ{GU `aWd˪ ᩵D{Wtyn6~=as=7le̞s9x/pԏ<ʾm[y袋9GժUvmG*TLs8Va?7} 6_V,*6o(8פcUׁnRu-6_XJ+Upn/}Wƚ#OLVi' (I"YDm49tҿ)+8ڊrn{ףÇh"x+K7ꫯ>5kV UM|ui_dO?>eYejݺudwL8 %P l0HCS"8KL#yLWF[a=^Ԯ];Wn@]`.g}o˾s{'o}.ΦlliӦXd"1+x/׼2ARǑ?T|pPOrujv{t?X%bfe׍ݡmT/קc @) \[QC~2U/kժ;x}:J@F+?!KPһwv!i%iӭ'=87zQ,:Z+~cї& Llƫ8,Y2B}ڐysٖ[eYknMըQlWx)>Y{gH>V'Fm2g1N>=`nLUl=pp+_@\tWF*UVbw-ONYm%@wd^qOա*WHC9Ν?#[ya=IOicUT]wCcCJfkUk\5nO-ګ[SOgs]ps^:{X}5MfYH>Q?jF2~BERxsTm /ogȊW-;*~ayRfn, }K.Ϗȧz*d!ٖ!eYV 5.R{'ٱ \y+dbڶmCv;`kHlU%ʘݏnP6d= ƺ˶`@$eкh*3bq<}&[A޴5f`O/vկ_}1ܨ eR{et1|'kʨ ^ve!kK62tw>*k"W?ZbWMvq8VY|5W|2p١ڈ7UЄ$URbfw>S'Έ cs}ux.$0 .+F2#4iJN:åшHQKmbUXg|vE./Q)=X)0IHYv˵"z8ܹs#KLlfYeYk j*X)X6sdnѣGV ]V0-\d&6 xڒ++TcZJv 3mt7|+ &,o q-˲V_~e4thoMdv`m&zݛի7S tH{Re?p&~ϞbIO, "!'7S2lO籾DmlXyeXƲD}!܅*Kʾ?;5>3-«^} p-\2u8 pի<"NNI%F#/$V`tvK]YoI̙#$Jyx9faÆła3e=0[eYZ'A^ }1㧞XU4L_VJ|IjllX5eg}?I@X1@mHp nZN&+W  †A>T[7X($~repZ<1OZY)zq#Fc{k#% úl]7 5-er~;M1{~#\};&SN  p-˲[s'G48K0&.g̘\_2dn:FV Ge], be~%%W (b-.Uv/W R,#/jX"(<~ѓգx5*pnpdհx/c򝺏F2hxn:4pzRL]hak:VzgDƍoAI?Dvn)pO('PY14/h+'Eb΋/U4pյJĎ,˲J_$@KCE7B2l@_QcU:Uǖ.]:Zj-2pr[ +{\.a9$p: hp#e6X(lJ9d~ن(Gj4d&p5\[5槗講xf3K'7uKXKDmFG}O p-\2u8 p矇~ s.A$aMJĔ0 w4k,_Js,pCE[ApeЙ И I,˲,kGHJz2<EL*իW|ù9E{Yg*%dj3@h,b )6,aD!jK櫀53խ[ , *\{} M}+ Utb:5I%d"=]~+1[ 8ÛX_Oq>kPaՠmL[ZepZ K6%DJ`@}42ISj䓰>gVR%{ώN=Ԩ_~a=cW|ֵ^]yL6^5Y8N02u8 p-\ZHМϣMVZApn~Z&շow%p|8ٷ}>Df@\^-˲,kZfN^tE3ό0GCX )i`IHx*+vY=bVNF.lBݻw3?e;2 ÍMf͚՘qaw=BK_~eH:}2i?6 `LYn!HX?>3 p\0+FɊ- HT #S>^f.LA;s pKO ڞFjײ,Z{8 AMpg%\m8 KC %:mڴ9Cx. & @JF/"m3kرc3, F.C :4=zt7@ @,Z>ǖ [n .d5a„;t1A/PmԩS)6eh 5|T1e6Z4ļ.Olcb=&2eJA!hy#>@ >^OIFY:(I88y(,c9IhѢ0s;ӗJR!xSdCڅ$cfFaO,BeYe7PPbS͔gV@nǎוHn4iȤQAC-VANJ1 ]l0h\[66Y?@8R^iWK工th~s֡U Z/ԤͱUE>y>)7bA甑Y综ǟ_P(ι7|7ۧSN tĹd;0a[?V:̯zo{:<posFmpM&TӲZ%&X}PA)uVz=#77x@ԝ<ʯN.9GCp :> AGcC|J_ K򊈎IK%]pހt941e` lAsO(c93¾/ BG%kdMS{u+0 [~xZZSg;+GA1edERz:yn2L)^.uW2@_ Cz0E=uԑd0zm]#>ۃ0::jpºԣٰ=L,o=:lua$ >(Yzԧr1p/y]s\`ey i-2X*) ^)88VHF@{.:k2FJی6 m2GFq ӖJdQ싶Lv3ITzclQ73墬m?IX&}Ǩ>Fj3$2dؼ1a(?5}'pB; gkkYeY+]4 PE'! 0XCXovA3;[bРALca:XN^sذa5npuoJ`S~0ґ)ʸ{:44ꁎ@W~a4XGɻF5*c{}W _:lmWcI{j0sv긟ѹj27$C9IA^'o7}1snt. l>1N E5{i!l p'at7ٷGU^ N c6w/sGT-|1[ƣ|P=-?EAg=՝+ ^?ѹ;vC=7=c <آ^z^ɾ>F7ĂI9$;_z}Gms0B^;G:Nbiչ,ZooLpv,֨ 6RǔNt7.up+@.ñUos5P]2Hո?0/ yij 2-/|ƺgϜqg7),A./Bb֟ݧņ.g܏/f_O|WR@c+XV&@^.'m?LG}:avU<_SHHU}kzJ[d]w dܤ]ԃu0TS|uw>t}M1}[=-GMIF 2k,2F"Y+ >LZeYv\@' & F 5ëb}BC:4i,إ Bj X.S&VXŚ.Vg"V,-?H˖V"=VdMX}>?ֲɜVߴ5`Ǜoi\|J[VsG?uKӰ|6`ٯG|';7Hm?r?>?C)--6hww%u˕-t lBwAPRl2qA$9v׋nqAzWx| % X-]w}R)}Hudup `t} JG=uL] TJT#Hͧ 3:AejwI,TG2eI |ɪ=a/ysk^k֥b9U:_eMk?{-L+>)hLP*KF'~yjKOm3~-~G|];A{hY>qm+rt.] 
SפO{;'˹Y >eZq<}Ze]h^S[#:BXudm=tl{ c \Su&) TG'|F9sCHUT-/Sr{H}qP~>,4>h8k;'s~I7D+e )sḵ~e /"d:犯펫WE 9j)k@2.(ނQk-Kw_ʑOw*9Koie&;x, VVڇ*vq/W',]k~uv׫:xdxȣhM0|.5״@F덡To;W:!;.'7Ip_Ձ<6<daByU d;VݱI2cE8u?9_>V^pwYٓ)΄W6!2صu6qnmǭ&@{G<ɇ/aڵ{kp}th~e'6(CpM(Rjo_~Nw}q̝hwN`=mo H.77XSk:H ]nIoCI:'i+qu00طpa ˇB4 h̀ Y "VlxJ6^sRn{ag2jy~܇RuƧEZ:gSzu]ޡѷ{<6w{OJɂzO!> zlNP[i3=;Q8w[Rݿ*snyǃ 0htȈ.iX˯>ߝ8&~!öud>nscg@׌.}6fO=q^B3(7'gla`O~;dxӿ!.;bи#O`c>ZpWb`hSPuf*-qہa.O5f p7gp-*Pڒ[|Hqe$CUݟq6/ڨn%]G6z~SkvvZJz:MQϩg[my%`ǖjM8fX) \e{ҝwޙmР']t>Mf5BZeY\jVc#dEА%F%'.Y ,_d Bi2udTi2$Y:x d,pI6 t:%zK-ft'{=\?Ahj0D#rHxs]vtcy0EqU%27H9YmY _':7(q'߫ߋug'qtP(Kr# *ʾL~W_l&v?-q\MK_w촴kn.}ofn1s-nolxR˞>2Cn>B8 2߬L2oޱZ-߀xK60 ^@ PCyl -kR6mڄa%pK|_SȑP=6y,GHdBKQM,IzX,P[N~9ud_{DMfs[ {Lw-u+Q7wB.mߡe@~hZ .p/g nIܹ?f~P,7?:=~AmZY6d/76m L(Mf^#IV_լY34P p>vhi@;@5bQen UaEۥ=alDnh,7i$ 1# F8p8J<|ph\s5) 3e<0;p (A'%_deůcҹhx=Ua*[}Ѐ3̘ 7D;&#Q^ZRtoK N0e$}@G7R}Y x;Eȶ/6R5dZzDŽ}67\=y.BXg9o@^]355:K rSKH]OIq rX $) jȜc^:dq[򠈠l<,-Jp@ʣLP[n%I` uVQϺo68_A.v: N:KǏ %p'엫m, ҰOY&ߥU˛,{reۚkZ|Y6lC;UmGN uI\2l<|nvS"(a p p-˲,Zl5fStܩvtS401aO`ȓ{4i lmP^ 9:uc~0Ѐu5S.ŴdFA4|&^g q &2Ch8CŒr2t Y@Qx9엎 I p@Jdch,H,9mc%žd{13E_~;@y2mwMyAK-sBFiIܼΖBK.Xjۍ<FS6Ο>ZpMjGZK9Ǽ|VٷF-76?x;u·a>~`)?@lf[sKvPRs.u<% pipS.̂._I}Y~ݎX#ƒ;scP]]c/=L[NLz?w@nԢEmĬ( ;cm65,˲,k\u|xSOVQj+5vj`Stg_pt].C:tt/0e̒. cmc4 iDJ^0&@{ow ClWd1OlAXH#_fn9#Uuub@yun1p(SPnBq2!چ` zE4Φ;H;J,%Y΅uL{e'_eqXfmSmY%2mɔ=t]*3>Og{t&Xc7#nz.ZOPO\_w ]uYQ:u:K,zBQAIF=z!Tpz!O0w1$v@'7xЭ:^qʶ%#+(6}}01ڒ# b`lQ&=.0V1w%2#83YqA~շ_($01%/ww+u2U\H,V\sIDȯ-&߮uA{Qc1,Jx4ODaÆyLnt~R+4\\˲,˲V"M&Qt:j|Agig4XTy:4ro`QpLj)-[v12 +zS]10߫gu2u@Ԉd :Mފ^I>pΤ^cUёM˰4d`rO/\1 pӐ&GŊ?PLc:oyaw&ZMoqɬns* *E*R !HeyPE#n^%7*I(МRѬ$]gqNC9=>kzzY<@HyQ~&();g̊?&%\OtwgťKmJrg]ڭQ$f7 zcفB9⧟Tdܴh"&r*e'5bH5.פ_UW]Jqۢ~ 4fV.U7(ůZ*h[19&M'm5r`rH~ e#ū {~@fâDzػPPƿ2=u C;$wp.R%.y겱 ㆄPnWM{QV WG?4Rvz61LrݥqR#'u<&4!~ұ\}4}+T.ko֧e2=#K ZheKjXcU[n/}lp .W} p`ñ j48#RL8@ ^ˠ55@.@Q0J+ _otϤ xR!sY8z8PKPʫw}!=Wz_Ň"dH\ @xCY)Wcc\Q|J DƗ["CUIMs{tl(%9Je9ҋN -k_rRS P3\`W+񺧞p|Y^Neo=ƹQ8? :(Qi];!{wsqL pvYI}ϿO6hj{j|GПy'JyRʮ`֭.zhu}={ \w<`O~IV|(7a<t?*pH{ܧu?|J/X6p)?ff~\q/wa} pmMp{=` LuSgګ}}pԘ~!{8\fTPNHH8}V…L/L苘Т/b  m۶u&'{ qʹ0Q"UnбcXʕ뿮2\3333332`w}~K H\e.@8Y.y2p {-h$-^+ psvI˃>Oa0j`)v;zᗫp>8J[]7p =;8!  A2*.mWrMVehNBA_RZar\@\OUW\=7onڹtPy>6̨Y/_e*H`P߰@܉׽ᆱRW8+I^jPp_p)ְ,xk7u!W>^yt<bYͅ},R\o]ԗ~N|%nD'c(ohFo [o:ϩon #$q˄=ۅݥE:[p79` wK`Ҁk'pWCvM= ذv81+hOA压ڥ}V0qkml;{1W*w?ਣ1٦ @Gp;(&臷onpvZ٥KzvD{ "и.n m|]ۆS:w^-׼04,jkJo p}Ѓ?.h9lk"ڨ)h,R3}3b1`s]*Ok2X i 5n23xMc:Yʒc`U;xSNqK@![l]~;v"8^V W_n@ u߀, ~4ɕ?wi=&@ҳ^aTfv Ddz.m>\(f3n'}#e־~>.g*ƃ[O@0vk(7pCͫ< ܀:3Pԃ..K.r^׻Anv*ti2_:>H]djL. >y{JՃ p^)Ck6O99\8iU G=poX4_Da?fi+&(p? p)C45k&nV?W!=Ϫ@GÆXڵ?]:*Է0qX8'Y4qVB}Uo嵳1S*J"};}(Ŏ;ƅN ;<}v/_qQYkjГ_|/X<;6pǓ8f?$yi\U69eN{$ZS쐶A7^߿Q?\VFQ{ҿz7QC?1 mLp UvErn곋ЗR3P@<ؽj^MbPnJ{wnIa$\{. 5333333{.G"AiYfV j5XzꫯNTP5o> jժ]]k˿_a2.*}ƺ 4:e!G`ѐayON:)ͮ"-wiJYt{tΜ^]SpaFi'Bw F~Z;@6x%.XK?s#>Z'wڹ`ٝYJ/)-zw4CLf9O-km@\iڠ|wۥG"mBʍ;WP_wq೽ݿuunS,.n PLA)x_;vn6;P:2*\0+ H='PH?iXL TɭӍCXhۻwv+ӈGЫ*cG6VC_2*>ӯ3>r}BLm ?ı7·jժ۴i XΜ9;D&$m&p (d@`Rʆ 8(8C"E3t^~B1CQ/PS5`P9/]" p fЕkY ԗ|O S -HwiÀT-8PV5(n ]~`b\[i}oR%}WY|wus'g=X>. ՟ӡP6yRM V.p3=}H؃pɸ8#n~ViE;S_ 8?rv+;?p'^'21ȦM ]Uu֥ O~TcRCϢ/0Os;@1i`1Wf\uO ƿsO8q(_v0s?7hcgNV5 8v ur i\fffffff @sMѢE4`x|%f7g`Z=XgP (.L VkWjslL 384o\A>fAgŃp]n/R +8sE*P\hgQcsIW wc=N(kD V9oPf.0tΒp6%ugO sxB8\p^8OO ߘZ|rP2S.^}9MG=: ̸6Hmq6h4=p τ=; ֵx@iY.%.~WIu55Mw&865p] PXVE JM Vl @S?9I3 kSzѧ2F\. 
ζ M-LI)WDc@y/)' F $>W/z?0n'AxCBY=!Vp HEP7c& M&=L}فʾ:iF+p=$^RNT-|.N u.*bTū~ZA%ڄ,,1O N$x h/qiho;u,ҖI%7@ScX{5YCoZ -#|jo(Ygd6+|;cM>׊|'(]wun"xqq]l/y˔)O}{O3k0 58EԠ($/)W&}~3'7K(%^`\+z±&^B"NR+iO)IVi ER-T0,_pS\NJ*V,V$gT';PHpS3`6x1jw*·Ymv1C,we4U3|з+J?08Kf\֤%{_A\~Ji٠rlhDG xy8#Uz:yτFKVspIo)^{{>.@tO_ßqF6R]LRwƧq*] N~%W 07S&lF2&;>|g?qҁnjUCLPD2[F?xL}bmg:\]owGi۟M'Cp{CG=QDN濅׌2 mݝ@CcF}q?  p22D=VcpkIa6|ATàkq98iU4ߦm y7¨.hWD <r| |ɓw'[)H;UnҴk4S# Xj0 / u3e!u wqMߠ2jXDQݓm}7.+xs^)߯VſU;s6T۷o_qB;]+{us@Zޟ.] 6)okL7-;[/(]Iݺnm*9ר7>Ç.Wƹx.Ti&Wg,zگ?0ݏ_[-Z.߿[Z.RNOY/m޵}7KQRN@U \_Yw]{mt$}j幞OQ2\$eʆf,ұ/ ?;7q:G=7>W}ߥЧ_m/p Czݔ=ۇ/4kOvy$;[g~SߍuD8H>E3[}{(pĹ: 6OI{R[>?>Rޣ~uOA\Ʀ Bٿ>R:L&mb*pp嗻5&cZ>ƯM.]VhIp ۬)ro.QD%Kp e2e` hU pn“nh`CUX=DepJ\ VWu% Qjks|| y@Uaq +RUYR ?81^@Ti(k0U^׀@ i; h]6poz X+%qz}[7l/ p7T~e 7tSWbG| ^m.!pChmJAR\ Pq,z{Ν'[ʖ;W 4ptCCܿMׇ%*I˺11=>}iKD~q΁C:]NT=dltրe_pY~J|]{ Hl >GK5ɯO6{v ŝ38%.-ӝÑx3\T~gnS3#p.;+'?De5ei.p0D1ZOUg=c 0f񛻲J}SqmOh~q%B~G1G~%.L1nJq ߑ;+觸jFk7n>c;\k5ZLq^z*gp e'#/Q" hAZE7"Բ>^~p裏hPltti(\p& < !/xk8}|Tc@ z8:\߹(9z(R'H§R[׬  ,'.8d^'h2E;Gy[prez;C"(>45 {TKkuɂ%$j6 }(`A^)?8}e?%KK ]|'oxD NͺPUH_Y|KgEa8\pӶ;ۙ۝A_JiDmSYs ?_8=f٧>sֹ^* ,_&ڬ2L{ 0SUL׭g6 H?t|҂?3dr&?tR&ZTG9^@ܕQx%9?1iL%{q\fi7JRY }OgnHM;:'[&ep,~'Bj'f!~|&nҏ|E97Wï7q?$-iOӒϬp=j5>>6B^-˒g(keK?8*elEg>іhÌgiA2fnRoV>I1Ʀ^AV2>K;%\1veu7g*=0M.\pVZڵTzu7fr0k,.Ơ^@7">0x02p+o x̠Zِ<+QWw~i-j\xCA<Lu93Ђyp'\@{<3Z@~wib`u940PgП96 @ؗeBp=@W` /K%= yFFٰ X6# >ѫӲ9 q/GJ@_5!?qG9{? OA2MK_#;*K\D˚~+-F;".N72dʃxX8^G_IqH?q#>DdfF|Ι3g|rK;wt%8ۭ[Dw p e20e<&W4xyxfEi`€w^^jKff^v)sTf^Eвy 29`c|H3'Ƶ5F3):^u \k~|4>)o" _cp =v<3حx̘1Wcy xw`ׂfp-X0kv Y}Mt'q7-JX͚5ǎ+ q\fffffffp .y0wdL+Z ~7e|s\  `١6n:hժUp5ժU=|vO?tPXX)r{KuvF?rp 7Alhe ůnv4 5kY365jT+nrʅ7մC[Fm?S65k\  `Y۷~WCǎ+dݜ py晁Gիsz 53333333kfׂfp-X0k*ٳg'~nٲk^~,vذaQbp֬Yg9 W_5kfffffff p Z`,#m)S]%ԨQc7't\333333 jVZՂ *T‰p Zaa.{^J1k2#r-1-ACpW$u֕:uejmBJ[o%KuVpvZpWoذ H3kffffffvLsU%/_ނ Ե%JPV(^́/w,dFX />x6-dZ8묳;Q%y'n  ™sV(_dM,Xj(W\ AvT^:s*Q K{@ݳk׮NyXb7o^ƭ53333334˝;wpꩧNNX>#O<ʔ)xR:ZYtҏwڐ<vwC^-XȄBφUKx)vi!3CѢEըQsP6 -ݯ s-;Ը00v6-/qmgoҤIQ5kSNCMи.Oߖ]dɎxkV٩Gs%\33#aѢEȑ#Ǐw/Lbdfv0NIN:U]צMZi:a?6lduƍ-Xp]&'.-dv׭ZrcÆV7B 254jxuƍ.XBo-[\,m۶ ZhAoD=}﷾}^֦M@w},n_}Ufm~7?}633Kjݻww pZj \z91~i}A,]mժ{`g;9ʌg9s7o^ߺ :3fpϚܫW;oه,2J0f̘`ܸq!CA;wvBMJXc73sn}y_쾛+435IȮY` ,X`BZ_̚5=>_t1ҥ aI5U8G9xiӦ9l[xqЧO@8 ^up}ࢋ.r= eiO>=;wn55j|ɠ_~(?|kOٳ| cFDfff4V\_>/?cBlo{p(+ :hPABZ{T(,QZRoP/c?*RۃUVgp?S}YR\!?Pp3o& 4pʵr =0vb'YV56)e'xMPy?裏Gy)OQFy晿xx1DŽS)+Ə3;6ey5СCVi=k׮_j!o`†g婏d>㎗zϝ>>LKҥK`ڼy,7 +VlV@?ஙYV5&U&Lԭ[)qq  sLye|jmԯ_ ـW?9p[`-K.$ܱcGQ^{nYp;M4qi?I-tIpljW%۶m˴4P <X{.m@\rѣG m \,m\pAp\ss{2roU7ѣ=/E/]oe8hb^}'z333333333333333laY }jlSO=P)~#M7ͮ "{uPWu_Q=ƍÆ sn_:t %ho|utW#Fp.\8(YZkeUΒ:AlƲq6dBYfg)#=(ߢAM| %1ԩSR{.PYzK~Q?&rl&W\qEpIYg~_hQlٲ`^ u…{yy䃴2xN~^& S߾?t~*x衇T⦬) ܔpQRXy7+e/"Ppu`/3Qʄ%J@= -\9!y&O7_2 rWCG޼y] ʦ^lu'Mg=#MT>W^xU,w.e.]q ?ԬY3h߾/yq.WĩG6lp!rֹUTkǪ4[~PG*h{'ju\o5tQ|cʚioΡސիW|҆hSQ)j6AoN,-\vRegGE+(Hp mٲ%b~?B:|q޽{;tTթSA6ˑ#i@ 0 Pu{m۶uji׮]% JZ^&tL2;`*kϞ=7 IzYҥan` u0eEL)}͛7~w$ ҦRsz_)p}YbhF|$B|؋?'.xKuVZkb@z=(;` HHweƊ)}AfGZ"P@{ꄠ?GMD}?QWZl/R_08 ZŸ/gʂϵH'cuu X ܌O=Iԗ8m,sBbR:[%#7@ImK-$㚊/']S 8ӞQf6nsZj.ie" >'΄ LY[vdO?t1`b &Փ8FU>1pP@Hl&hJN8Fv}θboX"ǣj^h#@j跣yrFq,}P{ؗܤLv pKsN~ڒ7A=Ҙkq< `J<4'@68iΜ9@PRhcۨO1kT|bfAƕ<ǹ[=b=w\yFQ(:@a%!O>9`3= 5^!) 
'*/W Tv53o&_ոt%3#sM(icxˍ |R~N\r2`BbŊ£=c(cK>sn+5k:ב &AҘ6 $ +/s\> &jM&j([333333333333333 pQ P "t#K\0H+emF xn$ҕ'`J\6$Qey [ڵkoQ6Iey?.Nո6pYjƁˡ>P;u)&Tݧ:,v9KſYK7(]S(&+~C)p ( rƽ _K-J6n:۷Pt̝k m?6s9>K~ $]`tňJPq@w pVr SZH 9H\\+0BP V_ m#y۵u.GQh 2ڠYiOoXe,L S)7KeYYxJŋ?)C6Pعs6J^DFJo Hib!HW?v^M,].PB{=Aa SQXVK4].nS9,C-\A.D#w`Roz)XUytq $7e)8K{ rFƔj2>iX7Q7Ӳqx<pBzx+ޫu<))0,%K twJxpU"@]\b@\)U VpGI8%삈kފhaW߬*KuFir Oy ċOV77yBy@+|n+ /ڱcG.\5 tDeWƇ.I  D~~܌k\ir;ʁe*_z0M܀>=%-Sw[QF#UҪ߫>T|K]YupV5CեK]JnQpqK:ҕWM^/i_tYT Xu+o@W>*?ݻw3gL̽凒X;]4hui(ATSO­u{ǪN2 .H}@c\n: 2>z2ܲ+ "@㗫[#ePZR>U`--W@ѣG;'EDW=xVC1(|-y6[vyH Eu)Ec{269~X$@ĉ_&!p4)ur%$ *^(@_)頨9M9wߐ&| (z׹Pw E<}(6uB`MjK\rjQy9ղT1Z61C KƨK( X}y8CԻսoX˦={B-P h.+ M|ڵm(pU^Խpk|:mfffffffffffffff]@.6)OQ&6( >Qu񰛽> ԋ۴QW1(ISr\Dq<bS0)/ O>k甥w ˗y}Yj]( qc5z2%]B:8xN1*@s.\Ū#nɼ~pJTN%Kс~RLsJfԚ~/M䇸?T"9_2`W:F|ײ ^ÆsYX0(mT˕u {%ǀT(D2_N%,0<?(~UIz"n!P&^{-y:vi_@ހӤA؃H7 ͞yX:ujk$۸f{9&ݳ*WO*3P'w m [Lic0A=U]'(p(-{H3@觔W&z1](kU1 飜]ǭ`qy|+pQl6M,-\8z81w9>; BwٽA! r;s&AKsJ%@j=6r8G=%!-W@ wUS/\E*16 e.&6@c-A?*/A`m 앋 rP[#*e 哶/K1-$ؽZ%I`W_}/"{.&97bF,ǟ/I| r!%pJy۫[y g@NCX_)sq}U`Dөgݣ}"$ec5ȺW~3Wp?cn9?jSՍD;)[._lO<9m &(.s ܌Ajq*?TvWW'٨lyW1&KL*VOǸw1 U] Se7 %=ڰ/F=2n}0333333333333333ˮW0v_,Q1ô}lmrF'`tYZ@͉PAȖ(/q*S7 9L=+8y_( q }eR|pW\逖Y"'uh9w?&blm┪SK]+ Ȧk~mqA37QCŠ'iQ>u~I|Թk@7#u3~.I{ʻBj=?r= 0 Wݓ:UzfE(.Rrզ{ S[gR˓:q `;.-% >ǵS~ \(|Y`!p<$(Z/9m&fCnwzA};w,r|kuXm!( dWUR^RVZu6qFﮩ2u>p>] VVlF5w܀e,wgCٳgDK܌Lu_=T*Ba;>tZVP jVmq.!qYZslS6~aT.2_lͺbl& %do?{+/XCխ= 66[g$}0\tBAe s60K-Oc2 Fq e@{\Ң43gl7N=׽NؕOT+3\33333333333333Ʋ3H2& Pj-CqZ>= HFKn)T|vYu]YqTlZ1 AC/ʿxfn#)6Q" (F޲.MLTc2*էsXyLC)j`x$/UoΏRV@xisD |P,,J_- \׀A6b26>ű,7\zpW1 omF\ͯ4MTws9s88>qC *# p}6\RѮbR;^oiȍ)j﮽~Xy^om>vuj[R^렾X.Wz )~|CY8ࣃR҆_mT}.t˂qQcA[ ,!_H\u/hF5C1s`ffffffffffffffvXZV'RLAkdA))UyS+63s= z%AulUx~gs+,ZP$.WԜ,ז3BZ pN\sʔ)Cv6ȿc}e;vHǽkBU}[_f%h @k?F ZH]"5… OTOLg`އR6U@ً~571ٻwG dtUګ WmHA\)S1> u lFK[*T71;W.`裏 țrPݮ{&>jnx+ke\r@قOuʯ.ᣘx;%.Aе~DZg)k}:WFac{/P~ .*>^AD̿*SzK͏̛!?'"e)y*K=^jM0 ݮt*h4QzSpyh=f֬Y.˃p,fX$ӝdeWZ`\w^uW+qW4QM$WQr}oRn g.T  )7@E+0AKj_kO*>w (h )_FyNrIW9hD~oL"KAT~2tǫ~, |~;6*^SJ}]T}AGM$ܥr۬|G \{ OqTW*MH7|s~Vv~/"6Tqr{E,{`;B:]w:4YX]t#R61u:8|Cq$쭶@y\{VY,&I670&|ʗrSȀ(ǥw>[qS~6躻ɣn{Frac>T̷Qt*+PڟEv4[lq GGgHwy૯r`/KX g&́h^ /v:'I$7iՠb-XJ2Ud  ]8J*,X UFL1)`၍k`Jz΂L szȌiɧ{ERb3 p1t@"Q}w,sg&E\Q% hW- D>I@(whns,yQ ]Ey((ۍYLq;#lP:vF  DBEe o'JD` yVWO[߾}Ԧd@YʋkD;r7J6ˀ29 BYo;+ O V$V<U4*RԒcp) [j6tPw- $QOUq|[ x M\;y=Po5r)>@̃zC1 $1('EF}d-TIvup%|dz2b8 U9;ɛċdA :5yl_}Ba&qc<0Kr8`wRfƀ(86U@L*I $.K(W҆S.;8Q>%NG8\/ΥF ,#RU^߇PN *ϟVn䞓 \{"n_\ +[ʒ2$,f)<3y"oR3c=ڔ)'u~ MYSIu 7x93/sOM8t\[b-Te+Rp:=}cƆJà o:7ƫ΍Xd stTR]/F;c7UΗ9tTL*,H)p{j/ͧӦ!WWڶ\!U#gsIp5+E_F6K n:7ՑL`!B^޼Φ̓;ttz~=ee˄5j_ud8bp  L(iw2(#nsv5k>ύ`!B ί{Jifv܌0@ 30Ƴ9@)RC}AQ@WJ@^(|&XvkFxO<` 3%H1<#s-Hy8~\S +sO>&u#{@Ǜ51Pƹ& dM|21#wwX)YZiCS n|iUP3TOjv8UƚNCJg~ @JU7`'0|Tv?kDBG,q9̘^X:3!DZ(nNisD?B'FE\]>_ [b s (YсVK>qdE-\z/\l C͂ W]s*j|[Yg!# kծ[nWoO{?[ Aum¥}W֬^MB]dff7sV߲gfW_{a`NdSnv NR>ON\,`fY`(VLYՃ?&=)uor]Ucnl|X۟K33lQ*2lR WxgQI#0Kfo pmo mm#:R UOC!6#K32M03T!@( Lvcjaxe̔?Kjܮ5cvuJK~`@0w#P2ZN]A v<#|G#A260cwQ_| ),dx2X,GY?70\Bmv1hd|s_nBv WwWz53ن _=Ar99;rIu+twݒxQ̳ Hkp끖x( o.A./M, 9g(a!\'(l.Yk)eCIyS9VM'ԻAT],%`*߭402ŵ^"P˹P P!ocvJ ϊtw~%)E $KI:u砬 8 9Z+qhGEiTr-XȨ@'W?p/8k@Bf[of+v`8'3hk =.ehVq!@.ܪ Vb59 j{ MOPB$+|=`Y?+q;X, 0ˠ@*׫@(/w`ݺuanj @Wci,Tq(7nL,v+w c/}^4/-hʒX  Z9%)į&~gme md'.+I>c)3Jei9SpIp Zu:33;ؖ#GN =qڵsQQ)UT J*TXm*[oӦMRyTfpS1rĈA-Ugyw}4QfffnAj^1UMQjmu\lURs͝;72W|͞ q~{qU`;Wa x\2:/=PuQ?y֭^/f9t?wB,#. Vs+u/؏gfp Z` p fyc&6.̋.((S~;e&XYk} f7>%\ 6z|233L6(p\.0W~onUG9L>R(JYG}C4)eQ>; ,Mp"FA٧<?L"kӫk]WwM(kήYk]ׄ 9DPP HPr" iz0@=o{NN2ˡb&(A9lٲ륹\{l[H]o5\vfndk[vsBb!bn!B!lG] ߳dڟRr% _l? 
-akK|((`/ r@Y{ZZ~9L 7 딦>ֱp]a +G*E`KXsI].{W% (MiU=X &up+M9T;m!Jwnbn!B!L#2\F0rހ X.$9HǮaB!l` m-9^Psa[#[CnEhΑw]Ywh9s朩Q9G>v=v<v!D #QģBߜԌ: 7 !C 7pCqCV-ZGvH{65o @: .p0 !@ 35BJ5u!<}w'V4ӦMJMN7V(䥎J>:-N)kҥV#ש/~Zr#aܲe !pC 1 1pC(W9p 7mX8 \36olJ sJqKyB!Tr`gB ]O0p7ׯ_o  &bd;_p Pۊ2[n;K,/ԅōlB62|XZQJ ׭[gȟ<58%86;L+LF@aV  7 !C 78DCիc xn r#iQgOpѻCNbES FjmJ yIqO晴:8ɦKB!l]r \Xb XP/vrfdٶ2+7J{ L8$Kz"oғ?e {`,V zj h@V;@珕-`0$eQLY&M1А:a}6b p5)1a>=[+;[P?e!x~m֚5k,nw}g*e5~ 7C 7pC 76=խ[7_/2 . pC73b1܁w*Ue,;Tk7Jm': _u饗f5jԨ͹~ߋל8O[$4o,ڮ駞z*\!Pfݒ%KE-pH b#Rm3M{g`2Ph?` tg YA$/|y _&>NZD9|s[SgƽX:ɃkXWSOJ P:@lzcM?SkؗG'o'Y:o+ϓ 3aQ:p 1-p?1έP RE^aG::.7|E [q75RgΘKSk\wQuE})vSƥӖCn'x-<N" ]tQtwOј{b!@ ܰaCixv Cڵc^Pi=t9];3]ϛ7vyg /`j|ARa=ܓuW0 9EA{7d!PP KK`E a׎pJߪ@'TrKb/鹏G0Pr?;<#|PX42pɔK(+x(k׮5<?oM2ϩd yCdD@&|(ȗ~) we"r*M?гIh}@n;Nz 54>GP) z?QQ!ԅf}Qkpaa_\G_K]qtF>~gV֭M {4~;,R).{챬/뮻ʺ+Y?CUh^YK!PLJʏoG⟻m40 @0EMVZ eK2$Lr+ doUX|,ixN(((Pxg :|J;QRn;-N:څ@tKmڳ) ԋ hbg}ej$>Ţgֽ(\oӦ絓,Vyzc;ky-G [75.j咶޼+5.hE>(qz)|\kG˿keJlt?ļ)z^m)Mc6])#ewQlƴLrO#`wՔ ?%Dc11ucl<&E6{|܌0#/)y(`裏Zur1D+{`>=BcA3}/&kfYge:zp|'rCuJ?@_<`it5j$p Sz<ɇsv`Y8Vn$Ь[nŶгuJh='kRyu+[Ձ1/q] X6mj}!7Y=gXpN  9 0̺馛N"9tAos?xWCKJ>n E=)(Wݍ|w%ў_|1Kp{Ouꫭ~]v7}{K%M_%V.ʔ)SP[J>K(Yoíi:n.]❠gB!VAP xJ|5~#!7W hw8J(פI(5)gHU`]"A\0Ih}r" 3"DpN [$̿SVIJ MTvoQmL0E2)=DmlW˨ 0uZ.Ze$}N;CaxKbݔ&#k HBg9F;-ߦ۟W虭{ ?D<ԧ-U*wQ˕kH ܊wvFo,XvNhSl"د~5ɜco\i\{UTg+zMO|Y{ 8BO#(SJpʿȪKo5.T_{PINڑG@ ;%#wXM녁}M:Zu}/3sYv'=}l.j>q8=t‚iwV1%v)ۃ l6nE0`M[óqS-Eeyuz ޘ<󉫮ʢqTVd.0~H&8㌄kBx3Y1~|-8xB3TKۙbo@tB߭}Bh+!juT=W蘆pb;qqe|ȗz)b\'>]Lwy u =<; RwB!lq"^4EQqv]~$/4J~ld%!BPbKK^tL =$ a2jW&=v?^ k!;=S&@LS忻&-t$d$O 6GR#4:I@nafJA?BU?Z(P,+|VK\I~^zaG\[3Z9' ;Q [F"/vӢXta]v>VcY\.nQ5j5+D^V|ŗ*^Ϛu*E=z~5 bṳ_z(51+]nA4IPk/fGzԨUQyW奔Uֽu uҩխ=,oy^K}:EKV|aZ?ҫ6jWjH{=>[8pYlD?Aޒ-SrCYWAAQ}؊|;jժ6QO!w)* v-_ HMΏbl&KeEuGƒ(I:ɋ='ֱ&g*TO4,X"%$ Kiuг@I<.Q~w P2/Z?z jE/UTn~^Ժ[hU9 "֫ފ^ZOT;**TkhߝzzҾrk[ qf,*j'9FmzXڴz?lJ^P{,/Kգ5G>:}@auW ZԻJ0,…V|j\t\ɥHж4q,mb EN;4\sJd-SwӘ-nY4K^@p]C%i@,b rr-)jշYzjv ({wD e3pGcYh_2>h xeaݥ~^gV¼ [ݻޝ#8 O`` 5 &OvZ:xgsggs1 e-;hڥOx,RO,{Ko`Nȗ@ac7i.ɼͨ? =iw{Ƃv}iF_v;HZ K{ O# 0zc.B!U. ,J/Vb8NR b-©|!xHcdM[K8 |5Am}٭R`ŒAԨnt?$ iCZ 1i5RD~V<`ZTYzJvP =/nB{nX.}BVW"}_F_Qۢ5/KE\@iiGP0}Y߶8]E_~3+ 6c }%znhҧ#UU>?XbF4blh~Q~]-_{XZ}'?Q,o~55j-+[;~7Vt@m?q֧BJrq [ >Ed!AX^%D`5 *{P :ɀ} gu#+J4X~L2,V @51y)@!=ߥPR]Ie@FmCX:TON.XUK -+ RC&,~e8.Vl{(O=%G@-*%]Uv '&\XjbW+r,VBFRO(`jw- i9R!s <#4ؙt;ᴰC\Gf<5}s7^}QJd~q- p:Ȩ(s?`.<ܫKoHyH$}QG]iַ}V%`uH l=@kYDEѸCTn%猊~Oi߶[ҕ|ARֹr'].NlV+5RGye\VpqXmӧ\_OQh4]?gc)Eq((egcĺHO*6^P{ΰ RwtP;u]*k|4wظ%J?T)D'S?)=f=2_d!x\bwkZ[)XYkkXv=Ԡ,HK;0rИ,?53'|푴b}gsE`S~Ѹ_G ZitI1} &@Rͦ'ӾŁ%W7" 3\#΀g\;z OQmԷZlP+/۬sҍg !g/ppCFc@0J_) + n\E} x+!XY"Ha["Q%`Uz}l4?ǺQrifM*R-|f2hhe[{],*Ӏ?bp.tʣ.a~0u(["ڜ{QP59_@X|Jmp8V0'息] wyꤶ* y- 0i v"43@ 0 .L ?0 pꆒꂰ$@\3;+9t \]k5E*e[D ]B},?;kl#argY4N8-4}0' ]w'}wL+.KZf7ɷםWf{jP1K;l>De_}i|wlWV}@'ݓP1% _3fv `3\' W= Vb~n~7sAaLJ,;%EV _WBkn,«Im{Ho4FA|׺\`2%J{`,:S67+Qz,V)u:pp;LXayY7N\`R&Kh>i+llK{3Y>٨Y/Wl. 
lcc,g!vP-9]\ںo!Z2Yqv7}s/< a[> 9, ;:iPqqeFb?Ӝp fSYv(^I~t-Q"Yή{]W(]sYG~*|s[3Bw+Uv팛QU,rfe]]1}&Vv{ J,IcTf3Bu)P1zYDUP(e R6\v]TN)z_W%E1lsҽyQ``|3շ-TNW,qAsAf'=L}4y75oqcT5,7vR;_@pqv{cieuLթg?*HFgȜNLPf?|;*KW,8(9XEQtS~dϞ^ƭ !B_ zʮb5৔[48ӭo5AZiDY7 .RHv([9ٲs 8J I R3\P&1[b~ Do  -'eO5K(O  vKb0eQ?;>P-2\&z6gN@ҷl}phRt'Msp"LH\ ַZ ȥ]ֱUTs7 qujLΰ2 {k2j+}9z TVqgE;44PۦCVm4]t@ek\/ok݂ƂB_/]&>Z={C p4'|{]J%XAh<p!@q3?˸B'7,( r3/g~YK E.h~GX6AC $3?ٰumw.1 )TTq9u:p1܊5.wT؛ >|˅ZM: p\4iv1+ V"pCئc+o1O#m@\>9]e-])  Xb;9p GJ_,Y֩K]O$objLRnuG٤ʁ` pϱ`k"kf!U'OvX vKZ~íneɧP  .eZ~T ?e~[knpz4*㞴+z nG t,ct@E},{S$n(A+-H-ֳ;^V"htXYSF o_inF z򾐎z򘟪pg'VjS|s!lv{B08Ů+^#Dv\f&ʊl"|]XWbkj-,_֪UV/ْ/e4G:Iw1@fN Bw!B(WV > 0W I yd0ZoF.`(꣟aV݃5R^hf.@YRJ/p!U7)Wѻ+GU`@ XEYj"TA`["4p,<<Z (~U|/ 8IaHo;?/~?=?OX ME%%tS3_gipv^FѼEn c9y juxZhx՚xGmpm&Ews\F$ʻJ& fs Yk.x|S;[Q bkQCq'K ܊![,fW_AeB2F-^#NV9.ꃷH 8L<x twXYsbɾ{e(p@V,.Y|˿8&W ,KJq͠m ƺ퇀nhHp.+ޖ/\~@pKM5Gc 1E6ʽX{{ z{58W>]~'=9۝E}3 :0TwfA. :<6ﯳ-xWqxiCR ^5 : vL; ޫP`y{* V(6دts] )[CA0  -$6o;"4:,ܬ[;[O>-Pz !>tk4lkwv8WF4r?y,o!.5ARsi~XIT?5Ca2$de~d1va +ݛpZ ?Mu!fBy$`֢6 ra  C3jЕHSܗ)?qQysX/3.jW>ЭXKB<+}q!|ހd\( D300n 'Hpatv a%&iQ%s߾ Lw.Cdwzf7,_a=1KpuB!PE O"PrAdcO|Lj -?=PY]dWT_O $ThWIoI$jR[Z" Q~ H=j+l "{SwvXvabn鑾0- pu2M :Χ .l lMm|44nn8S2LU=mW^4+[W+F7{bOspP|+f ?[4>;jV:tm~%Q} Nc;aBB3s#qZ~.W~]j~>)ZfΎ pWhUJq?RWV  *eXM*o2)9~T)u1XZZuZPcA?+ wl6jV>pW7%pGދbvx'ruNL^UoG,>]`{>L=ԶpCAjXponcxyqmj__]?t br`@WVWY>B'V h0ֆ pW*.׽P~ЉTrNTL\)-&]}LD9WWda&8H .W݆iRXqVO>.lFJVuu_5Mvm.~xa T+[OYs{jpA':#&aiQ\ڊO۳>2/%x4鏗{9|ܳ"Su*Ld;Vu,byjo9lkɦBSQb=5hRnl5pq7'5~,)zODٽٞ.eQqRg,|zEq q_K|pc+\(6 &-p@Ljf6R{p[a.ֶz-Zwm, ,V.Zn!|b?Fz3yߦ64&i'`<*oY~ܭ{9 kܢEAsW2D1g_ܷ9K YXĢ b)c\6_ P% Wp񁪶OX o(ep xT vwVpey_YTLA0c2 f ]hsPkY,+ңq+I~2I 9Lb[1o) <',= p7]#:dn d\#?}^{\)w\E2b/;_˃wEDu|M.:Õ9#,~1V!(oW!`=%D_t$CdK"cfHpjU Lh$|ntZZlP?B=4(媻l`;e^ut ܅&% jy,w"MN)])/nz+|nJ@Gq P>_~[iڀ SS.m`EhF$.W~B_6?Qgy]bk\&>obV>2?A(cN\zm|.¶+vh\$'/ҭ7(2WYfD@'ڠI}|mˆ1c =(]I4k}n~4`;;}Ψ@# M- FδQƇɯmw5hl; `V>\+w` D.4 ,RQ4|bbi\fljVB~ Cَ~o׿"Zkl4/j#:g\Nأ= /e_K6d'[%/q \A7-+(@"'+W6@ˡQ1KO ?%n0f-Ќ-H>b}n + #pĥ|@qOEj4W2iڲz2aUK\ p"9|iW<7`8(g@Ųn317nGcDgUܣ6ЮLbւW1AqA;N!zY~,[f鈖F}p^jop3\[7\wޑ,y]({?\|fwB!B~%@a2+`[P|\_ P7{p [m(6 RhR8<0dKQuRV^JV!xU2K9T6&VEpQNSPx;L PքşIj)0ߝBV M?t}Nqlj縿|2>àS-jKr7ߐs͟JDZ[R/qg4.G[+[:lq4AE>%Pɲ#/ plC&noi_a1͋ψf_-u/Θ7&xB[>=jҲr b=\Z5d&o-@@qc\eJ;ngQ&ۦ`(jL6Xڣᆇ߶1(-y/r?jc 9Ci/P/?o(.V&1p 7/VCH7bbAFG$p4VIGs`W]z 1غͻ$!\;ky!vt%+ߚQνċ !299%}o=6+ȵTeq11w'g5oܦFq=t pPT7=9`>%K9W@VARHIX^WԾI@;\2KH^Ke:ঝ-p1 )QD$`z1:$#Ohw;<샫vR dẘ}jnp p該4c=/UuNcؠ)@?r- QW ) j>JMiOBvPg=42u~ Y(>J:$h zrxsE>gṷ́r ;Xy'^ִ ]0rݥ\=k{bI+ `\LJ*{:э z U3D:%?L5B! p}duy܇B)+H kc*"P؁?PgAdwGFW8`K&=p Bzba̡ 4 ` M|lAQ]* \RWߙO)}ALFX7'apj j7\Cg @H?#Ô4VKѯ.?2~*.!A Z`x*l Yfםup-J;'=ԡǛ;=bU?,IW0.^v~!aX]p~v%oi.[ʅ`R%/ZEz[ .5t I meXcdSg PFJ@mw#K5N$vڸJ`LK#ոَ1]#vK\BF>HJ|lɖ`P]]M)K+|;: _*Iw B7~:- 6UXi:9I u bq;r:lXF)]K~}bRv@'ӿ0[k 7r>X3#+=QX"4ַi!=U@N!YJTˬx Kںn֨?0,xM1`WҬAc_n%)hSgmߟ=XJV?m).thY%yAP}&q@o | ⏕ET,EX _W2llL9sxW7vCeQˤ͇`KP'tLs+Oo"pjyx; @r_-7Ў+wO.y\\ Ї̝awGXb zI W;U{We1)w p.&ֻڟR8O3WVpNmw0yEglV 7]s I:y!VLZ`LdaLQ\w-ji3pUC@D + P 8[^seI(@)a]Ԗҫ̗(@B˳Wƹy) olOqevJ_/aWjW}3G{E{)p!f+-(x5ـẂC3=ǻWU9D!.n u\06_ o(:5nMt_cᛖ6(%Kַؚ~ &X4081#hʸ Nŏ7c3yjlobj pFuu`WpO<_kb5-bUu<$~oQNMB.~"hK=X >;S6 A%vYݴ,Wfn!d t#vi--+SC$Wj cÔe|2>xt@i(U`X/Oqϒ!]$,=X.LpĪ` c\?+t }N)[ 6J=pL>i7ԨQ!u˭dw&-φ>ru0ۓ^we\YTʳ)VYi' omg봸\J3 ~zg &p ЖYVz_ΗeTɟݰKrȊ@c/yNb 5+C3Ahhߓ1^݇`e}P_бؗTjNu`ǖ>C b$+[y]0FۇhWB\Vzoadu'hySXh~/,.p]}rɩ:)C*]c7owbL\HB!lXlG9-`9팅oݑ`Z RK"a{@m@Gj2.(3@Fx 'P8J1]rz29,ll{n\!Gz<9,(둢m !G ? /U왤ͅC)GĭŞv%Pe?/-n=y[{?:D_uhew+JUߣIkLΉVmz<;7=wF+|SB98px٫rG7qVQ=o('~!6Y뚋L9r/Peq(ZtgPThmZѬ^to?J9K&81? 
ӆ2ὶe @]l)D9ˢ2 Orb㟮cl_cA4Y;]$&(C$:t$ ,oPǓ ddJْ /IJWdwyox 8,|uk5{ `X٬pO:C3lߴ? =>0h 2nV=mv'aZms2A[m;c>賜~E.@i; ˒6CQYX> <;sKl+n:PuVl+]+z57L9 FSuO/Im^ ҽ@t{СY I0J}NzM>qHagWzq,zoHEPr۰#yjG< YscC䢡ok`"B!-pQfbrDCuLet J5p\Nu`)MNd ɠ.>j;7 #OVh*6Q* .5)%*J7uv&@cD}qb:1S6r?J1ʺJ]Q)3{5kVĺ'tKe8Č<# i&^ !IIWDh&I˷hx"} pl؃/1{RC59Lw yCW^벋sD 1vj:~i{kJoT+~rtI'$⋲C}|JOR(9lVӜ;]Ҷvp/D ;w(䉻P&x[W. >[8>O [Tajn rxZNV n~5UV= /7 yWOAyrRݡ>EϑyiCoar7rY$ B!__rF5% Q>(WGg"qr KK?D@Z 5 4rPQ%p f+V;>u?Lt-qȤ^l۟:sƏ5~|{y< W6@aa[bQzomX~/w>2mD߱a"LRSӏlۈ 6'Vu6 Db7-Kh2x6E pB}̕KXp?Py‹|Ui=w5ܠp7nϢ6M/7 BKK+.My_]iKvq ov&= ApKU^pAw=?=Wh9~,G[vvMANXL#3Y !fn\e>c2A X/W"[[sK> Zi A(i9> L{+j`yEd`a|jy$ax'xUAܢk}X8a2L.H-?}=ABnP2P)O?,_YsõJ\MiP;impOlЗ$N+ck܅_M3߰_-'ͬQ |ϕC5f"V?mڪN4z ~ҽ F dZ&9뇒aJ)` ((-ܾK/p}pŽ@ԌS\\ Trz [}z#nɥmPC~UC:dMpxڢ Dϼ\-ΠP LOFݬcOkh~sи~Q9X%ྥ0 pC(΁X)cf}\ٜn8^ Xu|}vXW }nv`b@JCv[%.DFUN`$C?ZCj-%=dW_}~]*.6|os❙J'0@8,fHd5S_#@w41@Vv/@]2z3%`(VjAJRWsڄ#:kyn%5)?c~I_.W[YLxO[p1]m`;S ~PbBЭm\o4,pz2~3We)7 f{=_M9`7xYD'!B2ccPV(7ߪG))L$w{ 4Pq9AJVRڶ{|ժ8 5^ +TKf $4!͖$Ul*!~R[)`n złiMh(2kH>e5e! zA#7LBm{[sNW_H7q~CbiB4I}&Q7S\?L'BZяM*߽U^#@yR(J @ZgE/t}2>-7F,eE4@ ˨+gcSJw$'ԭr-siȑN־ތin,pѧMe}%VK7\}r?C>y/^ \?iEKV}19w{F/P4Ӿً%,ww_~3\+bqX넭poYb>[h!z|M l@ EYԸ{2^Ji+{cYJce )44v9UǸ*Vdb]I⁋-r1}S[JN"m|i~{Foo׭N0÷Ѐ,:aȸ.]'dJUqo T֊|N~8Ud \&ʽtAd>m/E\`!') fn@3P`,=X7uPRPeuK<#[)>g)yxvԛρgSύsh}E +_<7Sy8y.ԝ~vtUt ż[p?"1ֽ,u{xԏ{y~:~5.,`Vw!B, 0JH\ S#))$OKo@!3V\cdŋm qWtpVHx|"p΀cP:Œ{`-\#طՋ2)h>A}^5|ȓq-b I=/i| Տ639f $h{lkdbDx?!.c`q@ƷLe+̤q2<ܲӀ1e-{<2G>֬(]Lܟ~snu4#QET׿'h\,K)v=#Mپk4Xkp+݂~sI>;iKw }XڏfU[y~Z^pVד^NX9޾ vz =+(e,L ʊ2QQ8:7 2sq-\ܚy,1 ͔.q8MRud4{Sנ!j{{t !,pdRpˁZ5ˁe%=zm,n+f:(d{>GyylS?)<#'L7ӱ>|s-sQϔF/wK]䢛+za$Ba.?1lQ`Ghp*5[Sލ eg4%?\ =V/ BVLAAEe%@ L`@Z:0xSZ=%'LՄ##n({[apl|)yL>M*([!IR7z&Q)GO( DO|gv߁{g\7Ă`$m팹RjR@+لX% 7c c&Pe'=Rd'L0>#PfW D[[cdXΎɛEغ)Obz[!1*0p{{\, k}n n|: *>C(d:*\c;xL~OĥC!B\ ,3/v("F\Gl1@DŲ TOҢ` 'g;0FaU' RʭrI"NSD(Ihv2^.xp(yd2€)J2`ONҟIj Lߤ>__owO9πzp b7Kw1G?ȦŘ b,Xm͸\wa(cuL*G:9pi\o*g?E2A2 yEO%T!By\ʔ\zp^ĸ }gC(w%kcd[Z< 8!B!P U,}l =2Gz~10\㐯D m,WomQ7- }#b!( \ɣq1݀%GGr[mXpz9-;8i'N!B!0w[ XbӘ6B!pavgLl7;B![.$%YJv,ɖ5K%YY+[weV b'Quˈ93<̜ss;#q 1o؉'~c* \UIWbHЁkP9 E8X A &ƍ;<&޲bDV!= 0BC46a$] \ \* \!ܬ/pw[ B!p,Pr Lq x%UI ! ,B!m}KK{3k֬( \ \* \!$pB!+$p%p%UBWB!BWW* \ \!b }OvKeΜٽ{nԩn۶mno;ҡCIܴil Ǝ.]FabȐ!n!BWHJTBWwXt{',".((deF\Μ9]ʔ)]oFiQr&Lc۷۷X&~„ m۷wm۶ute/T;V3fUE!7%ۧ3fL \ ܺuܹs=z$:>pAVI&M*ժUͅx;s}0`oٷOÇVZQXRD:-c\K/vr֭7`w-;myn9!5jԋہ}'//BnIWG;LvTnaH{_ N;-Zͫ{^$K$pO?Hvڙ|wݮ]rTTi1n?c^ѽx-+V,/ s0׭[w%JPΡcfN#Gɖ-[`ܸqX ÇswРAv\8hѢ&J9yg=tcO=oD9 L;>,W\p?kժ K6l͖-[ |1/Sjc EO-1A!0bĈ|)'=\ 2BgIZ&ީPe?uLUx_\Qx]+aa@rETBȤƍ0q';MRBn}+qĔmzٲeʼn$.Q8rhV^mfr}k֬VZ5iIJvԩ>4+]+USE*11z}e1`<̌.;̙3ݹl|b4x)o׳dU^C=a&*U*;vly\\_4 !H"\ E?± O~^b<|REZg?{?*^˛W?֫^ʙKޫ_˧REj~ (s;^-~z/oޫ;Aw_ICbܔ)SfCDc'bG mr2e.YJR~ ڊQ \p3-o)ըQä('mY.$Cu`v=0׃]׫W/ p^.(Fy{ɋ\H~;l믿~s)7m4kCnM?|Y}v ȣ5nYtPQ{ѣGՃn?Đ0/>8ҕ2XbY|CFr98eM}1{8"eO41GZ(lRx(.qq}Ɍu-Cwݟ? 7n/B!B!w{YfΜ!!#ü .d$:d2:wlq7a8B#A^(3]u}¶&/0{ <~b(`xa./0DwB47qe/P{p r~-7o޼%g׆5i$WbEޚ9a„m>R"]˧xt!_<~̎"vv/B!B!w{YR".$.r^0]M H4}R"<4(c!a] {d,Farr_ǃmBR> ^lO.ӤIu̿S .!bPv̅ \1bE#0"aÆ;/\7$$ֲn%b]JjժURuMf&B!B!]^*ElG}D)7;KnV^u?vrqhnL.JR ϝ;+iHeրW`A ܧ9bݽu\AI.SlٕpC {]{e*;e{-&L8d: ]2Ex-`dP1Hn?^v$ui5B!B!BȽ,p-KQXR&=|x FAxӧЭL>}FnnaÆ16!бcG( Gr^{L9/pEWЎ5*p B[<[I? \^fزdx`' k+~bݺu 4ͅdݻ8qD!B!BŽ,pC(vb_!.E,$e^zY+ٳg]~}_uu.\x~/_dF(p-^[N \c)\֭뉋Yw1d۽8 \9Y#G8ֹm0 !B!B!0p2?0:oرc{M6{qIOvr;v8X JD:#D'ٳgwD$pKq/\t pKu3ޠRJ6 ac|BV,2l̸eňo}rz(ǹ=Kc/#$l;AAAar\!B!Bq.#(pYk֬0JJ!a9x 0gŪVuf򐬔e!o]؇Q ˗mԩSmo1vRmݕn+ޓsNЭ 9Ypl[@sm۶u;wu1rycƌq3gt )ƅB!B!w{Y&@?df$n8 c*>f2,ǚx8WtࢃxǨ?8dի[g1# s@L-Z dcCN\'2/~}wط6< ;u>iӦyMwEo3^ߥKh׮~B!B!P2Rb2k֬Mƍkݤ=%*SAB\—1cƴ0ðbǥ伛aÆ#p<y?gN>bL*9:sPΘ1_:v3ǀܶ5nxXR>J*|MX}~ۇpKjQJ[tK)~8~u1 N`և~! 
thPB f)|:Bq#iҤٛ7o)hk %c ߌlnف I|q?7ܧO0Y,B!B!CϷNRf8pծ]?Im2[?_G6 : )7((n 13tleb~W֡Ks d]׮]`tx>֮])zF]Kxv2JaʕMvRr\̴}Cņ`XMIfI.&Ԇ*cq"!ևC7o0̾qTyL y|JF3 .dgjf\\Lt#F8d~lذd1E2/)q̣tuW~͎VrY ^[[Cm9̴ec(py<\}-ܗVZe'.:pMrYmr='p B!B!B;$p}`(]SWv"v1Q2!5j԰f0O9lpvf1Xb._|/TH+.\uͲ믿B!2H !0`$9 לLIQt/~3ԝ锭 hŽeftW6rĉ|rbeǎm'/^ 9s^zI\޼ym*gСC#&B!B!$:uG$gS.)89&nʸq\ժU]=\~lr';,nٲu//_5jȎS))e.h"]իWw/˜9Js?"8IV!C&B!B!$m۶ڵի{7(2B=ۃLlCO@-^5kָ[+[Z5WP!{u={t#Fp]tqmڴ1qZti,tR[uJM0fyV.?d~ݰ 4pe˖g9r۸qaQ沓vС߶xD;wݏnݺsTV-vAkL2uL6m[*+eʔ??C&B!B!$!T۾!5=jt޾ݏ?؄lJ\M2b“Q*TxwynСI_JަM_EPR2րy䱬Y7t֌36S͚5]ɒ%->\u|A.CR͛å3gNsBر]X0v[4iYz+V(0|i9s![l;v ?/zm;2EUѢE=HZ ֺRm+W.^[H??tx~݉XO{W /f#GX1oD#xʭ,\vY^.bJlNƘM?~%B!H !p8ڣcAf&Leɒe2l21bpv={vo߾}Yx>.E,2tT):u4m `*!\v0??,܉e?pu3rv^6mt"2`4I;!+B!4B!,̔%D!s'S' n{6_ӧO,\epE *RD'lO!zF\F@"ׅ)tq)1q5xر} A.[ K>׵k2% /F/}ȁk\%`4Du۾u$ 1f %pſ \!B! +"AItRwI9V2|b/sr=c?CvBH=f̘Q]M8,Þn\=UF \iًȩ78H ~1ĉCv*v"na7rqXxq- \ڵ~ |_8q"2levIljժe >؇ 6> 9 rZWkB! B!? nVtκ#F$БpB6tqTRŢyX&#oյ^ !sv}–r$.nk׮M4~C>g oܞE3'c-|wc~$pB!B(%%2ermىG M g}tpŐȻ&MQ;$$$$v3R6aÆ?ub jaIxq~1cFJ:a#e`JXۺXݻׇ 124~ ?/pD)~.bu˧MV 6%oUAk Oc![: :|rm?^WkB! B!s5I-a֬Y=vR[gtfa,u),2A6lkz3SBAC(e-D\[vb!o2S9Gf߿JH B!B\AW!Dƍs}]'pC֏VKj֭#*x.=v#=i%:b3\ժU5牂?co/a`ѢEvpF牏/lcrܾ}?ڵk{⹻ ~O>3 jcp<, '+B!$pB1b̘HX ``X,@yG!KKupbЈ@&^U$p \OvDSW_'  ""Eeq Z4>Nje3e=8&s\?v˗7yήfSSFQfr} E,<0. P(3҂Gkf6m/ļ s8PSjS,22qss.ye;ƍk皲nݺwH:u•O>fad޽{I !Bq \!&I0v&2O@d!ԺO%sDA.d _pew&#!((_1H΢&qLeJKU..dY$IDYomJe')i_J?ޟ2F3A eJZnݳSh)Su1q99g2O:qvnm`vɓ۶١8?psecAKΎB!WB!DDܒc';lPYË0tm^9b.YSxRRn _˕(L.v>d͎.ix2Õ\.ЉyΝ ~9J`|? ֻ~A8Z1ԩS̎={f$/ R ;/iҤ ArűlFd< +ˆ/vϊPebĈA^KH<ږe-1J [Cl/yJQ8α  <1cƌiNj`\"xћ~2iw(int0{/2HԿR[d)޹vL%aTfM 6%ǤxعB/_DnG}!n}}3ryJn~!j8&.):p,YϯzU1xuBS3Y~vR.QFOsk̙3@uBqpL.fvᢣ:cB! B!;"p) z O梢#5?l1 YdȈEMy7?iq@G'#LjR u!o5D/a  \H  *g*Uхע7'! tGn6wztlfw)c] .̯R3ff&# SpWJqvN^cb&pq+%K2޿a y,޼sǔFqݢE E.)nj/@)ɱJ !BB!" ͛S٠Arje.2dCrV?$ yj@ IҧO7n8XuCLrŪ BmeSzB fk Bm߾'F0_Y./-/-o)).]zUQ6R겋˲8n ZjYp*r? aիWs~Y;&d'i`[m۶dW1ѣGAmF~mC]nj^hG#GlHL(ϏQ@w¢:i)[!VXѺek֬":ym[/DFxMʘXǯ.4]ˮux'*@!|\_s/cc0f (99[*q8xQW3' z7tnch +Ha8 q}(;a 689cgXW!BB!"dɒ,d-O^/_2O2 vWo=DGzXfr@cZ /v pݹ>Q  !E-õUVvc(ޏXo&`*" =V^؄tz,fM|2]{.TR#G,W .td. /:w0o+~"ؽ;n3b\j$pB!H !""n7n\~1O5H` L\0؄.T۳8sdsm\ ;BԩcUPEfNx|!\ie. eMCl% \} m~uS|x*T1v˞9sYgV+ble_b]SB-1I]fB!;6uaǸ /yӧw!ݸ,ľ:o%~A`f, uBWfD.b2 Br6@ }K mR#5jԈ9n;X>ɓm#Fp8F`j!p)1lc7.cDWEL`Ʌvbs;p)p!~: ꫯ̙3s.ϰfXj7ewsVFo>YB!B!" \Jy 6v;1NEt_IСTP{k W58 8ЧOr~ (ɔ'Kۈ1e,"\ř7z5^mM7n \?;q3/_4i.ڵ+)]+p!j{;:Ct003F$ 2qY!YBƾ}@߿9~t吽++B!- +B%KLj^;6/T!N"1et6ecH.2eŅ AextȐ!nƌqfb$H˗/o:w&c.:ek1JR.>]˘Æqs#\2eJ9ͮT ʉN}J0!e ekqM!}EP2EـX7g9M$vBpFt۹q6DG P8S/JYU/~qmCm˖/_2w A$g1!nHm4p.b?mv:t 6e˖uK \!B! +B%va?NT+_Oч #;9Ic̕PztƢeF.]YU\&;|9 ̎EGG.r3#%!e!cZhFEC\E).V av9Efn3AnL;`6y~̂Jio.\MDB!WB!DDܒp+˦]r%egBX];dt]a`y\[R wΜ9̞!ZHݞ )|֍zF:(A/lj*f/SN}駟mR&CT_F&l2,yXS1 $\m#s11 8'&)9 ^5k'Ū/v ;ٹzemQ?~ur}q<6NWNBJ́f>&a맼&< \>sx>.<ߔ7$pB!B!"B/` nYvR1BFP_nu9]IօJTZRVJS >ͳS̙Z&;|/CxzOEj^Ly͚5nٲew`/)qƔ[l}d"+B!$pB:{/CKɋXdzѢE}0rtq!+B!(p_W!E6ZJW_/aX,Y乙ʔ)ӫ/S`F(3o|/#+B!8k 0 Z=xUW!HF._l(q>}z.]*"z1{رc{!+B!Yg_^Γ'&ųB!DE6jժu֫Wϡ֡ 9ŵ \!B!+ZcX _b 7or$pBmü8Ȍ5rDpiӦbn]|s%+B!:GqgΜqsuG'[Av_j>Buw \!B?~ѣ *#FuKeʔa׭O=MW!HB!P~76;cҥB_?nvrY\<*-ǵҥKE)n/\ +BHB!j0rs3&O~G rfjPtiE|WHoʔ)&L@b8Qbr/B!c$p]AW!BPonʕ[nnΝnnѢE/P|3o۵kw 6ɱc(isPR>|CvnMܞ?uNBq#+ B!~fnӦMnrzrnF'5/Yxq[de--^#{ʕfϞNNlӧAz&p!sn B!O>ZnR*V˟?B!vk֬q!!!͛]>}\^tQ eyFNCL;~;yu߰"ٳIB!c O=8q"+R;v+[K63B!~uw2G}T ƌ3^zkW^1iw51|cϞ=o }N\ƩS(v.Bܯ/pI8/.*Ud-PBwΜ9B!eÆ ״}M6K((P_Nx=46x~[R2~vm 0\[2B!}J4i\Tɒ%skxdJiŏۀ/fA:f͝;׭XBB!dO>quÆ f$a{byIv6jpp-[e-l=z. 
r< xgKpM~Ls-[/~Ewڔvxo.D ,g}vhƥI7LIːu{m<|l)@\x,w(HD1)^fTP@՗g۰lKeVK_~ڠL+ơp>C5cMn3} YlVs{ky?X?N9o^s|^јtG2ffLүӷFE_Зad+gc,@Ǘ|D 0@\ u|FpQдk(Ybčtps:~;uVl%޴pݼ u\*adt~rGy?&UYaXqzùYnƀb;zj8) fY}kLuoo5 ͇ ÈħΩ?90 gDc!헔{H a;_{H9%‿ulK%Ѹ"A 9P rS ˒}ʬ|ΔYA׭,<2Ns&, DS_z# x뭷%}.CY)% ŘShz, b')u̗J9bs~oHBgɟ{F`|ꫯ6a&ɳ]uyu* Lǥ=; *W1Xac(K4F*Q*:"?uT_N+V,BDRr<[x@/ةzdp.IЕ*Uj.:uxO.VJt-pQ l~w$E>$9 8{x/oe,meܱK=T҃3ʦ{5QPjV/QE'UQ6~K-dHaD(Q cIʢ.K,h=nShݮy`iHu 7XA\"6էX]IƎ͈o?gGƥe':&@-OS;J;X2uXJ7N@%7LK,M,VdI6"V4[no>Emqŋ0K(/&}{wsN@i?2% k[_[eX@ӾNR ZIze/G,]MbF:oo;sϵ(s"B U{FeFuv3&=[HVndf Tkj:o?ϧv/?}t+o r?דּbg[_@65~p7Ю @9$E`,}l1((Nv^m|ǚeM~ǖ]d[rE`Bn+jhߪOd(lx}7VY0U,(Z+:-?OȆl7%Ȟ>F#i Zޞ^&ReQlƒx{PK~L%}y&-Kh jXqQ]!DL1~"2"=WH`q&OaA bMhkL:S^L;_E(T>gi]d"Wʿyw] ;ߨV]Gm˪R^% 1 y |:WIRbWL}X2 6#3܀6QjK =(_{ UM@Vן˥{Y]'AmMشEJ&Hm҂bbǰŠ{^ЖWu/o3`);b7r]ѵwRBrݨm俩}/W}ymp߂ `ٍH#"04lьc$v8![ya[n`{k~'n9Y.\ˮ!k2p4SS,e8B} ?]i&٤ 3X%?sl]ڴڅ!;{؅F0tpOڏIBi @z´,G"GDd~]{D,P$tL=M̅/ڱQ=+H)id؟"- 5x @~lF.S63^w`C7Eܞ㡺/so%\oD);>';9_ՃupI:K{  YI QQ~w@_YZ@m~WirR6 ,X(KgPujj6R>S-E7=nHn`i.Ҡ{'G2vI_KϼTֽ^vFʨuRH+L+M%D *rspѿ]D޲r//lcE$"R ,s" z[L7T[69P6Dcȷa~\Ev xZi޽{pA묐sMQ}ȗ%*m"&Sa CS}X$ݰYV."uei ې.`zgOQL<delT2ʹ{<EC8yx^έ@X6(Vn{Fy:i " V^=LBwD ^r=4eWm ]EHhT+~ ,B67c9o~Kmj nH;܍l(J*vu:Ino%h݆n7@ `RI).6b ^VS5)U&3@4)>R𬏢A-/6"mWEqƈi >pUqy$E>1hjr  z,g)+AsyIAUnv"`H {ainJ$a؜"xj{,-Qǘ`$QD1to/2zސTzu_W*qTa?$V)HX݋A8Mmua ڦ4d-p U!'IRhJp_MЪkٲjrHKuEݮ Q!,ܢpZDH)EmLiw&>wA@8AV@1pKy1\ lK`r#@ߩ.#}:PiHbR1q}6>}:P4M[/H<i^-@iuИv6`_PV}xhԫds4DՕhZ%>Sm{`-cwy|:' X8+HES;]{6e'' CN\}{d{IE(f]{@Qѥ pUtl4M] H|*.#i,]6 >I(3 ߺ6!MRA È6M^gF:vH֟ $Zh\@'m)A مy/7Ѭ( @ XEzƒN]k" ~KtiG&/gՌݹ\@'{pї%2Wac:,Y}l 29&j࢏708 ,X )ܐ v[~3~m}uLh_VTv2>]qunԤuR-WpI.};K6tӼ^`|Gm[@Q(QXlL iղ| .jCUjGuI hSOib1%15&)c1me%b,D*B:ל6JY3!NQ;|Ami|Silno,.D"u6h 4T.jZluRjOkw_G`Eb#:c*Klٲ&i~UDK&[Yg裏.7 ~z XWuzNi d!'Nw61 ,X )ܐ vPQDN+ׯ?OK2ifxp Mܹku;:@Ő 1&܀D*N[D!)u&:xW^1 xET% A!YA\ƣț4z|}|[[d˄eÆ ѿQY">+:VVu,56p@bV̩r&Vr,G+ʋWr|2T )`w^*QeБrr A͂#g϶sQ9NN},%9[s"u ̎3&QƋč"^/JpØҾ}{1"G5PחdFI%U0[g)uTi9d y^h"EFTM"A]@̴%*{O`feFj_hѣUD əɳR/' i 9PLceg<[Q+ 2XѺ3=l*j ;$V5J@c4EX{JWYwէd)^}U3UeE9cYXyj{.iŏ\_Z9K~/sNr`j)EH!^P4C~;  *H,y!J,%BG;Rr#M"nҥBdR=6m4eٸ{67O,3Ӻ#Ytz%ZHʄKM&y_gf_\%,+Kb41t_u|73!O,s )cD\ƨ:tr־@C 3RE1 L]Br/&I!*@ X;0RRiH!Jy?HOt> 8$2g/HUTy4# 6Q: Qw(|q"QMYNoц!6jρ z'~ OU>nǺW~5M$l*E"сm- #ᴑD"q-ɧmg=܈4Ѻ~Q`+-ΜmF4w?׮:eln!CX{Lm4RH6n[>߱n5@.+!Νk,s$VD5pNtZltӍ#/JnVP )JpvezGe&e"%8PhXNK :  %HGlܸq(" ՘ D #S1هi9HS \YK+'V]Em ī+wq&,g0 "W'+LV6ʨ*=0?H OLQ,Vx> HV@Er_e}0eNfH QO 6Kϔ*cyF98^.,-0SqطtnD-+(?a X孍jŋt& ! @K~UʯC#bԞ*j7〷j3~TIlǽ" Y &Uo<5u=1]V/?z苸njլ=<|5[QϭzMqbS(h|vy%VEqS߻/yhj& ,6KUWx 8֍҅!T?:xqq[i'+p}nА =WWעEF<+]Rt'f$X7~Og%گ;+>!~_+nWF0[/, vX\t]#TC4Heӊ, @FX.fАI`Cd6y0tD"2|ts/_6}ta .5z-ZdDvbD l,Dr=@;ࣖ[ԯ[eHe Eށʀ=lݺ5W!8\9Δ0I?Ѿ\7(/EڲЍ!qf1v"XiW6MXq ac5]o\Z-[ =X1| ئ 'DCvYAWkg6!B=KQL-) Njpit]zHK"څ1E{ߴ)v6a <ڏ@.[_W ,X|L.D?РIJ% -,;V7]{->[|Cqenr7xmH!bܱWW$^=U`ϗMQ%ÒREQo) /y˸J_RH˝Fcim`XuEʦYҒUHр@Rt^DSY.߀8"1YuMԽ76$ړ?0KDf |t+0&]ݻwO۾}Emx{!@\"?L?5HA"yh}MKd)+m6˃RMU@'53 Lt.eb.j"v(SƹoFxc`c8bߩ]P0s-Tm{6qje@K+0Xrg&Ei#=Ɖ̦Ki_~S^A*F%am=%3+ @uh[-6mjPM(R&"m.N]&#M!}@@m4z O4ɢgmRsEs.mC,X`6fۘMTutgJ.B*$gsN+dRWX4JW]]K/-y,u%EH!j%?ߖC iӐx_%k}O:tQH;-ѺKirok^{L""J^ty%ⲒW\V" 5]~%'l(&|D͚5+8KeD@X"hD6Ι3*DMH aeD=RՈR_r SL1HG-v8ZrR6`&\^ &/DDZ- lR7GFuֹG9w%HY"nbS>`i׮]<q>e\3pXuiΆiӂ[Զ#5V?qa? 
04 wr8Nr-~33Ѻ(@9҆HU'16_Xh&/T瞐o$`͛7~/Qre;iw~u9)XN=<fb X`g΋N> 31#-Ѧ2_~y){_LH!n" `D$ +r:V#joXVcӥgDf"ld,+2!Tx~rxN9X;t Y~DqNl|[&hl݊}9o,X`X&Αף)NZce&==1ìTMMB*D%3 q&&XJłN0w-)'<ӤRL+*qlvJQ8\À9e (عsI+ 7n8 Q"oo5E(ψ"&" ,>B\tЎ!RH=88A==x$N >L,Od@-!땖 '`}-Y$ܔ`nj.ہ\t^_$1{-_ё ,X` ,X`dWݬ0[ [3йuLKժU]&-lԁ>KE I(L:ciwrNcWi磷+; ppec66{wm/M67+I`tmi ,X` lҥi6x3ĈюO*"6xFU?nݺg쨍iw$+-ܒno=qL;wpC vXY ,X` ,X]ـILL"mՈ0"P5qWלqPuQ&,WA$HPTB]v3 3+z뭷UVVC+H"tI-5ƘU \c1c1 9ꈆ% V6tի/&a)}g={W>;'M.**Udw4an?Sc*1c1cСCcκ.]z(̟?i"-E͓Lꫯ5y6ʁۻpJڵڵ{WM~ٳgn)?.{РAc̪1c1c(X"n aٰ[Z^{7y~Ig]/Rhٲe̯Kto_o15c1cb\aԨQo*Z.KEݾLmcڄO>$zq2d1Ƙű5c1ccTl^{6m !o<]{ KJ .oc1c1c ?~|%/-pcǎZk劾UDɅή]f$BnFn1Kc1c1 VIqp-Gyۚ5oXܷkFV#R&<1wРA5Ƙec1c1@ JV+ )oSYW]u D;N`²~8;cc1c1qܛn)u]L.vvm?TH?DÚ:ibAa1fq,p1c1'+={F m۶%<6[Ze˶o~d=X};|Ic)1c1cI.p9]5JMަje[j5|-ȋ{7F+x cc1cYř4iRkó> LݠM6?RLE;go,멧 wyg,p1c) 6 W_;cɃ{N;.pA6tPv|Nks={k Ek묳Nv6nٳ>i/{cF[cYe!ʃy83 w\hԨQG zb[woTYfv9Ϯ[ -[k]s5a„ sS \c1$\oDHVfN,nC=N;pguC>3O?b4Gvu׌MA.K/ _~y|Oa_ؘDb8cL!Λ7/HDDxA*ܶIN:6ڻ|{JCjժ4Y|c9 INc ak1f[nex7Eq;#8"F$_~e2dHl` 80>.Oq2v8a¨Qˆ##ύ92|q208qb|/ץK 9S%$x\rI8CvmjiM:5\q/K.DJdWSk1&..+.QV -\¢Eeh֬l*H|C$[ (eQ=È}_n)W_}uovGؐp@E1Km;eʔ=F;1y#^}+2F2}j1cL} O2ߎ"V '|6d3 Q2Fe ,ԹqJ1_ra .ԹoTfs7.(raݜHRI rwQzΰ?qt%q+jvB sU U2*W1JU$)h!r󻆎{,F*B7L6-vNϝ;?R1c)%ʕΜ93t1>7f̘ O?tA,bof̘WxN77H0P|.;'Bƥ19}t>̆ .aUdmV2zRV>LPɐOUi{iuU7:"ڲ:ovHqY!e5׬9?Pӏ/s=o-6!<H aR3tyGh 3*\JV[wu]\+J)o\3NmF1!id1cY&֧!~*4mCMyzMC)u_,! yN=Ԙ;uu_(7$-X+(US. \Eߗ$J}$ 6$f>Ui|Oix6:o恵HR@̒`NChH?،a"wlYK[ouh׮]̽[kYС(/1'prVɁLb 7 [ tF~8#,2fVm8*FeݳR/6@cEn3䓯yL'Fղ5Ƙc* "̀&:T0?G7Jg=ٕ%yǪQݔΠn}^z7JRk?&p]XmԬ<" nVg}XEO egpה 믿b"OS˜=8fՅHwqG=;U }iF"rohܸ]b3!ie=W RS6XcY%!ս{Au:6w5(b5csEf߭!yEdf{,1y饗Q7}W\=VZR"eg^x\х<7`$r){чZ4EOD+8+|30zMCY%~NڶAmHqBXrה7fI{+<4i½7W;h$LV L^o&;1Ɣ-cV9 ˆApΟ?? (+ѩh&U͑̍EqOKsELDK z,G fKL|H"r i aMiBh= `u[>84')b8nʱmj8"p d7t%3ZjlIDkg_ #F(,pM`k \S*#\2#(z\}4rh VvW%C\|e"zXcLbk1fcA7(p%G"WdeD2QAR*&Dyn')rv{7|JZh4vl_U)$85j8+'pWCJ k*A}Ef5!zu[FL^U#dd3H/0=M?A7pwC$%ѣG`r]wݕFAзo_P0+ 5oHbBPZ&]kTmݿ nIZb &NB޽ða*bk1e1ƘUԌ3h`mѱ]Dng. 
tc(Ob6(UJ Z < 3KB5 \&<:KyqUZ {;OBC2r੢[Dj.Loue3-,pM`k \SC Qhۍt/_|R"!v3\tL-^ox'B+bk1e1ƘUG\t%'՝(rsέN;'0R\hkWv3P=#: [cj]{QUD{DoofP0yupߠ Wp-rުX]>!|$jDlu 4Ab5.SOU@t-qmm)Aoוo|L0iҤ»ה 5š QLs1FjsՋAH܌ʓ GjSFTsc \c1\RM54?;h4Ibi0ymD1,5 pIgopɬo:) ^xJ]D- r:2F0JDc\xg5\hBjfl&)YKxqql/AXrה78T',2K{:erDfi`݀Wܿg͚U \c)[,p1ƬB`"EHY&A"WIiQxQW_}uH\dnUDxJ#*%"1W^1A۳gnaQ"#Lj}',&-,.d/Bޮ]5\4I\pM7G $hQfώ]%`k \SX_=|q25OD {it9پ}u$g/u?גFD#~x(y&dѣ4*<"Y'["0 O<B?ɑ;pGm-/qql#EC"ڙӧO0f̘&+>&NT6œ裏hN8!mp X^z)꫘xܸqVYG^d ¾>~Eῡuqێ8ŖOC: X>YANJ%OIX.uc)1꾨~3gN ^W0(wv8z) )]8Wh{c?͵krc{  \FRT6hwPCgybA6QJ%>rc9xR-p`C*|IC+T%t\rm( k#98y?y ^:!(\_N=Ԙ˽p/䒧{V77Mu^y򳒹r&mic֛sZ J{c \c1 49ظU>2d,Lݣ౤j%d."[ŔI"&r|D##f} !pTs|)@,(z8#Aa+h#(aH]@v #\l}4il(MXS{FeeK^f$j3z"@0 ҈/d?(F'!<mK%0hxtVm۶'˯?> V`1OdMR8۞/Dy \KsMM'?CY"y}BzA'ξQSlqǫpA DD\RXעN Hb=2u#N2(F'%Kr :6Mt,2c̈ ާ2~8s<"c!zVѡZWΛ7/K}]w&C=DBzp$mq/> +# =kj_G:ǐ_f\7)^XcLck1ƘX.8Mya[H S`ۙ|ˆ)/E!r +k{ +%zJR=+i{J}QZ@z$3DȢEjȎuQh ސ<H6D(IΊ"dq ՐߞSj ""D6:X6|IsZJ>}'؆]>@6C_]&2Gzg xlC!k}ߦZ$> a{$zm#C4Q؈ »8["Fz:;:ӎIԗX?R$o(X>2"RHlDu~פ68\*\tv:7q6Pg) :DlGST7H;8?$w6mvL&JsFLCEv{M"`1v]b#Yb-ÚnDH \QSN /ho 5{S$hhТ(oK_>Jv).xESאE^nZk>{ab*Kܒ-G|$Zę·!%W~}5jXy֏:ϟ &Mqi&Wd}Rt> ?9SGII@["u h&VR.|PxߊR%paBnpPt _C?f>'%\kR@RO:QgvG隵Zkq#bkz3MNԳt^]ZF`DQ :H%n5$Ht\jXW SXc18K`c7E =+!)DA$Y馛fbj"]e=PUgH=PkoZ' "H3QTav 7,XkY5uMqJ*M_c5g$l+|_'>'h|粤租~^RyH,?<>ײڐb/IiKp{DŽuõ$a]Z&M/)_cD凄-gNnMeWO9|"sT3Nܖy}!7pBOqlX5HEpgS(,cϾs_g̾m!Α"AhMEՇ& }9_< \BIy/SNjMKmk ѢtM^R.+:8_ɾ`:H˯HN(KI_{K\7CA\(M__J:5b!usY`* x.J!K$pSMx_qȉA+_nV)[vT%/oRA|W^Fg:v־U ZD!s|v HXcLck1ƘX.":nV]LRSkEՔ'9p sr!ɕe9yi_P?HIB}DI tڔthNAPc=THQ.Qcj-FZΊNC}H0R o`o(CLIt԰frJ /ҶBh_D<^doVdHOۡh7~Ut(]$@\E5@z![K:TO#D&٢x 6ԏZ\M|gF'y4Ee_To&:>FuD}Rݸdr9{ LJUDifHi V[meDm3Db\Q0:b$::.kKy6ԡ"IA?S2U@ԺߩFJNz?5 bc:W37󐶪OSOInD=V>tSZR.P$,5tZF׹1d:w'z9:R~e7kmu瞒Գ ߉4$"Wu}良kƆfզ~)R>V]d;߭!(ѵzS]k˙+2hXsM`Yک.BױЦ.r'&% H"u-FؚT4wuHpw`¸SV SXc18g+o y,C{BfJ8Dw=-qٹɜLLR ;cƌ2CHDhFB.V]קQJ\Im]H0l".HRY+wa)D&"JEN֏z w':"*}b%?S(\W~֏:ILV (KEND^X AEj f3yO8iҤm%\ĝ~<0!l7эh :^HflCD!G p (p%jb/_^GJnu 2'Ⱦ3 muMqF"$bN7F>`ҫ$rgb%_4pIiO!psr29yHJ ȗwkq\9D"WE->H#āxuy$ s.ҊbSENRa:yu@T)ۨ_:#4 aC/s) \"!oԃty= Tul[ TnrDcI7 POq;8A[fΣw²;\錠^sZz繆&}$:'}o׺dxbo\*SXc18L"KհQm4$5$<DO=15y(41Vxer,X$IYE$0\95:("!kdz"ѴDd2D8&BN H>"x"tODHk1I.BA{4Dα,F M"g:˔*1qFɛ  {O?bueGH/jdxO|E9"sί% \:NhH,M*Gudf'+zǝTtP JLg!'9!sImDL$pu<*EIKU2r#43麤x1"=[(!pȑ:C#+E \dwOk>K,XO QsD6t=R<1=t܏8ߵ$]kpDtDJx%ޜ_lOEc \c1G \ (pi<#4V \1#DV.2N @8.M%4ޟk [uT9zU\^jOUӹ^D͟H{ceݔb7mu 8 7O~. אkk1e1csҀNE ܍ q,0"ac+{5[G$*B믿.XGJ.b@"rYE pƊP“|i f'T^C(@_D10m㲂D 8AI{))؏_9Zu%ס(r57 &ZߙAr+n)>:Lj Zm'SCi,NE~۾ c̏b'NOb/=H{U4˱-WUYGǫ$fO:Bxk/[<~&LqWIl:_V;W>NTH],ё,CsC(.Vj@_UF 4#XFd&׵k?ߣmDh7 #:zP4nwΕe{LN;\JQFNDFC{/!N #3t\_6OQT:+#SXc18JE>{jΓ@"! F'҂}|(R[g[#"DbF9ѦՕ$;|Vi{E^lӓ㽒e HC4IHLӄi ՚a Ǒk-Fk!* kW G]/-4I5"aK!JFjHh]oȉ\EΗxL Isqpa{By*"y31F|^'0dr^H+"!:%.."6'OOU+86xdrD&#g)q_(b)u Ď&psZ/:8Gs $^#4Ԫ_(;p$⊢'Hutd?7yd!ou^dй49Ν9Z#a"J:NKCxs^JןHWtUQ#!:?q)p}z2Խ3kҙDGd֬YqNgnOQujA[Q{XcLck1Ƙ(Mv,1$Maܰ_ɒ7LRI:UCS[3i|x|^ckժn^ pYl։.jjg!FPI=ĺS O"Ѵ黑QmI*"0Wyw㶒'C$BNJ\dNBh3k;" \oTxÒ@"$~~%TA #ұ:}?)iڴix@rb(p%js/3% $&mDis7s4jrܩDAu. @v4lPyڏ~r9Uڐ_8%^_^) \ud%!::e^~^RvjiMֹ^렮EQDnOם wIh#4A’zmNI}\u.Uģr31_Ec \c1GPLDȞj_OɤII:P$Nհ\ HKǒ0LDA\H i{S(IJnO7$^1")%Fz}38[9!$s$Kp2*=\G---l,?QE@"KJ @K$#(衊L6,ta G \g%xI}^Ҥp& 5*26ځ=Ko&_>AIP'-/,Kb+r^soWZ.Ջtl`bHqf:/(X9pU7/d"JER7"Ck2Y%{UX&5Z6z.y8V\3*2SXc18JUJ.0Tl|p?ӐG1UH/+f:E5ۥae(&o'J ɍ$e45.Oq5 [q0 , dp*DgX~ 5&@}Yږ5 &H+cFR##A)Lh<+JJv2:uLF}Aӹ±X \>.ZAb8 \7T.y \th*TpNrΝcu|ӗR~{f뽃թO\J;徢m;Q|] <8 QczkգE^1Sbk1e1c.CFHc$`0vڝÑG92}h[*BsDH& &'~/! 
Y\%EJ3CR9 T90yY/mgkLv#&`D"KK"/OH<"ȐYK/TrfBP  &?nE:#^hhS$ Aw#2о+Ju d:녘E]ܔ\mV+ħtx?_'YK4:kk&:SL =*E&S݌2LD>w#⌺F y/7[+BRw%C߾}c#VuFUIv!7n(fxz¹ƶA7[S٪{N༞?~P ]+s-;S Kљ1e"m2H7a: 7 G- RT.OB*K/ňTѤIA}`"bsLEH*TJ(8Cʑ{Q7%Jh p:(By1u.\}jWI `Yw=D)5L~ԾsYוxOI~tH5 \c)[,p1S*p`zl"9Qc}\#ei""=%Q4L$bM2O-'ܲ=p|H "!ݠ;٤h&'zWщ_0DK*|'a|7 ?&Qdg+IG&QwXǭ$:2s쮈Cg'C$ |`2X{S̎l;F:iB YmLd,Y'q{S$.A*}"Qk8/r`z:R**-D!D!J$܊+NmGOMT5I"h TI}M?Z.I2qPEkbGSTTn̤smU/ⱐ#B(OK5#zGζ Wx8b,ƾOKu\ _|Ec \c1,pj+Rq~|!wΒ;KCOUc'ٙ'!VHb%#?/Րi@gO+i|Cxm̘1.qE ܔ1%vYLb}R"9Ò%P[HL!]7i=uܫܳmM@Z2QR!Kюě&\z(!,Wvhf$UuL L}!)q5ue$"fI2ޢȂi5I;qCXG,DLT#n(29I/;v8=$b9RVJ"U@ sn}W%NJA)!uDm\뒼Ӻ鉬<:ܾK(FGFt0\D*?p}䚡rܹsD YRjeK:@T-밢Uq+گ;W=oMm -kk"LQޙr;2d!D~Sdžngp\dۤfΐ?Pa]x?ۮ}G7G}K"(?+E3_~C@P#r9E]DWW-W״`!V.s$p%yOH9c܈|$Ҡ0):Lo\]ZDx~}JLRgGT'l#y|.8^Z'8',v4)vYK\qVo%p½ \F{Dy 녮U?0up=Q8lG1?($pHfԱ5D: \K:%th隿OVj]eRγaÆmc@GRR#w0I~luz*.:D?S8V \c){,p1S%pVM9sIxDBPA>^?Ei*"`>AMT["DD"N>rs2Q~G>KnV!f^4xITl#tӨf$]w[ $mDVئ:{Kуy ᅐtMi_ndDȕ;Kxjچ۴̔Hdn!e)L:k_o)q-']X$Y^a_(j4C4)ɥƊTBm!$H'%bb/R0yewN4&BU;m[|niDG##Iz_ O/pc0#X}izϲeؼiLwQ,Cu;Nt"t3ėcDPpa]tv˿Δu(/KgӤ:oRLV p[yLG}_pͿfݜI4*:!ί_)ŊdR2F(4M"U [m,kv̫hoNǍT 3XaE ś \c){,p1S%pSHFh=,HNkQX)/JRx<}+p0IFPck-b+&A2V߻swmD3RD5EVM"1{֢1mDIljMVDnkEzIa1yj>d;zΤb/KalM߽QǎӾh-ZѲGhhD&A U^뽧.|LJk#r"/l;0 _?w3g~ɗô?#G/'$?.pI[$ַDJñ"OŁ<#HBD~}wKEA~o5@;:&@@F%If@#hYb稶RlgȤ>Q_&l+)!z8w2z..imŹ?[YJtpPLi"GoD$%'e{\ː$ö)YE&[LKdJCxcB Ƒ$I~a9n_f;DNC!ițlL' `">Kl>}zS"fepw <~$mV)@ `>ESאdH6>QGʅxe:>b9<3ˤLѠ'Oc2 9YOeJ\ P:zcq^ZG89r"={#kQڜ̙3gɷ)zk|ti(olPJT/5qL4)Fʳo{".M= /hQ#bk1e1cc.0! ?0hQH>GT}ri? u_D"&9DD20a y|5k#n2( F4#!9B@ qTt90_d< ]Ea?W,p1lahО5cLa,pK3\ 9V:Iؖ  =&5hak \6ФqG(?xVi";kYFR4nW,p1`s[w}=gN.Q c̪n)]sqb(LDĬ%)?qB75 \SXȊw~pƈ /:WY]Ne"?&GӧO>W,p1)\&W\1AEd5Ic̪),pMyck޽{7x \s~;th޼9״yرc+bk1ϴi}%dcVߍ4<-Zd$ԜuOcYu5)o,p@.]%kԨ5k&9CڵkOա(pΝ^!\5Ƙヒ;ɛTW,p1W^ÇGR(vO<1K$nwdw…-X [1Sy5)o,pMIg}'<#[ZjŵkO?4F*;Gp {eV[Q \cY~ Fo6>Iڔ:g#oSi@v3$mwC"o'OU/6Qh1Ƙʋ),pMyckJC dl'tAmFR)e˖\e2&|뭷O?T \c)9LjLlCϞ=%Oݙ8C$.+z&2C-EIzhVΛ7/ϭNWkYhw1S5B*U(p5{Uw)S4)n۶mbu?0̚5kGڵ;3Ϥe֭K$;#zy睰jUVZ8,p1";p7G"i/w}>ΓO>y9S(x+/̘1/!n)qDr!ߺ1S)Jf[̊jժ[?݁!ej̡a]w()}N;׌);Yg:vx'\ņ %JlѴil=ԩSLE Eg$e]fk1XJ.dkE?@{챞~ߺu난-Zd;te˷~; Z5%wʔ)ٳgߦaܸqcYH.fEr"dsB![<߭YwYj}Ij(zY85jT}M7E"E5Ƙb #<%ytIYֈ–7D792>c1! Z;QD2# c1c1cJ,uҤI1Vvs S%kC-.Uz&7C2a1c1c1ƘRyKؒÖmu.!M7r7["q1c1c1Ɣ)mJ(jdejGʌ1c1c1fA$.Q|IM9rc1c1c1c1c1c1c1c1c1c1c1c1c1c1c1c1c1c1c1c1c1c1c1c1c1c1c1c1c1c1cJʃ>|Ю];*Uxc1c)W2LhڴiĉS1Ƭz5*t%'\r%Ac1c;0zGo;c̪̙3_{p\c1c1fe`v n;b1ƬRL2%t)Rzz[. 2jO}w1c1c1Ƭ>[oEa/Ν;K.$seeL.]c=%/~&M Ǐ3c1c1Ƙ["k_{; |,rJx”)Su^~^zdguQ}'f[hӟ=cg}ݻ!i7Ĕ <>Aygc1c1cLi%-n|[jm޼BE_,\*:wFϞ=o|GQq3rc1c1c)%⋘Iϟ 7[۶mrH% \?"裏_{0{0vh1TZO{.L GO6-L:Rl'4nܸw߅o&L"? Vg.袘H_v^{mWSnd2Xq3j4@2֍-2xqb-r; m*?6x{/EYfl@5܆ \"Ζ-[،Zrc7x APON=X"%W7T-Æ sٳe8d2LBO.lΝ;l=vւr0?v5\\ٯR ){tEl,W`L4-gF >l۷_g[?or?qDiu`>shgnmۺlcl髂RB HTGpe[lґ֙e9 ~<3vqiffjgyƎNhт>5>*z*3D \u 'P6!«4CG'eqnH%_rIg73Vwʰ s /dYf`늩S87? 7~,ӌܘ  ?x|1~v9/_NSO+kd^"K0z̘1j?ToE*clr)orY؊eaicp?̙3L?]_epQaB|[%( mjM)] Ƿ|}0>Qߪd* 9rLSo ~hj;/53Aዏ?$E|vjݺjԯ$> ?pO\FYf ռ /ߨg<䓑o=xFj®_{#RJxK0E2 2 @W0$pd!J)${. 
Z:֮ ;D-L&ɔm0% )%Z `.2g'OLTX}K "AO` CWAl(N]lؕ'у=>!j}nh[i[ kW˽K&9sj ޣ?oڴ6 $`_*`I'@YAeo"^ɨ˾df~)Owp͎bW^?i'鰊d`Q^YV<ճgO Cjҭa*0O5YeF(y&Ck׮Ėܙ|\٪|BUU+-exx4Tn(*v$t}\"&d2eKq!dOhlْO)dLEׯj.zt<뮻ՀT՚}Bb *Z})U+w @mPh8huU#w>*w"PY _׏QVQ \@I5 O_<m'?Z.ڔZ Т \Sv3kft̊'h eeK0ӱ+jĦ\JR۝U1~5*W\J2F%M(K m< w!QF#QkCƌZe3W2 |dd2tQs"SNf >ڳd.ѣ@_V\nu}ro3wY( +3^xᅀ] n(Pe >7WROORYۦvw\o_Z&q@0 1p\9pMp 5$i~ xɶUٸb.'xb U35j;vVZe721#͚5 4[lk 5Jq[Εb`b4d2LyQK QRe%V+U u>+#{-*p)C6v mW%EkpqE\or&&}< pg3z$e%V7Aߠv*ҖJ=S{m5l祝lʒۑ+(ri{(/.[x`p3Ye%J6k2kffd)91 C-|7Ь/bD>+^qw>lpN$hܹ3.yK.p~o \d2e[ efO&%{ ZjSZ 8N Sڧ.הvXcL^F&1 eڍǻwމ?C>n#^8߇$'ǟ 8> _T옒<><~<-~H Ň͛7XժUwwys=AūsO>{19WțL&ẗA&"P`/ 0yTģ$I7&d7?|P[kR0@hMp d`,$@2~qAxawL;Jl|HD'GhI f Ńmj":צ?`COާw> OXcz ı;|7hy`g%53NR>s ¹T7Fox\0*X_ 79Y>>3b>~)ޯG'ER7AGܜJ'WcM}*.qD6kfgx?6| s8g>E@,+#I'}4UlL\l=;hl'xS%~bfbz)6WeB=]xk7s2p L&ؕ9 `(dL.@8ೕ }g l YXh% =(>8F(dzHߤD$G=&A<3PYд@M@aA a 0K 2W^DRk~ @>~9&DL?8|?rlhW,sQZFݵbYusZMip(K?6\Y 9D/qS'M95O>'B,!.7o8` 4pZcY.mj"9B>wɱ<Ij}]h9uծwo6o__7`OT>֝/ӴlJ˧ԇC_j>"P#\G;Ub@|IR~?I6bO|_9MF-qذat]7ym~+5h*)?=|%XV XqA֭6m܂ұժU+_`M7֮]5>?J(5L&)@/lJcrAyA% r }rl A˧;6Qڥ Cz~6/DbmJ,:%%>#mmETFg5E3"@ߡCc0^ j8s<G/v w~}~*pML߾}ݾ9_}/qV@`̓lqEjʺ Ԡ.K@oYގ~X$ܻpϪIRc?nKCY{~_gTd/YtPv-3b7o7p\5ue-s4e62cQAآz~/Yi2UB|mxOBZ"9}sa|r ~O^}|0bvڹ}#I'_*$]HǪ|wxw&nV˯_nn_lˎ/{օ^$d=]iAնen[߭ MۖrY?>$kZ醟~\}ZnvYm֥`7;W/ ⣈;v/*a( 1Crʯ]?n-B>4Q!_tqMZ}`+ 7ٶIm2'H x$f@ETD_Фf˔/_~?0{9J'23k2L&){\ rLSPۡi B d@N"RF=N#G)j&P/< UxG?-l\EA>j-h4!Z22^ 5.0"ѰP „ Ti{ ``k¦7 € b#8uXlxA˞lPfB󵏿%}}B;hxGg &h[KhmY_.ied;dr:%FNa6þmZ7?cpT~qcp7 ;4l?%WOxM];;p60ժeO~֭}zx<д_B۰K&a•LvȌLg]4{l?GUŹ\^ϑX%SՅd{is@gB%)/o]-fi|/Vڰ05?!@·wBc2YL_Rې _ɟ Ɨ_>>4CZ8ʕvgi:hРPwGI,CY }v{i{kA]sMJT]k' [k|)O1C6o-{0ңMأoX$új7zir:6M4^ş+8%LkG2;!\|jh@b A%#LXLΕ\Č] 19ϩxR9Ǖ4Pؔu/̕+W ʍŲ)bO{Fb0CW/ws_lOT{nB2$סY'w}#/6!!'^k{&~MO5k2L&\k,ݻwO +{4իW8d7b}+eidb e{l[AF*d6 |#l_I6z=A.NYنt5ȴB6M` >o @]Ha }h}]ںcW?Zw*5+ 5@ʣ7z>xUFaE@p(tէ 46kvm4ԸQض^ݰMv{ .w~ S*Id4g?"lګa{Mvhp,s6Ct3}5}~Gcm;ݳj{=zdMص}>|٬z4whx.Şw}sX߰3SGZvz۴o}m|lcӔJh:5smVk n;uoOZXV5J*qCm6Y&X<=٫%Ec38ωGRwS'xR8$)+u+{\'"x?}LoZw|Pb2dM]XE)m򥣀(q_ ^~y'xxqٲeF=P~^z6kd2L&d?Xt*ج\h|/:3AY@04%R j{ , Fj\s߁Z4f)SAzGW~jgSO=@2:0M [˒>Qh[Kckȶ e`q<|`;>L+zuY-WAmjSOA||)cM`| rrpx\Yˣ'KZ35q${N`?[/p-P-e |yLJzp^^,oȠ6 ɪ%$u3'FAMm0ytfuqt)@v@5f.Y_͙}L\MGIрm|/ ߰r{WssH-x%<`7 &I_0Pۤ~?=Cޥϗs8@^TqF7oJW7f8qw)AkW;/Y4N%Z{ZPLFվ:k{?kb> &JIiA -Z)^x 6m[bO> 5L&ɔU΂|(9;`įWZE>4X~gz*'+ ,Z Ȝ8 6/0>wЁ@`ǯl01p'gb.`d-Ԟ a36\AW|]>> L.l}`ei@hV; 6eee]FGG@ )c}m3:Z($[]cfǣ~M^cR.t٤Y^!.ٰNqeb~Yڕ[,%Lzl=3,hJ삏ƺlcp(@هVݽ1]f\p‡t'92+9)8kd'6䆎_@cy|_>DtGi<%U~EG+,Y9yȉß >l(}/~xZmߗr 3N-|:vw%]<91uY:r ]q%{!nFܭ;gȲXzWno{ I]]f9\"v@^xlZUy?o~/DfG ◒4 ssCG`3ùJl'`zO (~7JQ584W 7~b-qT?w7S|i}5)MF7R^1OժUrygm^hmo;2kd2L&S); ԠXԱ]"xmuX>KR |Jd]l,fY3V_7I˨mK۩D/g~$?~4#iYn'{A̢Ym#9_sr=#.eS6LۍbʭR`9.e_xB S6_㷃<gܘLJO+p88,=x`aUV *e f͚1n@cVJKv.* pM&d2eaˠ^A w 50h L:h2`u>&hF.5& *x.5щ҂5jQ$b9]MWM̎ d&SN`28Oo1Ӻzl ا}to z$p@7@0gjDoOkBjROjo7 1C1czT\?WL;x`b=pYeK9`;s uTor u9VX<38$k79s98K̀Od2},Dw3\[DGuqI_5\N2ɾ}wtCTV 6{sKr^B2I_ÈňN<|s,~~Rn!Fvě= B,ԩS{  7T:!PL2|~nm2r|͔[[:B5k2L&܃d:ʣ_щ>0K^ 2~XYVlA{\ fx, `DO*%[ @%`R#5;wj*X. 
ѣ0E_d'}`H&@PIh& te4Y}~٥K@@?qK!Ϥvkңw,K_m1(!PO W\q>EAɘO# U{l^z=hyT0,m{ 8?>9Cv?}ExǚJp1ؕOYkoۺu7tN{Oz%!#)\ p\9 #zU|Vlpe֏SdKȷO=ȟ< 9n3ds|>)#f  .^׍?4>?aZc ~ZMh-.9Mp ^R7>|08l57YjrFH8YC3XN3.p?ppLY}bG3[\j&K'n u_5.k?[o5P 7J*fwS(v5o_ 5L&tnt`_d?JpA0e8GY7 *2P"XW{j/5jp f5D^P+h_C \$оD@_W < Pɺ:{`јrz'q9 q W?yYO@%F!:&{d?{v&XWcIvr.nW {, D}a>uqy͗ c0;U&6[eth`c֭vվm8:Q7 Θv@ u];)տ5, wYnntϣ=AK\||Capi* ^/ecS.'.Di;ՇMS2&ҍ \5 O(ױ|Ot '|ձp$f&Rf(x'@٫ mVL`氾v6%Uwgc~葪ܔcbpw¦d7@] {TypI?qW׮]9O-<%5Yf\0ňec)WT\u ߷Ym}u.pdc=<^wu0d{_׼ |wQTFd2LpI`M@ VqPW@N*Ԉ}LpD>'{s H Dyt (B4{LTF6m>{ŶA@Q_?-"f?t\^Ӏg?ڽ_ju7C%!`p5g-]=dNo}  OJe+`w P0{f8kf,x>%x.r [}xyf.kh2 ,N.+pPWu^yYDг rcK @6\#5nNiHyU㺚}Gy$E'x=@mJ_ߛpKҰ샮]t|p]p|7pȊm&*\774}tC`ڦ}𳅓'don pY]mf~Yc6m&3\lAnO l۱+p$ nV|ԣY,ES,FLx2+;)@j]{'0׀~R(\[ׁe/i,[9&wEݺuՅ< 78 83Dpڀl^J3ESq5.Q |gBC| (;8(# OA:m^Kd=kQ_BAa rǚ+M. (E8eEmcc=:Xۀ&Ln.Hbp%={ee[u3WOP6OoV)+#nga{ U76١얁K&j\#:Q~kJh, 2R2n >ۖW6S_aZot:MDm7q%2E;hΘ[w^WeJnFun~S[vTu.ܰuIJ<mO(}  MYp9>5ɼ fwӺukr$keJPU+W܇ :묈&0 瞛U"!'_ߌT\d2 "P^=: 6lؐ 6?Xf&a@nFC +t8(d|)62hݻkf'۱Q#JLB-Áڇ6_>Ax ܶpc3l)1oW&~r{L)Z,CZlOnGeNʜvV.{G]JYi3gنF`\mG'2{#dJXehCn_Zի;H K\VVңMحW}vz&-۲-? v΄ kw4yso=tMմ/*Q1~Scx΢ə| nsZ}/?8H0-[:Ќ9UBA>I ŒXAbJ>*#ۈpLuPpǥjԿ]6!VllO@cJ̩#ΏӢO, .GOWT"%#&AN಼b68@ }G?~Ǔވnft8Et8SN.@׃,Y2|衇.-q>d"##\d2 f%p$55PLFbcOZAعdժUl5@ b%*7I\,Xv 0<$Z&`T%:NB# <`?7z<\#jc')`~4mڔGu<T'p Hb3p9;aߵߔE}!>TXhe&:|L ,Mݚ5u)(:w/WahIaٌ}gwh ̍7w#6tm~%ۼKaW_ [b-^vi}n#K' 8'=Lf7B&|v`hYmQ&й:'8/:_RR&3.o~2/ɧ~}ک,;Y/vR>z/80Nvh?On2ڵ=yYLb&=iXʄg2G,lؚr+@x=*0T)efT\v+6^  &K6㲖Ӛr|2p=> c܈"n8Ct%Pn6_y_r42[sKVDrG{ LL HB.\j~N-̋>RL]%0W3ܸZjx(5k2L&)+\ PG0nʓ'  ./4XثAH,>vu@lWUβ%(O W V퓡uI4^J jUp6A6P`O? e1I pfP]e c5q܀{t4ܥ'P: -- eXGz3e3 G .;,\3}jjꔿx}!>n岍S3JJP/x ^o97ZgY2Ǹ'uJpW p/y@\|xYxpoL@j#9t͘cH [(;G2}0O?pA& n8 ñpydL7VZ6NS)5\lH`-NZ tٽDZ}ٯCնe)V?amHGC' KXn)i_pǎcHM\Đ<|`..PW.]xGq͵.]sG/Rpzt ,Ko~' rO,Ɍ|YZcRvbbHq3c%X_?<ՠ)Slڵk2vy\d2LY &lΙ3 E00z f@#6Zrz|z2Y . ^cs~4 K0OWmX ՗?#敶 )@&=.׷iA1dr,<] k߮%p;v %e@9 p1'СW,zjcm*@[`]:`9N1Q[9ekp\@3QvR 0<¾.WٛF-I;w7cq\@9 MY!?%q7~"/+_^ %?2<]W:1eѱ0ڏ?s8\ @Ԍؠm$^g?wUB_ŵintEaǮ-]!0ڸmc'@eBf*ayLBkeR쟐m_eĩ#ݫ퉝,yc}lK>oLfj4_8.q"?_zYQlnv|tɏn9u'bb-m?ר3mǍyn,d6O>c|Zj~,^{hvmM7\tE5~`߁њPH@5dlz;~2Ŵ4^ (`<5rluϞy{>G4+0eN+W.@ zjGgi`q 5it(G8}rW92c${0K-1:(?ٛiCLҀmM cr3 ׃)'pvxᦹ9 ; G3Gp5f)e?S pԶe_{R@\V5ojI%֨ tCfMw\m1[B!&'3-7Fx _KՈ'H&>eϏLS0Z7xPޟo<%vݥ '1 ڮ,?tڥ pf-׃VZ%~ȁLoPy+jA\@Fvտۚ،TĮn9\77gO~|F-yýI@)\3s?J=Z4-|oڞa߶mޒdg p4eV4umLdwTKBd; x$o9>څ;uti2pul;vLl p%cMN"(=GX!6m?jy3;[>, t->*yiR8 j[{6xW6)|)ljb-_z#\wѡ\ &M^F- ͺh~[(7 NpYok9&IK z3pXyسo{G_Uuu}T!t>|ƽ6jˮȱo*]{up6C9WqK$T."sG]G,wӿpYjxObHF I<wy EGdA$f\DիL\Ǵ>|17Sf͚-Z $:7j(Q" 5kd2L&S&\CD2-u4~ x7W$>Q&T>$8e[d ݬgz ('=ՎYvF@kof; ~Vrfv'ˋخ2Y[@2Ѩ^pC6>95Q\9Rij1X=}-ڤmU %k)cvg ] CrvoDBV4Y{nֶް,"EOLǭz4pɐ] \2gK5YמX k&N ! e\zߣe\;ʈ{啰icڱe+[_B[PC-m6/Y -n :i 4M.: 6+93G /zt5p@<қ.9dˇ͑?܄(\:Ԣ~d>3EOxGcȯz#M}|Yj )`1P%<7þmƶ?שGL\AƹK>qpiml=m3we\Z$`Hشk p߲e wMjPn֪ `MtFv졖2`CܵDqL⎛4\#[Y_`F^|E@EojPpGPp= jds?yZzTPm޽:ojs>*Zf}M1 <8giWR߉7 '(AX\mߥL_eZ&p!p7`/2O[3?YlU@+mΉۦ,r<\>y`,b}J9Y33|HsՓq9߽x6usBeݣs2Y3Ҳys}v%mB> _@a?+M Orקܮ>V>nG3 ȦRk [ Os93MnAe lc>M<&V3pBcKuV :' `_Op?!17*ny,C;T|xݿj֬ĿRcuPn ՗XpFd⪽n|F^v4ij#ݧny!Bmܲe6Q#$Eld2LL[(?rk  ~b@!{)c;/FS3  %`f  }]^% t X&9O+>ljٟ# 8&'W{ep6|>6ۥ16'?d|p,}wcAu ~RIil{,륥/L_p8umP59{@@syO^ŴpA;7[|?T~fȍԄ/?!I!'zǝX8#|DGlOµ )v!x= A.Ӷy`I26;u$ӷ~$j/;\-> ?F̂/'Rx\F6羟0kj? H{"!~ %/ђRxH=0f)x?uy}yShQ?DO^k\||5kd2L&Sf\2(1L:|.?xF4XmS~ = Gn]@Ϋڷ]ڦ->'mBb3ǀllKl9f[昰4mb@k%2c aP1 {Ͷp}V0oiq뤖}ّ νH$Y0#7r!n.#!?8_W|K3?`ss< nwMLr`5/}@ ƾ?~`g .}Ԍ <Ǐ;ض#p1柨X#-¯-b7y?+3%~1)xJȶ{x?ol?9 N&&׳=]IKI&|;>\٧I̙3ZjDdeʔq ϔkd2L#p  3&f a7H@Ɣ P?:7A=AJRkedn (Y>H?9FP@؋2f}>l˃? 
J8Rۻ{ J;~AxX€UI1ݤ9 rA8(`d4F\c"yo%\MI' +ۈ=_9 V ^>pk\'] }$DZEl  Zyd_ xsx?yc_"*=ǜ+kb9b&f}rb9b;|q>߸Olb.'4dq"駟w 'ɑ#uqm,Lvfd2L#pMI&0?LXc2Xh 2G tl\Sv \5k2L&n_, iҋE#YzL,c~mk׮k\SVO6&ۗ;w: Uj֭[qr޽;\d2LpxA91bx̎6\ZM533k2kfftEٰ:$Nww\rMbv7e˖m9eϘ\d2Lp'KE 4OD3To2BIԠ5f 5533k:"6}PBk>ۮ]`2u|/_:ffe}v<aF ͛ݛ553֬Iؿm?K8wfey]beϽT3ڻmر[ 3L3~s<}ar[MK.ikԨQ?pDڵkhXRJ 9~GJ:xpOz*osc)&d2zGP^zC|)Cez۪{UeUZuݷU[K#`{V{Z~fflʺŶ[, &l0aB֭[QI]KwͿ#{wMV)&d2,Xf4i d2L&ɔf b5k :v5*(X1~7^M̚5+Ckd2L&S[.:Ԩ"_܅,m߾}?^53dwoI#wޭߠߠY{4Kqd^3g Dׯ{h }VXԪU+x L&d:|E?X~}]6 Ǝkf6f#ߟّ?> ՎYj&L:5;wn|`q9;_K. vR%&d2L&d2L&ɔEed2L&d2L&d2 L&d2L&d2L&S\d2L&d2L&dʢ2k2L&d2L&d2LYTpM&d2L&d2L&)d2L&d2L&d2eQ5L&d2L&d2L,*&d2L&d2L&ɔEed2L&d2L&d2 L&d2L&d2L&S\d2L&d2L&dʢ2k2L&d2L&d2LYTpM&d2L&d2L&)d2L&d2L&d2eQ5L&d2L&d2L,*&d2L&d2L&ɔEed2L&d2L&d2 L&d2L&d2L&S\d2ςUV&L{`ƌƍ+V7o>jsٳ> 6lX0~{Դiॗ^ 6l7G y-Z&\d2Ҡ_~9Ѕ.hݺu0dȐ`1ciӦzsqвeˠQFABBB0x`wiɒ%e˖'N 4(8p`зo_+u˶w筷 D(Q,M'rG+f### ,X rW齫2Պ/Eh)VP!yW0;vMrʑ޽{۶} s1dd2LTd>#bl=߿PtCYv>]vA>}͛={ts,ۑ#G}"F?p._e[/ۯ[n3j . O䓮wRgp* yz }஻6`n j׮L4)7o^о}f͚=c9-Nڧ۳g+ϕKMUg>{ycfygo ] l PnWY'W^Cmj\jTbժUuŽB7B! ŋOWJ{RѢE.[#w ;Vӫg+`vʕ~̢eK/+m|Izk.sMԯ_?1rH=V'mڵ:E@qB0ٟõ k֬ISxFN7tS_4yתvrE/@,\vB;裝6qTd9J >bk(N:X1KK'Ev{3t qSurSC qw]sn/  m^qu*g_}f۾_¯nqn:owU> 9v=B7B! R&;$L`Vޅ TKZW== |>Ԓ`men Ğ.xꩧNkʟn6mA'øQ`JSO=@n˗FeKB~^uҡ:#}rNԽM䢡 t1f }P0}'xZN,Yr G91d*N!QuYWs8TinӼ99B Kˏ?~=37w_=M0;[慘/QfǼ ne˿1gZe7T7pC!B!eLE32.`iOX~R:ur[eʔy e>ߑIQI.qNn~/ܳ}LIMDu[~qϰL.TH/ ^{ ZqvC8tȽf5l^#nNPx&)mu:,w8J4]ϻl.F"p`5F \{.K0rR=xC̷8`̡kp#]P qƱZ ۥr~*+!~eߺ*gn!B!%\#)8@hѢJ!䂠f阯[]lɕ/Z\H]%J˽Q;;)gWGsQJA[N*i;_ #Me6J#UTa#kz|N_\!;8 7?'$.BaW; !pC!B=T-It)W[tIrׯ?CNTK^o.,[BY$h]`)K)6Ab6(,}WP/V^z"}suj#br}]#@+{ׯ믿π6m: (֮] ">ra',_p᡺OB>n!nb!B!B75egN_gyfBp)T--[\ Զl,\j #]I W ?Ji{T@Q :y6<.[]`wC ;k8cZ+n>>+69M{Iqw&e*U*p\Q܍7״iߡݰaCBU~|.קO;_}UI I&Bb!bn!B!Pre7;x^*bF9XxOjU', 5tQR:u+[lYK. jn-8w}Qٮr^kw۸6i i]C:E+ENyIt%1h {.7nܸ;Ľދ_o wԨQn!nb!B!B{6b&䮠3 vrmT>eq}7/pt\?6!%7 U ,~su_~<e$M PO_ JmXV /8\wyoFgtuwzX^?wJ*T\mLV -߼HtAR6}{},`p+塛/W *O m6mZ!n!B!nB!ž ͚5naٙRv&f炩ڷo%DZغu RvL[R]^VеWjC+f{uP'I[{YZo)e)me^x!!w ˽* 3K,9\H"=׶@k{BdϞ=Q#;[ Az%㎄.e]s8DR ?# mh:uj!n!B!niȑ#sIHW p3!B! (=z~~C1ڄ6K}޽Pf̈́@gBw)nR9Nn {_|غu^o][a\ Ԯ/Ĺ#.zO)h ?ooݻk O@tBN|N⋨}GM{5pnP"'Fnbn!Bp.PQSv[@js #|j,`5:t萨UOl ʈ}nxϸwvխ]m6U%|JU`e'M >ܯ,+^E˻G噱G@0fZ`}Ԯ]ۗK5Bu{$?|&$cu X9P,)@5ٰaC:l-wm=VZMPhԹz+ߺ ΗY.Q\ZM &lW%wӧOY$M__qHmd3x :iw|&GdHFK#nbn!B +WL 2$ Ԧ];e' pu^$TR'+fF dO颕ϓ{YЃZ ڕk'61*h.,Y⯹m6A pe˖%VXXx?/@}w`،z V]tQR*$)#8v{ieoTRa"Ӓz~w{q,D;ww**P7vgY !лwoniYA6\r A·VUGQ^p#<7t j\fN8Kcɦfr̗:jqPV%ϟ3ia_4^zO!/ <OcYpC7C 7pC 1`` ʑƟWJx-h8 Qw8 $ĬY Oa̘Z3fs u! ZW9gϞը٩PIcj $*_]o?œ F8qw0a/z8so=|7 \'$mlntҥ,y πkpkyїEI΂ ҁNaÆȻ|Caw?$KܶB@N{. u3P4NX - Y(4pY鐘,`& c5TɫWY6:#>,y˪e&N. >. 
`y&Og\te7z8?OYCA6o*_yNf9YEBDc^.ZQ߀ED!I:6lؐek׮~̙3}yYS^gܟ3@޹?[n!AM3fo}:І'~r5jXj`3wn#q=^ט䜂5^J;X}$㘟~>ii_.(Y0L!OD] HO{O~ QI h2%pbOwwv0@ Ldߦ%0o>wi4Q}y*V9眎udmٯgmNj5ù (D gesWŎ::j{^)>xЋzg}(Fex_+'JB(聕>y'$>V{W!@²TD5~Ps"N\70H*2cpMe0Hi^@:cf9N~)1d"~Ľ|8 6[IdKuYLHeEbL+b9 4?Hʌr8ʓeOIWNe@\:O@[ ,J9q?q]M{!E(w G`2 7?/l 7?."{œ8 h-eE 187=}<3_a #+b "08)Q3LMA+ < ~r+o0Q$]9$ sgmMg3 !SJl֗=CN{x+Ok,u "1`r~j4S@Y;V8cP~9&E~Ay|jks6DeXs͐Yc|Ou;RTw1*`}wb1oW֢V yո\=.Xpa~ڙpy\B&08-V.1L>i 9FJ§9 gGEU/j24|7 1Jj`j!l00(aP4d@ps|b$XImn)2 # p41iK,tpHjZ%В#3Vt0r<j%HH}ҁ[` Kr3tO1";) bs>F e-E, ٩SbƍT.x\)τg&!Sn;`ۜU\8}??.'9kE7{%^{tpVt6N\> pf?'/,/h5,?~4x:E甎"G:~a;.ios.nlo*yd6(Sˉ'Z&]i1hQ c>cPiԀΧCH pDl/,҇ 蟀( 8ă f^r b|7mܜk\WQryì(ig~Ogʭ;WV_}ffrhHwn(pCv~--I-O`33N>@\Rj&cmP}Rw\ &9P@T1X(xpMVo *4 Bupo&~y&k8'@:I hs#$Ty1&6A5 aǘݫ}n&ƪE>{VqmQYީkQGVvNٓGߤuN:;imEi+uqri(#wg8{"E83x@y}z':e=_R> γ=Qlw93% 9&I +7z6 26;v!uJ0n-($XE=BYxADŦ5'\ۄ[Tlٲŧ֞N^Hx'Rha^~=Rv2|&K`%xط؅KK.ī-;Ᾰݦ#`g==jJ}ʚ>uCC~iPvyp769ʜ}(CkWO7C0FNlyצ^hB59epqJ]a;wO!;5U*4U5*RZqMVrw8=ʠe`ԸBBDT>_йU#Z/j5k,RPV@/t\Ƅu(//tQZ,=7j) /=Ɛ{qbDzÈR#?]7j|Ԁ5VƕC)A4t2tN4*/cV j M/$4|ҀjƸ)xSQ4^ꤟ2xk &Rn zGڀ\fM\MC=D\`aeݿ=[3Ē]ݪONzPJ,_wMpι ă'P^*Ƌo3_jWu'u޸CbKCJTaQ5IP=:ɦdalC$r 5GR EpFs}Q~P4'ѽBP G~<}j7IGF5+ җ_+ͿXyLunFǪL}.vz*m(qD/ `,/W$LmUK0Q`3wHFU>]^}/Ɲ@@؀O@1hW&kC6'y@#@m-[x<{`)`gg#192ȗÙl^1YN; oX~8)Xrg9!5Qz7LP^%X$s?; > D >I{e H/\{-I$y抲fQ#M4zJp20XML`RlH2Or$SNI+VO^Gya<82»$im+yoU_*[Iʞ":.%N)Ss()suvƔ{-b*IE=&ڛhKvcb3B|ZÝhy?SBf W*jxң*Swjl 4r5'5!WCRsa8u>ruN/K`fFNV'# 9k|MˀdN-"ZU5> I/iYж6 ݬkf2ҀX݇so6s) UC3a|%i@h"賶-܏ M!܅į0s+.߯0xVWۮ[oX[˹Vjq{zEJ鼷8׮xy[;m?^ޥQ<"yZeа>]U)֭~p N~VLa>/~>?3rrc[&-PwBc=`7tjS7zv+wDCsLGf)sR .U{}^pe>|ʫw|/h>yIFnщm2r6;i@GK3},L0Do )*N9Io#lꂱ[}7|LҀGY)M'b$A e\ RgMJ߻"ҀIz * b8}wUO]*ޫ,Wͽ]{[Q{|5^T싟lm+a}_b&@.磶ݛ2ky89c&T߮Pӽnfs} 1o6n*-ݓ|>4JkMO>^.]ejNWr+?i{#Vi]ˮ2}N^.TNVhe*ᕐ\QNp1|@zLOht"}'̘&8- xae--SiBe6LPsp{qӾ`xPa& %ip Z8~'3*po[b.d`'Lf21JX@Ǧ餃w|Dپ9ـBUOyd-qgPzڇP)U pyXyD]Ƣ fǤ;cZ9[݃V([{=$l*58kI3&x^@P'b#a1dP  ˀb!eF7r7`xgllpyFDWo@Zԟ0 w`-ug1\wHY1\G;e@ ؉tRWQP~jK|gr1Vvv%o5%I'8D\xX]d.{sڤO}`HsW٘ZLp O)[ʙwvz{ܭ)0G X:Tg|= 1 y 4fxD!>ྀR{_IrQB(.$Vuy$MmT{&,oy3A@ȨTF|_ LT^&\)sԈ ;]p.@ mIjQݻmZ zsyUޤ[7s*-ne<,ӳ1M/ nU=6K]pQ OCzWy,3*OE%5X ZyVd p(^6Wu;.Ay+x[%s}N8ʍc~ o}swd'3jV˝oAK,#̡<2g( 6 i1r\ao1tQq}>D*z b4(;U~WؠW0Q`@x^ g BwB X1QqTMcש=Gc30cG@iKs&X\kթ5k[-gHj.8ʢnKnȄ^:`,ע@Wz#(ߩX˫^FFbI> |oޱ_0UJ)6^}~|PrDe `RO+5trFP4DFlakܜծM!Q#*ۏZvse+E>;>h|p&+T`kVw˭w~2b +%vW z5>[a1J`VSL),Ѓ7'8@/Pm>L}vq|ҷ,Y8:F lI1A%Sh0IU՗̵ R^ 0YL N˽`nuOpt0e/kgM Ƹa7xQPOQy{~x2n)ax5WϪ7aD g񒩦0GzR)xP4]+es9SUc)S\4yweWA.R? 1X}4aO:"m/=Fm{?π {XVE^zϣJ6(8 0^&AU?=rf`%eRGVٽc\u"'ផ;Hu.`T~91JR"loG1YLyi?P^}Eʭ\?d"5ٔ䋠4ݡs{h"Js>o*Àc]I{½OD^jzi_LxFuzNtW]$^s6AN=Nt=o4m&kQ=dzg%XЋE}Q馺QwI?YG( .W3;s&׹ir ϞrB\ \u8p}Xά p4Nt6rCp^M@H@fܞ l6CIjDb P`F 1SFq˵ԉBÇ`;ipRljYt>4P5~Ӓs$̠1Kd_ g .n|"iKآLR2u/'0z4^|;Zez)>d\@`*i0d1p2:.ֵP>K`,?K('z :wr1?.sJOOKߦ]6nܰcq f=maeݲhA>pi&~[5kvHܸ"we*?*{ѽc;p{2;(3OpQ 7w۽W\~:wڙ]r׺;F_U.&/^);UbwfGJSwzћչYȻ|)>ɝ}xEΐߤ-1r'CHQo5$sP6^&R1y)O:'Ks+cR |&#y5\u{Ky2Gw7DǏp,S[dAT)/d !rQ0D!RV ݡNje2O@gKdE Rʚ_(+'&#;ߟ ءmޱՖ]LUqn?*Tk衮WF7mן_:N_]>~_o쬯\m; `.`M! y9%-SM:x.3kqws-9;Hj*_@_MT{蔬nv^sW_p'])/Tn9 $aA>V/\ XqnwZ-Ils&v?"b^$;}{J-/>>{%S}kj_/=@X8qT.vtO29,+T]et[i. &+w{W9 ԤCut豩t,g vKΗy()y_[b@]\{| <_s#<ڌIwg"bFӽտ6w5JXT(DMxYٵQ8:ȻTNlWlX%@i(7& /SާZZ)Tqo9Fsڻ[eGgx֤C[13 ƚ d@媺@*+lHzV6\թL̤>3gg ʴP?ui/;s~ڋ ?{Lpo<+eVX0q$>Ca\KuЀDU$4j{5@OHi$wt7 1@42HAA";\)wƵFiϫwlpV f\`-;@JW (qMT:(s,:5+~ 2~a@ch;kT҆V^@ZxgiܕtsV$|<᷈kn!]:͸d3-!8 )PBpطwPM΀=5{'al*6w[ pߗ&R*[1~ж-jTͭp)tWXU7N'3thdt}cw"o):_tsw_/ý 7p\N`QX :g||e7xQr!Sg{.2"e& ߼p1Rv)eAn&q|Vaa +2,}S\6FmI1`g|&R#lAg i2fP2@)Iʒ$QU.'؀Uvl@g@w+I>fJ{@Iݴz-QOSfQ+@ `c\ԺcT=>}[J}{WpqOPq2@U*}Q ͪlp-uݹ4. 
ri.C\#H&?~>=sWL J@PO~AU{]$+9_,AAAk +>p-D'kΘE(W:[UNbŝvԞJS[s5y:dȩIPM] ȻU;*2rKǞTw#k)Ao|sDĘT}'JCԄ! ՓD9׍spa]ei6U;ҊM̘׆FΡ|)+{o1Oُ4F KyZ-uNG=ޓG2aR...t"ITbT`&WQY2e򌽽kùF"mY=6R=L[Q=.C~V6KW&(5kf)>5_v FLfrd8MyvQ]Qj-8pdP׺gkKٙlAT 濊"Ha?jlh%0觴bkX*SoSƟBp7kֹ=\&H*[ɺRz[*O >#rO- ޼ !_rIVuiuD-Ū Z]%ʹGko1<+ {:B@<;3pUE hLd5s|Nd\SB X$&[/{.et Cu(s ݻ>]I.<+@ZOK0| RLg{pfn;! JUgWٶS9ÌvCuߝU.F&-}@mRČE S]852l~K:KzU忆M-ϲm坉f HnϸV Կ3خoKu:彰 O:۞먽f+gI {aAWmukM-7Y;hB>˙d6N*Qq7Y$Y4X}wSOI; ,E hb,Y6‹ek>,,.]qOu2KLa+?Q^ W@+{$1&_SR:D:->y JyWOuid8ǣ4vR4{ $l\yRτ%>*Ih群B%\_YO;0e!w~\ۨi~nk뜸قCz~#]:r0 ۿM *+ F2hܡl\/y'/p9Dz6㔶:/߫Y nf`ֻ̐'_'#\O҃ wm~1 5SXG?jז| ԭzHIͻo _5J/џ'V- zŤƖep Aߡ'O1uUAk sƧ?n p?d= p Br*i*osϟ`pb938D;maY$HY:֧U0i dle4ILܪq<sTUeNi\4d2T`r0)HVɉ4Nq R{d1A5 \Kcu7@e:X vef[e/~X&; ?,LcEϓgGM5|^SWUQOز[k";="uF%,೎G)?,װa*.7e2FyVЩئcN~brLY? L]69>Oj`q=#$u"7|El Ww?DniԞl8Q{WԀhiK(cFgI!1 ,aWKOzy(fBUq:vf# Ԝ^zE/jVu MUJL|07JZ^H `:SQO!tdp.)eӂHT=)В+3hĘ |O'gZiE9p ( y=:w(IXۓ9kȧp<>sMwk&>pH8,.`y>oAے1w}[)[U' \`h׷W'|\ Y-Zr)We4lzswi!SuEʭ׈:ZB_;\+Y $0E^1 ɮ[-6Lag0mA;.^dObd J)bj<e:D rh13\QnArWf~&cǾ#3@xm_[ߌE} mL!16일pww%3w kEK !v4b tu0R{ĵ_ꎋ9$*N|@ZM9x&u-k>LVdI})o8 #ŤS_W ! R.}I}Z0@~Qs\Vͨ_7x8ܠ**`m>O)eBUZ TےH-$) ACAm#= p PW0l쉄&cj눵t|lpugW.HPe2Ȁyִf^IzV-H3P_4Ϝgh-ma~XQ'Sm k/074XzL RN73[Hh\ ʯc=@p5oT[̙JvlSHx(Fg '0\#w z&uX [/ˎ\S975߬k :$ :vMq7:pGqm8=×G /XCaaFbIT^.+ <<}8bT4c)IfTnr`,˵;\_eӊ|fwݓ]ݏ%묙:Zgo PV)]}[o}]+(?*aZ:761~ݛ4A\71; #OC;8Su)@\@tn2\c}NP7IӔe|Nӷ{rV|SGsxt5Tλ{\?R[RIIɰl`QY\6G#\($J,Y( =YUvQt+ :jt kv7O p4arg{1s<ⲙٜ\{Z l ~.3q[ϠõJb?_p^4payon0ͲXx}kUAeKLΪ DZ[.cOVY*'KMaWyuO qSKJG"t(:DW w sH/tPbJ(V|1nv.6L u\fBϻ&$ո_[U.SspU057q:&z_?w#eA_eyP{(֪Q,\Ho,Ï:^*к6&DE,[b3Z *^g7[eZ-ꚉq+Mi0E({~AtMUN>R6vFیMe 6^̧z ڎ(uV|{;ܠF_c9{mԮL&f]n-X%Wewm~G2‹L0p bL23[vD۽+9GRc4`];l(lPSDqIS1J)+p#2wl}'Wjt7 &m{{;ϗom5wkШM/Um\Slޟ_kk4fiuBp qKFEi˧e?Xi,œ"k 5*аlIHy7xtGBboVe3 (7I@E .®oI!បNi--X)5* v](i.}hM 8XMޏ&`/RnԳMgQb >ZROԪ~3^c?yu(]@-c1[`; F~GI9>TD=C'U%ԭt{(+Xɽ|7صQz~m>S޲g鸣p.c9|FmW>[K9eKVU6S] (%E!. U6q88L/ayOِK+9_ skQfB,pT )5pM'W 5{SJS(~2:hJW˘ F@X\(ծAyJ|^9m.z@ uϝ pMJ2mV5N;@>pul}gmp"߸ A;P@߯oRzVQY ew[=j?eG Mݒ\Vr`1.anq~}>m "*X1Dlz<7dJ\ס_mǰZyeӱiSwr39bǮIfWzgF֢%T2Px,`9\"@O ?p2 cxIVTMWOǏO (X6GO^d8xs5S28OS\FԬޅFγ 8MSngMs XkwO.ޑw+uC c3hf p9w?ƫl>e+rb4pC!SL6p1h ĕ %0˦JN68/ X'2Fq͵lVtTȏx t̀²l8`$(POR<1cpmY>cuN0&QɢN(/jF٘q%poRRR|ͣƲ/cl$*\ʲuNaK ;_i|\P`Evfl*{΃D8 .Jes^IU9 c3m6Š3{D6=Uo ]UOcˁv .B*#8N+zaΕ Cy/z_QwZ,yGBceR~Ɍ!G=C5KG)[U4[6jnbYg x8ѡr-`x5{&fe+>;7%;RByŔ^h@)}KDdY(E:ve&4ZC`3`3 ʨ4٩|Fy\i:x()^a74x|LZ7*iS/4 =tu64lFVr_rs] p+yUCM{n'}ܸ=>'ǐ n& =8 wWZP2f==R"$]>LJsyEq: ͭ]Jy(6W^ H[rrZL!b 75P#E|G{HǠ7dg2pnq5cAfS Csݏ]m')O̠zeN^h hok~"f2v{k >Hm51nwk AQbO&f[0vJ tOm61{UxZ;/+롯 k-dkh3>1jV vg.iY mfmyWߢ6/ !v9 @ ;ξ^t-2[$x+=B-6&ְ,x Eq .?R'Fph*;5&{~Hk`xQe+ hFTJ˦MzoG՘UƔzI$Qf4{`gN'^}e~;~ Hl 6\!06Lg+GyPxx0TJՕQg8` x~\@p5/,- '2zۈy&Vƭ jo~OcXʆDt)367t*&U1<S'6>1WB)X Y,vpNRYlpJ=MNy!oKMaw\k#`S/[:NY}ѡ1@a2 5\-yKtk He22 *S*W;0$c$`$GPhEmG ]p̔e'Ж[ҁ2Q>JE%wJKU/~x,й=0DGqegIYhH]UT+ƏMwMPP.yƠ֒/^ꋗXʳ*e-HС9:"ly?0/$w(]dBv|C*ӑٵwntbgpǞC㹶y쾇ne@p{l^VP.8וF*cc~EB cg`Zqe\2bpG1m٦lD2O V10hw'e ]9G_F(v_`A'.7QN20e `uCN6.vi\(IQmڮo@Ivk4I>T1~soQ35{}5۷W@;p=tm~⼯!-V!ud I5x+}ˈ(&:6%n's]UQ+d"4eTHQ lB- Hoz-'ry?wXI .%c'%%e`HDG(Sigi~\ #& P2Z@x`\M"$#hOݰ ).d\Ryu\g݋GE\lMRעo'ݤ U9v渨k\| eNk8=50nz֜ 㓴"JWqHۯ;QT{]mv) iW\lbeu+ljRu_Uޭl>T*_֍qg'BIUb<'@M,0ŇWdF ͳ!_5MPdC{+w@ _%-:W0 y1> =:s3; \罢lKP'_ (S? ƫ}FM9jY_&!P .Џn(|p_T * 01(pTԦ\s]l2W\({iXd0 2(eH@%o]pyi YJ%t*@V(P|ɏePEڬ.euhͬHurټIBM~yttJpkOuͪUSÏ2-( z5oժVM7K'7S^spQ͍?V|Ѝãg^jG8:CyK g:`W\z[*::wwh@ʫh *xs>x䚩x}G˵;? 
ݎCΣVݢO$KXNhRisE16?^D-C 09 p)-s4쪻/P p#?ǒN 󣁲h³hg" FLXI;\ `a$9 Zѿ.nƲMR݄ykkN/, &/X\'PQTVXptmH%]f;c#K\,$p A &xH̱S!]n!d`3 B&e꫰k0hQ<곰|ގeBf \SRrhYb<XM+3S}H:R92Ƹq%~_ _vQI[.xYo4M|VˢDXn}S5B >,@AULzx@|_`NUZY*@7=JO >*L}* Ȥҥ:?~,gVY4L6̃JSOj~YUn鮣agg x%X#zP 4omi_'TL٘~q9h)M[ jG&Y92=vOzVw 48.]U{+3, ˗,dH(MQ$u Aaڑ*7|OW/~M뙫Jl bA-MSٗln*ʻ的Ya #Fceo~\oPZl '&n]_W}'Z 58\q\\$ϩl4ifգR<ՅTo`>!P .KA/, sK. evPxRLRԧ0T$uz+hf.K BCdW/neC31&g4~W\2[Fa 5`؃T]p-HgfU.,9$1piU\<;xTړ܋rT^/]u[竀YA⤞y]5PK ͚z؉ Yr`gpf [;NOJy߶eʸq0祋[ע*}>O^ґU tR\/J^ ;kѭRA:`|TzO;yIG׺;@֋/3\霱s7p_~6zν)>e otUeFhR1_sSVNធ1߷ >IK@'}*&h01Ԟ6eɓtmRv/R'EIdZ|;3~ !Wkx = `1\*eJCZAiPFes0,D`2!p [>N!-l e]W(^tG 5pু2AH| X;D\]C`Y[EiKbh<AC?`_SAN>cD&jf xW=-&acjX`(boy\1*/C6b&QH ;mQ-ccq>+iн|;B=M.wH(m[Tn4\Ah^SZzH󥌯b؏Lf~%l"mᰧa!Qt\'FM)UPwO5"̷]qr'_OY*gcQש 7(n ʯcs)Mdwϱ N4?-=<_G1; \Sh`<7r_M嘞 ƿ՟_>XۣMC{w?r3a$i] W _{C>㟇25:$^l /ߛ*j S+oi)5(D:g 5(M/_:l xDY~qA|}em5 |i492Ƣ 4eFJPXM8mU~!P!}RO0ks6 ypOi2pXm: f `|a=/v&fr90'eKҀg99wɘQkFtYiqؙ0t6 kFfOK в;/+%ޯP޾"vnag3@? wgF|c2):=DqWɫ~ݳ}p EjYl߄L.|Բ{AgxfwIDޮϨ4u,: I_MZ aMl}p.}Ж:aDF"2˒@T *jHeQ]}i t03O.Ь6BEFfsݧ &_c@ϏnVA*_{,#m |Pڀ@ij.M4Hl!@SϣXˆ(XJ{H5pS0u7wՏi_uӆaMp,ZagiX=AIiWss^ DdqlN>-̬!찁tw Ѥ#>uFi@ոB9B< DT[V*oek[Sfvp+@;RjzlB e/ 60g hy| | j_\%Q1#1 +VU|6nMp{Ĥ,+YoQMIӒ-`4@ʹRIhPvuV3z>͔) H2TB(C QT*  Ei_恆_(T2U29#eJw?s'{]Y;}^{wzֻXisƮhrU!NSo9Y>:**=nql;ctshJx^~sBʇ~HǞSl@+?vA#s mSIv8Ʈr W}pW隼³הYWsڛ>%>>1 K?EFUe^Q2B/xOBʠ_j׮]IWCI)W{W껋 =Z% T tk JZGo,݋Y5}Q-FkoU?pY1\$t\MP>".$h-U[y&3\ɀA,"(g4נznuOtj"GxPA>s(g2[f 97\۱}we{P}}C\^Q{k~9%p͏]1} ~'\|OQَ(`9?Aub^=Az$_sDlLyBp>oe?$p)l@R P w0jץE:n;ae3\c ~}eSVSf$UBfMRD;v&=IO~R ]cSߋdJ#?+֌rDŪJhoYAFӧs 69=~ J< KLEž@m/~K|xvד/u Wz}RP}>|?Ϲ^ a9>UHWmj6FbNdjz E= /5LG%L|mGr^ߑ'p_4o_]L*Ծ%v"pH}NdunlJ#p =]"GSyUXuj yrju^ش "jCw3AYT a5.KϽ gH)~G 랡UW":a\CrHY~gq qȤ"!I FS?βޓv"O ex0 Xjs7⏔ MH #;$d:$fHOG TūMtFBB4GHGՕ{"]Y]3'!ԃi-E۾ оQz- d?)mP1esxfjsLD WޝjrDILIAÊe+N?.?{HXAYQkc \yoY~AbߪYzoy>P?ҵo^ffb(ZA\uh^DvՄSC*d>q B15h]#=O=o /Lg6CwCTiK~'z <{uwl__U'uJ\þ)y]9;~&}r^rWQSܗޫujUe<{z)Ee?s>mvڣ81}36 p<л@PAG?k0uWuHLGBB@>uŢhѢS߰KuPgeg:79b gxGKw#$H⦨kQ5r.8jzq t(f3I3Zjۖym{]~Qn滐eԞ/|Έ>%Iϯ86cہrX m3Kn;oT39ئsg|sIXÞ~CI<}e:X=ȳ"gYK:}otc&'d{uwXR?J'K!c~:L 9O'~Xn!r "UGdrO_ J 'dJI['eY5ߵ'D~X AGHebL# rn;$&1"+jQXdr@j`X8%ރX-.y{LR92(VQ/r?a *:bGT\;&|>-!!l~D+צ>&~G}͹!i@Eƾ#yp !0yԍzє!yq Adv\:2yx}w1ϖvmiwS8SM:r3̑Yu>=v-"!~Tf =6FG^8^R:D9$B *;Υ^>fKgʼn*W]ߛsX¼t֋Du_Z37yP3yc:mW'suKZ2^v:qΌG!ХCuY/D]˷]/뚧xgkk,3LO/ui7?5K%u:lEzfe6)BS*}aC{}SjЀpQ4شRp_NnϿ4PJڙ)B ;G9u?/ {ׇ?;ls5NL.}8+% ){[A qvHfÏa/yK' ::*-J2e_  O& i0pER0@ײL6Q?fI'$/8DPdM RR.5C/{5c$*[ƲPWT ^1Y ]6y;Q=bȽNmpv wNl g<O@m (ǭS)t|wk?t嵞 5nʱ 7HwG;?ZXQb:ѓ· wjX2r;0 qס=O[ȰȯwCB/]q}g_/g: 3EoIJmEDl!7 o"|Zi-6!ҬGVm(=#voJH$ЀbbrÓvɌW]͊RZX&HE@.8bOq,!$]"!rx&0|Nq  )Kĥ|o0>IJ'>)ס ! 
J|Z"M9ץͩKj \r(''l{xr b+3xI8v({QXkȽҋ DȁB gkWOHO|;o=6߃wOo11Qwl9' e/mېsqNڄ/4}O4nJ8֧/0Ϙ6xo!9m3 l!p䞙ԯ  Z^8$ABt> A(/t /<>7+//?Yq/03YtNC $t ԃn:~e@!`IOLCڼ8Ht8 8 )|2&Aִ 2ɤ( m A۾ĤēƐ>1!{f&`cԎφgkq|^?'uOx 4sTf?l߂ y yVE",ȓ]Lå -)*bwgS@"ݜ{ັ(mAfQ|{rYnhHV[>:秝~s+LC\D[b/p4pF=ah~C/إHà餘924*/xp ^ Ko%/:٧Ild&Ծced OK@`7I_'T-#,/1ߡ)|ss ^OQ]RoONAxe3HL|Q' x6j`Y{:0ڔA9!pd$W;2y^V![O껿Fg [ YxL.%9H~BGq'O01~ƛXA$uy;J¢L7ъW]NqD?#:&BQ r{o2 xl /|&OV&q== [Ej\C΃qEx2E`+6*H\R)-j[O_@[Hx~G1d |RCAY~MCV8R!}D7{KQ bp6`K.C )L3 4cR.86M`LB}ft ry3\+=9D G: ;_CBT:ArM{ T\c.9S#?973V[˘ĭ-RFfSK=bY2eOsMڒڐƴd/v.= 3 \S?Dp>%g@0>5׊{*ˮP>b%uK 3ln0M8(HcI.~9g, n q !-Q2d "8=|d(X KԒϖ˸L~WBL!#9tQkjWw6#p5.WxdȐ`ȑ)k\+F FZ1``0k\#pX1`+F5 `\#pb`#p #p `0k0\+V5kŊ#p `0k0k#p,Ə,^8xm:ur,`#p Fk0 ;ŋ0XifV 7ߴ/A^:{0v_ nҥoh9}bŊA:uݢ#0׊#p5 $p+ԬYӊ^ʔ)˗eͲ#C@N[/#s>.,SI!p}ѠCڵk[Jiܸq/A].voν+ۢE׺w~"t7|sРA@;.Jܶű槙am +V_-\ .\`0z(%OaB޼yoZ)ʕ`JvJ*sWTh8uؐp'?me_6f8na1X` ][H_tqº;=y[gS_ N- ᇟ?ZrQG(_wi"E}W [#HxajF^}՞_~y]w]PZ5]MW%pmqӖ#'̲b%ʗ\:=sO? ``0 9I2ARX;3֮U[=(9/[wl+Zj׮ʗ_># 6 F `s;HC۶mTΊ(+W,';,le ir6lt[`Iv?KuyKN8!MND,PЩ$y䑤^}+Vv9*rJiI)?`ʚ5km߾ ds?tmC >TPysW(WW)RT$~W`_VϠ~퇽mܸ܆Xɶ<}=iN %ׯ_0j( 6 F `s1/8 f fΜiJiӦy0c k,*'N +nuF,Ք)S-[kHyGPK=sKUK[/\%l׮QǏ,Y̛;/={~|f̘3@ux\o(E *}1DŽtP [б{|pС\a{|뭷ӧ;ۛ3g~9>JΔ~hSLqC 6 F `sl 0aS >܊l/ 02dGN4l0k #F \:T ۷owƍ(B .u%v"ϏJr-fΝAy'֭[TX1ҥKW^)-!.>`f[(^ڼyۓ=^~]wV,Y2@N$(}qJa-}BWJ;N:e]nSNwY1c?\`0 YÂŋ;#\+V|.Akѣ]0&+Xy43mڴI7Eպ{_FR?)SfkRmb%y TjO}ʑX+W  0bWb bU52e_o/"9r{Ͷ/m۶ &OH}yW< lVZ ?TR./nz1 $?zhsu7n\E]#xYqM tk(9vXXIG`0`0 `0?&MrZL(7H-Z8_DJh#)wr֭k+BOZ+W.Cp _{Mϟ_ K4 ς?ةuQ͐bY Ѓw}=_Uv.v|rG)Rw_.Dz?x7Qd jߚ5kM7tS8R8't2 7ȩ 6K.g0 Y#p `0 !]nf*Ӿ}oFC"*rWF#ԻHQs3)\"s=RyK!X֡C:U6m6@|Ay꜅P*})t1@z5;6 C0`0 `0֬Y,ђCXfP(eQ9j],y_Ν;7wl r)-y߉**0.-]?Y @|0Pӑz(%! ɫ`{%O2pᐦ;vH wի] TRfz]w};]^lBW /p6[ Hi[s&*;3ܭ[7G:T suuT35 `0 l_|r l׮]DP(tc7w\s5WK ЦT-Ydx+B qNNZR b7tT>.eԩnY… b0y@6#؅gM qmFegu);_p]2\k{:>\:#tk׮a0i?@N.]-h?{ဆ`0 C@@̩b_ FA%2@_|oVTil*N%0i"EҥKoK!Cj@AkF;$P͘1ÑŤ!I3vG'?&ȥL\5jpEvqqNzН AK8B7Uxo3ߥ"|}uŋӦij4$ˤ`0  F `0 >=믿\~ډ'$ʩY~~wc~{ݔ62kWg[ɋ)PSVRe] =O2wԑGza<yСy];Xl=\ÇOrBb J*!w܁]DDu}ND/"_c9&]#w3&IYGf͚U%1'ۏ;vG؞l}npp \`0 `JҌ,/3f#uERAM Xv~g@h$BD))BU"i\ wE Vh+Wt (I@:maC4Ժ("7)QB2RbAS~mr);#mԹ`Ŋ5y"x' B6HG Y9kse[ekŰMJQ~]G*:@ i0 5 `0 }fdNgQ믧^(b!7F\lnM^dA\ TS.T(x+bH`pI! HBwРAaxH]Cرcs$\/n }6J[Sw(YtmT ^rjժ _N ?d[lND3ۥY`6ŋ \#p `0 !8y{.)CΆx$ 74n}O)7JX6GĄɄD7o^R][R~wԤ yׯlo߾n2s6d4ӦMsE=`0Uk0 `0 9Ə|GQ͞(>[r.+2sL}衇R4 uI "ږ)SfKlF: *$+gRZxAuA>}\N]>R/2dHr9L6#O.bk͛7@L`{(t=Z믿Kս PL&XJCq6U$ndɒp.W&,\x'xWxGx w a_`0 paٲeԱnC*ԹL!X.)$I@ȦQ@+%jIz:{0 oJ7 ^ԔʹMXE)n"w,y2xzSJ^K5'yv! 
>?.vǤ“O>R`o\\T͚5WP7{/W7=c2 D)(Gs9jt iϕ֭[, ,:ϕZב\SG J k6lpJJCDŋQIaRW6J,HJvm[hNݺu)Wی/N- u]7CsR{ܥ|.\wcǎPHAl`0F `0 !,o߾}#v` 2 $K H/߭[򠋔}vR@KmSԆTZ~uC\Hdr\ABTySɓ':GאmgB_~e% g$FPzݻw@ޕ;ܸA:1p 'gy:HWhѤ8@B&C>.w3 \z-b``0 `0R I?H5T,._-H˃xl %bkq):7@js9NK7{O)`.o Hll#GtK͛>r/x<9s8+ ߝwq.v v "(Ilp-d:_vQ蒻Y&kRuO]nf3׫sIIvRPglP߳h0 F Ƿ~ri`ʔ)NͳcS)9  6af+Vt`ߩS'k+^P|Bڴknmܠ[`y9CЊ0~@lҤO t]J]ԹSHJ*(P}_J|qnH7RJ5djY~4Ԫ{} {~9m۶ul3+ꫯ'v6$ΟbcStO=E%}=I 6iӹ!]f@͍_WdoQGVv?bF `C5?fGTnݺmb  K -Z1KCBLE$ Fq XBPRYHk+]#s;O YIDp)$!s{챔1槟~yM7]r.8餓ƒ:(j(O=԰z?Pb%s.Oiܸ 2go=;Ғ7ƒnY~={T9awL௨D#>HXO%rclpf|nΟ?#K6l8I}zulש!ϟﮣcYߑ}/ROJv;oU7\`0 |1(KYf /Y."A#ٳy[~}'{CaZ=UmBAlҊl-zȆJN 䌵Ǟ?xpWlJRժUuYDFPӆh)l 21x9g͚55 yYm[n:7&C hϗ~~{_;yAE b\H5cmM& 6S%"oR*zޏM: $~5)\s5Vݦ7nt95 !–U,ꪫ;6s5 08L''T6@uE1+:g%Rt8B%|Y9w\WÇe: 8,$Xenv~hs L#lA,i gԮ]۩MUVR pD,?.lL=b!UI%h7)%VdWѻ]'K/I&A+Zdw[˗/ϦZVkV(zwh[?7SmGzS (VhjG޼yS -T#B|\)Zhq0*?:*)07h~f]u!o\T(_opg~u[zcqOn?6}jlO!0}ѫ_߬])U#Z3ͭѹVmQzAVvqc1&O!1`0 ,ycYҦ"9k>LaK[DڎՀaI$K BQ2iKǎիW%y *QKT0fC*xt WԼ =`~gٗzGrО~z8iҤz0!߰ՄBg҄ |SeZ{W>|k Cv@)SUf%Zl`8 I6啁UNC'99 D֒zTGb?oԩRxrՕZם wXC`]mrTTKm N2s8^Ü'2H␹l`cٲeC-_~ե*җsoذc"T1ߥ .jcm!I-Bt~beolZ>?s pO;~XXɎBG ƉRċa&J޽jR ֈ P_AtL=JU-i rn);4P E:Ny5) uU-LP'&V釢yOP'Vḩltmj?'&Y$fS0m[ ~T߾}V. @.+Y_"?j|!tQ~^T sMwФ{Yl^0`MhK {W{Y^j1X`AA+n@:'k ADSUF ŀTy&q&d @XF-–XvBw]Pc%lO-c9X"\Y`>-5NH@/D$X~We" B KK95`q (Y[p Hh"G8J_o,P@TwAZ!6\>V쭂)%pAH[Y4iԔQ+&GX1am3W^yKU N %tkUi ;,MgM~7h켐sud<&V*G*C2A*بnV(0I`5޳f /+&GQ _!Xm5GCjH4a/&Ἃq JrMQJFS-8NP0tPwM##D>P'zC*uE 8B2?>ID6`0 SsݻW/l)!kqݰCmV) CPN"S׉Mydc5CI[O+r7ibH`wH\%?hE _N'2# &Mj^έ )cY7EWڄ#ȋfelҦ{-!ﺤ8!A&G ,D.*pI {ʍ +b&Ҽysk;|bU"$**(5X-RAE7'=D>Nj|mG+eswڐ*z*R[Æ'pg$?ʉ?C>*c+oJ8pKy ?Ps_4Jĭ*!k9dnmAj&ݝ*WByM;ʧt{"Sԏ RنyvI"JTІQ&a)x'C.|?;P1sT[T_(){~,6 `0dpxYtg>EjAmӒPN|&m;<''iGcPEՆ (*PkEAotDFd .G=:b]>k 8%*v̡+q _Tքh298'j%ǩ>jY RV&.#p 54ݷ?1"/s"9Ⱥu\Mx{v {?,c2!VݰrfVs+9S4FZnM3tpD1ys@ ) Vwp `, ؅,e)5d'N1I5RS,WMJHqꋶk,#?y-ns[+&4 ԏw#V&Xd-!h?oPH5d'5 !IutXFՐf8R@*#KExg "Vmu~RnP`DwH W;-mQJM{9H#gb:r--\,u)o$%Y&D!a+HE,S~[X+Skx0 ˨A.A}FrFoNGII.x?0ӖncIR6SwqA R&)J^/Jtr)R^yKdX*D_K*Ma PBJd" E@ɞ#&ى_L8u.qWDO3*¯1[87 &Y66\6[= z`bkOuݸqclٲlk+M [&6S:iw#p #p `&@R<2) B@>[mzfQ{'z˖-DBBe8"^+0 Y7ߨt8ρ(˕+ #J,'\SAʧ΃JF]k}Y4*: cTGk/g;`PmE}ʒ'\þ#p 9 #ps/PBCf{ %"+`JZA>E*=lx;\+_.+kYǤ|ʻ4THSV@B~o+$JG˿ GDQlfD; x|w׎(E*[dRX¥` kTo)_K5iR?6]FU4Y0F F7oIsْ+&$Wc/\pmu&MtQ~PXFݜkNk0 C6M'pORWl\#rwh@ϱڕiHNISAuuU"t˒WE"^ȝ7eRXlV6[ފ pQJs9)=cٷ-'M6UUpu x"f{Wm;*Z]/|%P@@hjڄmsDfdw_w>AFkiB{PDGX8-L=kί)SkRv KSm?Wccەh+uSAArMTP% ӄ#EϽ8{e&}W6= j&?rͥMb>,x7BuM؟|5@DhY  oAhB&P`sMM̙>_jUoK.AJ//v#K=h\Cv\`0 8>f#lyflUF@<L6 ̀<[3#(pyRRٮ:QJ{lh)N# 8Aa%' -k -gz6ZrTN7)""lFAtתU+(]rj2XZ6WPkW`!a HQ|"rBOXhmE(vflXF 5oe;It3Y+`lg|46.՚v|HR=12%RT{ &&}I՞7]~@yTIIIڀ4{́MԻgAPhsTV&"KAZ,@MTs[!t o&|?5i 6O?WC.p& I|rG)֬YRMc xi#4Wu*FfӍQ҃`0d#Ha@`A` X^8nݺy X}X' Uπ+ԫߎePaLy4.8K&9XK7FCN4W BQТޤ|Hi"nAIU].T2--`2 zB|Q..]c!>e77"!%KLy2SJPc?,|%~ :_^.\Rq+B2@a+@2g@IYwlDǠ:'=S`0d#P5!B]jVsqDQC4NvԋٱU IT cE*AUG 0/i HqBA]|gd&ΰT96>q'HZmQiof{8bcpd-d/gsȅ>HFrFrFR %携[`S4,Th[CStab\M,ek2Di*ZߑH)IuIyJ]W,9. K=AJ=!AA#^ HEPcCP*9B4Le7;@u KCa{įбH\~ߗ8mzG!dk E"wDL=.\W|SDu(gIj!'pekA|I~3?5d'5 !c!')SY&'%^8 o޼.8E=\[R+G; ,u!v!t&R,o# E 3^ʼnF}9 IӀTpѝ9ss F+)n|'Ntu/ԝxԅ +Puij7I=XK^iAPgӧOwj:C<[HwI VyE ze&/6ZY jQ09r]H~?m V,1"99 ! (ȹwҔArd9M>&vsY2Q’d)mg݌KyN| !gZީ)RxA쁿- }v?+rES6{Fb_;6&!)LaLcA,y Č߇ 8ۧPo9.ǽ%6EC COi3&ھ,9EV>@D+{RQMߌOp'\cI<3v`׼C;c6h/'ڼk`0`D`1PKma 9."בVvűA% iMp1"Rw"G52N8 ]q`cASDF[81e*| H@E@Z@C.+؉z;G$UrE6&~ QxDB(;]""#JUc>P`+>%̂6h/54֫W/e`56Wh;^>Dˆ! P|c "Z]9"3a'e s> \~@gAF_O?B.ATJ@𠊄UW4K;3^4Q;@C@v31s@MpHd v̌9}gfl|Ȩ X};1OMڎhm+5miADk&\۱W7jL/unkTY}Q%?M[BfB4FWOHHR߆3®h|79ONO_DMirs[3; 䝤,W>_ !9>ZݍiLcg!a"G.>EV~z11Ow;oLl`0@ǟ^QTGjT1 v!qÙ"/ d"^l! 
"px1A i"h_XyUTqAL"E #- G$c]TNcc ) >#Ձ8礽 gI'T!pع/Ols C@ݜ9DAH\Ᾱ>$O:@ŷAl{^KvZpp}Tژ`BNSr}2AkgNDꚚh_"p: HldA&HEE=N=J T9\ R@ Ҏ>=B .zuʕ;c4$ !':~,O} ؁` d)y%"3w'pCgcX9DALI;> vUTS<# б9;u J`;nS<_^޴΅S!ȲVHd'+@ﴺl2$OciBsI H&#ؼTϬڷяD~#'n\]6CDeaO*X?>lx޼woY%htP%>')c ]lz&pC`~+?Q 8Ƙ_3~r_G5V9Ɨbi#&x i.XϵChD&P  щ .` cx cp#=OI?}xu1<;A_x8vCc}}PA?`m]ү`cπ؉n`0H-`+% D5Q4J2D]9|bs5C`|OBO+vH+$pKת '`xggUlw48Nå\XO(''';+H΅Q+ UA0PEB R>@j{ TD8 (Xlj:(cܿ¥Zw;MHr"#UOzRuOta q Z ޗ \zU4}ƴPn*p{*N i.؇"p|26`yڼ.sM'-dSfcD:6V=W0މa%Q OLA(y {"JH_y}X9x 9Ei@~0Q=C~SAe  N#D\ώSbK(~gvFQl:I6 y油kd'4y? ! S7 a>JnM\"tQGmgz y uz3j>dau܄ċ ?ȇh~좻ƄaGW( |i1U֐{ivd;D? {lg=ӧG'{D?h,͔ #!!ߓJAA0C({,ED\_Է/RV J3|{N%aUQ{*CcG}H{E|&w&l "HncU}j,&<9&$D>^êӨ̼+(M_8O<X+Z4IRֳߒ]O@/kJjg._NvjDfR?3D׿G- G[tt&b00:;c4}wv#cï:2SLґ@z&rgl{Zgs(!s}57pj]W5t3asu.WiV[@7$2/yƽ}b!) .7q& {B4_D>>[KHQc\wFk0 u̼KrTsN^4e>WT"pHNXu*!;uhY`f qX* Qz CE~9el[ -#*)kt_-Q\]̀bNc17*Je\Wf`8ګ nj9L/hHQ a<m[6I(yVڡY J/BN8^#Eݟ;I"ϦC#gHYX JAϿ0}NJX &{)@nt#[U&}Hn٠/XGbcI Fg^@A4H'ewk@2jOkB=@6Я?2тK}++jaH&y΄h] 㼞 ҒcN8o qe"#\sD~]?DNs)Crlfb bXN5(U04I례oܣT &gWY+Gp&L⋈Pn? 2y{`TPCe 6Se6Rڨ'P.8,mb+ ϗD&T5e@۔l A[$Vх4 z65x'x<;Q[e>%Y.ٷ/w`J@n3W嚺*=cAjl {ЋId<(Ypz/-zڛcs2*0۠kze^".}(;Tgq’dOeIFOz `6S؃H*SD<(۞5MX%[ KcmSPnT (RQ`L(fOf6F[`\%ěM.AO#B1}JAUƄ" =~g|B*B[JVzN}!iHLd2UmH"3]-7)9I2)H">oN_Fv;+љ#܄uH3C(z@xQC >8~e;X;+z?!=uS@f}K,?ޚ 1?ױ~'VAZ23fq# L{_꧓ T!9%?+]~*oKLE;קwnD~8IP ?$6<ٛ6=XU6@I[JG'XRHi~yvߣ#=>c WJ?=ճX mSLoֳgv|>H.lLHF<+>8B 1|fj9E N(F `n͵?96uq9"h [D!r\!N8(nD"&9TIS哫''33nޙV|䗣yqHbKa q _*-pcRC6ڨ/EiRܺ:چzXY"Zs %ug%;jFu q'ҭ,.ߕ#ޝ/qN|JhOsmLHzgAaSİO*!p!e4/ fc)\Y'X݃2!Ӝ*+M!pe FXf 1 0UZG*C-#Kݒ E)(VQΏrڧHtMUayT=-"~֫D$bBdufAG漧"*9%dV-|nwLX )?=Dp.4cDިg: xWf c{#DME)XLCH*k{neutĖ{'ƆbX)X>{]%^bWP+VDMl?6ޥm?^o{ CA[.jn./20u[lk@ @P"HB}|_um}lS6]噱g} NltveJ"POVV:&5*.1O=HeE~'"ƆEMv! \>L==\/ߣD-4&́|G]0m3]f"߁dO[LRʈiFQ(1xBQZY>r"1d0emztIu2:狐i[$`*A\ Tz}G5W.YO:&E'Cx"eH Hfw 6 ɔ-z.%EaO dEЭLc!(FBGߪg/dETsE|}e&pAؼҷc"I<%p!l "tRIϩmǔz:H) $$ϭ2gv*@S,7ilN¸rQ2xO]IH QgH[hƋ-YVL!\?d߁| t4I^+FeJf{V~vܬKcua `Ҫt äS wct}_Ĥȴد()2.>b/jBؙvwSH!ҩWbAUwֿx /K3&imtQ>S{_i7J3PP|yVQDJ>gJV4PSΫ=:_J@|!jEH XU(eS66.1`w؂_Jkj^+o74cs$[y.޿1elyAϘX]F?_;x_c7LϿη)L5 0 A@@Յ8`$5^n z@ "4MP+GO7e `t_ʌhJJY:VrV!LoXE\5tlǙZfdC%y#/)FgT\N)W  bPNѮdAy`kU# BB ! 
p]txj#1OR6ACdBq|tx7YIO!۔RJ<ݭ+lJ:6@|"p Uqq㻼5v:J93^9j*Wl v\5MV0yHc9_|Dd ̝ :K͢H\ߎ@nbϪl&Yd }Brn_LY5plΣQa*^Sl!ʽ_4Wq*9fmyWVXvgr"٬.\S*Mw1bllqM#Xj8ʄs տ0l!TnPW;6ΫL0V&Nv8׭1;%=64[h-l{MVT6=BP=*Ygd$ۻeUߏg}j|bo$G1*"p5sqrP$JȾHx?EKw \cE> \aa>\.99IB0%gdX̑e$DzSrRc7-55Ə!fX*e9h1Qw"v$҄%iB/s6%ҋ4r >bl#fw@x Lp[@q8ѐ+ʐŎSjM^c{ت 3X'؛Rjѿ&#l'},;<32A@ .78Of#w!qثjԨ?`H}NMB,ӧ>.|'fSWg2T\ +5St\DzZG.o)Q+R>C&KMbפk9dT@ )jL1&3>ߕ%p.ߌn#EU.il c&zs ґ9l\}jlYBLN1k0 0 pE@&/@ 2"W&A6WpBPOvtl%-q /HI"~A*ڀXX`cH,ٳf=-0 7 G]Y;4ı$ YSZY,g /KgML1Ku^aNNa|” G#p^BEpF!tbC6 ^;dT}A'pAS @nTzb `nUnK̈́Y ~'=ḧ́A+.,oqC2&2 SKhVU.nX!pBEw*$UlG({dr|S48$#PKԷ6%l*2`5l^{fK@A%CT+uYHy^!o(QC%6>\-&8Jn̯`7P& xFt6a7Ev}dV=8peACA*ϱZN[q?e \eC7l栠Ĕ%Sbg99WAbB4(`@DTv-kkȊ\@dE 2j G2;D nGA /~w+S-6.nS7tMl>6@?@FA"+ێIi<kMO)L:ARDr܂-)׎ :pwZ|YcdAzX`BxudMRF14}(Fnm2p#0ǵdCh{i*\B@ GII7(= 6=O]w\{aCed?O6  ]&5No&_gJlOo(~Wd/Kuy-g2~<{& YH^-KDٛ)odșcS(ȸe8c:^BGrVYl2-XK{Os9C7DȬO̠?N qΕߧYµSW  lnjS v" P!2̈?}N@LGV{p`_ܯ w!]X-]oN/ľڥ=%p]B>]u 3 Ѓ,ENFf7( *p8L5+uW&{s E0Lv WY;W$d2s>q4Ȕ#uHr>)KVV{ D',1KufN޻Mqy@KiTAXK6edCd()[UĕrhoP59-M-Zg<2/K^8݂ "ų7*jS}5Eأ jPGGFP>HN: %KId- '{\D'h૎i Ov3DŽ t}3ElG-煼#p^ \l=~je3_ TƏܴBHV3m[ΤEuuRftkkB)4[\[?y^_d]Lqay|}Lm2/y^;:$s9EMRY^O$pGU?课oV:e]6dB ~&?Z{3ʸ}*Î0Ƴj(HbErjB.>cBԗO=C_~\+5=}kiCҷdO=1(=Jg|FCi OWFIp@%W4k0 0z&pJ(|;|ElLmHx4{ %نe8]eA9WI)nV!'Eur|$HcwclEU8 Ɔk,s}muA%O*cQ9XuYeA[e `kāUu-W)PI@9%V65*&pumYZZ7&#bQξ, lR6xA{{QO 7<ekj3l..|+e,t7~\@U% > \E[@6V=mv=,!@xeyDV&|7#1(c{6Qee|oQ$p+", E|[Pǚ~a"p bCE9W6{TG Aטd2AL qנ&L)IkU^N4)oMdlJ'%96ޱ(G!7^ո݅{<%-OqLlyW$VL\taW 72pW\yulI9AlvH7~gp&2p)l#:lL&2@Uisr!\e(1t31X:uKܦ{|N4Hv,Tj@ (|jn`m)]k4.5ʱ3 et]R-60mu~&pMa@>ji AApVf(8²}9g{}2pB78pr~ ^ 8rzQWEf- rAK6| 9H^ed4LɌ+uBDk,+_,չ򪇸1~71}%'b#r|CH,jo =;q)-AM} >jVKo \6b>66 W9^_"p\j/;I gx3oPl+H(e>!md(x>[ UC.ZF,+j#ܥψHۦCr9 oA$pR`O7ʎslϹ=DW}g.d,.{뭟ʃs_NSmd*\H_t \lulI9X`tH>-VMY݌Y JMC-*cv8u H;Hln9Z%X*e1ɸ!dPMʆRd$􂰉*{N 󛨹R=#гl%&;&LUGj`Z+D*DgvVX=k=1J/ČKb5׿DzQn&9סaˀ=E&8K+#cTnԯgoK&|]C3M^7ݢE \aa4qt8zS6"ȶc9)% S ˱@,>Ȝp@ɨЄlK:;ä!!F/\9Mg`$Gk[MK^sYM9CGӆUdQJȹ p!tQPѽ.\}aCd^_9uD୿T.,Q5'r6Gp X%@M(y!g1BL<9?&87獅UK2!H0-\,&&E;OSF7%_: +x{2A(@=+T`hWKb'{F%ؽ]ϑ%Ԙ>K=NQ<2QDM;$1odArXב՗IOJ~C*5p!cCUHM"fs9!"T*\i_j/ LA\;bx"Nd `"!\җaKC mW2FG읦v_8M[iߺ2'da'-V ٓ()YF 5VC) ey$1߯3@d\2! ;CPf%-.Qcdaw!ИdC Wؤ{ AJ"z$Gj3b\V00IJ g]Ĭ(-.s=K`ܢc[_;t%!p32ck okUBā5'\~HGz.&aC¥E)ܘ$DU{A)g=-6?gkk ]P1ˆO_u \0 0JD ID@%2r 1#QgJ 24U2p YX&`6% pE&5%+2~!Uxmd*p RIx `<&0}jN\RC qXhv(H5(_=艀gnהeKBm_|8v6申d[$!ܧ\,&3@7&wt!XdԖ9Ey % H=>RL{%} lLsd݈sA7qbibC~-:>@ؔ@S>vF1*~ sd,G9^r _} tBMFFD6Ae,}^\jS+ێyKN[ !Dw5MvB^xaȥ a u^wsW` D8˭!e@Vп8VaK'UUڐ~ k}I5+6J6oHwMjmvQ3^v"V*0YT]rXD2eƨO~aNdH)͑ HMO"M\AB(Zb=1+cҜ6'C" [ 8%T)f/}/6}ػ:֍LjЭ"prI'J<1F&m$2'Ff롏kr;$-@s .Ei_&!y $"װk/q:u#esV:0f._ɽqSrd C0Ckb4xUJW8PQ{ypY,'&Ndc4?%φpA9ye꓌I&puڞkӲEVuk]c~_]B[mpc!ߊ0td\Ȗ'7k0 0LK q#g jE<=0*9q 8:W7v"%p! p $!Rb&Q`q2)ferLT9SL i1nܸQ Q)9-g[]i8dAro؜7wqrp+/7\2슅r8AZN5<꾆BpNuzZ)p^! 
,gxN$Aɮj9]k`'2Coyf Js@DGʞڂ@8\6"A(}A4)8@@Iu.d;)Qҗ֕Yk<Z{$ ԙ^] ,MP 1zWІ:ҡkuCcZMkD|GM9l.0n" ?3V:5ka&pk@ʑr?t:9kñב@VXtK 9s%$ga?Jp|)􊌠Vʈ َ8k@C#v, L 8K’sԞ]- {h &Y9 LΖs{evdDfӓL5,"&H?y%!t݇Ґ2 !M`ҁAKcBD9d q[rvZHdUԏM5˺1l\2F9C,~Z (&p9/ϟga@VeG 5?w?(3/]N*XB[weB,%IV5Ʀz&lJߤe%LҷxQ؛gaȘ$:ɤƱ*&xN 2KVDX1-  U,ﮌ&D!S`\~Z2Nm,ǀ8*_ 46:ʆL ` 4uu]+2YΒUk4fI& SD/A,~8'QnCYG#"t,X,7?c!ٟL,=2HVf8ٌEƧlIhΌUĺ=e_GKW@!;ſ/w(.4S&m %_ۥ>zulv_s,QDk pupMwPuaa$O rDw_Ax wE|) CRG 9u;ONrˉL"vN8$ (v%kGl pK)㵊]$'`Š{T5+ gZDĿuu2.;(ZdĤ _ g V$n )Xjr*R˲TMW@c1[ _uy Hm]%GA+HFz,%"uq6먶|B}cHJoR/A"5zcd 8;} 锭Ҝ.A4˵)Nѡ +#h2 L[LЏ* g92_[QusS7&c㧅KZE.Y{BdAJA>FY}>~ɥbwx)pBҳtw;[8B@*C/֐Q S>&WQ>E)@z=]χ,9Z:L0r xȠcU 劘xoq-fd{2*ÂB26~P12"6IRy;.hB>*MѴJ=8 L0N \tkdr1EcƲEc_uͯ::>jS{Dž?˓0Ps%c:^gCaet9 +]lN\24Xɱ(DX碯@MkFrdF2_![enn .jҹS~g.;fu!i&Wg ѶKB䛠_Ɇ%f];>}QC:ǟ-$DUB&+K C2E෪I9C`p:~gΜ.u+L2Cȕk(/C@ ;yEs,/'d`B\8.LXNAtlB]ȋQ]"[l(:z9.9Ic܂t LB`G T'җb@r2ynLM:;Vx6d>dDg32kKQ8I䶴5KFgy_DG K~d+>0VRv@1YX ʓhc g򋱆Ph?,:e7db?1B=ق,$Zu O%S E2|a"B;͘A 9lp=wTuQGN6 M21>_JM*L0A3בzid5:|U; Q=]گhO$>+G;N} 9*_t Y2>Idb=6/wj6[x nm׫U"  \0 0[,o/d*gm9Dj S:Xp /iG-ˢp!!]@fѩlz@+xRD5r`RKrcy bLgPcںrRX& \onQEϨDV>;Ug(}%Cݕ.;# Cv'PI~QD$sI@79=G FfNjӭ/xϞGYR|l`!DcN&5ʊ9o;kZ eYuԉ\ \e (x UAٛ{5< S`륧ۊg lׄ8#6-@ P+#l%]N3I΁<#-^V6ěFI~  S7EP]YP5A\׿<_ڇeE: ps \BL-6L7S)YlTY ä A5>><[T}{CB]y, u=٣mc*-udEa~ l΅ɛy=̈́H'jȒaVWCȔ&ң\LVt6䌲L8Rd{sX~+TD؏{12*D,$0}=ccENҹɷ@HD@YEgzFx2Xv|ˋTK=so䣟?"_`$5? {2z_ߟC)2;&pMa r@vV,[fU%#l= R sYGB{~Efk8PG,cVe{pN>5nc_# H}o [=HFNEVPpFSݚL^>A,&7oԒ@AKIއs[2dIA&Plt TȺ k%8cd,>(Q9GW:Px_2h3EwH`eub)@ؼL=EеDLw! ~!dɑ{e?AC S>Z:A5/deJ.ơ|IFα 'P $ ύD6})9(@{a;j}=L0{ET^' ?cƌI_::Kv*pM2H슎m+>c"0gC?E46#f&?1Ao: YҟyA5JMb}"wQc9MUδ>:U|E<ݐ2~|Wvi+CІ;75/ σ3 5B@UR\;8ҙ3JhĦD KZ Qs  m= [E&|?l.~,h(=ð%\zD8^U!5dB9l^C!T6(R =& .[XQjGLDZcǦ-!չH@2R2A!}q3&E8}`|F!xgL"Ę̳'` >3>B?8mH35ka&p+v'c:wB=+YJ`lY"p\p( R )Ci!_ #@!8d =$AN2888֐'GG2n2TЭV4K`J9qq&伐 ׂs [yqI# sl]rssNtIt[oCK <鋵!zjobI</c q|5VgR206Q:9{86gwMڅ d†{Gt]]Ch3bx )T^# !;$n< e׌N@_W1㗓a^TtP \0 0IaƉŁYYh,3 "`3H4;p&=~Hrχ`Β5"s~3SN8]'ڐ8K 'NLk h$CLWS x%!UL ;V'@GBOhu6 kC  /FݠnIL,zXB" [)O6C,ߢcEv2]Lrsd.`<1WcC7eŸzmf΀QɈc]F/\ `B cSe,)['>m yD|uR6oGI}ں&IR}S aelG 9+6vͫ5 5O0~aa,X܅u8ԏӒaW[2!Ʋy\QŒ5m,B<Ѕ< \~NMUTlzŠ5m5M5 0 cA :YQ'L˗wцf{i5-k)2s]ncF \0kdZG-m:OB5Vkf&M5k&pDB%Z%0Xf&p 0 cх :Č3ҒKxQs:5~Cf9 R`~-0k&p" Z:O٬Ͳey](FFB{&pRѐ(KYʖr*}ܝv)ز \0 XtaA K>u% "@ⲑ>$.#L \`#y6!RP^ubgLS٠<Qj5 pI`:#; ww' }gCan> Qw)ȭq.;nivm0k&p@s@rVJZ+&ΐM!VuZ-^%qe[x&E +L \!СCOfcƌ%;w9M/z \0 Xta( L \6 ~պm۶Wx'+'T]>yٷЪ뱳[y\;-gΜ\~xaшa( L \6vL%T6;.|S#RɘRDnk]Xa(5L &p!lmXpf y>)  \:>ڷ}똣oȰ믿뽑vڴi)aF# \dD \0k}Wv=L۾tPvYgo?vA;p}ɆP߷ \0k4$!fS{.'/XBI\.曧kn"Y[ѧrJ6eʔlҤI-EE&Wdp~6vlܸq~xaш wUWz葝yK :zKʩuY]DdS W$\`md)+lP >m2裏6qYz0x^_n?o ۖK^} Kg].ۤIqn#K]K]#GLی.?裴en2MM['P:{yL81˝0aB? 
pEؾ2t4L<93g#[0 XDܝV[m)cboac 6kV p*gcw!k%S%)eFl*efo{0+LNupVM >llb$en{-d_~y*S#vΜ9n(0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0j:ӧOvM7e]vYֳg϶X,bX,dBlz7g/b{a{͎8∬M6 ':tk߾bX,bX,%bSn-{g|0{衇aڵktA>BbX,bX,R ]{)?̾KaƢ7|3qc9&_RΝo{G}bX,bX,ŨviK[^wu% 0 c5GuԽ/x>Y,bX,RrYr%w\<0;Ss_~y0 co#k|?O4o6۬bX,bX,M6٤oV;͈] 0 cop;gna[,bX,R2!6mѢEv駧;^|EaƢW^y%{ᇳz+dva:[Z,bX,R2!6s۷ovUWe}q oa,ZOw}77n\#d]vvml]vX,bX,dBlڶm۴yóoAaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa ;z?z/zi4uYi}nKI3zᶰD9O=,%8㌤nK{7ߜY>//{7+Z{\s5ٿl̙ـPab̘1ѣ|}gó?ϓ2~>~_HY|wx}wg<@g}-G}4{o1ݺu˺wn4t5СCv 'nK vsIl-l_ǎ.]tbiXaF'-_N<좋.J1=3)e}QlH>}'Ȟ}Yކa1?'$'^yl=H/R*oma)8|Ǻ-,矟?IY#"E=XClXmĉ 0 aJVJu]ٶn=ٰaq9eʔD>(d~Ffy^UB[A@s>8.wO?MSҀcp3fH4ut =0>dȐ$|8E>X!{erڵc>rY, -9A;t-c;tܹKUY ;cX, +:v,bsEIKhrK/d ks=ՔM+ B\kq) |aFm1a„DzyAvSFTpfJ <*(Dɓ1 S "rCMd18|Rd/{i 8F]w]"vuMSspݔHPz%{B>U&doqI&Zk;oӦ͜VZ-tnwί[ئ^-VC/M뭷^~}~ii`#˯%?`W[=Zvt>l֬Y~;Czcc޽TʺWYe]wLgmo0 è- _PtHh!kJdν_Ift^27x+2Ҳ[obwA/6m5?8cAd's?Ck믟V9ZB?D r3}$?~KX, =r|<(ZN;.o=xOHٷg"8.l0 0O=TAh^vFdI y?|~W|~*Ap ;[z 2w_ ;WDJ>3tʘmH(7CN쨈]K Bkte-d9;}-Ny6`kd)˵>R{ ӗ]-pէʋ)3Y=R~` e[wҧ$Rbi(]c[mSMpSK 3+nDlɏ62K :K7 fr]ڮguVC%۶_˖-#裏αPeHI%l(u{RK-qSaQG\YO>dozwπPW Ȣz<%D7"ZUV얚M^Dϖq{2ygI';NyewWB߷A|+aW:s]CH9S^ek4en܅cMD%26/r/|%ÿ2r ٥5 +-rwCek6˩&;Q`l,hM7ݔ62]|D2o믿~{MXXz32p^nR8o0kau _<"t7T6c"DcP_蝤LUx,l]!zEЮCҍ(r[F /4<^/N$w9:b7m6ٳ~FRZ~_~4HYPqW:f~kK.~vͥ"q3&륔;ܫ \b0k5ʽe{nvGd*n%Pf82rn%DvmݖV5 0 ڀds/ 6dFXd* G#,ђqP@2X"< O#)rv~;B/!J'Hf%o?ZŹU"UuXLYtKټlߵWQ*C}/w*85uT&p  \bר :S.(I^{Wf*}ڵk]uU9b.0kaF \6!6(AbBf Y:}-D~ 7!I ֥ΫLYy9$/A?: W׫Z"Ew,)W"U3ҿQn2lE^yJ-PV!p+ xI ܱr3fFw uoqdo&mc_J*Qq2 \b0k5*QD*jEKmϞ=sM4L={ߨ[6ydaaT2eJZP UNt9h#EqY׮] .LHٖ8)"~/(Bԇʦ=F"uwg}"BW^9wyg'}M"fܹ3ٱQ"_7):tv:k61ۍz"pZD;UDT_2V--YaҤI~Wzs^ehv[5K. \b0k5*1"`mzW^yenκwHgߥ?|dVZ3+x㍔ZHq \0 0Sf&L 5S(uS}]f%h:SSաC.z>5վzNF-i1WоU|9TX dՋgʜMY"YRĬ4䮲g;9ﺞz]\ doe"S)ӟt'sMrd2}(ry[W\qoD2#0kXL&p-FE`%#+Vqg+ۖ6LެYJt9ozkKq \0 èKP{ ) f_!jB̊LM9}]6=SFM5P"?!UeDR _)fK2bcS1ާ}y\dT,Y"p\?\|oD63(+q!!zyǐ{k0k5LZ,&pLy뮻򗿬rg͛76|L}t7o>;3slru&p 0 è(Llܸq ҔUHRғe>ȅH3\Eޭ,DP6ʄ=E/,DBl"gɴ嘔gf2289A'HcHU!g!l-N߅|NGn$/9-y!|ab1kXLZquӦM'^vev҅)JƗ Kݻ \0 01c$%!iqJ!CYg9gN| AϦ [cH8)d.++i|"`sABB |1$ rY%i˒%~OiӦ(#8_B.ǂX1K[|rlJ>[|n6-?h} } Lp3Sd7TzFqi;Ucu1)@M[a Gx5< dYʉ}NoS` C-/1.ƓDE}p exXV49T6NpƟȥ\ϰ&p-&pF`ʹ~K߆*Kal5uYY.{d[kbc"򀿄]{+!çZRC{ddԊr+G( .[kri_-[ce 3&z5|kLNO$QD6V.FbϢYuM{ Co=ϪCl~$Zm;g"oiC쯁mks|c) M&p 0 è@ZE6. NBdpC$Cr5"GFnd/pp>QXxc9'r>ù&1g&CrDȪ=2.DD&GY!H|qngG 8\/׃~yэ"ؠcB@ \K`&pڕvʻbXg祗^t=^9ִ+ϵ@iKA  9'#?qǕVOlP~u59GEPO[Mp^\,KMc;7(>hƌȾON;-ن2=HƧK/) *EYDӧ rDI^[` h(cE;cǰ}رZaMnR90~?yXz~s{dZ/~ҰL^mWչ*lsc#p5D.<A?L|5+kl ǢcӘHֱRUuW+>v1yL8V&|_CԖcĦWvLD !ӪUt<;#XmsڪZ㏟Gs YH|_;0 讫9PPQQ_*]3aE%Hi! 
9* "9眓Hh-fa8{[U=ܪ{j[=i[N6Ϟv/ZlOusڭ?6uث2N_>{=EX5q!OVO}$E~7kdDqgL_Hn ps0&+t>O·'>p06jVSgs&d2LY pb\CNj d8v} yBt {S^N\6m&xEN,TkGj%ˈ ӂsNd1p\&) 6 <{N{B4Fۨ6LS=ޤ$G'9D"o{m۶/8g`'E{y$eMf‮q@ `^% $MPm=uhۻ('On>R5vE7WVy׺Cc+SGi?v>iW#+5wuTqzimdM}[kիj֭jhwݗ{{rS2g\*ɶ!-~ pk-˺!߁[|cX1&$` %WNlo5Kr,+q'Gt}ls('G^;@H/7k2L&ɔhKLr5~@2@@w F$3XG9(3&:1Q@-[,l}lžs0 4Q`YsvC}T]v+/aMD4JmN׀A?1!Ԧ[2h]uLF,!Aл(S6&2ߩ|_І:GO>b P.`є}ao߸&%r h[vY^կH.pv<¿PTPSxE6Ip6p-k~=p%ʖ}z_tFj2W׵g U\ h6EFE +WrZLs #q>Ա?v $Da7>P"sEۚxtz'SmV獤WAHl5>5z=FP7~:7E4켜{yFl|P|rǥ>|O͠GQHsM~2ÇRhϺ$',ty"\ᤙ#Fv]] quVkׯvJpK-JX }j:2 -un`$Z6qp[?u=<&CZ-?J{?iz>]zvM[suTv5Tڣ_;xD?/i0qӖyԷ;ћ*cƑ5ᦋE]Mi4ukѢo?~8G8ql2 7;.fc2QWJ(/r0|_5 !%6lI ]X߁'1^s{W"J74k2L&n:.R^5tՄtq*g׹yh"pZ)`QG33@EY}~&گsä[:-< ^5^ <,mMԷHV oPcb-q"M5Y^鼨:'6QU A|!-LJ#@O1Wn@ e>[/pnc8Md ,賫Q~*yD)ۅo?nH$mRy Ǵ,A;ԗM4VwEpDw(mK휯'Z*FA#!u=BQfH4 |iW\^'Im='m9fF ]ʕ]=nVOɰ3Ngt۩CtjruʗدMk'* p&3w+M9GdtԹ,O N6Y $ W>\GpI\7.48Oy* .@Vb|O&%:;|)>Yxm=SI@:767q?]}V&;Dm?|KT^  rWvҬ>A鬅c!"_ v yt6lǹnynBoPA*l")ukӐv 䠥O{uML|r7npFĴw2ѯOضh9pK9? ѯ13sZs?˗ߟ~[nvEm 6k2L&n e  3cr09gAnJqCگQp |>3@ `&kBJ=;-c00I4IA~L!p c1x+ېv3&2QD m (2#AKCT ^ u4\r~ _No!g 5jPb}0%g&6!եQDnMЯKg%r:.s#ECnZڽxOdG 2 mC٤x,h3Z/9giG+o}|[E-uG?#mpWT @Zz!|SIlie|U}_@TcPۑ? (6pm?}!є g~?㓉<ρ @Z 0B6ȢNS-nLշ>~g B`h"ҵn|UVa&715vK٭T63A!27a׾}Dr|0uȻernGkPٿ_qHڎ 4FD>m9~ZqKF\@x<+U3Ya"zG#|g?Q}s Q0Eߖ-qڵ)"7k Har7_.v 4/7&q pi[لzbŊe1Wje|~oq:_pGy<=|\P7oP[zP/9iKuΡf͚!_i?W?a'Qxzcw!) D(\E~;K_FG&!DͶj wG6o/|) \uF@Ճ"p7Ze6\|TrW>sqVq@߯Vs3(#.7GbSnYZn?x, w3~ X,!EXzL yx˸-es)[l믿[eoF"OZ8S}xi^}'366k2L&ɔ. O4*,hzѼq *j0G*(&sL5!ԇ B O@=,v ʦ]@n`9=>,&*#v(DVQy:;/@$@[8 F_0<x)9(0Qto~&&%L.h/LjXOynGK}XU4RuHOpH%/vnnpl >vX,\X?@,BM3 ;؏}J k$"9_ŧD#"@> RJ?R2H2ek&|x[vm߷<BTwpCepb[lѪ>-0 ;wD@ynl[nc{(]n7Xcb!%|lꗑ7܈^ћ;SO=ˆԏ؞q_*%? ƸB 4; *Di)Əs}(6UV4k삻˩~ɰ܂ V} NpM&d22jVOy hMg/2gq ~5~i AQ<Vm{ 0w Jh JpU @chp;wMnٲ2&IuXA}ɷ˱"Utɷk,cbpCzT84oaB5߬U>"npONOO4@RYhR3 ǡlK"iBD.!r r8EVHsSNii;1j#Ft3|K p{ _oHMD=2r3)FO+d4ŷE\mʂc8/Y}v9P~mE|"]3]OŎ3BԋkSw`%K#DUWhQ"s^{Y&>>_ L&d2e#@Xp hW :O%7)B{Tǻj&B],?@.E8^ nIsXDE73'%O i}}[CF/ oؔ4 { ]ņ} O׳74Dݺu}@GvYd2L&G,ۣ\kViws =(]4Q+fAC4.CΚh]I{.,'pi'@:wXF[v4Wu=/@w}{giY~ #&Iiwo|^X)㳖jnMOsg'ɜpCj Ӎ*Wvs9btGfr"pCuMh"mS >0MT\5z]iժr G<̣\X ؠ CmK/}=~DDS~Y#l4S6Ajמm9!B>m Q;~nQ)g󾋉].˵UVnEKۚV?ebf _X&|IK.#7Zn$Q;~nr[Ǝ}+9;qd{|4U}䷮Ա.zPX>MZ}>S|f5*4nj>n{7}v1ѱԗ75KC(7I\GpumׄB(3@y)҈dx n5{i[0i$ Q~Dhr\|>7mrc-յX (IYuhn|B 8č?X)|^~%Jm p6.Ѿ VL M5͘ F_tu wI) 9N7z;8/t~gp9ȍ|n8S]n!G+J]}ed nramWڄj/*po* J]s!Ŗ\d2LlpkA[oӠv OʂF /#m47j]8@ &I 0P~EG|qPA TG @ !2W-Q=.$.h踝-}mX[m_XmN! Ce4@GGutRV@{뭷< *U#ꛉ 9^zVѱD&,)XS, IAt&_~]Bێ1bz&AIh&IaҔ#n~/")br>bfM;$BNӺoQD [b;6*? &p 9,aqAA@+09sZ YnL[0+9Kk*(הmyD2nqeEvډ_MKgR(oxp Xn] x4lN#Gk@\k+y)uF^|Uni7;0V5Mnhۭ|#3iWOcGzocWݾ}{}sړPG"x\wy{mk'{_ۜ_|-Bݺp=R](P ok Qc pj|}9|sx͒pf"fQ7v]~V)iHKlz_xa#9q7grnv1> "?Uerg0YC\SGpwCZ݉c` G1 ~/g^3Z\ƍ`h{A)ss]ßu].!!ArzApM&d2 Մt-߇)!Q<!bVwq Qf:0xhHB1 :LU!Wp&#@J&QA5$)Ce] 귍ԅmȀ^A8`qL>4oF\'8MAۉo{"4t 1)n$ڦ)i}o\~heR-i?Eg{VQ4KCܔYςh_~o߹QÃ+K#hɃۼfC<moWOk+k}=k"pp9k3v. gXD@Wi/V.?c xŀr (߭T~W,$eHH>~sB3%UVm? [R2mU>x:6yn t]h HaQṔ[Uܭt>J0y8 lO}DRfO-ԭEfUI A.9~4}KI)ݘICJZMJ~x7egr2V?!ˢ%c#p7rNT(5m4J\8Vp.2. 0Ա.a7ȟرw,zV?qwēσyEv#:7 L&d2{ҐϖW  BUN{I=P +FWn;w>Hzܫ.GJA#r,"40Q@P}Z0i  :.]q8jss:vm]&. 
cЦ?5 k&N//@(p)R9_4]>?"":w&{6)A/ -7e 3CqGDQN KIh_$~p񣀗#FX}&{*,>\Ȥ9V`t(J>~] ڲ}K{hsh'װݴsig3#Gy ԭݘ±2:hHjZ,,@UY)}Wм<'2e<ͿHY78WkpH{WoH"sqٳA.~MV8u؂0ne< L~!rM7O'߼yƊpM&d2#e pjemh}|[a?@M `@XJdND.A$w jb+3 Eujފ0C_CfAY q@\^F\ } [}SQoD',"vd\&BD ӗ=!=D*L 4Kd }㇨>Ǐ;+ lvneGyba pF+o_YÇIԈ^0jO%P`0M8&XXHQR2[|B!$`0E8@L| _ |;wc˔)+;g#`_a!u,_Sy'O%,TGg hl2R|)9>7Oڶ]:u)H?F{ryO19q7U=$0ke{m/6a?Ofe+lc&-E"1ҵ3Ǹ OnɚhZ)Mj꿌H r'ci YXDp pcNJԏ(('Ԗ??4H㮻aTuyC?RbE} L&d2e$_Uuy|2drڀOz ^+9T_x$=Ђ0&\W~@p OSK.n,%6gBM֋3'ju{:k`DG;^>G5?@w29p$=L({LnT6@ F=g2HIێ xپ~=F0Ev,(sעnp~;"\WMH9Z7#.6,֮~]vg4?mR%Wzh&AuM%׮p Ei*&~>Q̓m(ܹ' %E `sB49F;K A Jދ_ 2a&ikT4|LI&> ß7EEܫt7ʴM-}5Gƀ[|\w7\M}U? m噝#s7l_B9G6mU-M)mFIGR ңhz#ܥ|:[nivqb}+ Q5kU߭a Y,|vr  %$Ari.U7¯a\~/~Mc*;/ EY۬./|#ߥWc%W_}5 /+e+N!-~n?Oz3w6k2L&ɔM[[盂%ZBвgM3 N0iP0PDl rܲD) $U)BCXHo{|O屨OJ5&L59#Y<uѢEZ0. <}>,PR7ipw7(jмm[z>g@EOȨԯkr: R}U ѕ,?U7j&IMz.?0WfEP7|o&Ko7i;%վ[Uu{a.1; u/}.@C|RٺT:v4PAP ?B˶YK}"~Cd=cqߧjӦx<{}޼fudhq4e6W>+PH xrA~:K`A$pI.7jW1^ Ai]xbYfD5Ɨv{|ITz>׋:b5KH:7Qo1)(J.@ E p1 tޥ:5qMVn35jZ˷eʩGSÿ1Q~sТmI>Iə}7,JM [ѼXiu6{G.vnX!VCn= N;-YK"cV s7{ H+#Bs+q=}h[= hHOiꩧ>x":pM&d22B*Pg̠83)jP[g¤xck 5Y>Cy pco$73ƀ -`A&D)0  7EQ|Ryefm\>qyp@@JOZ*7mz.=b )~:?H8@tGrCmc40 7RBjuS'?%6@7=8uG~S x<9M? Kwo/ w!"urs_PpӲlXc{?uHꇂ/(k7trbm-F Z׬[ѿ1 3%Og-KG pCZ:rs wof}tn:ܟtMz%$i&d2L pP3g9$z*.;`n t3fLd& I5q&h>FHrpo{ xTul2>>JAx#Wx"ahNU.a;ԇzFiڿAfh+"l5i5u030fQ˩#AݠA~7)m- BjK7Ij곁:ܴ!D|$1atnc3~@/;osVN编ך"$%nOxo'b⳹Y1[|Hr+WLt ]j" ?_z3pt|*g|#Dk?fXңh.sm/wc)y؅ݎg $̹ܰK&y R ~>Ηw 5mY!mAZ6xDoנquź~BߣiT 䙥^aEU:a .T8/[_NYe 7zs( B74'J*뮻4mڴ,>[oSe]VGkTl{|5L&d |`K4V4b*Pq/%,\X}hQD#Ll(\p?S'>@.iHJ;ߥa;n<~ kY3Ӽ?ۑz\RwDSI$xZR8R(?t$伿_> iBއrTRTb1'>0+,1i|+7cLXI{@GJ>Pe"ݙ'BZpAS m4B9 8ʹ.)¨=>r{ Ғ*"W4fy]u ȥ9ǻ9؊9bleQe ujܥ}=n#SǴmuK;b,K"o@t!FkΓ,JD,07A`BEӯ\Iڗ0^~Lk?h{q0 o#Vm[>) 3{2L?(;xʰA4 Mቺ~zR&+yq㸮)}Bg}v;i&d2L pC 2mI7 ʕ+AP;C)`-Yd yxl9 jdDROއ6ǠzPgj0'":aDhZ)7H)ˣ|@c2As\"՘3<~O7I7+39b{,֤%:/2Rb^Ѵh w#/2j'"fSkG| s(~MM7+'nW)+O|>}]_.ϒ"pk&ԙzG 6Nt|;> x$qCP`_i 7MH( $ v@I{W%nvhl؞u6;\O:$p` 7¸.%OX["Agr;wvd\ҍ>u}vZ@gqJy/q5>Ûo.'Oȋ/GETR{+5l0RdI&d2L pQt_ 5<ƠIqHQ+>*0pgP>V 7c1f; 40P0큐/3D1Ơ}D~D&jdHtU?)Nc8ʠ.ԟ c2*a-$؞Q]86CY|6#ԁ(!AYLҢpNl M3߆?g?p[Go 7(S WnWik6k2L&ɔt)hú-ZG"ɫIͱ Y!f':52kfהSE427 c#pO>2{nbnի(P 1Wq L&d2"J,)ZRGLUXٸ4P[ "t%\\330 7ؓ;w27x#3Ľ[?. l\d2L&B;'. pM&d2 f )b9!jE}X 5533kbQ["jI\)M4޽{'.Wh]'}SNpM&d2 r 52kfהS%Xy#-\K<ܘnIgr-m۶h1H5q4o2k2L&)"pM*fpM' 5DG(Q| =vʚdqdӼd2L,5e 52kfהէOHv";TdFZ,rǝs9^x!Ai=eʕ>}ѹѼd2L,5e 52kfהSN4n8'iO|W2e&6H"_~nyQ\Xn4o2k2L&)"pM*fpM' 54cIФIB EJ.@QDڞ9{l^aΜ9_{.!!J*E7o=MpM&d2e@$^ve>Ր!C̎.pͲ=_W ĵiSLpͲ#رfڀ쳑uv뮻~6}Wzڲeȩjd2L&Swʕkfv̍ok5b{E k}5d^z\,yGicAPGyN@H>UpzJ;hO5L&ɔV.wܖB%ʓ/uW_q۵p~V,33+s~_?orFSVܛo};K۸Ǟ5w53sc&q]w=_eg)-z[zˣ>,RZHӦMoV4S܂sO>L&dwys=7rG*VQJ;cKd2LV˗TZ´jl63t Z,ӭ`Zp#Y'fmDso`aYZ{ _|_O>D*Tyǎ.\8<-ZD>cPL&d2L&d2L&d2L&d2L&d2L&d2L&d2L&d2L&d2L&d2L&d2L&d2L&d2L&d2L&d2L&d2L&d2L&d2L&d2L&d2L&d2L&d2L&d2L&d2L&d2L&d2L&d2L&d2L&d2L&d2L&d2L&d2L&d2L&d2L&d2L&d2L&d2L&d2L&d2L&d2L&d2L&d2L&d2L&d2L&d2L&d2L&d2L&d2L&d2LI7oر#hѢرc#~YۨQ"#Fc_7 o&2rH L3gU6zɓ# .L81l2dL&d2GSL̘1Ãy"?SdʕEnٵkWd~_DfΜٴiSdҥ۷'ߺu"/^: 4hYfG؀Y_e}gvNe:b}a5jDN>}ML&d2R0sڴiN4 |iv֪U"6l8;aY+~1df͚ȧ~-%s.ш={ffYaϰ~0`fYe\Z_e7/̲ʘ{0&xe˖>*d2L&ɔ6neZ>EQw>KZ 00}ɤ)]هmT| p}McXÆ ЖDiZN?.W_}uw= طWVGr.ibOD *w]wEox#~,ӌ||-v 0;Jovoܣ>-X,S{q蜾ϙeu`;pw}+PY?/O"ۤd2L&)%LD j"P;W^NѶo߾N9撗#wR/GVZE˛7/<3fL{n{߾}QJd,xgϞ}ŀt.@ڰsrԾg Z Z;]5{OSSNKF1_|7kMw_Rwcsӆ||QܯG vE_x͘?}?yY&Wna+sb^k v]1;fA+5i|܈o#2o_;u։4j(L&d2$`))y5V2Nd+UrDmi8@7{?N' %Vcڀd'@Hcǎ> e."9"6ryOR+Vѻ\}EgiG^ tDf1+ɀo˕+W{ff_&_>^}nץϋukwo.-nnn3̲nopa|2" 0gvLLOE:L#_[퟿7gfY`pþ?TիS)4hSL&d2RPX*xWwWxx F˞m{&EfyI`g*5A3m/xzfM|.o}NW9EWX-Z0]ةa&Σpu3phf>me?ٳ'+>zllMj;wԛ^VU]+Bh p K/yffܮ駟οuysvfNw;s1; }r+Rw? 
nօemضmڵlsuԭPJH2e‸2:02Un;YfUܜ5?G L&d2GD֒vV啽nݺm@/)mѢs`E׾$8[h] :tVo}km,Y$7xM0Ց@vU."`;2ZևDx_$)t}UD n<a;**xh>w\Eo߭["qI &h[j8okܹ}]R1/%>N;-rI'Gb 5L&dR!R:S6@U姽ʢe`]_)8~D*U9s)Kuo/ X #f `Y^:wU} ̞#5"tGR>8 %DiGMѹM[xqQ"ii&ӱga pydh53k2L&ɔCrJh K$XZLPt5B w)O+p[[WQ뵨؍ZDY"d?*U"hXKZ/w+6ѰձXǝ9H߭h]GyDŽrAAV?_=>w/2Ѐa}6,$_.}n02k2k\d2Lڵkq:o<`O-@+>} }(WE~p֊m}'Us D"f>>EC-vj7 p\"dU{8dmV+_bŊycl,V2D1oϞ=0-#k9,vޯ-Qi%GM 533k2kffd2L&)kxbnJW!݋ m۶1P؏I`⬑ʕ+D ^]$gr)\-6i߾= mm.U T+#!%;s/8tqh' "/PdrkmUįoQ\33&ffpM&d2Ti Jb5TsRx*h|߾}**6^|?Xv]:T IbbR{7lؐQu' *g`jR6RR`eʥF| T=JLٲeYl>Cn\.{ [=U$wls831uؑ [4I%DjAu0k2k\d2LլY^ Ȝ"9Ckohxkپ"Tg TAVlOy޽7뻺@ɓ'߱}4 Hנm%%ORtL9lI߰skƏwĈng*ʶrEcEVc?"| 6\CϑUBmiZMMV*m ;v55kffdd2L&SjEz U"m#J/@*|XnA@5Vo ^J[2Ul'r ^AVJ#e吆JjW7O3Vcp,#ϫKZe)6PnΝ};;W0Ѵ3ր,`F͛755kffdd2L&S %r |UDn>E-A)r^qf׿v_Xە?kY H8moxs=WTAe_A8b1E)4,>orRo%`K.rLX,Tfx\k2k\d2L! hT(` Ԣ_-[XhXω*g.`4EEkJ١Cz `UfJ{9Dta?s|>㕿b Y˱6nxP}}Qԟb~@6QmLb5'ԋI)W+[`_7 9R2k\JKQ:(12kfd2L&1STH>f#^[la0@G pZ4G3O!I7%]ĀDt4(DDIsrM ~n֬Y"IC "@XD9XRQ7 PS/Oȷ Rw>r\?sa JRWٖT &ITq<7nؗO^ߓ9Q>|Mp f Ox2CI }ÍP|$ nqZC!q]zOz%q6R3c\/ s3ꌯ-\Io!| @p5; 5BN{|醒}n?Uk\\?`E-Asnݺ%s <0-~7cנp S]vSgp6͍M y>5@8$ .0o]GW#)]pM&d2L) PrwǑNL>z)I8"9#R`ylݺ0 ?/^I~/&Xef -0I# )28=\}.B/1gG~_&ԉߝ8j"&UvǤJ@ECD+hڴiSg}TLO}c1.%JԎE{؟&Зa?>{M`sy0y'j-pN-ő\1.۱ ] ݋zM rR~cñd6Y}}9 ~ܾ$5mڹmڵ찀mm1S9_X8۰kX;x[u>[b[kmWV5K.){?ԢE )$F7[\NpL uFi$^؛J|O2v?pW`ш2FT\&s/$}r ;[nmnʢf ]s`s׻13W 7ww"o#a03/0<@ mQӤiT>UEX- ( t (&6p4VWT)FY!U `o+EѿN׋:Ak]ʣ2Q`x T(SLX. j}P:d&hR(+@*o]۔\.7*ؗc3!GnG_z ݡ-w.cH ܨm6i5IHO5q:vpc?VaÔwG s]TNӄʊ. g۹>(]zu݈ ǨO \8gע9۵صKsӖݤ]\M݂Sv.uSfZk%k{@hTױ[ Lcڸzru[ FOs-Ļu\u\]֑飀S68cgWKt*7WȏגOڕ&?GZZWqs*,*_R": ܘYrFzVckԼ\Q[ݛd#*]C pF A\:wߤ7'wꋡo~55I7h^ήMAnsc/=ӵdUUUoٵ>~AJ,7gkھkSװU7QϴcȘ9q l䔥^>;́x6iӰՁc4iˍum?ŷ8X ~~cq L&d2-e@CVZvڹ;&Zm۶u9Amri 4xig^qچ禫\ɯvs\N]\>@qHEt<yZ㚴>ȕ}\R2UFE~n-AD<Y_fm+Wa2Uwuus#(sXuM׸TDr0 99GYdAEAkIpzz辡Uu{TFϗo.s|zcߠqnv~|kXa+ :ywt<|G~>l;v*Xso'_ߗcziֶg'u`?u#C&߫X;r}W>>rSױQZQpM&d2Lzhx1;S 4OF:k9>VȀN@_%).v>˻w)? tnsNA;N,܈>^<Ü<~V <, Ϫ:Խx[qB~x?$?2;Gq|Ȕ(w;u$y=>SC$x@&@v]1xofڔqJYr҄p.YخA}>VH 7|gZ֪.=2\>a\Ӹ1Dw>u= `9izq ( lP8r2=v=9ow?0 *dZۺ?m;FNw 9/hcyV{b؀90ʬgLP>,p|bt[V?^P86Vn_pJAI-+oUk~%x<0ܵr ab pY][;Rs<=Vl ÐҬ<? 'BC%<|,[y3ܾkmZ,XK+0ϠNiWu!.˅gaEZh5LvwᶟVgܸmiرk|4iox<`;~{G+WlK t /1pi>gD֢=$U/gЦ-_njwW_1%/>%~+{lC{JHHηS*7yߖoO;D~&oޥU[7VW>3#`; j6>N Ȕ{]Բ-@%lBi /}V;-65).D oçWMp{t`Hmz @D!"|Oف5q 6]Ml3q(4%ɸ].B<5L&d2z&PP3 GE]1 #~zJ(m,wv@B,E:Rpu@V ]$Fxٲej6z LF}^LWEpȫ~/N\VƘgjnG ОdMMFwocWb;mwxA^Vk[i*\޷{Tżua! @mf %b+hŴ !*@Q-`"/Զn!t}j'#TЕ6jp'/=6$`*mSbv_׮h˩gmC\sx2녗M,D,q^"^C}baC9dp׏Pm73[K >xȭդ;'W~},h[i;a; f 9 * 50mfMuup Ey/nRpi+:= JrpM&d2Lp.".++C& Ʀ@߼,o= W,Ĉ6'߳1ae(>קrΨ\{xRb ^ 6mJ~82v+ox&|A-]p>LJ\z뭮N0{̥N({t qCc{Vq,pA޽D!uǀ[ zu!u/r8y-!.N~4ys!m"}ܝ_, [$MLX>qe3|hp$;8X Pai1.l+78Bi)!R5v޷]@QD-^m6U}_o\h{EΙ- 6sҌ|qĢg avfm T.P\@ ߗe30mV޲e3 eHc /DUqj2e[ 1w_!/Q1Z H"9. ʍx9GOq(ށ]0#3Mzx 2_ o1[cpPrc1/_lb^a}=S8eƦ/8'U~qZ [F wSZ\ ,li3 |O9kԬ;g d8/VoZEv]ZJXyP\(Y, SorL/*ҮEKh[spSN.uuV6bN}E%]wx%ORP:3t_JW~>p/+`) 7x6v>JakY,'c{%\jFXe01h 6,ɔ/Zb9V\b[.b|ăd2L&T7w3s>htj9aU 5:iPفzw"ۊ P!dK m}گmY_.[PV)'e}( m_^~ύg(eժU]?HC/6 $-pFt1狟*x6>mV0~e;u& b;Il0KyrꁋU߅u}>4|_޷ ppM&d2LQ_& N# &(ƨE, pc'$Ze;{B)l/<=$̏UepQV@d2pC]> +(q?2Jz%G !wO?t@C2LO2ou0`pn`]Vb0RoӔk>~;vfOɋ)s_8 X [7pWFr}Ri8~Κ -шe Xe27wbc. 
ޠb%'/v `d2L&d*+xCFF&-Cy.@%wʮbb Įx#`hMp19?Ye#ƴM;rTʸmv$ 1*{`w@gluİ66"|v 鳑 (; y^d袋p{_4c_oWŔ[3<B&*7\fg)λ[y&[+H̅R0fv>?+vDxYNBd2L&T@72,?ZY#1>eP3ʗ/\=xh0^^bEbf,*o۔o8?+C41^H X+9ѸlMު o/ w ݇ȭX 4U@BW rQnx =`+/*$[ ) I\f 8سii;#2,nX;/u+att\=pnMXK vP?o3 N$-,/\W!Wywu] `%^'Q.aF){p+Fyp%sVQXqeu6F eq4l6=>MP_,;Ct)Q/ߝpM&d2Lĸ歷r&`T#y>E I ⋁2Bidt11tVi:E?[LM+CQb֮]"L U^s\׶9F ̔wסX$X@eވocu]禭-sW5zo\q.`:\icF?u}~N5U޷x=cb9ZvwW` fg5r8m_BY| \ lZ0yoi*Kx2oyؼFu wt nms}_|ȺowzކPBAzѹZND~rh:Pz9\_.Waܠ>YSig W6BGc?묳ݻwޛ6R`,uF|\>,Ńvy9a!”%n_6p 7םcrNM"gZB)/&d2L&S\7ȉaywG7GñʸoS,T|/ A 1M/TH~p2j:@XAc< u)ౄGRI+}zɥ㼱.eV|[J8M]vRmqiϣaV}Pm2B_e8.nŤ'׹eaU_ et'PLy AQTput+(Aĵ.>x@ Φ^<+ý ^i@>VnA]19x[x լ충WYܯbnJx9Vf)3\:nEiۂyYBaCK*3rᙛ<buJ X3h'`sa'๛Ղ_ P]{qv"\ba|̣&o 8kKf-z,| pR(E?W/W,|g;W\6z m׀,bRFfĶ뚹r^ĥxL>"%%>qb{f9xRo ;T^ix)Saޛ4IpmڦbԮu/KDm?zˋ֩dl]$gƹP@:gwmNK!,7{xonl>bfZpү3z5L&d2c0, ѻP2֜` àVQ/vbc@[2`8K} XÝ2.$dN_F.t| K"D{a '{A kLL~8+IJ%,`Y$%Q:PΫwzo*~, Il1F^`P\`@l7 ؤ:4Xon8χ5~Ċ%_υY𳤦a $L36zp~{͚ڥxv+&ui$ղEYޫ0A }au36ꄝ~y/2˘$8Ž|U|,K\@dA\hh{mӢh[ Tk؅16{dpt\YW\&~v&P">n&jtO6Ђlp=˲埅|h\dfSMlKD+Pk C, ?[Dm/&/41˱spy!v*?%ul_Yh \ 6p:Y%^/嫲^2P_;\i4\avx}ou Sf꣰\aƲoT:i$.ManC&jE8h:Un?śɝh&wo3f6TSgkv8zIÏU+ؽn8ap2oê[ 6q yƅ75 ~bd2L&d7Zb ;MIU0$cRJe~𖸬La-[,`~ˈ{\A/T֭[peAےm1&e^7?Naz >*Om TE]k9B`yeL&d"yNEBOQuu ן/y<aL/^|MK<'>>.ױQW\H@b#7.pvC5@ul鋃QSs.B|{rsmaz>7C>a1,RAYƴ ۷Mf./^Wxz\08=N$=q`4[MzZVxbCL.K@kAԵ_/r`M~׮GvY2q[ hW !lˋ2^zDV-MSCj 2iCIp솦IT/:~& T=zU[[ \ $9svݷ]䅝Y*OE"%Uris؟}9>bou\ҽ  \atVF TQ3*AWk5AOn_LVӂc{p8тg((fX!oA k0p̜Ln.|BͰSQ1rsY,>dDZA.xV\,{kk!Cy_r>ܝpM&d2Lpq1ꈫ@&㷅cR\j! X] L(@H\Umz1U1N Ad%sI`6Yc-yw< 1 9 @jm[)'=Nr@#@ݙ-dXcF C]U7<-\X=s O36 p\f?\j^O돂pW) 0|L4rB`=n^<(7epEeq}]RŤ]zfI^+tBygupxx R zOmhkue?(}Cf8\<3R-s-bjߚm/ euY Ëv>ItOw'vG o|)qx!EéǜO[W[˂eS/7Ϫo+Eu4 \u %L']wY@qQ&и.-coi630G+'w }ʾc[mȍȡ&/C2M\](Cm9 D5l@ށou.\zh1lsB= Nt=Lγ7cgtۜ9}?YW7m?SGLVI}[lq@D O2#| xOx^xLb(w+~sNAT":4tp}J!}N;;oП|~?oI8(Y1@,駟ܱ'm6}}hK o]6fIH}vNDqIߞ]B&rfkMpM&d2L,Ek f|HŸƐ{}fM$g.&-34+s.-o[/.&s^x]ͶShg C^]渄7;/yV${ ܤ[a[cP4T=c̔%z$JԩO˚:~b׃zs dw_ECK=Cy,ݤ˩O|})fĆ|h~_02'ʮ]ڿԤS{oa7 L&d2L pM{ h,#7?EorXFO7[cH#\KpM& L&d2LpUxmf͚x[SP^[?&i^04k%&\KpM&d2LpM )Q{R\#ٗvΝp-dd2kd2L&i_D F- W&v+s1ghג\KpM& L&d2 ಀXMbêxnڴP֕\ Lp-5L&d2 滀X( M7oenV/2kג\%&d2L&\\-5dd2L&d%KpMp-5k2L&d2k2k%&\KpM&d2LpM&p-Y2k2kɒ\d2L&p- <81b%Ky rp Z:.0g>K3o>8찿dd2L&tH$n "ɖ,yTn;w ^xw~?_-5h)_71bŊA&Mͳ'gUsw5L&d29tY2e_|1x,YĽvws<דּK;_V)U__0 wd) wqe_6l7x#xͳ'饗^ .roT=sͿ_|ے|H˿ [ݷy_X\d2L&~8'=k׮=jժ,YʯTV<줓 [שvl lߠ~ؾJI;o)[NR”vÔ,OjվIغcO _}U5j`mL 4p٥އfzwd)ӧ+ ׫:17ntٌ d2L&Ӿk k ^x?Jҥ-Yʷ=wyMAϞ=1cƘa2L&dwkb=\ ǥSN9Œ|OŊ N{V!&pjԨQI't9PB8#쥗^ 4i AW>A[h<3s=t!TyߠFk&?hܸqPN7W^yep_~yp)z4S$J0L_} =3 L&A1Hտ??YË.n=&p[ng},sɒ%~p6lvm3+V8 ˰t``p׿ЬY3p6lLYp' ƏqӮ]&LLhݱ3f '\D+6l5Vg}vpGqDp>YT@wV;UU Z}XKp+=uժU#<s9<_U<- "}TkK潣%K޿k9֭jd2.mTiYFކ *dNŲ{82ĈY E0?8жx?^/ଳr ze_ F*VBnݚ0VZr;vf9ln`ӦMA}zbۈ6nذ2Y=CEbݖ+Wc0jԷ.]: ).1qcUhQbD} Uo*J5-YDŽNqoFob)?SEڢ~ۢPĒOO&)B/2fKkhˏ=SMnE1u#9}|8#5Ky:F-ZVbX ~|xSYo^UѰ؉'8昣RΊd2 XdE Ԧ*+}Dak:U'޶Ė%7׿-~/{y= u҈Sӯ_t h*p .N-aW:Ӆ2rk2dpM7:W{/cjׯ_):rG?Q6ZA@>D1巢SKm([U)_Etl*'xU)]]t&/>SB/oeЃEU"z[jK&1.R,Z[|הbLP:Naq;vݒIn3q7 *z#(z38NT p[%2M2rQG+@فG\]˶[xW>Zb-\RA^'etk1Z=_)y~}+V1w%X+ EPj׮}D`Tž}_?hܸq1 Q>cu𢶰.jF{]T);_ \[Pfr Ly^{ ipL^8vk;D\܍-knd2rHV\S72@N*.ƃO?O0 qػbBqr?aUO>.Y$ D@.DXPdK1"3 3_]#BYKY(߅MpM-\KpM&)?su믿v }X u po_$b]Z2Ugd`ppXoQ, m+^b>e(H{uJA2Vhwu`JQ^Ki Zխ܄gP+anziӂ 68+^@0Ne,t&,^8 DpM-4|"D͛7o>u.X/ݿ)DS61qcjʺ0k*h5 Z2k2L$og1jūW :Gϝo/Wez(Ξ;R oTک8Jb۲* 6PNmKe¾bhW.\SA)+-]rc ]qA@h fVҥ7_dT2kOdd2IӧOvBL:5xם+X+ōe2 eMa;/Wt 7' yϑ|矻sFqhqg^zA;=uY(EGۖ{M2կ L8跈/b}Q ObDeT 2k*h5e%`e˖-HhNEAEh?Arn煬6K_hBQpM)\KpM&)デ~Z93S -I$i׮]{v,@Gźߛ4i(F ڶm(ƭ3FRJU|;4@,E.gnj*w\B5L0!oW_};3f8p "0f;+_Q>Μ9 
3uXiذ+/iRN:QFfUu@\Sx;bӣG/:)3/ܿ;K*/=y9y6M,[,Xh^%!^ѮH[=3=}2kNܯ#ڽ^zjω,lC+wm$^s0n~'bƗ c`\S~%&T"@urޮ\x* cEC) y:pbcjijxಘS)$B 0`{Do^@`t H X|{ p9?O$ꕲSx%a%"!e%)g|͵x][T\WRru660C"!l&ͳyb,7c3A$_|y?zꩁZph$w.fm߾ݍ!jcѦUV^gl'<^մܷfJvT*;?i7yJ~/+\2ޣߠ`C[Mި{)N0\ h #Y >L3~#?[#m}:3m;^8ہ{}h8Am:&~\s1m2ҿpn#7 bAFM;ho{١>B^ Z2k2L&gm2Hꪫ `Ph.@@ ILj F.'tca3T}GlZpD6sOjd=#p  q2E;wt@X?)B0R_ e Y`.eFcbyJ^(gktoGaɹ0*0s, HFO(P!4lGSxstKҽ(\Wzv (3`Db/{ԃׂ00πX/I -Fddy pkG>î]0 ʈ boGxfW"!f#n| ]_x\fB _cݧE@;Q˳E"Xɽ=E5\~m%m VwO86A"26ȷ#(Hx^*ǒoۺ_е:k' m=UQ;HhY7v/o{<[xq>6 ~9.20DnhhyqDċWoqN |?fѿ9pg:ϾP^.Qo> Yas 6Icm`w߱͛}pʽŋ.-zu9\dž6c\_¤ş?DƷM9/>#AHs,^jҕXyp/ŏ9m)̘g<+V̵ی hgxxƵ},\\O8 !&c ˶OؐqxGhFS>I;yD3r+U»48ﯸܧ9)dWa\juX  Ӿۚm~2ouXJ7 MmG~K%lS !Eky0X~QBAPdI_yVNsr)3ЎF9u؏Whgm{)x ĝ.#w}9LKe@axPx@ttT2R @yTx>yxdp{TuƈW=" 1<~o]U?/n *}Ljڳt_ӵO?.`hzWadyQ #O}3R`Q m۶] >U/2^p3ׂC9Ʃσ*cgmw0{<29@0s~y[eQ}ϐQKCu(d,g)7SOz|q҆ Vm{΋/cN;G+pbZ8x[ o%zG8):گT)o3Ҋx?\L`KHs( *P/~Y/M䅷B8F܋J}`kG:ўziTOއOsSҬg=ǽr~$c]I>+$}C4~i,!zt$>(UU| Ԙ^ (^ XfF^^0}jSK84'ѽ0Hμ`6c^K*Ʃ`Sk80c\vc}d5g'qiih9&ܣ32Xj7>m*,: fПp1Ɖ[@2N=9&ItPw@H3\$?f(`$0jC/W /3 O d NjOO` m!/ 9@^;x`I <*ݭAEf'APxύ8[Iұ"Ogz^`3R dq=% Xv!$c *=Ob՚-h:ܚwGqD-ҽzTeVqﶓa9/-B /*?~ᇻ{PV|~Ucu(z衡?=c~"IF5GydUC?yasan@@,㞗x빘| '$,yйQx@s $ 9%u.#6{P;s!^@qo)k0/%76RP]ˡރQxu"+ 8 aD ] S R,dF>xq,׳_YNO Ӽ7}/&)B(Y=+z.BQix2pu u?.s^{1cCh}q)/-x1Θ7ng +↑׍XB],KHvƁp%\~7}dfC5~81)3$t"xުnx,Xo@Ye/ŋq3ч7 ]}(q[K/yny1oQVIm/_|هkA;}GR4NLT{&:@Y䈣ǞxJZQШlV{_=rxѣ伀XV=6HE4f%<ȣ =hب"2^;mٟWeU/-]óPX.m(~%=KA%m>{3ߦ6"QN?!fk4~n/1z鷣SS=h1ݏx=z EJ=zvs:P 77bLq'z6ۧN/ǂ*s, B=ܻ?ZsʻFp]Fi\ߧ{|dj:~}5^7+dDl<ΕˋBF#,p_hڈc_zEVyFeظ$ |q,3oPyi?9nvkŨ?~%9m d-r}lƔ!YYa /7Ł pyILoC?s>TRmr?7 X5ºԝΠi=hum^ns4>iv'?&t[m4~mqwGf&bQ",?.ֳ2N}29GeIKL33}5mFxo3[d綦Ƹ]KR.i.$J;r !oyT 'CT  Øhx ynAZуːgoԐC3cQنQR[*[Q9s)?w@$r<"`7'4uhY Ax =.:/z6nE$Dh&[Qo18:e:8Y3J &,P&FYd}B \<C%pQޡuxEPNY&2eJXj99OoȢxXl]$~isޘ$DV(i7D3۪7kys`7 PSwU=PsY=u5usx6M߭$qKH?2B@ NXU@"&#t9%LdT6m'hp !4^%&K l,tV6)|Qj38E/*ˤ3HOƍb+1~3IceG̉"Ž%p9l(³\Iv&(z_gzm[R7bMNļNaQ6ĻP0ٱ$lއ}$w'dW.XZ6!k\G~Rp9V{uK/S+]qԆlԳ\'=Bjr[(lp<dʮwKlи0YCt.fm0 èVȤј#{:LJXw#a iD]9Ei: Ǔfv踳_DGNB9y0?d /s \5`,a늎]ibbzPJٯy=iJ9Ku<]R@#5$໲:(QJm0.r3 tȈl e1TCL̾88,vz$$!oEo6$I#붶 Erg-.UeuIJ*JzróNNtb䂈r 2sCsTk"kde K5BaauP Fj=-e&'㑬 ¶LΆPD?E9H χHL:X-JNcZ1[@(nqxNlK"I3upMP]b%XEg;<:P=}wF݉q؊ɋ"pyׄm4sX8ȄL~CJQR0Q-O.YI G[ ̌ ?/}}!H hwLj³ d('qG}$zD1[˃UF&zK=ί$X ިñ`0XD!R;vxBB'Ͱ I!ocv %&Nn"yv4m K{풢1#]1Ɣ0^Q߮da'hqL).ck>+{y +}5q_tLZ.*%X"YV,յ=GקkDӹI̊"py睮ر"hû={vPub'™iAg-}'"ة zXզU !K AIe[p6=86酙 W;J+/a8k$<8d32:9'@*=><-@N.Lj{\RW܂<3]|:`wy8#8({PaeN.-uTYVs+4mN;6u>3?#C,v³AN[Np?C92IHH?g:--J)ԝX q['wy8ސ$DzGYABA@FKrN _EYd\R: c򀖧B' |b $Kc{9 7g x&&p/EVd "56JF?$]2WKXKܩerz]L1]F(d?-3w }"cb KØK j$b.M yGRH\H+ƾvI;ba)ʆ  E5a„%}~Mn'Ch똯l!O]w66Zz{m=(E{b/3ۗq\vM@p14`Ui1Kb,֤Xk_3%voc3h20dZ5I/{hl%ISKYbBdQ)$iQ rϸBjUJOEniѿoxq^iq5o,_I cϽ3ߵ9wҸ ^7-Wy4.;[m? m܀{ bVD1kaT+%m863",Qt!DfH ǡ[LR|o`@x %"^$e-IFbcBe `j V8/:~=9+! )B\B6` B g`H'd7R1eCW^i$ ȶ<Q1]F8W|P㘥,G)$m&pLǬA2}#_XKeq@IѠU1bb 1YAAi_uIE}N2Qr{jD<e0D]!Uy,:P \}tV Y;mj"pQ2SxI[8x ˘dlȎ97 G2?gǻņ_5;- .+rCOiYxIa_+֩WjU bׅa]Zբf-JϾ4_Fk@mrbzpZ?<#:-=%۬Z}~r p'5 0j6+e$g}B!7X1&(1y@ȃ\ cB'EZ)W1pLo1Q2t d5@aRAd&1i`FsVx0-(!#ve  scd44X뾄qD!KovYo&r Hfx"nPf?(qpQbDK}CAps 8$b.N7y( qĒV85Ց}A2y&uWt KxqYMi}D8 +z:qdKTxE ?CKTNGK?%e95d!P=*{B.VCMߏ q5 P=zGѱMJX2R0s@栮%n)*5eS^[q#pQL͍.RI6b_T\#hxQ HF>B"2;w\b~-Llb%J (< ڔ&z0͵y6q Œ,nT%7Wy^əaSM p@x^VMd#k ۮ4oَ!qGLƒ `ʹD8 Qlzm f7T(>-d.ݢԷY~C͚!Bt)nFs@$"K{zd"DrE# XWlm;ohaa$WL5(9k`pD,)0l\p,ͺ#, f/wb +4):vǘLx?.re $s%~*N[]="i2/ĹƵi2 rCNuI<8] d鹺*]o])XW޸etg& rB`BLPsKf wS\M?C[1cF]D<.յ5EM7=+Tovv"w9[ukL&`;LK{mg5=E! 
g +ͶgQdnҴϺ~lQEn/{9' GOhHDc!pJ:w䅄&pnBC&h3ka#phQb\$Y2E0sO _ EVA'888;܇DrKdnŖ1ȊJ@~q5[,)ZDp  c=@Y|yϣigqA3TCD+^ 3dM隫d(X{p/.-=I{" 1P]/ҷ!jgnӽwon 8ۄF=Re#E9PDJ< ey:mi# ljL; )k$jES\pǞlPšbLPP1wOx8Nb0/lՀĪH ԶOQ+P<'>Յרso؏@ȉy1IUTTuʞCXVT'PF 9qzO+AKWno:Io}ٷ=J_!V[#yjݵ20 0[2 ČSHEdz."pz[T(N!9.|:$"E)!rPu ,q!֜ȑ#+2i1R:`pGy!cD6$OYó,`1 t}{NL*' D>gWtQ8%#Cfm!"{xnz/iߕ˘R9[r@U;ՉJ4%QT&+@2KWuWhK}NBߣ  3׎vNX&PP{/; %3K|˴i St^2I4Q_O(kBPpR!pH/! HdyD&@`BI.=Bp] ayE4lĵUfs&p˗j }5mEa8Ic~UvU$pP6iZŲJ-J6F^X{7Kj/fQF%Fu'&"D2ϻݥ1D2?lUU.7j **L}^I>U2-qi|b[-n,,nC M]EZ|ǐlH " :k ]][8|5ϥC_UfOo~ivzXk }ק7t5"wG2ka-!PԐ4@ T2Zr(.;iZBAP惥z(UDt LJ $BANtG9^[hyr:GE_Z9W۔saL$w`OKqe`!'g]GS-u!Et="qQVx_`9d K$!/BH`@X~ض1pQbCR Oy?ls{`.sysq]Uܷ>553 ^WH)Rd[rc[H[FB\H|7l pn H-3-&dQG-gmwbViC!]Sb:a_( ڛFj3Azb }4#HUquM3q*vou"p$5Dwmko)KoK (MhAD1wy!}S$OtoSDDG]iڲq/QLJK{̪"_LMÕxϢ\GrP uQ.5@Wb #pV?vK{(MaF${ H)vzQ]: c7b݄Yy{RWj\AZJor %"b FΓ3_:FȅK*)j%DFICչXyQ g[eR$MHC$·5}\ ./ufqS/D}UnQIҾXR:"*z,v}TG7>Rވ$5\!Č 8DzS{\̳ ) #po>'`77J}pSS郲}N bbMh~ޢ0IBdPj- p5^0F6mڴ0>{B1;ea=Ԯ*NkQ }Q^fZ2m&&O&y햢Tź WQ* \d\~wr2z[G\vQ.+"3&A+9SOӉa@O4I,KJgR> }IDDD˭!AiuɃ_c W|3!hϾ}+_ 'p 0 iosH~ZZuQ\U}4YvpZw+ZM>ǭ5 0LE1JR$H'; P;6s8<8O=TPw0HtZA!)§`iW>+ieH d8$9im2Bol9.!Ydž^8Ila\B`HS.)Q YbA+B  ذՑȸm1FBaB!0#C"-2%@ { ΃Tk +}TE,ĩ*I@P'L $މqٮyF(ُvUQqHD ɉyYQ.Da} 1=U2L;5Q?E1HbF{*jBE]A0Qd`x01PW"2pbIC>{PC9APeQF܃_8A,,ug+ꃶ= %6NJ*ŝL4"3XLAҟ&$+)JwAbUo{eŽvUހ=:VcFD؎"Ύؾ/).}LjRoۨ0d#/!;ckAR807gC6W=5X %o:a*A͘M>( 7ߕL)WK_m^JOVbcB 8VdMۚK;bF઎u0I@^k6Z ń7\@OIPl0&O`rK%بC=blJ!p9mU;Y]%US{mڛƨ_>mtT~B/ػK*q󕦔cξ- 8$^ +qͯp ۏmڼ9 AY˾mUbЮbv+D}(߄- ڷ=uX瓰`a-;  sb#xF8#'8Q,;ũŐSr{9?'C!}yF]0' 8BbXƁVYZ|dƉpU,8 %PP@|㎲ bb $d__~&dƭH1vsc 1x/@ᤰTr _#&3cU;n #p:z@P q! *T D-k6 1fkQG ~TQ=<ϐu" \ GPvިMc{rp&3)0v B'79"y A.RwPF%eܹs[Z?9F]߁}6b!g{ͨԻg,r'U+6T}I6q~; " J/B%=WZ\vC@ѳ##bc]@$%&pˇEͨgޅĦ %}7'+@8A+Bt{l (5$ǡoW~++9!0˃c|2)x8':A=ϙ6.3+CۆO%kkI+ul&w87$}~uPU"[6DBjyl?]CFS;iV}v\wF[M}Tw8c$?l@ujN2\"쑊$p=dBDul6/۫m^EKgk%a=Խ;$ [5nG6R6:5"AG w#u]o- :"IvW\it=B8w~i'\f鿶ݮuѹXtŕujFſE5 0Ltga`;H cH(Hp"F#`_f9c2@P ~Gѵ0c\۲7{srh$-P ~&a<䄌r #2!#siYDGTx{; \:W_;/Ļ%a2WNq}9\rx?͛ N)ATU/nWqJrPg qU$9sv:u^PhwW+qA96PlXT}dr8K"zт#H% cr@clo^Z0A!/*(>M,E Xn>iuPYԾ:׎z7[bDB?*l>MHxXC}2$}E %pySi3F%.+( TF}qu0F>ɧ4F rl}DŽK?2-r6)6us.u.LJZ,8rR&O܎IkC#&&+bo'p7"OF!095bC3Qߘt=&cFYe!(0И<4F|yފ pu#D=;z{>񜚄mFcM'݌'8}^04cb*{>K'pkpc;c.qlv>]+WNE~;2mg@(ہ+GZg~;D&3z +*~KN s.r \sh+}s]7'R &[  La&pKGX2[H؆%r|3(?1#Id ^Jl&R>C. _]BHAjH!ɪE)eDr@^lrXjX&%K!Z2yd,Izԛiu .szvLȦ}H7|qkY>6gm?GCEr~g$5('wBK$ΆNoHdrȚmӌAN0l@bI1`tG/8j3okH|ޯؓp#0R4堼CH* $Z$`nIăc$L,Nx铡/RH?NdVR"4 0uPI=Clu+/?~|Pq/\; .TʋqLj#?33iKWȋo&p0ŠD;ʍwύ}ae\g:1XC5l dV:%d5&e~Kn>hguXAh3&ϒBq5FcE.3X8TՉ%}q BbP8I߭Cs5bnn$RՏIHHvٻNR \GVZh33ŵWiuO =v}O-#+Bdlϗs Nࢠ 醛IzMyi>W-<kq.4]ATħ=a ?wXWShU꬙;jb ,QJMͿ "(y{.IbLa&pKE! MhC#I5#>AdR{MQ2zRELj$ w#!bV9S=eQv$0ۈL`3b$GW[ pQ͈- pYrd FL$H 5ya>Ef$1=Dvi*|OHiZ,萱T:]Ivr.=}s_b%Ye]XژI6,E;+?Jr2/QԠ2Q?/C6Sr&{R Y(fI{h,.9,0]Nhlsn1Sﳧ!W}BjԵl" J`9.o-W&OE=d)¡T2ENIr;!5D_K&HI/ˁ;UJ$py8Y\mF3=u̞4[9?g"m `ǘο8- o!Y[zH|"xoJYޫJX ӟƣzJ/ PZ~RKFǪR 'ѹ>:"?D0dQLxȥL̖Dr]}QWS!~KV_nZl#=YxilEM3]~Wς@aem29W7z I~F$?Ew06>ջW!**=u_WC>J=#Bn$RXHt6=\(Xir+NOHqFB$hO"X}wٿб.sduڦ{ H&pE)h0 è. 
[`InQaG\,Sϰ,m,-TeSaD4xpIVU+Cs{Y|3"- ]ESf( X:ʰHս9CC C~it(\O4dc!{PTBs#vo/.YicYcyT4TDD甉 IK"%="9{ׄS"H(g#>KC\HEA^[UOptiJ]j'S7eI F$t;ɑT"/P'}8YFR]YHhT>08,!g'ܩX vrzr?[?U/>q 0m~YjrYՍD,qIߛ-I%3Iԓ\X`>,!Ai]n2=<7u4_''R[*8?* /q)wuc&Q$_XmN7V0z[& 6#-Bjl&v6K[)ka2D|_-?="'eEzaj23Ė|v$e#%p%+0.}ͷqs)3dGXqxw4ހd=(an51T(Y1/4>Cb+ :?-k+R#4c "gǕpԤ;UL՝dٷQ5ԩW?y@RؿUqM[&HvBf7jS \|7aQgR>!ܓԣμ6yF5hwh]lհ֡66oS@:jkSl\|Hv:zM ҭZ[h{4kaTyI&Z5JDa#cd\%Q]( QF/؄ cɹmE^# ȉO9iu̸Gq/b+_xEBgBǂm#c 9+.s9f(R|{B"[c[B8&)M8xӺ"csm" 6F utR,/"翓ܢ98Vyp4Q,85Rl#2 c(wD~"7ljxQ \32 U"&&1{<^@RbiկTY|_w6?zR88լ4y&Doi*LZ\T}е|&%[nu[.kDmIiUYx.r(ձܟW2Q?!QS \!.V;g?QҫLf );\h<#1:[맹uP$RC+ue˥!" pQBo>еK_Aݢ2HN}Re5kIn$Ы`W)+2d/=cĠ Eg˪t3Fkn SK;BIJbK~{ohXßEvm7@Hh%l }T@) WPՇ̹Pf5#sǨ(3>P~;@S-A"=EC@bN6ySD`p8Rֶ D]e \~[h,.ƒV"PWQBV2q%(dB[ZqP+ h pW݃U'*~kCH1!cLI` rXmkΙk#~ῲɏxGN]EAhֲG`WsXr''!.?5q]Ot8mB#vMM~ץ>YL,L>z W^6JWQhӝ;:}''P MVm|n$hvOL $' N7۾Mі-=:0?=Kwh9met]׏,8D-iﴉŻu8N6&\[<t= 4Ӷ}.LM5\! zhlƦ[X1C~Ẏ3ܲI 31 \0 \YfEJ3CXW88(cb,}~qY"1 VQ@8`|k NL. d: g n2"a4N!y B6iGB$` H \@tq}ph'lirRg % UP.y'fؖw 9 ̫:՝闺&|R7pO%B|:>! 3PܿgMCI$kF%UF}P'JǧQp!!. 5LBPPx(k*˒S=큉#&JzLq,bR tg@ }2&8J\0GqBP  1@NJigU5(gpX5Iɳۙ1.}xm8%-_& m R>2BLv,WTq x> ;0a Wb|钀 B' H{_ Cd 0)Ue1x_`3$K-EV<l&{&>cE{2ƛE{g8Jq̊)B$ϸP7QoZHF~> `\ն覎q̲4&Cj;@Ozţ }ɻEVfW^*_Xch@&+U`g~h?$mo[ Y{QϚ}?ƷE>pgqRX~Hm.9w!OU\0ka S#4BQ`\Ù]\ 0KRbr8Ȝ õ( ׋1s²pu(r S'by2D$NBq`,买,,KaHHG@E53/dQ(Bp 9_\9ÆRfԢ(09Vi_nt(O!]p pp!ċs llC>pǑ8"L''$Qh( bرa)D\e􇐣(Q\Ēc/2Pm- 訳wZz6L;W._DŽŁk&M>_ץmgU <=3qs4~g miLvAN-PR/sեY:;"2 猄jŢS'?\?h/./ ul3m_xeݹ`B(2T*4wx$vULdCq`R[xƌ%W.P`g wb1d5J"4r<! D{d?w&Ae16D= wI=ʼnJ Ht&p&x/Xw gmSSo&w in@XD٘Z^wBQEE|YqAN#Lql_ *6D@E}>ݲo$ãeeϕ~@lU%(_x1CӄÉ3zI @e a7B2ALYXOlhC(a QR\80πei& %ymS3&0G{>W FUJg͘d7}41Uk ;_6e{-}˱cyźrMs&Pb@&J#E97D3qGMoK`Cc3O=tyǿ \a"ƂD훍7Nʈ3WjȐ!`TWU5&p ?/Djy}+20 l \(Tk,0b0 è0[ ˙bL~.Ve*ZJ=!pQ$(P{V&pʆ \cQ{­U"u[JP0E \"aaQa3bOa6rlw+KѲ6Rܮ8Y95 &pʆ \cQA| }'5*3gNoFeQ0b0 è0kT L FYwIDfp}oEfҼy+\!ޛM FE \0 *Q)0kT6LeŤIg}6i_޶mTR@.Q>6]s}fרl5*&p]LaU&pJJ+Kv/ {g >몫"tBrwH#Fl4mڴogqΝW5^{I&ܻ+AeXܨSN8 kVf2͚5KڶmcaQeK0*kc, j5èHlfI֭!p_nڴQ,tfmPp?{;' 0 >|xw}w7B'4ԯ_O?A\! Eǎ^zݾeB0'߾yavna{>H<55d *ApaK:Zj)Jf饗_ٟܶjTTdMs*O:~m BYf? )+B-Z.;a"5KEc&_~y=՗믿O c馛n(kZz?;.ݾFlk?@*l^]*h·矟t._ԭ[7Im̥jy o{[UML/dK=%=ӕWZUVY9i`LFLda,iXs5Y&VKq..Xs]v_EwtB\uot~gR\_˹GR3RrO~Sїj+_TB=Vv"fvNbƩͮ?r ++ZwW}>M7,PM&2 04 4H|e..TS.'å2JTY[bK7ҥKHjdi%vqD}KeZo9q{?С 0% $M2T0 0 dDPi֬Yرc>QL\?0 0 >7($3 00l2;CP֪U0 0JzgϞ%NСCwcaaf w:蠤]vɡbbʔ)U R? 0 (3h׮]i{9$ Hڴic0 0r&$J 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0/Rc%yr饗&͛7O^zi?0 0 0 RdM6$8p`'O=T2zh?0 cÓn)ySO=5b-2K-Tť2ˆn۷o'§aa,Q9rd&i&]tI5j1 0 0 0*(pׯt9裏G}4y뭷` 0% \pAPޞs9ɰa6iI}=ťꫯ!CN8㓳:+aKzyIN::u: ťR˲.v;9c2~y0 c AK.$뮻Pmi*\s]tхj;.aK}믿&rJy%?]\\\\\\\\\\\\\*tIInݒ뮻.?Sa 0 cÓ'|21bDO'fСoťRV&R&=P܎aaѣI&%n2rsqqqqqqqqqqqqq3vԨQ<`0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0Jq%i&Z2`cI.Oy?O>/2 0 0 (FJ/yדgy&޽{'rJs%?s2yd?(0 0ʆ>,y;9Ӓgqc=y<99/yꩧA%mQ#Fرc>9!CC>C7 0 0 JW^ 'ݻwOի뭷~:|&͚5KN|/Ou떴j*b-[n%(裏N?GI?N[?p0 0J[/3&9sN:%w^C .Hk+L}݂}֭}7ӧOrufM7 .D]wݕ<ɝwޙlAs9a&z8>;SL aaaFi_ "]& 6 gᥗ^uQvm||D)lmڴ –o=*JnmzETć?pO$W\qErq7 Ν;7| o#>>~='|& 0* ;82$V!C%@oZoE>A\sM0z f0k}뭷A#)"v3ڵ#seeXV#(yo>;C `W_}5lYg饗’_|1|aaQ1k֬ A?ԿeX'tR0hcq3-[|E!>/]\!KHPcIh=:th0=1`'[2}tW0 0 HD(KQa_ȯHD $V?VrnN "uFVI4Ա7ݟ4h ҥKҳgpNCԁpDV!¾ՕOJq|uViS~9$'}?mW~"i CK-T*_']mu]7N`qYC6w-ds ' { `vQa*ݩS_PLtGnQe:O? K1<.gjXĬZL\R br#g:Ygu[M! 
曻* .JRvH/e21a#?,MeHB'#|1p }53ǧΗIe0"}Sy|;<;4foo߾:Gc >MTɴiӚs2:T_ H`{_Mvy`;f߱Yrzbn^{m^xd=pcZ`Tjs4 aM@\XB ;0 d!'Y G3YJlkltU |xlXES4aDDhMuCOwwr[ZK*AHXY(_c.+,K/':?*c|11wRj>_骕]*ߠ] s) h!a,N@ȢeBEurotNSRө|TuvUO*t̶Y>D5 A 1TGͲ7'M̜93tV"}``@/oY\@ܒ4:axP3ƨ/Œ65c'Axb=cEE۾k/C2)IY,1%uřzN:x-#(afg**[| q6/ꚟa>+Y޿s{j{kȖgޕQM6MtD&x7K e5KXy^$4` j#tQI! sp|\\*}MTv1jM#!Ȟ6駟DRˆ䯀U"FC&t%.E!Zm-*)H{Vg%xFD!!IU}l() ڵ^;%Iۿ,_ Z<~] ޒxm"Kɸ/!O Yş ڑx8/^ 3j[|y9%){S c2N~ߗ:KL\+q è4(1W mLڍ _c۪ [`bq_)S?8qB"> ĘbYR$!3)m00aOaD/IHtD݉٠lv"#*|5l E:yop8S0ſ&cmHUE Ll(-mgf,zo̢ B L8wEʎ6Z*2@ߗvf;hf Q(Lv2 0 0j4$(7 `3 UH$x_BѲGkhV? d[ŶgCelEVR&bv|"g&~ ݩ/bQslH[e||| 㳰p x> cTgOX b%2 ~(>a,N@UؖMlRH*uNUWSM>Z!˨0 %)j9s~i(nX:O'qdiJ!K%/(d0*Qw s BD5'Ki5ؗY/;K8>@G Yv(o96ZA AVbY3jc>C%'Rv+T_"'Urz^Gļ[d*" YRTk%ZaV`Y d0ˬG+wjE$O $J|{;-K}gszE<˚ZZH]pq..X?C,'ec*V*ϾEY[ᓴ#)NdwhA0A آB@B"yRCvwuTuNk?O/UXZ >aTk!Ђ3@-Dެ![!GI…ȅŧV' ׀0ab,S#EΖ 5u D!9b+>!Y=%U ኂBBBR}!SQ7Wd<5exĂ :}Ѡ%PC$s~::\@}Ts:=?m\kQ1KԉgQ,k\VQEFT(Evʅy,QHd}Od ~[J [ %oQ RP@Œ Xx7\l]D]:"s@sE|ա\qw~Q]ӳzJ8KsOؾvj]Rz7c3ťJOJ<@ի&+.v[p`6yᩲ#/.ٱ]vٞf)*<¢IV!K/l[lfŖbi"?dmS>|_*`/w/?"HL~h;è(Qb.B{\⬰JV-+VF y o*\/ +z@ 0*(gQfUWsJE>%,}V$c"[3Z+3ol!p˲ZS_ ,-?vq8&@3 e,.|r<ԼtHȒ_I!|Vx&K+}*$@*m/G)ŵed%J6+| 5nMG1K–;X{8pIÆkIJOd jWsP!,lI1}!sMa(edOWZ6]fk3*^x\* CiSּy5RU_ϰa]'&tDCUE5ͻoKNNRq{냉gz.u3ITXN￐.I3H&e}e,'6Àx"m1 Y]'k%{~-u/袝x_xJpl[nmOqkebbh .D,bh:R~x(ħXpouw;vD&%|ĵݒI&khM9F8+{R 20* e 7xcFyi2pj֬YTl os:E=??p èx3R +2dH j u*m}}C m#BHݤxEDl;b*ImUm|m!oa)|CCku(v*\\:Gs7wg].紛fZ_Q]Y`5<(2w'w "/mH;Rm۽4.d y&Mߡcu۶[nwc_y)=tsq2}2R*M9fq ƌX^{tƯӯ}N>eI3>}6Necn-+ugsF{VaD!Όr:d4!xFfH;wnk**$F<义%'"5 \ $Pc*Ǯ+wB!.BU)2eؑ"qӵ"26w MܕKBҒІ]uWd*5+Q_y{HF﹍|6Z_LQ Ge*a(ATB.S3\PMPgƳ^wo#VpGLFKQճ$qǿ6{S R!aT)m)yh 9i0d-1mE^#"U[P-BNHm!t2 Ye){N>"8۵۲ \J*@^6eb'07_Xt2-OGHa6Ɋ2D0{LB(FaeIthƆ0`@\W V,evd,X$#؂LfsЭ[7H$8L"N:G' -K>GjO5sUJ[|{ RZe=X~R0?B yGȆOKK^`n&c 5͢"gjRVroؤب %’A$HNA$'Q3(QɒS۵˰AA@Ω33==ԭ[AdkTr8?V$Nq8pQE`1a ,C8hz"B899r\'2@.y@: ]#="͒8K1L.%"Tǡ. r(O> vt R+x\N4AZk%'"7z,kr :ڡw .<+OۨZJ T`SވMͬb¥gKjW,܂9/\/SD؞Ŏv2l>>/rX@9yrLz*0@m{n*98?o nXNQ}HSax(qݢ׊Mi[HoAV%,a Q-$v2F[Vo)A$ ų AԽm$R01JZHW `+Ek9)/׫F3J #wA}oHZL2٩(@UAm{uYX#+FstRGGE>w*ݩw:%eFӋ7lٲBwd?qYlIf3Z2۷|Y~_v-Z`;xw,Ѷ4߶8/i! kpܰ\A?dQ]Pr㔖tc5jf;rb;ǔ{r ?r7+Gs d̠8Jr+SԤ:X@!feV^Yɨns=^1+ ~w;G9[=5 'cր4ϯ~V^NFYVzGOEWQ6Wti>rk ڲeKw]1S|0 bJb+\K@->G,ۀR \>?rVUA }*f7ʤrNNN*x0ZO_k>OA?=)(O0; Xܰ-",'5.\A3*j Ɂ\5 ԸO׽W%,a9z_TY.'BRM^4T2(I@,fJ*oy麕V=eRpǕP ` hȊ]OMϫ$K.#?kId% c@pEK"@iW\* \Ժ!= v@e9. ?BI?`>Kl X!% ELl;^k(3r/lIՆyզ/R^u_@8uǪc4VJ(){7\BU:e an:;O{KYêYӳM8m+( RX/DoVzoXC7Y~c# |^$`09yqSJʕ)ΫD(5 Q~o+5a!^ыvE^k(gnW+v XűRNPJ߭KyUW}\\$ 7 ҏځn,cPՒ^WPYFqAKtPn0y. =b*Q$L8 +<X/^T|5zneCdF ':k|y8:Qw/w[2<7,a nXo\]u-M;IuBߵmjkm횫(!WYDeTH `M+")3,RaB=cG֔;8+HjRׂ^_HMn< + :g@v Bg'JyW,Tƪ܃\\~BJTҘ xemTo<~Q9oLj4Ȼ8n{ υ"o%l)mI<X"Xў;7&B/\*-+ϰ=m_i. K;J_ pYXV$N$PZ%JEimN`&+)-Ɉv#OӔ\݀WFi_qU w~W-J~O!)-(jQ ӱ:aX\O :n 똃ux:Ȧmбw~AB:Pg7B.::7wjnSb{7B!,}W,(AX #(uޡeq=%DP5-iI"RFtSqyx+5 \õ5$n;͜n+NE~ca]:[ͭŇ޵wW}ƞ|A7nkZ X|j)Ծml6k׮]ε ԥ<}/opܰ- mݖI0vVmۗKmN+cZە_5 lՆyt6s8gwpK+kc+]~ P,\'x俕~!T`-/mz;!@6d݉x J3I5(U= U+!f1V*y6:[O@V|YWVQXQ" {z<8X#7?dܪU)Xa)ǵ`5+M6ne.ïZ<9a ˱/Qs)b}&-*\5)oP&$$1,a ˑ/ ,p e"?$iz2IIN,,pO*x?ӕZhśiY' Ge#V')ȝbV b U 0tyZ$;XxRiTu%_.lO?yIQ)g#Eݛwg{Ԋz+YWz>OCmqJݵxA"yp>|}WƵ%a=1`퉪X I.K mY6T5[M?֮S3Q {;Vk Zn+\{B]*X`- Z+X-_5RZ/WAw*P;/A.$AӢQUu Y l%,Wa&JAUjoidVFy#(pvX6K@\:FqEuVFe/@. TyJiȫ =txJZ!iLĒ[0kk@.L_ 8pSq5D3V:뼗J[Il4 nX/#,*H͟ +uQ}:X-P~}opܰY, PnEVXcq΄uz\_[|fn;XO["h|NySzE;cfJQx Ӯ=Oo"bXX8~{ >LLB \\ҫXs(4-,V+y/,%Է13sMn{Da963'vmpՇ(*E53O‚wBXW1WPz uC7u㺌8в]{5;uJZӢd {/-f,{$%^wz<ϽG"wc1|mx۸g9ϭu+ ʋi}.sTN7xˎcZ4xmikJ]'(Zh?)n7k{^?Sw[p jOS'W%]a KXr\`,N TDIbG#0DOD N0IfD AU rDE<̥]@ӁV76 >"A~,H E$$<#>7F% xN?x C2,T_SSM0D! 
KXN̞P.\ M}r a KX’tZ$!^^o$q 4POZE'hKL;$^Q#+] +aY9EU̱J׫ |MEx`NCRLiȆ أ ,X#3vS0x% ^F, 9AY EXvAS$?X ?|qڒxuDGkb$sΉWG=טku"\H_{oXZbmuv3P  x !!P:G믭_mḱ`ûv/[4V,k=kOKͨEsxr#^|-kTظ so}]=}mtQ"y$޾{!9W+GX>CEƽZ9/  5k*b_li=e@xǽ=߼FVx*e{3bxjOoB,>kOc|6Y p"M:1Sz2s&]4 np i-=zX6{?U*v2FX wM\+!q.,ۯ-[|}?u :viTUK+j_} I`VgD/%*&>pW+?%q .y V,͒b9 y#J p=d%" A,S,GZO~.|6{N]⃫ߣ|O|тrCd(&mlp ಍ryfJ0,a KXr\:Ōґ0+imz:}J۫}wItIyO =RuD/%К&@ K@S%%9CUbW0QQU~(IQn9RR[dڲGV'`@%L+pnόRz͂ :Ǜt>]YHg-"OϽ-ULaC90QF\;05CA/aϣPǤ Y> A.MrO'6HD@?ԫF񫍮3 e&8 8ݚ꽋Əa]poMR٦zԿMMv*#]ڐO_oۯSP&Ae@ IYD __,hl*-c% 'zV\Z<"g{sHv8Jpv[|&miq9ڴb$ yp>ob:p=xܓO9gw{}3i<:qit5-X{&.ޕhWe., i~mcu\oҢoW(唫y=xf^`gdH=3b^}bo, ^}%_+Zj*C0v&eoQ nP:eup[/xum+e?Vۖ z^[q_{x3O/ _y5SR D\r>6'MPW;V,X8DǮľ#fE\bRyNwr]8ÎEc"ˌ%-=ܸk&wJ*@*gy@N"a Kp%,z@D&MSL`1{NÔ.Դ{IQ^fJo l࠱?_~C&T6:N@DCm8=~:u2Jߢ3@A4qGI(ksq:Im՟#RGLҍ" +(!~G_ ٔ,CD ">mR: vtv86 |Pf]2| m`; kUCXa*QJIA֤FugIaNu:6%.zRkm oz:!xѕ65JqLQ۴fwpݓts្|}%-t˼;w>W|}C;a{_|q+?[x RəӮ/o /N=5]J_v]i~U(^WzF|Ak7j_>3[ھgM=UJiԨS7X1_vqgJ>H"qjr^[馛J~uԯ~MŽ+Wk^}k:/L5FAt2GJH Ua4amOo!q^iu6V|]v=[x|z\;kܬ}Ԩ5hTՏkŰ׵ZkOmbX8ߛV[wpW= kYSq +_@wzn~+V瞮{ A֬YX6۬ZXM׀/g(ƢĢVlױ5kT&us ʦ>X+7(PBxiCqM 7)#C7,a KXr.I*J4uJ:vߝ }GY݆%VFtxeRSA{ hLԨ-I5-伏gھ2ASm,sN' x~m"y9_`u;3vJU;_EJb+Fֻ ^yߐ o,+K.ĭE^z<}'֭W=׸toҽ.W߫C$“:j}#A< p?z%=+[<QMQ`m=s*N^-Pr xb18ʹ(nLq5,qNmKb(11cN֪q}z:Pרmu/F>᳜%K Nkww%z:t(,zUL6j?ol*U-o%X׋G\}܋ۭ綬W^Tg/60ŻxR(ѢPgXWwK;E,CO]uںҀ  * MuXt=eTsp|p PY!QP@],"6|+ ;\ޣgr^lII6 }JV&q*6 QAbrG =`.pE37G؟A\1Gż}^T@s_g{; (l ~̤&Bkb)*|͈cQx΅['Ou]Xe+W7k0Oܰ%a KX*?-к #X@^)e(.}:($$RN׏C%Iu~Mi3P$ 8G (JNSf W(:Ɋ1.6SlePz ,03&SfXcq5Iwb6mR92P`0K1D}Uhn8~&*q\6]{^4NJqZiБ5)ovr,T@@p&ܕI^/ܵ c.Te+% _슟آɢ`YUWep`knʡyB\Iɩsζ /v/n)^/y6/MK_q`XA5k t>X0ײ%*i四+8?<" ? |5;0W.~uRqKܵ8+K.8eƀ<p;9c ]^7 $&(v]bxV pG,,뢨wꅊ#ɇ*Fms!qb8D$\΋OLEzMgri Bӌ7|s\b",l+˚xެ 3}! KXB%,a9~C*]E2C7VBBC\++"> Tci7V<ʯ H $g{%{9\*Q>1+@DOR`t[lfu7:{U&GktݪUTDCz=__ :K" ł%T]@Hpu-kI^YD5 vʳ]DGxORVboY^V>.VEÇ:*J\|W6G TʲﮉBn< Y-\m- 8iΣpi3^Գx0bxQ29^~_o~"oڿ[CUk<}t<[;s597[SV_AkzrZXB/5u4 8da7VE_x{gϾWO?]}ômVT)k޼wo0pYtg^cfiU?0SqXYzEER[F"np&x-4f#xj; P\7aG,!M : 1ib{R_Q׃<˫/0xGǘbz4/z?H|LJk&Xd! wJ}AΊqzXGFMD߅ nXBܰ%,a\wg:t)PBIL=VRk%j BEXpidDx$j;w%M 0uXI<9K" S@1Z0]vG0R8?l_BuuU:/rƱ@~LBpI=@NQTg(9I@ <2D8oʼn pO.>p`GN4>Yp [ PeL%CZQ#u΍#=<îq*.G*꼑ޯ{-^VP֧=.xy=Svт44SܙtNվ 7cƌ1UZ̍=v)`ezs}j8Xd7:95s*~ad<˜1JGI+{)R%AJ*ՙ!P[E^}葼v]yڥ]cO?=⪬vc,o{RRVyEW޵#w ժ[L=)` b12@,`,xѽ(GnQtla.r: j着b%si\E:XXn:򺇷~U: Q++_΍x:C2#0cNJ d |\p=0]pLtM`s,:8Nnȧ{\@G3v p[]d+DCT&mu F87yŊ ay3e<>p{ `2[ZKu~Ժ[Cm[v3<TڷbsL :o,p`K`Jm7QJߩߦ N?xalLkTRyu_&'<7B1 ɈA1IN[e`TbLǍpf;{\ \# ':EaQ'@%Tx*x3,bXjdC- 1^Xelsi|7NR\,xY@ױSO=U@u3tS0I߉:&!irwt_K/̽n3<(T9+V1Yo->ih{TŲyK8H D u;hBmzonׯ>=jϑP.uylC)/8{ pS+5㎓<"">r={dy-y疤T%y?H\b52߭ߡaIapO1]On$%B7,a KXr|KZStUQ6[``ޯԁA<dm^Fj1ULWIou: xLڻ/Z ޸!NBb PTƨ,Fq& .Ԡ%J㵢jMu+"P]ཊi5w0@;wɋzljp~}o,uvE˰O@Etj(pkO=t};ε.X.Ĭz0jpv 'U=;K u3P3GLyAǫ.sӁE u]<1^bH1c"S5x҄bTٯM,5>3Bk .8-[E+MPP7``&A$޽_G Ձ_=IxԪ[8ݪskIZK U/SN(WUKq;\sCv_Q7&?Py#X`{8sNJ-ԒV  pwgM[u7g͚u^'|757ϘޢOw, h5t}j} gjW2i0~9_ </9X2y.cbE>˵IG+ȹ 1EuN-P2cs]6]mBp%,a ? 0&d|գ<ӺZ$A}$sR&dב?~dDty9J$JSE(PתFRSVCmSuGH}%ݮ$jW:OkA"VG!{7st=#-\9u!@D3rJ8&;S |6tHO4 Tܷ|_qVkrwk" H(UӦXK-2E[M팆\yG9۹9#OߌI'*Mobs _2Ġ $%ߊPH5=bύ::ʆ*^H@rO~7QSLt b1l+h9kǺoWyS%u*h-VcRSwcڿTs :l5RgݐVo_ 8/Zt2Bt򱶿R+bM3LVt:@v~aVZYhGqKA_}/-[jnu:fI*-:>K}tR@%+3KvNIw»n P^,u6Vp&[?p[!_5oVmd-^=]sIvr@\ Y8ۺ;xz&b 4 Xح3wEpˈʝDQ> /$ rʻ赒ʽRj{Ie'_æH\37/+[ױ"^͊`?#  \%,! KX'WKM]QJIIiLGR4[D\' /y!  
\KCZ@+ 4> d@=:O^.Q *Vh'íʴ/*Sy~ 𤷠E} \Pk&[_YıQky 7bx?k?qDz9r=.|>x=ܩtlzkR[ qi\c"M W;7nA{msw:&#Xs#H'S[N̴`A82u @bDQg(JXe`*;:OLFLp$U 6@".g`gA&Ny[Wϛ5O$mW1Q⦵FceT˓ ]O˧Ylځ .Liz쯜f5|Mb(>>.ǍΡs)܈/9qjs]bF#]ȃ)GьiVG?Z #}3S9Vȵ'a Kp%, %r`ܪyTo*ٻQk{' eH>'1$ %IL^LU&)H-UiZ$aJR2Vz^pFMA~P 7WI2ᬨnIiSX{4{E`Av7X%0-$J=,K%?xLgGOcxup '2%PsO"܁롎[EO5]KR-ѱfs=P:>\Np[ bQJj~6yN P٦C,]^C VsK&?T;4Trpc\G KܷzXݣVH(Ql p5"ZH&" `/bbxվJ|7{1koKVoRxJIY66ͯ{_ozbZm%X3KfKEj6 gM (kY{ſA Л~㽲v2Vv|n= SMJzUuzZ,nTgΧib3x"7iY`u8"ˌwD6WW+ b5P磪V~UOlM{cѱXql*s~ݣOG4i?n!!UJ>izeǃjQɥPUmPA Z\0jeK=҅'/yyۃ8g[:3,;t-;rNT! KXB%,a9NTI+H4 T" VP'RO])0POWrWd$X pEHLY u׼WR/pRh\2* >6}kuoJг~oݍJJO5 T@_j2rk>OۍvQv ޥR.u\.'r]j,!{UFřR_p}[kn48]Khop$=nӥOoۄÜ@,y/b&A{8l ~۴~&l9ZdmN1^w,W+p;JSL렘ZSyXА`,<{y+ȐpʛQ@m6 cusIj`UBY;v8UvuΝ;[y]pb`Q8``v)6 >)F Z=,ܰ%a KXRg%NH%~YeըoSu*i}#<E:oYd+, *uH)vڹOUsyIIIz4_Avj_e2 PV@S"S. Kg$fXjGt*F{$npM"YB5qLm^|.)AZwDk}OI=pBU*@^= /f.zE&-KXX nvl( .s>+&Or_tTh=칝 A#oGOJ5R4ӵC%gPBi+zFIu=" ?uLpT.8[=7[ZJ ;ןvut;y@Ž G' y8W3;7k # p- ڗ-6){mׯ>рEfD;(YpM`~T-S,֮A}[;c{H$N."Gnj Zʩ* (S Oȑ#>YJ ba`dlqkgÂi@ jc>}sݟn+~wZgP7k@X ȷ}Ze?;Co@__ws@P\A3G Eu))*\9^Tպ=MZԳ&M Y:2hXwnI)znC 6mY]*W=(?' !ȃ3bt1FЗq`#`NsﺙX ГS#^R{fh{4  *}ь=:WT%]gGSܰ%a KX2]0Ik~H0IQ_E \w% H5zt*J7, !T3ʗ/#`rX !'AR%h7 PRE\ԫjtW5d%:JcAJR]SXX+pDm/:&OŸKJ8/Js>}jK&%ҹY|ߒV ]2[!czvw*A˥;HF #hxAY,~_(qAd1>q2NIk@՟d0bRfQJ+T'&r3}g+\ j|[sܩNRUW:JءZe 0As XS Mo:-OֱNTI]*ݺ:VƼGQS)ֽ%qEö:-d涗8B]i<$ڹXb q *^D|܊-؎cjmɀQ_ѣ-\&ÑM[1%1؊m];{Q?c(w=VAawެ:'|T/i3dNd=?^+ap|xe")sh쵢JNM@-pM_8}TetJ^zRw%lKFQqg 75й~b[|5nVbW8>hK`'rGzhe2Pmqm ni' ;M?9ȺDJ*qRQ7okoz{79*l][Q4]*OfxxC{gcQ87x ʫnR{" Qb$3#HGs:`={b,Vm A;]<9O<=!gp%,a ? .'ՓFIDo+;L&a'CaUcйޡio9 %9,8^a 'RpQϒ~S?ӷGU, ꈻ] &f@bQH\`S\#yx~pQ>H1{:m#7.ၛYH>e깟Pp:MjCQtϋsga4|6u--W4[_{z!`݂xpM‘ґݺìI[{ĝ'K\-؞4ZZ4m׃ݢq\ne]-i[` ڢk‚n:)%}U V|3,}w"!@U\J4\ҽ ;Ph?)UijeW[&ݏw)> bU#,#TY7uDoQxSpAL􏾢0J @4(n:JܥS!XKp7e;>}ŏQwhe$n-il@ qHpZ{=M]M 18{.l p/ @YA  }NF}ul}]`Q3҂:EϵʹjT *W`%˾nO=[=pf+-S拤Ogu]rv͵,-GYWrK VsS(bʾa]fƁ5~tp+` Fm6| $pvXE9ܥG;hVzS 2Pv(X:_ߍO;_ேi7@]%ђE̸&bRܔ($ 2_ܐ8xҀˠ;JAr'TW)9ر?=Z%,! KXPXC@}cJL%VyޓItH\ ( 6 $}dS(P/%I$ q MU?^h:uIde˕if';8T=@>)xE_4,MrñD2ezbT B\W\Q筿P:xD6NyDba: 7 q|)uMn_s ϟ'ǧF?6ӑxpWLhMk\^?,yiRiɪ[_+M`Ѵ`Ѵ7@w)Ų.3[l*K#I[P wj烷߲x ?U~,[eJ;I?+Vnܠ$wTWWB^x|_ 6TW+O,8Kl'0/蔫 xMDeڰ_[tO3"U %M j||1# $rk?@4}($R X%tET``2Ӏ(l;oTt;P2ǠZJx5䑘O[ޢDC( :]H1]zsjVe"\W]RƢ⋋.08?.={g߻* : 2=}}c6Y 2]'TI;""`iO1gx@8hP^z5k3?>my^yi.E*TJV.&$Y;c)P zc ܹ-~~3;c#gx&B檟 H@**l[đ Te9?Y>POi GI>fӹ3G4՞oG,uhY 2@$A1:>}!{,&ogG\Q%-XW{?pԜQ6=3'|bۺ}#9dy6w_}IZR,}|ѱzO72gi*Jk7z?bl[%@;3PȊ[H- k;70-|t]t}kgkڵ 08pW~2j'a&h iܤrk9d/  ?fc~aDvi +ER, &)4TC"wx;KLF?kr݇ Tg?)@1P f!& s|c4]gX?iJ?Kq`&Ub1 2}9>8nû)?9>@Hٰ_bN&65Q F1 V0d}H')\kG))\p\7777 ҁ3e`MЋ"iTAA+0`#820'Hf?0((r ӡ1i8Qn@p@8aL|؀9>mGBP]JYξSLV]6wEhuW &`h>p[l 0NIB8Ǖ.#3矰GnR$Fx~} ke$ 8Z?Ml@;&%-/v7aZb!-Oo/H<B]+mktawn=Y&@'_=}Dڵ0csrA7^^}Im"ԠA[(cKW~P@PFEVS'4?~QouGhW=W_ cRv9?e-.p 6hྪ4s&=8)ze]T SѲ4} R ?`<xbuS{al鳸)dIHLdH E* -{M5ֽD}gI3ľ-mv|җҏЇзLGeIz3k{&d? I3׌uK>Lַo? 
q2 w_ӴLD57ODeYS (dj,*RpTÒMC,TsE]r˄)8p-At}%PYO \2G.Xة8y\   Lw1qgdJEo)G;E㚸I_@@\9&'1})đ&Hcl*o@܁?Bc?k?C?LO,L+`-}.&97[0qr"g=?#D ?F;\77pܶ3t$o(Kv'mɠ h@&:+ .:'I'%ʹ} 䥿e ' #A;ER#_R縼0`X'i ;9 0'g{/B&q$C\k|߄kkri?iHhq_)^!:\+,'7نLZ؟gN3y4c͙i&@-Y[c]~/Ala [sN=%4RJ˅Rŋ < 0PI-9z8a>֡Cww /=hKxkg}6X@XGcù2/˓s,,7ZV[ބZH+m =#*Xf ߔ>j~a?;d0L_ŽIuw@{-֬'+wG9FVmb]Iv|TB v/k_Y_|֗J[ T46u|\`mh`7]O $($MဗsLS4f-@+OvP2Us) _ %JVmst}(cl_ZnZxv?+wIy2NMbgy[f񶔼C"صMdRx` X?El f5{6/8-kc/gg'&bLJ;X7%6er>O=tNLƵ`KRC"{wmH#RC حsXt֭kAznnnnn;MZ fuUXX 1z 'pAPH|N``< Ӷh7 %+$U`.n ^mXj A{&> 9`ڸv{mkP9k@a9$אc&Ο m,T:0HN6気K8瑵}sywywm?LP+<J1zg6ъ=abf ;R>;{E{k%F2gp|_򡈀UkK 7o(.0j G>/?^{MR HV Xt 4~9OCE2 \ rN|2tNX~lenE*Dϟ4L:-2X>{>9&kR}gvKRB_Jےm,r}7f0X#FΝ~VON&rz><OmWp7_ھK:+%0ǮCлZ0f0]ڹ}GMjlW|^lpOۍ{yۋBsN۷a~ԑ%C~\on@C-j֮NR Z{n5v1; 0riXtR.`6˗s gt}n\7,!Hd~؄~}''?ƌⅴ-v1M$,w^L¶eJvR_Xc&8u:OLr*c\77pvwwcgyMY}GXNꀗ94_/:PTP0_>+m{1i*?Pjpqdž7 7\~YV,㏅w>{0^s4S73eo+ 0=f*!~{IJtL@_3cjN;&iŀ0Mf/=?vIxmZ̀_d-3]AEb>O.npfgۑ*eJB d>Cie5ܨ ]{׍'/=~$F8LEx/C2eÊ+A#=^UtXBZh)@ē+W0x)JժU*UXqH@^QRZZe-Q\W_5^, Fօv%Is=wf޼y)R$c~O;֪U'p;;SCˌ xEqKK~[Rhg5t⒂Dh>C_լY3:c CN@lpWV)&e†̪*u9_|f_/Vs'wU]vWOlp\QJ(SIr^plɒb׫|RՅ:t`؀ _ [XLؤ6b+ 5SWt[Ms1wl7sZ:CЫG U\>pɍUf(Dp!¾"/ Є` Va?=N?ҿ|'>Ҡm۞ӵks/r?s Nzch¸% ]ݞ={m \sM ~ꩧl{X͛77^^]#*W1IO ԊQ|Rhyv]h~3I L`{hܸ:tYg\ussaIh̑-q@_ \ QR0sԩSԺuH:gġ`w`۾ƺQ`i, 4U/93ӊ=%zKU f,bk}.(=yC;sE}Wt^;p%a$ .ʨyƴEp%Mw>v ei:OtlѳD+v^"O0˖.gՋMXBg:Ϊ[A%?MM7%@6ؾi~l;w0ma/—c>2зwgt?_h}f'cThUZ~M;`pZwc30B`'= `+A)՗IclٲG¬``Z")s6S&#`(.UTC3 8.O>ÚQRRT ϢZ-"`%LXb$\aB)&ϟo$b lƀ:H%n:h[n{H5 L*cвw޶ #1[5tb\wy, Mj2ູ9?͛gƌc ֈn5~E:t0 ,mzgۄH7t=R@jZԜ9V1.K.2 g0mqmȫ( ]Q*cԶI {Z-ZT`얥g(@/Rrqݺ\m(£ $Q=W^60vCRٮwa.oo`bĮ]Ѯ IQD/v)&]pnົH1|[R_/ ؖ hLB?~!ꪫL@`m$oA1m UO]kx|Tz :蠃4,_^ GA1~aK(XMLh:L Z~@ ކ>hCe<5\|ycnLg702@J "Bg0l%.'NLاV (&`KVd+]}c=IZ€_-b7b6132XF0SIaIXbp?V9GMzeD/\77p5F XqnTm&g `nlU@[R&,{a}&HM6X ט@yݺuȑ#-wGm hl d6s瞩߽H{7{ \'m2I1Hq?A`Lf_t ^/Hb!(}+ flNݞ^{%N2dp'Xd W^ѻ͘7ۊwVlpW"QzX)us}Q8 llԨ:2L|$nNƄ69 GD2b&aIJ=?b lٲ#t#x#=,՗$[b=W1ѯll]b؃"'a Pf9Q&h?Nh=I,qW۪m)&@qo[J3gǐ"$%c%bm$kXRUXq` HN6@0t&E} xo/{U΀fߤ 6鷈9izjk@(pgkL\KƘnnູ9-ld ЦI fRMtH#j MMX͘cq Xl?,r]ը @65XWW፼ jb؞ӭoF] .`Xb/8 . 
xv_Fhw x9Cb[w%+%ZU@^@ǎP}R  (h($FO!/94;⇗8,,jI z ׊uw\w"RMfX%A={E 0q̤olU@Fث 0:F@pXbb11L`Vm2)m /ZR ))(ኃ@t<KwՕbziReI ؇eK%HX,@uŚH_>xn9ߨFňHJN7ۈ8d>p*v2 H&zĊ;c@DjD<H KLɾb`"[b#1Y5 ޳XH%k5mלqLYd=1;1nnnnF0h*&ǑF֑ DHkcB` !a0!0V\,X\@`mHCz 50V %+QY2iҤ|6S\B,{ }HwVXkԖ5~ƌtͩXZ@[شZ;_|uS޿5غFVZ @Uo @@w؋j)߇"H:kV1U,X`W>LO>6欃9{ܭ@ ,a1vN6uVu2(VsB@Ҋ LL5jbReSŊ_c8Ĭ)L/u*ב8F"Đ?yHm^>}@t8 6~첌]aູ*Wp HV:)LV][һ(JH CT@1Y,1K\Hc&X`3\DG]\YAqe1*[Ab ,^Q~ZڻFAsz2@Msd*4~1 (mKL*7W B`Yjr$@DgF$J4`WPZ)pu  *3А nФI7- +f@y.ɳ eOL=b$`PF)\2З] ˤ8S2*u.&=(֤M#=SJbÀl~` 1X0+j6W bQ9F1(&99`&۲:1L_&ߓe-( :K%}~EhA]S%UY :תT'ĦKʹ#Cn'ǀg(A1*UA ?S[ ;L*u$e0( .'7e`=('iĒ'fG=(Gk0rDU;+qcD2;v,)oT`BpJ"i A \ot,Za?3^yϲ-پϪVw!LYD}{8ܽt0aw<;_?NswnÚ9agoH8`;xI"8gB|J nIq`?|e@D|0{qx⺊shhv@V=Ń } H!@f-ubЧh,tՎ:L=LDR]ZXXMDjg~%'Ck-bBڧnV"'uf"`c9;9/1fS/킴-5pussssn ^S;W+iR0JZ++p?=f0E)sSM=Qin EtҲ5Cgql·ZCL {*)jomZ^BmeԶ2 ʭa#@ ,#61d;zhQӦU#eڐ*@|wp3^m } ?2/cO2o'Ѱ7m\s<gankXL;d~O2$ʨp {ݤ8&{*՞bvW!afH Cgl*FE 1\Y@9f QNРnpwU+WW/=˝]"ECcBkI!\ ŀ(XP*Ł?hR.ղY~2LfALKز+t@ݠX$]qXbZb[b\SCac$K4a/DY Ez.!Xl,dbe;Cr^,KZƹqc~{j/\E|nnnnnԤujGI˴raA7JSWU*hmT5f(Ū̀._$ V*Ju$]t.bޫMFm35H/64o@(c dM+3GS@ٟ|ٹ\sPt1[yȩ Dڞ"pxHWuoV*kJKI׵zSߜcgwS隤kݷ{S5}瞻_=iR]+`qjotr+))i JHwc{+8{V zj3 G*\+@vJ)[+HYzk~^(i@=A|D러3ULL_u =E{+'# G+m3Ι]\M9.q>P~taNj5{9YMů"*@ +\K:4X96$BgJ[LI.̷my7hID(eb2i0mo9jZ,ՊcDxV؞ҧ}V-&mxK 0VAXiu)XY5-(XH௽&Dr}X&@8(cXK]?i [q3f ]Fm8} zk(;vl$b_777 @\X:97wJl}liU=E` 2OzjTb`sXz%8cV5k5ݤZRm7Ê~1 @_(E #ԑk |:L VhbAJ "f Fƍˬ 1;/j߾ho^zo sy#t =Am8Aف'3`Ch=4`%`"],U3gV;wr/3# {Q|ڵ3ãe˖9TQ%7",6Up˖--e¾XU013N?oC kÜ L6᾽R#/p!sK ڵeCS`M̬Yɘ,K 2@re9k1o#Bq„.$"40v)v2sيb>!@2{!ปd ]/@*Ch! 2rdB8Aցzȴi,a4k1UV3ڶmAi|ZY|Vn1Ck9Lr8ۀF܄| Ē%,ێܶŤ XsFFܶʘAGꫯ,S\^'`,Tf rAe+GZĬvprvC4_uSI]tɷt|w]6kM1K_RpdA2sE'X`r,XX 'pI]^u\ .7$;*L2Pwssssssssss۴]jU$҅ـ*X\w[ AB]d46m,ko,fṉ"`rAYw k6,ȡi`d[>)Ċͧ?/ܶLG;h ȣ<#Ǝ_dlq?`0+kW_s^d ɰbƒƌ) ?\AO4D@j:.M%f^_ رm VGkmGn{Ҟ>l H,IāI6={uh:f+VbZJI="Rf+Z3{3h> 3O'6 XwEz x/r PS :Ԥ( ( 9Xh|%sNXZqÆ XcŊv x0zNsVB'Y86ΑҳX@b ZDT.i޼:ϳP-Rl--R̲;>Xcr.Bߪc@E:qE ˟&^oS }ci㏢msX2[k d`3F"p^&G0m6['$$`!9@c `9C0Wu'H5VG,W1cQ Іb$"Ck5fX,mi޼yV m7i+J2a"HѢER0[_$ MM}L|ﲾfΑCCC!"Ё%`9tnnnnnnnnnnnn̙-ZȪ¶Ua$`YXw0ْH.0(AP 60Ro ZnKME*~fa/Pr.v2ua*SxdNl&Q<1&o8El߭1timS$b@[cH@h$GY *&ZN4}va# afGr)&{!ň$җv"STe6V_|M:52e_`7777777777$`k#F0 7]ޚ.@0ZE0V]g˶ c+iZ!/l^bd#rjPj-N~ dZOs)^(q:;ΝkLW2wA otwa ~IpS&Nhd@c'0hH @v&ST0IR(r]ov;27L\+h!($"`3$XcKmgp|%bE!'p X͈R@@υZgEaR&>-ںHƺ>dbh7gfs,e[2om+9f[oe,z݀dj ڭmP׈|,ni;$f,򞵝 eφvsssssssssssssssss˥<VڵkQI1}HME܃4kq;P=--CI YI^YF+Y%6o[ E6_XH%eh>Xw9 zDTRع 2Y|)ШŠ#_} 6[.]A#bb8z#u%.W}][/~m{#ɔw+9%+Ykd$a& 8 uYMޫ~b>$Il$ 1 8) \\+viy-o!7:Դi F_TһZoK/!"7777777777777777777\f: b&۔b7cSYWÊ؎ԌBZxi.k pT_ 8XN.LX{b.PqlŔ *0\aL2 #nv<>qv\C m1ku#?#_|AuEGkd[. 
]uUVpPbUs=ӀMhܸ1@kL}XzuL]1yߺ+Mfo lO  XH.4*QWPfg}\ʕ+/b9mR=PcnA6CϒGUm*P_k?~|FztPb>}=v ]څ`5nnnnnnnnnnnnnnnnnnnIرcMN W)#Дe}ifv8˔)tm ,Z-xeH"bXQGefpѺEW|'lx/=Y2^-Ñ9@nAWY¼4BT{8Jk,aw[I&s&ԍ$ 7[n Ĭt~ywhޯ"cwyIkJwۏGŢ60bw9S)2peI|W}ePc1?x3XWk Eb1إ^j`[:B%\2iX 1t++Vѱ#ؾo׮]4uT)r=*?zꩧGfͲ 涕͋>Ө[nK/M<W- +*%K~8X$Yw ZΝ[d "iF{W԰aCb&R]`VP!G]kѢœÄ {aŘ6,XdTlm\4nڵk}PWm)&هXl[~6B>cv {9>CbIູ`r…j&իWԥK_ PQD}'DYg٢W_5u?cnnnnnnnnn#M7|@,v W3룏> I0޽{4rHéH 77e$߷=MR@e )'P$ ,$N>ۿJ\}'H6O<#N:X^P 9)ȬMg=In$ [9z5Fa+8Hw0Cr (v뤔DC/;斛mժU֙|gF>}},L>V:@,`7etSڷooHK4X3+4dcd=/XYgr^Iy6lm (aT)bJ%5Vfm{b3l0v\Fb\˻iY_R=`u 5)4u}n lXK"}>!O9 믿,&`Z$m۶mD*}hCF˖-c*V@n:s1ube]u5ׯ_tw]_itч~5 pv{}W;777777777jĕgώFa1c=faa>"ůO>Ū*c鶊:bƪWXрÇ[̫Fbc.q*QjLM]T+x2777aɹO<f+Jσ^xB : nوطY~h.Lz[Xa4=ԶV;ϛoviV/334C$=[bE{cm5Xk{vs-3NꐭsMN,Hefzz(P}A4 bԔeddTPǷ7 3=UbLG`#(TDTѱhY" h`ë[%F+S-}1C( àALS i77777777]`"E`aK%"_wu~&S_Z*Yd ~FTݻwԱcG+CY\HaK5X8[!; F̬4o!(,YLWPirx{ Ѽ$yyA~lٲC }Gb6Ȓ3Zxnnn; .-ψkYa0F)@UϗLLAhZW뮳 \x %&ؖyQ뾱NY<71ͺz61f.G-6ml[МKM:*&} ߢsG)J?7=)A$˩XdI%=P7 uX~ΰ:몂c]1/Օik .0^:SRjH\i.\t4`a~u^Ɛ4f=!L0όvssssssssq | VaL!Y[3S 0- UlM#}>36&}QtFh6M:V@tڊW X Ű;'T ؃[LL5NbiRV)`6]@lC9lWpERɄЫ= 1Y'| a<DZ{tPг6gFL0yc!#. W23t]oـ0X&5u/ĂqA.ŲdZf͚EbZ2y,-Ϸt3%6f\6Lv`DnnnD7<f_GCeG4AM7N }:i[,UT,ƈ;VX-U:IԹ>h aP8I=jҥΏ(u鉙A8+ q,@9",[ze9 \=oF!I9@ G0$!Yuy7O5&Ql$yHz 1\I֚dڳ/&UeyJƉQ'E״txF܂6J2.3n^kdDk8vs%JT|UdzX7w lfѢEV dGD ŘG (!0־kVLAv1uEiAu`Kamfz(^if֨\ݯ@gXvLH0 u"l DZ 5Kt-6E miӦv%߇ꉤͲ!!3ڨ#O+F عXƖǀ uFw^zl pKXUÙ>o: :rWL+3xR_,$;dwm1˗/[wCxyϫĊzW1z 4i@|Šo2Ŕ=&@FR[Q|V b*=,nݺ=*S-ō5/u|?Λ3l_J50ԉ`, Unt. uW8pb4JχA\zFϚZ`#sεd6&Jj_]3=%eqB[=c>n)k# K0}Lil6_o-=}'N&zWϟ% y@<7yV[I=z˅] THz^IwzmKiI̷4^X6mnnFmR4Xq]-lv3c, 4͞}RI&EӧOcuɂȔn,頠ԀPM:D: h0X47x I߿SSNj׈:k]:9Z6Sp(VuY(U-*uVH4{ՎNo'G' :kPQ`p1; _0#N ܡWL5-)h0[c LVg Ͼ})y(3}xz۫cŶFbƸ !5`UhON*(3Y3\Ryr*j"" PuPٞm'Ol*r=Z}>tsK8hx~R9#رcm}Dm{X?#@JEp 5[c:#qUgUViYajf ˋ( --V,dSGw@6b+MZQ W@M zqvن<ѵpj {i Gwut-kj{fJ,V4ufs9M3iii &PG^+?wi*UҪժVIW^=ZiUVȣ*WV};9jUZ-M}[UyjV?R^E*S^75[4N;S40Cc4Zv E#o8>{ x}E zҒ-eƈAwĄRrMf\~eJ/=n@"ooҸQ6W\ޱKNkudSNlZkwmzvSukQBC+(*+yhRe+)-_dKZRrGpY)zJiGUl7,KyG]~٥UsdD}00`U9u;FC=9sZ' t'j>UǛ*ތ,&EЂ%CB`"H'([BGpVϸ:ML!AYF"=),emYGޮer |%92 f;~sl"p3m}hYa$.Vu{j%%R)tf tfԙ1@Lvfm#mfYV4[|z[]nZ[Gש((ZhWPZ;:xЁ/o?y.RXSP19xMfVz1o =QߠI8 fzc#Q!VB1@Yĉsob=s}Oe[>D8tٴP/Sy CBj5Bu&zz-_W1ըj5ΟZGo?\mp]զʕnJ[ rsssssw$=ds1kN&&m{#h0p#  |<)9݄4Ԅ[~7WA]u #T+kӐwiҴa/쨉X ط0 ]&Cƣ]lhki°h~|.]K D &쩮fk`J\,_7npC n5K$a ` pp`X0bIWb+W46 Жm4 :nDp897W Nm:h7`31NQYn̠shrnY-U" L0npS9֦WP6+TP\A cyuUR^T:ۋ _`L91~A i !nvAj4Aގ;Ojr#%vr4Ԋ˦)'r:ȩk?ٯηtlv~)? +L ?L>6=3}pFÏ |?}?N˨rdpͷt=~خg>d]N>0eڌ1]G|aaܕUaaŸEkô%ÌB,if)ׅI&.Xc37=dl onкmpjC P-zppPa}l]1TCBuqO g}a,}Nχ o|82 tbۀ,e$Q}I0oRo 2ו52ǵFG)>ٷ a$ƐQEAʶ?@]8`M #c5a`Idd.ǡ{M K^fnn;`rs LS! p.`̀[Lռچ" e{\X$>YA7g=ݤ N-f:onLM X˱hn<U2s p sCaWe^f^!1׀Z^"ch&E PR7]f::Nِ΄7E_[H~qw@uaO4ќ."M8E&ЋIu ."X#Ux]G$Yzk.IܮpCmji֬Ҧ`椽e_#mAۯ =7|1iq5Gk*TEֹ9ީKgФQN;TKD5PԴIv0t=َF, 16* uB'W1 i bYԌ}x*|uَ/N 3M_l^rdwqjYNVd]Xm}lC{Әe hol7j@܀FլE{nD94y{qnIKF0[n_e_UՁKtNlg`.ж(.4zi8?pэؘX ݃F iFu}mDѷt~$vGN@uTj I7ܠPwX\u UGs\НlH*:XS@Xi,֭U@d}RH@o\߽Sx_Uo'VhlH'w](pwr ̱D<"H7 KgڽZ8F?(--]+*=}{*}Դxp0+ ">ht2ȆkXV1|CT6m@&x&M*Nr3xc8aźƈtw^ʙGVZFXr;3=;&=cl-o323sX 01a,^ /K6! 
08bwd#fW8 ȳz9㚜*K*%Cy kM6!f50+bhC6%Lf85 !BBrƭ[5Mnn2bLh|#s1anNu~@:|O7@bx\ *|K,f&Uq'wغWV[jץX\tBE9K.!,e0[E7L_uNeWBLq7s^_ @^/ r7^j6'9L39 #Rl UM'B_p T4Shu"݁`GѻaIsCXث@Ҷ9[@*CSЎ8;Nqcҝ.vp&؛]p#I6*>\_֢ܽEӴUQ/5I ˜Ih p{ժUQR3vs:fe\L,#m?QDדA37p\]Ā%==Ws+S&Rhtp),)+V),&l"_g.v* ^~sx<\{ӝj=$]@UBbll KKN8-rck%]~?WS wj6v:v^ູ9)3̖* +Vz'0eHsY>P,]:0A1AcNA Ank,%P,Z(j 믿^"Z$h&Rm-Q7KǰpeyEa- [Xt^2aֈ׮]Y(ƌ~uV2{EӘ-}@ZMJ x90Ktoa,I8nX/;F4CP)!Bnv;7Bm{VM@TYޏ'?ѹ|ݵM}6HmcӒ0facH C8ׂ'E<]" D*<pugk<@qj#!Z&yM̴x0XɒMM \Xv_SJ@[Y^7:!T:P2!B*ܵf(vaV 邓k /pwc0, dlY# :co휱vO][۬:nnn;` Wn Ifi#a m+i'l5,JiH1 VĤ @U^[ l h dېH.&arXlmFRFHq{Qòղa*nU= 0 Yn=(J׊A[3XD/}  ikYaFe*1=#@M08@l5BU7Mwy,<\;"K^y)tQFBŊM7Jwu?|a b.6zg|'^t9LtpIٖ-ͱ'L_bgqK$"/@/U44v\wpi4g T2hIsvK6J<Od*ux զMBaZ7Fl84w51Tkzý]{KUP^PJZ?Kǹ˶y&L $a&!%g].USiCLVH-]kD5d{`E*7nw(~y%γ`:%(A͛Q`nt.l.յeC3+1U2FKIF.x .XH&HΜ9 K@3!4{ʀ mu_ޥ^J%-)<)6|d˰A(vj >#PAM >g@p;ﺫeusnjrk&OtϚaem\܆u']^Za[+ Aop0ĸe<XK?5w9v9ɤbG ;oDEt\DA_1Z(.-`ϻa!d27ksGydJ9]L%b'5 ^Kc:۬ _u#[6̕mٵ12Q˶M9ׁd)La Sp0)D)]%X#xo%C*TUsv28-ŤKu?s 5^uxEM{{ =EM"жL^ wg{G͝~CLb$)-V 4 xFsPVO+K&\Kpi޽I`MQ.aG5B4-= /?hR`ȳ>Ѥ&cBjYLLQc `d$i+^+XMH;SG7@mR5]y,7}IW?qeKw+Wr_4oF5t[h~C֯5Լޟ߃+ǫ{c`opUWʕ+lewH ,obQHکaX~!@o> O1NE+2nߣD٤ͦu9PVE}5ZI}ykG%S +_宓{s'}vQAy>KkRn?B_ے Ɇ7a SB0)LǕ5%Wˬ aMZb/pA:.&#r__q:Oo Jar']чQ R+/_'u,!)bC!)-8&D 299 >&c,;[|h) `{XΥ2^U lvΕ&]2XЏk`<ʖ/W6~!'u]^mS͗/y 9:A'=8 5/ Ol5om?^X: _&qv> x1<${0C OJ9r(Hۍer}L׺lA ϻ-/4jw f5K! "^݋qV! sC{$w+cQÂַѨ`e\`6YCQۦMl{>^L4fa^F ebܡg5j_%u[*5QD-]k;\b09"שrN]rmg:M;_u}NrCwcgq3o7kll6 5omd0)L! n PVNLnV@'&74(R8el;V 1&a^AylM=z,Y\z-o,7\uчUwnxn7_ƹ E݃5ػ/ƷwO 07Ǚb<ØR@ _=F˂іS߀bŊ>G4a3W#Zd8e-m49g1;XĖKht[ ?d2MerĶ@Yjʞ ǩ_pL$KHnK7SU_Yf-x߬e V"k}h4|@6! S)La:%+~mO&5@)-g+Xk 76c+ I|2|X()G[jݟ^A˖E>t9U[ SN}p. 3 $Z7ak]2‚BM w,d`v ~ꩻrZO (V@Q(eQN*Xj?̞i6 ۶kS'T_4uݷ2\u;7jrw@}ٲ{LRsw:upۻ}Eu+{+-a ]vXۉ 5+;"̋JNn vN&^R;,"1Ȫଶx\607HUc ~ZeS^eӥKW[0Z۴0MJW36; dbSv eϳ[;_:Ul5XShR\^I庂 8yirf6~aA]J2]#} bC0)L! ӟ7!t!d13ND?$ ɗ! |ـ'&PP&H^k5w~y_v>QVuJߍ;L*}g,]RZ!}r$E+HAPdc`/JYa`:Ν햎&)h7WָQ]{>l.S fϐV{e\dGvɛ'P~Z֨jʍ-5­4mj[6M&P[Bޝ! spPjmз&niiv!zl}/sJ;٤IR8%K\|Pͳ|.9sf['h'z 3h\l"*UhpΫ5 =csW:+܊9a S0i;01v)ODMv+揽?ybbD0 c…cGN&^2iRo@X xN&ler xʆTg?+ P]PS 8L\9`8yO @Hw {XTб{aQA 0ƒ2CuXIAR;{<|N})yv}>(';LyZ0H\d[oրWIB@Kؠg|MZfƍ׶k[GO>68# vM7W_M ڲfqO7+mXsףi 6c S:(;fejKXfl()^ȟ0.C??gIq=}r_hAhiCQK ,^.sY"#2 ʷ; 6P\О\E-P_W\q\|e9_ÇRGADz+Xv+Y0@`fd/袨"TH@Yhsϝ&;Y,#s~A?zxI`ӛԻLx2f[~GmoDG {q Uԣ qo46M} ,Dž7! SYj19a͉5CTۋ-ol[t=3n _ P2cBbŤ?`?z=bRd6)808&@?'l(PԒxeUFL~w,e˵.0>Zhay_?&K$!)S.ORW2yc=dɱL(Qm(;n %Q.&\-Y6Д93{e@*eid_|zE==KPp,?,k6[9/6 ol\r7P+eWD˻ iG wwvjA԰[HaWॗS s]w:|/4jc%2elɚ+gNS~PW\ǹrŊW^{r[ݮCܞ7H&G:۶w۲{7O۲؜S2~~=>-eH)gܭ{VZ}DeZD?Q$py~99) -3[e(Siӳ*{ckijgiy+zLjURn2}V D>)Ug3trS +u٠,>WRm(h}/QRv7ecpssx@RZI_$[^ XJ`/2ozHT֭-K/WJ;ϥ60g\o:oIj'pn8`|0C 57_^x5~]={wqO^Sp0Ie5xQpB1*x1y;G R6$W&z-,Yr(]LCAsMYnt9=s{s*~7lO/6n_lT<߻}lv{hx|-{VD/n <ϭ~~4Q^egrpn y%E#!k#~qCObmönn,-JY 8pl_mզp~M9C9s4P(c#Z{]no v/\YضC3I,k,j)-B! n I=ZHoo/5 v2̎@9)_F9+ga:,Z ~:] v:8ͽ[ˮR~'f|FH߼+ pJV&qw|`qe`e y򭥟|X%4Vz:Nexk?<LG7zJ7lbfus1`d_?{.ݬn脅n:f*7GbK3W/f9 yΟl{Y@k"Ț}bWtcgqn6 M:ޟleʹ|I! SGH5+٩?FN6N3:ҸiopÔ\"H.ۑĸx+E-`2K1ʡ]h!n`,-+E8 ))L{6OR'TёpJϼKK"_G E*kT ]]v{QI*+6Rf&WRAEy}긶~(+ TޭHLR#q={,p^jf#¤+YM#ޫ/v?;_*ف(Rx٤Qi:]jU-7ͫWsuڵq@b] m*] ,ѵ|Rr-}С)jh,NwD޽bԽ@^lYPՎA`8b0o{VB@ӏv/Mg (eRt]F=].3vo+IQWNת}C{@g7zn҉n9nօvHMC;hX/M[qMZ5]æ56-[;f,*x4Wq5WA7uΨc|6mhWaWQUHjf=5 SrVM5(Xi4 5oSOejըL`rlפe$\#xN=~*(|QhF,2 CYpaj"V3ҷymjV TT7#ۭP={VmW? ] C 7*ZkyfY=@n' YX" .EL,v Ns TBgUoW眫.~j|rufQ?r+S. 
DdB!"EB2jWIbw |uEՀ>z̗3|Gԓl[:4/coYE7_u5s7rewe*wj4quch7mSs?nFv~cYzJ쵂~ոg]㓂S}oWU=콴B\q3|r]S3VF!`kZ)>`"zo'.JP7ܳ{1lvnOFN[rs+Akcޯll> C0鯜үv85Nc5wЂtpÔdbQy<"8vYpD][/Of5&T}N 9b']MjdRE|L$}fN4 giQIL>gEe$dRk$| "j۶SQ3L"Q{Ftܗ5bu/j?Q2)R.%J\DT+4ꟑkLNCr7^%o}~uT ʫʊXx:~p G}z̼~'3(@܇/>z#<7Y{PoV#Ο(vINmX^]ӵY`k ]ecz: Z_K2ֵfۭIcS p 6mAY׽i)e N797@m恻q+Ϲg&} :vo7>+_ʽ.㮻ZK4yuYtwy{K䕺M{?20֮2Oe 6kUǵص5kV[kf.kj_L,I3Z:vnʩ*AUi=/^}ig񮹞` |}yZ3e= B qZI>F gϔ]eB qڳZ.pi{>D^@P %2@5̢RtT?pj iP6>Dm"BoՂ;+W8"ZL;=I"_ &椰u:-NjW\N]WHN~rѧ7m#ϊfxWv-糐1UP!'(c[`.6O7eyiyvEg:.tȔ@C`+jOn^u\&/>yޠ_eשlh`"GL]v x-[fSWQ (]`aXT7bfO[whe}Mp2?=+q)sym=~gu{N:ްb+ , Z\~>(a SDB\ļ`+,w-G0v*D+Yf MϸWc?v2.eLw(8q'0,p #Mܗ_ӟ1.Oba^Ev`)bMȌy7`#X&y.&&ePO13Cj%\>qO<(.줒NIn.nB5O1AfB*xEsyJNL4}L6ޤ#9LR:|긟^KDN]Mtm) ńhImz9 &Dzm1Lj5,*3G3jP?P=!>+Qd!,q{=dQ{:@\(OP5;)I4Wx. #JGD3M@ z?0Bu> ƚEEKn9W{\y|;Rg[::9u"yN=ֽתފ뻾Qy9Ԙ~eFe@@ p 7h:Z4zءSOpi*nވapMpW,u{tcϚv(8׊ۃX _la c''gn+q+d`y۰.{1wm1(-?=[e[b0tܗ;`{ m#2^{`օi+ܬɑݟ3rO<%3Bӆ lRT%;Gg#e/y V{lgd]I( V(eϋW<}N@R# 0,2`:sTÿ럢8MY~˛8cpW^%Zf T\M~Am:pA_=,D1Py9mܡjQ5kfYJmdIߙB"'%WƌqS|k3URRJ * q ͣ~$g,lȥ64کvy)`rkGeW0h ോ8Y0̀~KEN-P>>ݮ> p(Gj[~çiK$Obvws\'j .2YQr>+Q@j@,plLZɮV._r+צ׮`.rAJ  -)׉@گb@ ݢs?;Q/˵P#fҔܧc͉navmT^K>Uhb{D{Ի}B0řgøJDd بpQQb-dx\X0/^b̈= ֲ93kG,s+ _eN B8©7/sme-P;REq<) 1eq̟s%Dy5j>mb{14sj lx6Y˳klEԯ_?7A' dX;;)NcA=yH^_ $МGPĔa khle+ &H5iYa%(UN5;da,Z,8V8ܣ=EZs 1WeAd 4}̫yD CV:K`r* cep24Xi"}3[x'@1$1 X^Y4\,xsr_4d.eW}Ԣ{I*ܔ:&MGJ#WU:XSvοUT\' 1xcS/qs}k?[H51" j'{qe˸۵5/O6*]Z kӪ.FwX=o"%E=;n/]nM'k^qwuԈ\ҧdzONp.)3{"h=ſ ĉݮN4#mA\Ӏ{tw!8,4؅ȶy)f:}029eW\qŝ=~@m-RƝWkŀA6}mdVx7oW>)6@*m?;hS.mh/:. s*WvG?8h}uPGr,<4O)ǖ~O {yv-]eWw|v4ZA[k]p-?,jL]gyWJcf2P5j  ne$XhS\aaOv4/ax $'~|γ7L!}w= 'QC-Ae;os F<2w&46H5 ="ؙI1WvYh$5`ŀ/ * ^ >W%q{/g3L (+夼X&@ILi|:=dVfFqlecྜ7~P'')y+QW=G)< <+Ѐ{Խ0uKF6f2oNaeߘ o:wJ`M p)0ZZ\wf3n>͗zgU''/@ܶwe`y)j^@ѻ/`PN7 ֫k-}}gw>fH/YiRy.2_,x3+++,`2͓j\n[xIj#IpmJ ,eGYD=='v'>y Wc%^o/+A2Awm:K0B KQLsjՂYW,Wb0 븃@\mr h#l0ڪnPWi@:^EkV1hU^bdJ2e`"v񑥍U[{hcG>#d~v/voz0nm9*mW]fH&pwʪ,`]@\ 0m\ks_Ѣ_Ymzs:nuC% xMi6YH*l_?<EcfrZJ9L%۲K:(yUQO[\%Į׼)j;"(:es0-7MZ=<3jqJ0)L<&ϾRе(TsnoՇ WZv̵8+;cYҔ~Jcbz"jj: ٚa,L_K!g-"8XBc{:1 uफ़>|^"I[t**> !^~] Sݖ*-ls6p4>Hc%@_A,ڑV2 FP9] fJA梂E8Ocu3a@=8c)cM$bѸXMqa!z ^F1*iB?# ƊOx9;" l8ǜ;8|p:h#J?y@ `07\VhؙIŢW*""&9c|!Y oZDm?u GF%IWOcrX \>c®aU * U1+<zVu,^3VUAgJ9E%+~*LقF̤g&I\Յ\N2e2usU [S|Drq,P?7?mI=W^aq|aC_.6 L<>M}۳YS2gnrl2{nju*~5ݿsEw^ l͢`xz+T}{ڶScWf9EO=;<_CU7b^}Uv-YD}?QcBx+9+6!Lh v cg`LJFߜZ{vH|ʋ_ 4%UR)Hmu`LDC&,0Њv` nSCu,t'[@ C!ۦ S6h=U[CJZ X3M9@7ou(C0)L61a#ހ/hn<ĕiin8 |(cES(/sVX#XWOc*n-#Т w8qB0$L -j8QqλLZq V>kk\EUzG3Xr h:}5.#9-N.ӮF鲱& 1&jRI#F f;_s>^gs 0Pjc[sGu#^e(]?*_ؐƽk>z`ι?ʟ3nWvlj+[ /Wvݬ`6ؾX  a6&._ k4親y)&2 B)&RFXa?Lhjb9`2'`!P % :L֙]kBIr!eM@seFɫr2wlfG,I)SU Ϣ2{Qr/&juM9ʹ}Pny?;ept2;g1 &i0=Iٓp9Kf0l;ؽ:5o>iQةY*E} ˱@DekgB쬅pd EKiMZV\"\矶@mߐԺ@ܸj;ʫp~{T/{ν {.__~,2 8KղS: YJ-/CmzGBj .^1hc5'^W>3lbLa9G[3Rd }Ĥ dmg(\iE<%8p7z&sYSz<{-Z=ҧxжv/pk{v;2Mbal_]-PXi4JVv^)7 dv7:A5,xʢ č*\Z3@jRB @F*inz7La SN}B\ ki<3UsxR [)evWv` KjǕ@P#YBq0N..8u^wy7K!,m9Hssmߣ:Z﫼`ǿEp n9 W N!݀Ƭ3\>A@a*мNϵqWPGpOB R=R.]R/KW#J1ʏࡠ7_ǭε`CenbGqA< ~'+V;0W0nC.m\P{T+TU>[nbI(evPϭlËAp) p*U9&Lw؆L^49;F )]M ju$d' Ol۠P2 grՒUǓ+&uXc'Hm$5)3>CELg~gdVNX2jDn<_Z0y 滹po0@,޸r~ī^v^_M  oخF٘`f;Q SnakШMO>8ͽ[vg6 ~M\g*ߺ]Av͙+v7La SNSbX9c\YLE냚 FuZ\gW'_܇vQ$v;d'fb/ikڎNlJԸ)`iVVau=vhx"Ү .$yqAf^u|&Tq3HU̪ŪbW9~'s+%] D;YY|;.B]/! Sp2IgM*i9SQz?U5PsL5P3VcIk':PG5A}g~K'yf(Rc>]4GgjhƳn{ VL^05V1A&ł hN<+&k>w:KҩwEz*Aٚ9A=HacX &^i<`{pl"A1϶VkbށPkeXeU=eij/бhLj O.aӌiDúw.&U* ܪVh&Z֭N \Ct6ޭZgu)Z:~tpP)'(yh+Z)xrf: *E+|:KVOP"2^ew,d6uoZ٪ktO<^|9{k?/ `({JY 7M@7r?6u=9rweTf.R`vK]n|\o3+dh۹8y ϐu~"Sq{GsHJ1d>e` 2K,haO8-xv PqX O3Xqܓ9x &GYW )y Rnp#FTM:|4yї {駟v9ƥsS6?glJR]#w'.Pn* @E=! 
SpᏂQDŽSjD0Rn@dXԧĉ /YI0U2QgʵXʱdnԧx"4̺yl`bxrk$eKRKtLiP snf{5.|Q)k,&1YęY:{DxN|_rtyk>;e(,p 6N€<,[ p=mS]sA `;kg 9^g:v:mh7u $7`T3W7La SNgb>nq5!pv+q*uy2WBʘ';J.X~ ]V Yg1 Tc̗u,Kk ;.8X d@' R׎`Lʄul <p}6cVWyO .S}D3M5ƺdG- f9TɄ'q RQEI)pZ.H0@<0*O^xcrY$ *v=v5n-狀xOc'Ǔpk&A.`@"\7AGZA|LH`@s`! kIwwXϾgTNrʤcn9+fU%Xb-VO6Nj^=KN.>"=vx.=,P :Dϱ P%k E&j}Q}4[??J;]*mj*Tj<͔} \d-A!+y=fKf鯼Ty_|),F_8lLabρ&`iL8I nD)(0u-=^QҚ ^ `_)넗~Ϧ器K1*2b%KM|yfx{_7\ԭ#p3S3MB+f3>l̀-:ejt~`p<9kQfp3mŕTה! Sӟcjd؁tZ 76p0`G(;r /+^>ob\֬Y%`221gy`ax X<2v6X $q k.lDRRnbG\oE]/I\[kHQ`[lEK46XX@2fAsw%s :v鹷lL +qXzʦ53nƂ=d8/^/rWa%r@_1n±<'u"\{j`zEy(P鳾J=1\) Q2wg,L! WLi=R~Mҧ{E&eh prC'@XoWgB5u-9:[6hB\=fT -S$F)yiLjgJv ~n ֒v,%B/b pU35y3_]ˀ˱>eX3?O܍ p NdM;vh/eZl_ pӗms2 7{bB`_\iܜPcm!!sb@2Է! Sӟ9,mQ`eFܫ+4g3og'n-~KqJvq!b"Cy[j<*b8\ 8#땫5^]!%yԻ 1QIIM#"b5jIk,11WB[@wc9=e mf]a~qz@c( l\cbYc(fq,zc?V@:E[bEE˟??[}2`;cul= hEխ# P}<7L!mw=r遍@\.Q&Wc>E^% )5 i}b a|[xVM.Q %7Q*svLj5݆ d]S?Yž+ =uOeOz@c&4ϸʕAϻ}U/}W}:W3K=i f媷D#Y9U/BYL|[>~Aڤ60K+=Hytm, ^<{윔l(\k}9>Φ pW똑dʟ_V, " }/U&L֭0)nx@MPh׹ygK3,FFF(O9s(8IU,^{ /^=4!@۪R=(UŎK.N?.+@ 7^cW_{ɽ/]c&2/6Ȧh>P8V}2 lwilcMɂ߮6)ے0m{3 MmP$ rQѲH6:0=~!~vP0oǓ XfHL DV?2.t\YXņBY ߞV;wTZ{^maSʺ'mQ2)9Z 720 X]P߶Ǡ4`\nFGеC j`͇-AK땷pw?Z( gܧS n0Ĝ-b%Kd!_| *m5Wsp `xeBglE U.q9Z;\GO?y/@[b)dϞ=BWjl QΏ=| zBMWXM 2BY qb.!_R73 蜨sk sOYorWH@& fhwИ٦N\& D8r7A~k{psY9*nfvXM7La SN"h635 O'^8- oyt`5-84^sNsSqӵ賯4y^Ae2N;}d cGxKN3f k;#pa9-{8 unFepM 溈ܩw1{BT?'(qkIqK@-Ob K ƽ~2 x&P`/Fs`ѸxS8p ۈ,,h|xwԛcuT6g)q8Ww?3;wC 0\ !-"46j9ϼtZ] z"1Q!FYYU1) n!3b krI0I L9_ef`*s5\Ϗv-קg)e5n H`- (LQ|Έ0=۝KRb3=Vl n3W]of *91vOvyFgmv;[ FVO5L\reWmZ*36͘:6 Ù %K00WI^.MMU?RgKBEg3]ѥRwiK[ ~t/ۮW b0D>(J PJ  Dc4mYG˾30k=ۗ[|7j_ǿGh*Va9>6w7&PB"^ٻlP-7ԭ<,$f Pu5iY[Vmϛ2N{?|܋-P3W8O. i@^†W_}uJVRJ.M{`9j|>/g9~Ae{ULK܈E4aa mv8P-~v @Yv-p,,bm#Үi옠][a$CϚkRֹ)>I\S> {: J`j,L bP Tߏ7vQ߮/.z``dX%@k + lbE( IDhٓԪ>6rYes#A@^!]R,#~9W1^dn6u{M%ۀ.;ƀ$\`|/8\6"hg kWiw{~$A?v! Sp2k& DlɧG8|4?j]3~t0lPaX=R42(B5IN2$m[l4} HCDɮ&\GKe/@@*ZD\&L)& dPy B\}=hB0;.໠b33fU u+@05>9wAQڽxkl x#[,M5kbݛ̹5'rNF#Lдg{tu979/Sp7XvL/?H dm/ O HI.FJ=/nL67~ːjwɥC>/M*ٳk2\e磎}5uk N~/[C'eF Ej;|4+ܺg]jP/ K[~ɩoeMMnIXu?s,`9wre Q۬qe^FKc&;lѪ@^lJ ۰@w5xu4WPQqFW[`#+l˕+) t]N> >n ^R{ "fU_|n'Y6Ċ&)K_ -x2eV@0=ڦ~WFjjyA:! PaGIj["00lSqY5aˢ{GM27*ߦHCJ, P1M]:N_}\5)_EtL 54̱3o~7GgfQ][t YZ.@(7N6x8{,j4m#mT' n0|`nvPlzwg5@,!V?kLq- ߜ{". 
u c)>5f^,rj%E=BKA:ҏnx;2U担;/)d=.=كZ5s?b;q% K-1.&K[kI\K!!u<4ŏVcm-E]tTUwoJMߏ*ĞUÖ/Hm]+*Sܟ{${ R7L!Mhș*`c" c~Idu/I~:CL\`BHϘ\s]ݳ "&4'\5V s>Lp=:l4fՔL&(tt:GZRkPG<;=6uM;A2Q^'Ndɀ\2`Zl޴[5q|~ۭyͫz،l[U@j۱luvJJSKCyoiM& (l![6ٛQYq}=1x|<}~=0ʾb~f,Wlb}R mr6l`ݪ~=;FF^Kp$Z?/-O^B' -PIw?h,X|xG૸-'U viaPdHrԫ;o?4_W+U.Y H0nZVR6nV7v,ɋ)` mܔAw-7ǀ@T|h^xYzvͯ/[Mm;[ݒ a~wB\/lR) @Nl B1*Yl 6 UET]NJp=͙սqkc }{ 5TS$?:yIIo뀒cXl NpHRX!:X,fM-8:ͬH6 RڟI=6W^y&P1}ș$Xv*ݨlXm&m mN`2S ߚ$M(d(?Rmx©6Ml P ]X jWk_3@&@emz؟fX%4|j[I {Mo bbB5 eOGvNw]+P&sI>벙6 NYjʱ>n+%/ɎBjعp=<<<4aF(6NФY!!!~?F 6) ϾZ]\ZVr|nd2!UyKM|<`'%8 B!!q=JR`hZO hqߨ߷!ejdJ}]EKh2O='Q/pI\JIZÛl :}yI2 V&7J`.u&+qH7o,#˯p%9P~.,2(vShOb.jR_lp| w؎~У~w]c7B[o#'=H>/mOg>-Psw{ans u(ZL_\QѸqx?gKeE ۺ3XGr \~GlgsߨM00{Y @&iwSy땗Bzo-cz|c sCQ-׌J Úy eߞ~)XjߌŮOPx[caQcjfUx6Կ@3.bv԰n- >现>,IeE߄=ڄOx74yԫX6YVKJ-~;di<=o,j䦐2zܑ;EW ׵gIV``$>ʗ Mci:[7:@NX9Aru$.X,ա.ۙTG󟫎uh'%AAmԱDm`?)Ha6t#So_%NNhI,IIږ$b/ l&KR`":M>uo& 6~  6 :@}ij-,Z|ZgHxM ox4elG}3'$ }ﳮiRNˆU`-JdGl#mr 鯅gخQuAqgAZwSfwpoއْvMmo ̕s8Rڞ%壭W1>;+Jfu:Ǫ4m2)OZᛪWN@}\/9;P)u7W\6!OB5VJ) u(_l4Q `V61ػ)~{0 Mopo)5oG`l"::/9?}8AZL^י?(zݠ]ҡRrmuk{DuH:~3y.Ǡ9pX!:`7iBM:^ &uvs³{J(QhNKX 7>žoࡱ} МcHd,+{x8M3.Xw'/'X'+K)J R#4VR,%%]z TUY *U\@E@qP1SY&eG&KD9&0_:*5 b׀kj 5RAu1GM&(vР\(s?d=[gP\_\*ϔ}h l3I_t_9tYA2AԌ|$̦gv&khi}/v'p}+}Naٸ139%m8鏕g2;{ȠMJj—G%FϘϚ6Αadyrߓ*tn2|ؠ5v\tx=# 9L+M%[W5gMIf 2+QG4EV|YOw{JS1C~\_CG2t7sti#iShyV$/xg/Ү Ҷd^B}ƒmpS~@j0Hgj*mCMtwnc ۂߍkvI8O*XXˆi˶Pr?G6m?f9Ͷ;y^l7j/[h5 0-Y۶z\4Y$ ?A? *ex*7@+{Ht7-q.\$My,~qXF#4U(ch)D^Yxgx~^W_!Ey(ҿX"6`I# !vsԽO|uF{6 {tMTTor=zsmYܬߔmN `[ }&3nb7?|'؇n*uNPTC*ƭT$6xY$I?\42~g0v̪$VTS9RS`=-۱דS#@-KX9_VJkd q/5Ǻ*1OaR[ŹR *@;τtb$)Z(9X$F@eJrlg6l{BQm%*ֱҕӮ!Cuc`@r5T (6(ζ0'/޽l#{hjO,pӵׄ ='䐷<@-d4EmO /P [*{ B[ڇ(vX.l;צ$[I& \g+-qD_qןckӾhP/QQR 5/6IH=O]LH}DF^)ա(cK5'>m!p?v R0u 0Շ6.'U=)%(@wZRȘ T)Ղ+ՙ_E uPFx6:u",$P?Xcu]'X·O4Y@X66өS ".s.+R[x k*u,$v;ʦWb]~a R&~;%9P>O=V1y>UFZzv5g j- yN5v!6'7li'n#Jaߴ1,CT:t9&q!agHL[7\/wq7JWi7xLJNX(h2- {ݢ}qpLe`-2apq/Pˤ6Y@$q}s~L;J1n&)x~!ϙb%IjDm-OUTL XJ)mL'ӹN@&σ}4s~^/Ol^k@qT~=: h:0d.ROyRyVmR$ܬt7o#XGb-y'kK^0~zBhj߷:=~!u zUV+}y$qx39ͧ8W (cf,GIgLg@`N9m ^eA;H׋\V&}kD4Ib>K6}2K0&d`vk8"I&pc +G  ޻ EG9Hyq\'ϝsrn &3qhk㹳?}zIP"c;Ẵ ͠<ܓIvJ?QN)tcj RBz("kw4{lS yɢ>W=Ή0%ǡMmYԮR9&+ٰ &տ|`JI =<<:E+IFh^KK,kf荝)j8ipZ 8.jۿsĵh`WL"mfƀ0fxxX<pM{_@0oe[TN' ]2{#3fGCҥ¿+.#L;'퀸<#\wR*?jT 4{+|0W0k`6{yMP{$sݫ{O@|^`+%X(pYjB S,B9k<;'V\HIkyy^!f2,Q cqec 5+W3 ѲkM4|=[M= Y856/ȺZk+uJԶR $kû7j05y "b@)P_MU5whSr8 3Vh`{YϖxK}똱53p8sWidZJ)o7SBW/ )&Y}dDǮN2pwY8e fQf1I ݙ$~c -'%!(X5 ^F6O D1Aql4;wm"gOGQ.4 L3iYB_ /H%eBۊ"4ٲ89 -[d#g1wÀ*XhV+Z(^%"MwWq P%|d/&I#Qe"L5k1`f>':4p⿂,Km3 T`5ƳoePn.<\(e?jY/ZJ ӊ;O%j*Qq, -ɫuko)ZdNڼĬ =Y -Y"ru Y7o8AA({ 7%zTV/TyT?nvH葦wfPi͒!=Mߩ8wi3 *iWF iy /7-XL\̍U_eEw$b߀`^1]s%ϴ-Dmw ,?e?8Ѿc%p;^{u<|y-x`S!*t737|?uYǵaƲ/ an2i0QOH^^zxxW4Zp%$*H }A ,dn˰e]$Lh,H,ca m\T$neoTUG#2ڋ/،y XUCf6$IUKE%D3 [j $IHbKF] E4cXD*N}km_5LHuD;%{<sKu9g[n-T._|_V%I Z$٬i%5낔& ;odzqg_Y;2ȄVQ6!g&6~=<ee^&Q29[BfD;WZQHȫ)vj6s$zw3ޭ%jÏ82dqJˮ 7V2xˡNwCO:O; 1# Ā.`;u& ɋև ~]#ػڔ5K/p=<<~(p%Y\*B!+" ǮCJc,W>{ܿaB)%c9 ,ƀX230?gYrXu>C5(7U6 ]E2Fu-YYz^Yz仓U*뵻yF3SuO:jQPHeY"5AAU_"sRY7O߀k1oZ|Ҹ%٭y:|]˂M?^zl=:>}M>5&s&j2AH{ k"E?pT]Iwq-PVv0@N G&EԍԦ֠ MnWo2j>W´4^ls 㳝R[jC;, / X4ؓυZ5 4lMh}H5tsWI(|Q|,ko|g6r_&8;^^g.pSvǪ>(4}Bƛw ?̝Ŀ;y!V)uoDmzzxxx8p/X^Q,1:p $cd/X#X(`m (>1%o \p.?V605%9zѢE|5-(zR+umWPvZ29\Z 'MԱW, 2BY[, d/:(n>VI.p\LΖ|_2LTU$ǑTR(YBC2`vs|P"?YwCX^z1o޼h„ ٳmBOv}VDZa3>jnzQ\\7pK|lM'xMlxԌ_BQB'F'}|vuGiN?3=^9cé.$\uݍH1\5ԬzxC/ _fgxRomq]o׼ɻDmn\`ܸqŷ,|ԨQ3KnT?!iJ T?$rHbaP~}Nvxƚe3 Ï_hMj4o=[^|o,, #.HS%jK-QK޽p=<<:\%'+(Q05BY+OK)^?dn:C|=MCOuj_Kj&PldHR\V27b XJfЖ,WGڵʧжª`\/e ]/kV`87ӗl&:zƯffi~_6 w?o*un8DmX6<~96k89܂7J[.Aˆ-!Q܌?Kp=<<<<22Ȗ4te Gђ|nQlbXԲ'NaX0ã PM/SOrʑR%MyH<}V#8󑔥jD?ɮ`] 낌'l -PR?_ձ~u{_}YlnնG fqJf 
Jj4ZV{0(-l@|gc;س4YcXuڳ33Y/^C"E *;%pMyɢg! +X7/,X69K& )[\u|5@[)^r:vF7nPށڠ)`@*6sHpD`{K6kEAC7 k5ۅ;K W_Dm]8E}vFqdYO&P\ \qM%>rxI޷Z >ۀ aavM9l=Q[a=ڋJ~۷XQ:V`,fϞ ~妀depl/k: hE6\7N8GAs9U PNQra6 6fMTS٨et d% 2e`VU #vG ʎWpYTx.seAi]Gm}g.?g^OV &~'Zڳ]_.pww_r y!̙寔?Jw~ 6.% 6^|Ʌ?ҞVxw mR{ʅkF>WO 2wEW alaMYi?N mK#9?^3ls Sr g;l2zo^b:wV4+]*%WPeK{yw9:|J>gۮGyĬd5PB w+3{J[⏒wUR$p/YLZ_wH~>]lH}I"9VSiu ,Sr p(UDxBPFP畗Cfo= oj} >>S>?}je*_^;NzW{SjOvXa^}رcoСC^Qǎ~ٳ}wIױꫯ~P4e˖Ev[3h ’cxmEZԭ[7;dyH駟Zn,) ֬Yc$1o<;̙3#FDUum--580bT|讻όD$XVp|6-2H{dr$u}ƌv ZJKGgkak-V OhذaVE39ogPJ`;wlT1[%Qɒ%.]* ^x٧lٲpr`W?cPPKBf͚?z1)Q~;H `q7LDيkN/pYA < 냠J'%{GûJ/زI&/j\<"ͣ9k5b*ft-v$;K_x-cPʇy~a!p%A/P)V?/]/a_{}ewcIu_+W2T0cZ)˖R&yj?ϣd7ިRnʔy]→ԟPPlݠILΓK3d|{C 8o-2\{>b 4(6|Rd<$fſ_ :V- )smʃ'38ݨQ#Kˠ"Ap$b s=<<<ʵC"ƺ~QÆ fruP y&81#n -P5oܸsq,1 KUv<'Pwx{e}xUic~؊n9I>ըQr$vO>}or6,IN[LxQFٵr ON˓,qm,?#ΪVrYN2%tj~t$=]/xT!VC*UBTIUhr{fmTU^2$UQȗf%Y*XǍuXp-^vSQK籋XGڊXX[^2h ֖=1J|RXY]6hf $e~V[٠WVV!y1R9#Lϔ% \_y%um!\4־?/L'lY4!b`bAe[&`BA<~ C()i~.3XGإ/~(gTYcƌ>1>Muܝ)>ڶmkT ڨb1 JP믿G@X^CH$9p@ } kV`?նDn]pNyr=k(c+Vh av8˽ -רYeY>gl3Qso+W}i,X``%9p=,I]Mt 1H v$&=] ɒu΁ID5@:j`>[Q&lOOF̭YlͫJu'K Y#:h av]-JY:[ yVRNP6.Tx}TKREPA_qԩgP`G 3t5IKPyP^~ݓ1`YlSQPSQAR9Saufd~ҥ5N e1ȟ.tұ%3<9^BòTB(x6WWAnw^(VY$e}rT?T{ʔ)5~D} +VP" ꣖S_s1K}쟮~T{HVxEkp^]clв׵ƭ,z[+Q[#K;榟r׌ڈ{xxxx8sHe,vI' < TfV>zIS~ڪ]s,[`3VKG=v]+^EijrKƾGȋ&m+XXe-SOB ?Lim:$Q;[T[fja7pVp R"/ m$qL]6(3>|͡dg)Fm-VJZA@AޡRN U3J"-Tn]t?vMT~$Oc#r|fĘY5'V3sD a6ʛe,3`` A 1C qYR1|/^_1`d+Fh >Y RlJ6R0bۨw.`P~˗/ z-a'& 5@/$@Q o Doi](=Tjڡ k'_$G/tOAթoԯ{Ǟζ67#B́`Kfhk%\oS[8ȕIWN@G( on=5i<=M};otz/vqV8GGbJ G[Y"k d( !s`8x 3O-MP,hR2xmQeN#ƲL|/6 ~,er1o}LEőگǯui4#ڷv_\`E[JmդgM^˽{xxZhfw l7N*0*nRe^w?{An?Ht<gcUY <Jcj:eW3YQEp鬳=Klz ul݃qta` Bxxxxxxxxx=O&Yj;d҇Od=jkTXk\2QϠ;R*++XIpC>%6\xGZ!f~*J.-5Lы.ʲY4^4 CgaC{S4IKR3={30aAQ&$)ɥV3q`b FǥYu*lUMX5V kR/_h*[><+%]VmJZA+Eo M <x+J%U{ܼ+9em?ڌ(5!1u_9`/?uf%J&&~E]&+OI%0d{Qrɥ2)ЉE{*;:_SIc4o3- ,i?.=$[fcRLx:K8uI Kdg(P,3E6D/\ " X!`~oLիW!(\/P?:+rKS=M}7շ˲V޸ zxxx؛.u>6 ]Eի:|O~z/Xa%IgAI-%˞=Y&~= V K*FXx~=Q 1sUޓrM.6 /2D)Lîaj lbFkSUlmvd}Nb:yA1 k=lmNNٳg!YRPVu-F1=$u?V_:N,hc $l(H$؛.@%OQ]\La`L`NBhzv 0fґ5Nš|vxep Y&2[uc;<+V3W\{:.1!H ,Y+cڒ LdRaHi'ؓv?\۾l#`jx 4y۳zcly'{Ze(hȢPe td)L{xxxxxxxx3!-Sd \x fLfr$])MA:+={3ΥUZHTZvHDg˽.t5ObU]X1B+Y2hb& N=O,иVX|D W}15Ŵ1 -kv3@-fQ+e;>%5<בNDl@b*_ 3aQ+˅Z=}1m\2[ )u_l+hkjsI,qM_x-Iޅl /b :Y+XC86}RV f=dWcfdg{`pFR?$T\i "Mw 2Z˺`va-=QHKgl#}ޛ\ն¯V5+w TG,ʔ)k . wkxY>f:]Ju54!q/lce'MQlJz8dE)@U$A32RpYT.`^f, R.'ɟ5@MJ6&h9CVUmy<*qY{)%v. J jpQb@d'wpvO:Z)_KTۜmF.9Qp%PA,' $-JҥKc e'pd;iJOAXE>`$X [`ܽ'6 (GbOGפżDd'Ǐc%H#X}IvJIMcuupOaX(mΜ9'9;]U'$RQ|ӦM3c+$ T~ANH, '?9Q .Yr`sMq) W[Zbw@j &;&JH uuQ&3:] /.YPx>G)֭[۱WVaπE;w6x~WG5kִsm<8DwcRtiG?co}1pIHؠA{O 5'UMA *@7|sf D -4e?^O r6jc&!Eot7$!<ǐgnI4]׭5j԰d`cFA[cU R+t_~&[}b`x1|ȕ+W L Ix_y]}-fuQ\rE xu.sXܔ<#2f;~x5RJ:6JALag M1qRw:$ۧ)/e3$iVċ^zI*PY(,m۶K LyO]ŭZq^ /0[ɓ']#ME$aF>P$cmV)JI<#W0+-WpU?'ɘ`Y/`+"VJ V5`]Գ3]Z]!Rf(o2Ճ~{3>}zԮ]hʔ)ssi#sHƉRNmo<<J\2Qbl =03QDk`lk{t>1<ڤ]?k֬oiצ㾊2'iRv`-({qƍl#VZuwܹf *AǨ#x (&Io=4뮻qO*?c6ԺxH` q, :v=e<@OJ^.6f;zhS=Vyn@U̙3miӢ>_mGRF5ɯ{ 06(XP /PG5PdД$_X7W;*=zp-AR3 :ԀTVcS6"[ j]Z50ЕA<m<&E.s.|v`A1&8Bl{ pF617o2\Y`i?-GymQ fr,`k $WԃeA`Z)R*EoUAٵPB2O8YjImڴ1xBB6, V~)\SnݢGÇ{u;(t sJfa^P7m+{۷oԲeK[$O0!C.]ص%PcU6cǎHWV 6r(i~`;xr\溸u ww@,p&۟iYd J*fP`,AJB0zؙ`cŊ݇9,{s5 ~)̼p,_|}ؖʉfA$z)/^2a2~' X#^%m* (3Pmo4k˞(x?/Q}{˞(x:߁±gϞpwc-KEӲ;wqwܸY8۱'ʱ7lNT;x ǣy< i{{9׋g 3<^3A!P#_J=ӹX-)[:#d@DFGٲ/^2a?/]£A/ R҉'|X~_GeO,di?/c޻-[C/^2|CEXgp8"L{%>m: I/>:$xxx`hiPqlj[̝<x0Q5jժe&̼QPro`` ~ٳ O.%. 
-Cmdiٌ%v#/V-\Oޱ0\vopiV~x& Vqe;i#<@PB?J뿚IL Y㏛چ`3 jAQlJHAcq]weSR`?< ۠,V2eضW:Y\'pT s151/_޶[s] ȽpasoR~~u׭wu^dfwpU.ۘ [ VBN $`G[vYgy=ӰTRQ͚5 ƪ%JUT(mH/?'ʖ-k@V^\t<:`,j_:`A,ۃGf{kifNB =OK 6,`qL ׼V(:gmȑ#<{TBWd/L7pC=%s @=KC=T͋*=Qhy u?/YT/W˖- 0{%ͯW `YHg^ni{C7c* AϢ6KfYUTuG/S?/H$SSSsb_~P폽xɴڄ=U6rJ{3k,NtV2Xm`C*kRU,'X\$=MMK .E9smx-pV@^45` x'pBt饗ڱ¨p/Odxms`;4Uy/V__*؛e'\h`%Wwjs R̞=?|g$xI'y晃;GQŋL)|s%uHNFKf;VW=y1tShnlC沄 Z%*AM}M΅<#x|;#,h߳WLY_Bo~%J޿' XW*9ȑ#3g:w*5_tb)Q1HI vGP9WJI3RtT#TNA0p ܺ pQ&;@VuZFM^ ܷ%{K,9G ~ɂ ^+*/%Oછ ׺C{ 2دbD)tc=XԵD2bDCLꨏu} d C7:+ce/Ha"V}<3aZ.;&#Wul,Y+3)ȿЏ<M~3X$P\kSN9&5f}y/pn|lE R}blƔLw1N^"f~[P. BsxɴҨDnM \f`Pj$X0k/R+y_UA V 5p?MUR@n@R- 3)s~rwղeˎDe+˂ VUyԠp8T rAf0Qs ~y!~}%{z g}յh{ΥK23|YC7رU^쮂B… \}O' ҵkW\?1++ %Ś8Ez]i(Yy7-6`:PClܻ;ew،G\kMĬgeW4|w; T|TLd&7*6Ț hP6C׼X#hpI@t*,)zTAp?&Lȧ T'/`[m5_+W0-労͇bIcP?l ۵kW ?^W>0y__^(A{xlƍg{mѢrmA t$s#̶pu%حt=@Oۮv9HB>j[{dHj*m\&;O>. zq#Yp %Uh*TY-̗/_$hdH$leGQ>JZ ۴iӂOۯ\~'!CwS1֫WVnu;X)IX v.~# @[ 5  ;TPJ7[#dg6RGuQIa|z: <DZٳw}2(&cO\=p=6;&S+8Gk֬T RI6ݺu'\=p=23zq#C@蠃_wK[P}ϕu r nנ5 +mOV%Uj {4ILA yפ֭z{۔,|6lؐ e Tt*Ť-W>zo.>o QGI&EvJI]lc/mݺ?9ؓcO\Ul4[.%fB6W^… gcO\ ^:PX$Dϛ7/K|z}T t6C4x#DA ^5kVmwhʕ8\(S)isNP?LUmVZt뭷ՠW }FmK2)khPU$k^:@V:w'Ǟ L,޶cƌ@E'{"xxKy;EߟOPdׯ瞅\=p=23zq#J, @ }H_{T(_@`>J0`2k/ \* Aۮ%B AޯNfjm[)t$V%*K tDF*U2>M80gΜX,O4 ]]Sɒ%gs7h RǸHPOk~%D Ktłb?- :mT(p'Ǟۋ!CD}Qw&{gjKc*AhAT};ʘ}-zp둙׋\6vl <꫓WW*7z*l,WgWO/ϻTAA_lAѪf bWjgb^%XjyXo! tذaӮK+@ zR6gXGUer>E.O`p4m4+ .4\y8*qBDm]7b5%A8ؓcO\@AΣ>t{p=t8p\L+WW^!5'lTLE4X\X5U^K$D?^A ɓ'&~{:a8;w-ZԒLvKjޟ>} tnͪA BѲe"U|vڔd-:uj$᫇-I2 , De%hРAYV͞\=p=t8^7./{]|y|&xi۶mJZ`< hM ,ا\=p=23zq#zE׷ e,WP2KF60&f!&SN t]>%[u, h_(Kk`h߮l#m͟?*F,P.]:l_b/nܸqkQ >5j(7*XŽ ^AXs~@]>zp뱧GO< $4=[l^b;~i¾p=t8p\LFxLVv,w4e+B")Z1 *_iJڵ. }XE dbRJl *;kIxw'Sc ʿllwE u& \0]}饗,1p0 `hb >e+.ygʎ=[23(X`!w=#%Cc=<!Q6,4E|6 >g (r?-_Zp=!![9m8q;^zu!mA؆U\dr(Ґ΢c ~OJb,B4RI(@>]I93!C/tH]$\t#:ڽ=;eZ_}U"p!\sNvڌn袋K-楐ۻ+ Znn&dnHViS ́!dH not! }! N:N8|Avj5fAo[vq\."Me2i:K@2鿆P~Gm ܏;T0//qyi3|dyma۠x.}U lVꃼ#g睱OxN܏3U8C:0Z0$0*wޙnde!ϐ ܃%tH vpo U{.ac5hNU$V'ԖK3zc q0/XTD rI&Q"61aDa` >#,~9caz 16K{Rwlλs-ۉ9"7ˣoY{5f5V؝H.(.2Cj3ڞ,z!oaW .`"n!raoT@6&kџM9{R2N.iAqRa2N_*?fAԋa89ӑ ܋]z\r#;1 G3ݻ7dmAHL `A6@ؖG?CNbyB8 {L"b\ВrHVYu:g CT@DVhnlgxwm X8'2 I%!AX O$K_VF0O rȭ!q%=3y'PRl0vZ Oﱣ3X '"mE^eJ!goH6+i,t1$'6U:1؈^EFVNS*O?"; >F=quSIP'g5 ?8.I{$y.myHJd.xNlXC\<\aF8lҿO$ o香d8^@&D(ԩS +$ڵk6|e3O&#=SWk_ +HOPS98l>9vy.a3K*F6ǨARݗ4Pve4d"3Ee"3؈mMDr`aWߨ{&r.3#Qq D.0!л,{i6,?mq $R?o6[m%_`һ/bF7N/ߖ G.mrLcGNkr(LD2 DBz*"ē($Aۉe1j)Qr=Q㉓Eek?,zEO@>AMD8`B.\8SmR$Bk) ڃHHґ \ЉH~A>l=+YKS[GB S"z}t3t\z962ۭ[L| &XOSe"]s!@#cFWgK%Py${k93tCbQHGY=6Ht M6OX{Jγ]uDH&gل` /m2N0z=vy0ߝ/edKAYT^:B}_B_%{y[(=ָ_?6v{k:~͵OH!̉; M Hj.]vCv/5U9c[Uk32{.,WDžr&)=fO*$M;>E/W!wuߍ*7qփ;DN2b8lFvxK22xD0Fc,@! xg'\XNȖX[6WTo&N"|=0EH`WTND!K9%[|q 1FΝ EѣDU_8B= \)I|γ&!0 lm"+rXX_pDzȬa!w\#|Rbp;K6K.CzR,rS{!*Df$ʹڽ  o,hU]YE-F;7%%Q \<-"ZKWώS2zkDܾKg 'Y}[!?4%tȠI X.x@M8eC޵ȑt6@E٤I7q. oR,K=5GM !z<1=cE֛grq [uX?~r[F{viJ "IJtg) ߼sGG!p@(6/jұ]RTkfJrSe>C|@k^عؙM9 jja7#v`0_O Ƚ0GْcY2ߡ Km=>0sL\̟})HD=zҶz7IDf%{_gx[sv֭/דUbxc.JRoDv^|)nt~{ϣ~J3o~Ԇazt[eoeߚIfaFD'@J&0'Ж" PRD@3/B81dl9[8$2N ޥ4[DЖ]9VP[" G?qLd-"ӕZ$$&q?(T 0ZU .27y.Q}(W$N۬7 ʄ6d;)[Dr92K゚걔"DV8Hv""'!"*!b+#J ͕rR)5 ?(}{"r$f %W\9E-ƌpDZ.,W+Jdlߜ\X@*WكHv.' 
35n61(N<0&g%?O3ryNHj !"M޴!^m,2q 1nWEC>i \k\~4'x2џ4XLcO4Fg1|r &=u;Ѵâ KσhnjiWT{q"OS_l!c_}r/-Au nA,H "Jtez+,^)gbi5_'dmmL81"QcXo}y{N{GH\.gN;Wae/7hn'9ȩzO!ov\nhy S{ !R~*Do8N]ע4H8J$w+d5Cc1%pEܢtDZ[s!o D=Ar2 "nK>Z Oi[.*[#2|f_g8Lv/E:H\jr5LL{$R 2"i)ttQ,gn biKwŞP=nev) q(ĢRM%p!ieLѳRtmzT'i-c"'|DXaw$2XѡG='ieA?'pOJ.l!ڏy 2^ʚKD6fݙΐ?9^s{qNd"E+'paIe&]|,Tk:a@HJ;5 w ߡB7Xdgfh -8ߠ 6 ˖']HwA y1'# DSEq]/ٯ]g{:>;{yX>8fι!Y7߯'pxlMwHJ*.L{l_S1 f;-WjQ?=z2[g'ϼ:i{iz!xV\v}a^ ЖUQ^ly*ȨbNzӅў\H}!g.6~H(mmM%p%S\/,(),LL}m"0yt"D͓T#p{.LBlC؞$Y>!"gA>T+ԇ PGqfY` B)zQqpV?!\vEn`aK@B}DRvܖg4K=?A'&]2>׵ENQDr# P2EVcȴnEz(y(ɀDǑ﷚q i%BmN|OmfHE"ϗStV'R!p!ʤAg[l-q:Tڲ솜ܟa:C-cAP"US Qw{NE 499Eov\tQeG*|C|=3?'.}暤]nQZ.Gݴl/t>WWNsఝD><,.((=[} pEo܏RWS_y4 \}Xƕ!#GL2UD2w H%j-%2,<@ʎG;JuԼ5 4$WW<Ć#OOD,ѧD^Gﮱa<@mW%6ՍחnSƵG^xO~Ycl{קPKMn=d;Rvÿm{[a=0iW^gD!v&p 0J8_Hka+ l-䰬rNB' ‡mILX$zh25>8>/N8.%v1ձDP(T BYm2$}3D!ji:9I}B|6$/uH;Asv "c1!B젝y uX #BB}?#=y H i [h A89J,'k.rv֖D7x`[#rOM-/p/ѣ?d-cZ.Q!88hrRoWu)U0aBwV-JQ]!z@rQW" ɗ(GlH b ~,̪.q8"x?–q 8GH+ \]uQӵw1!7>Eh+dcV} -sE8w8\qpsd$sgsFe;E.46mPIe4^C'DnF3.N,#pUDF?rLݛېgneɿGc|ZELf"9 1Tj9l@`u|?KtS"pjө\"i0 Xn#o! Sq9Z蓳phrd.\D0Y8Dx;2wʐm0:*EG Ct\Npf"z@0tn !a8z `|9~Mͮ*rbwEձNX.?@(r#2Վ"Ud혳}w)uV&p!?idB흟nl.zelnO!ߍzr =ry]vS[ZMȑDߙ%/t3NOrYSD&$W_W>ġND1KL\\,0"~z9<<5Q1/\ҴCRt|z9WyGYR{9vfa8Y̍mSI.֢E(5  a"Yu(FM= f&3tQմvw-'ݛlGpv` 4'2ngjeL캢# r܎3Βn@~7JgQyE?)hJ.$&DFdUGkiP@(]m-k*/wц3{;?>ܞ;ퟞ[U_R54<ʗTW?e砳M5 0[6w¸QѹN]rB&8RlCaF&KF+NAR?=m{kd`OE*JdI$"I )(3*F)u)]Yp wc{cSANQێ.Nvq\I.|>-fA?ޏ%ߖLRygO+: >G-ܩ~Vw#R>~6'.d+yE~&} y&%E"ʥ36>eBAL9tj"p9 󮶍V\z)6K[$ps z' =g]riw!j193Z[<6v)b5sɛͥ{Co`1b`[#p B접8,rk^{SZnu,r0P؈w]9>E8Smw1Eְ/IېdV"pa(qUZٹ^^F$2G:ws럩ΐz___p؝SW L=\v/*:6RA,J`A­Yt 38luSzY&pMaF>:K92~#8y\"LvdNWWDRԫB| C~OrA]T/b,+U.wг4)+,=5B}qתlےj~2/#>BJapmzn9VQȶ3RDZEN[;M|vDA/rEsh _"p$|T2SN Z_@29iWAc(D RāC6Dͯ~9]{~Ż_$کۆ1ۈ-z4p%>À!ZXJsIcr䴏 9pLS9c#ab:M21gmENr~t:S{i,:,3=1H|!2lsȒkSYfsjrz}(}tQ_E3D<ܬgy^jnX4 "9ׁh":>0;\u$Q6=sK+U~"Dv|뒟e.φa,xs;ɗe3|\抓`=hQHz%">cr`BJSZqAJnMBr,C\.[bva3n=Au\N4^a-'{+BqߴϬH3x5%6Zw9^-UNƞG\\pX%c˔cwбY9˯^~vSstJb?NEsX /%&pMaNNI"Gf$U3rVdL4 + %gO9S$ǢDbЖ??$FS"W&!/H=HϠT c@`L᣺Ey@ęRTVOOCOgCdo1} &m ;iOW*;fEH9G;p_Q 'r_@?{ǻSi CD$|ˉ.dC݈)BFE~NJ&c1xM 8-E0sx m_"sbڪ{kdD9reV#p!2ϖH T ӈ0mO)r7 dzgS|(6Ar(#J,6 aIEM~ãw[.DOt9LDGm>yG鑮"V,sk\}m7mŮ~ȭ\8 !Rj"KCt"'5ڙ "8DE \yW0cTc{"rj<&;s|ZW%6bх^ypsrxωl?83m G!dJ4MW{$py/GeAx#$EA=c[r:C)"s?#t*$&UZG*ށ< 'p8*+nO[- $ho.s<{ 8kdtMQt "?5 }Nѷcl1ON3{D[#p!s(4#u)"UG!p'RV-bKV֢:6k- K7sb&p!b9I "p_\=ഛ"p/FgrzoZO8NPs֟op7'hgc+}gO?M➿l ܡaB'T-$%u$IV%r C2"SvԄ Ee~&DjAP@(`./# %tut \'4Λ"x٢J$@TM ED@ Q_rRu'E6tYlfe8_vқ"CJMErdw_S$Zt? 2]VFBəLio)N8"pt~W\"pxh!<DScC$ͺ6;,>ZᙌQ BCܠ$׌]EaPI>A~="C?~LZ$#g+8m. \ށ\UQ-EF\&LXSzd 5H.Afd gDB&BiMLZ@TGE&NcCyjglܸqH~.nœBgdWl6uG7FkBŻ9 \W̱<ƴ|i t  8U_C {q+CDJi B_K9hc6ߑYE(̫XD-MB~ݦ"pϻ2"Ϛj!pwo .!iey?Ăx}]Φ*`2VЗ<Ȓv_VMM OJ%$5P$mZܜ|[قK-rh|IDjrsڼڛmZZܵ6&}?Nz5kaWyTK\RUwR{p9p/CtHmI^"D!"|m".~)VeMEʈ]:=P.A$ȨNS t rCbrV `Ðg 8.\9Ej%p!9!!<' cWԶ"-QHF8 qʫLc"GE8 \ pѷ ko(4.-.0)>wyGjâ Dܐ D!z}3DW1C-%eMJ@+e.uk\\)*Hdk:!ZOuwlD"?)eHi_'ggg GRFsB%r"-D.J INB pBN!/lf!YGD Op\KDCFD ӤP_H]9T#Χx !|q 6U{8Q;&} q|}!j=C4@0&q9sGtuᜟjL](̉Dm71 fa3@[}+z!3+%tz,z,B4?wzcs`ȑ#yɽ%e TFrAWSƷENh1#W(+#H4n?\*,j,PK)X<9_g Vg1g5Ws}ȣ,cǎM;mGUcBd]}/zEt7"*H|19gk."iQrS>R,}E"=4]Cfe7EŠt%“HӦ|{9:mpP΋-'ܑިTv_nuE~{9y:_v*%rRx yl=ե.ka &1Dr3-c,\q aup$"|#RdShm[a[r Gwy=H$Q}"X^2Adlr͖ꪝ! 
"*?,9wI&FL8p\T[ӝlĉhtË3D ¸Tp!Á!C_|f88?q ?8ly%Bh;Oea)m\ 2 :$+wHOA| ܯ% \"dggD_1.4VҸoJ_R^ߖzv)H@HC9ury )'5l 'z I"eYBz2I?XmR԰Hoލ/'zC:!A@γ"ǁZEosDAJ0&kjr6ˠy8|h!dq:JQWHDI9a\ϻ瘏!Q ,T\u;2ƘSOoNҍE|W@|D(K<n=szQ3`if| eܠ1V1|YczHR($ߥ+ CMHjGBj EBbGo6`UiCfԗv =]I"͓}+93-[>c#l[iȝ.Zt|Ms+5+7m-NAR5ka.N@"` 986GÁwCL2iH]ؒ8@KNV5 Q*\)J"(HrV;MMΜ8 S5.HdȮD,aʐ!Qk{F>MUqG}'ᰤh?/ymG1SB&7ewf<`;y>Ő yĉK '$ BW\}a,<̇B[ 'AB0Fr8M9픢X/4 E[%]C"oY8.GTE 2*1 qo7@\qSD @oOBgl%$)sVDxi <[kyF% / 3,1wkW97(EC\ICT %pb @1G˖6C:m_B}Zcꫯ*8LI)J(iDnSЛ3J.~%UT7!yBΰ}HO&6g38ĸ뱐/Ntq̛U \ByJ՘1cN$.^CaHcc'搰e ]I+7v`ck4HzU|4Y\RDjp{緳WYtر KBDBeu/;mȋߡ_x\dl}5kaFA*eلZ?*?<rHzA$)Za8$A&9Z^D;1˰{g I4 Հ"D\][~&CNb%k1*sʸIIL~Mߓ_CL` +'x ߯+Dw7JϓIT{pW!/m*HDr[k8f=;-A;:"gTc_$8TD}Mnt5f? Dj%]L6yÏXс`? =ؗXEQi\9rZ8u@CB+³Q,gqoz f93 `H3"D?V;Jvpc&pgaE.ㄅ!>V,Vin[obݲt AIG@, p vʻJ?}̣w:NcCl yW}#S,ΑF%'UwZs16vdخ+s/ɊB"$p!heNA>eu[:lXcf!S4DCtCPC6M.: uCmnz鷔J"cw?%Bt\"E)'\u^|lN~DR>GlMAnBr Gn]:GNaZnD鎥{$XqDFG[R~(koQ&pMamLD_q d2h#"D 'g2 "+pj##GnMTGɩ#vQ2-, kz 2D0v$dC"cA>ܙ(۱U3TILEYA$}cdďV*/"'&^ ia"K9vEJKQZIKl`JrD\'2'-ؠ玗!pڱഴe-0D01yE!p-Ky-4bF8]"1$"QzgR/H{ pE.1Zrp"gz/ H=P a\I)Ŕ:=M5&p~q ;2"pސiKB!Jg;zNvti2.8tBDWh?9sXQ(9.se"u+ q'gbG2Y1$I{yNH}f*M9S41՟"JYD.)+?E@,)mNւ):, !3+RY Dn& c-5`4'Ӊ}G,>Hc݈z,/v,i<>"G|i: 4n=,_B107\ŶGHCa9a"[Tȴd"NIPq/5TNy)2W5@B"ZVzߥ6@FH9L2J9gSȍEJC&NӪn"G%3 k,%|@v+yz#.;[mFvWQX!ny^ە4OW0,è¿-T?| fgTmEdTzK'DR&2Hg6>/˭\6"//Nٳ3n~A{M/d\ziTTI0F+ϑJF}o{[^DXW'Ecܒmܸq|2z!F<|QeE<:% >NP$7D[CѪ;h,$r b Gj26T~s~ڳT^L^C&Tq0!$ )ҔB8y(c3^HygBgh:hpnO*g/COڏmt3)ALUﮀ>N/~E?(m'Hy 1Os[O@.>]|Q9i DA|)ސ~2VP(Y{V}z]L}E[$mLE1x&s겤j]m Ո 3FUVɃ=;1rQ4/X䫩 ,n %zSA(&p#bLzr_"zrO$]0c3&]~`^TzG?=3؞@KY, L4HFS".z!pZ Ry:qac/1=P^1&CZMENKEX1;}E1_H-]+VqX9103/aw !ۧq)cE^׽nRD8sVX 8,Tcj;$ C+V}. - ڕUm'jDuy>bS7ZaQGd]6 d#h@KE[`4?.n_m;Bl7qHD$ӄFds% l"w m.m˻9pq|'lW;tǖ> 6h(l0aB"x6YeAId;}s2GI9`C.a2Î{"[jq+z tsӒryv$zfM~FAA 0bpCƫ"RD,yT%YD0d~`YK:Aj?2|tZ{d"WQi07"$#qq ܦ1sWT`AъIy6s^9yu,2ϰX<м`]36ږdlQO=g\6%2B,X e~f`Ψȿ̎"Rh̘ơqTSf>Pc< +#Dj|. ?xG{1;-]Cl\"X"A{Wyq2@,6wyƀŦ+摪$ \1'E(mR~x36O_D}ӳh h\W!Y.M?U w#Lڃ)L=k ǦUOh5ԗkR+[_"#DkrwF{<ºG̟ǽ"Ma\qEd$_dBbBB,@N@Qνb NđȔp$px [!cG-'"{AX`cCD#K7QaLnDR'~ ,mɽԏ{G{@f*,~8M)<Û:f , HJ!mÑQ+ʂM1͋cDs}mE=ampT&Ĉ\pBhpJA-Հl#]ཀྵ7mEuqQ\ 6h-yUCg;dfZuc@9~dwZrHAw@V#kC;* ho`q!I,85|\.aBXҟ17c{p% !}:]"N{5Pơԉ1x_zt9?_(B<Kr/ qqA cw{E[*s!HY],L0EA8)ZR {1!y YSJ2bzꇭ1iZ(89qj2.@GFD#vmgT\л@lG 6NGx%XdC#!_S͂uAb/[^?y펭ǜ@-v͡Oc~ac/U۫V=.v1ka4 K$= ^y!>a m, Ch-`KQ8D8CZX8 rk 90ׄ ?4 WnۆL%1rR7t11s  š="KNVWkEzl?u"v95A i >)/D pe~Fp1q|F-N;NDS6 ERO۴#F[>rBs7Rv !ϐHP+RP1Tdr 9oHH Bʴ0 ߐbWצA{#p' БaLEd}A"<cD9Unu?cyD/o8c\Q&{lgv, T;%1)\ܘ~-7$ H&At6:9eAD2! b+wJT"~7q"H۞#fJsr0+LͫwI]3}aL?L- .&pMa)m^ B#]vM-580_D70kL/ȋɁk͔/sGgC5΁&pֆ \%a \06W#/"2pH&BCprJG \aט^xzpU:ɝCi$W Į Fkђ0Q \t(v *h W;5 h0k&L  dMNiƓ JеkGZ|9LjiF{ \ahIuc=V8#%^z\t \0Fk0kL;Jn7|~+[r-9 Çolhm5Z&p]:"+2k엁>ELm"puʟ j0 њ0k6LӃW^ypw^}U7p좋.:VةSB=.STG\J{ \ahIuhȑ# {o:P5HX-8x!p3cag \06Fk0k4cǎMd&ZvM7ʹ%;o!u,σ>89Mov}oFkђ0x -(B'(|ǽefDR4sj\k0 њ0k6LMo_޽{_ܷɓ o wumw]oO /n0k$Lt$_,<#{>AI"o)Dj~H[@evam"p)2 e04Jຍ\fvS+e2x°a _|Nl Q+ӧpZk-!nDӻロ*B~oNns=]fz%p[l1KD&p]=;nܸdB(tڵp뭷DjA:O*}]'W&LH-Daц?xG⌺ӟ+be˟KK }WNrȩ⡇6r̨osNᣏ>Js'"4o$2C !])! -MAxEˆKK=, &曯zIS6rio{ \vAr@_#y)K<]s7B殩Cr୷*|}@ގ=:miђ_}Us=l0 wP8蠃\\fz{ O=sƥ E<ԹstT"6.c-( S[N8H8CzY׿5fzw2EB. 
borgbackup-1.1.15/docs/internals/data-structures.rst0000644000175000017500000013705613771325506022524 0ustar useruser00000000000000.. include:: ../global.rst.inc
.. highlight:: none

.. _data-structures:

Data structures and file formats
================================

This page documents the internal data structures and storage mechanisms of Borg.
It is partly based on `mailing list discussion about internals`_ and also on static code analysis.

.. todo:: Clarify terms, perhaps create a glossary.
          ID (client?) vs. key (repository?),
          chunks (blob of data in repo?) vs. object (blob of data in repo, referred to from another object?),

.. _repository:

Repository
----------

.. Some parts of this description were taken from the Repository docstring

Borg stores its data in a `Repository`, which is a file system based transactional key-value store.
Thus the repository does not know about the concept of archives or items.

Each repository has the following file structure:

README
  simple text file telling that this is a |project_name| repository

config
  repository configuration

data/
  directory where the actual data is stored

hints.%d
  hints for repository compaction

index.%d
  repository index

lock.roster and lock.exclusive/*
  used by the locking system to manage shared and exclusive locks

Transactionality is achieved by using a log (aka journal) to record changes.
The log is a series of numbered files called segments_. Each segment is a series of
log entries. The segment number together with the offset of each entry relative to its
segment start establishes an ordering of the log entries. This is the "definition" of
time for the purposes of the log.

.. _config-file:

Config file
~~~~~~~~~~~

Each repository has a ``config`` file, which is an ``INI``-style file and looks like this::

    [repository]
    version = 1
    segments_per_dir = 1000
    max_segment_size = 524288000
    id = 57d6c1d52ce76a836b532b0e42e677dec6af9fca3673db511279358828a21ed6

This is where the ``repository.id`` is stored. It is a unique identifier for repositories.
It will not change if you move the repository around, so you can make a local transfer
first and then decide to move the repository to another (even remote) location at a later time.

Keys
~~~~

Repository keys are byte-strings of fixed length (32 bytes); they don't have a particular
meaning (except for the Manifest_).

Normally the keys are computed like this::

    key = id = id_hash(unencrypted_data)

The id_hash function depends on the :ref:`encryption mode `.
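To make the key computation above concrete, here is a minimal sketch of how id_hash
behaves in the keyed modes (it uses the *id_key* described in the `Key files`_ section
below; unkeyed modes use a plain cryptographic hash instead). This is an illustration,
not Borg's actual implementation, and the all-zero example key is made up:

.. code-block:: python

    import hashlib
    import hmac

    def id_hash(data, id_key=None):
        """Compute the 32-byte repository key (= chunk id) for some plaintext data."""
        if id_key is None:
            # unkeyed case: plain cryptographic hash of the unencrypted data
            return hashlib.sha256(data).digest()
        # keyed case: MAC over the *unencrypted* chunk contents
        return hmac.new(id_key, data, hashlib.sha256).digest()

    key = id_hash(b"some chunk of file data", id_key=b"\x00" * 32)
    assert len(key) == 32   # repository keys are fixed-length 32-byte strings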
As the id / key is used for deduplication, id_hash must be a cryptographically strong hash or MAC.

Segments
~~~~~~~~

Objects referenced by a key are stored inline in files (`segments`) of approx. 500 MB size
in numbered subdirectories of ``repo/data``. The number of segments per directory is
controlled by the value of ``segments_per_dir``. If you change this value in a non-empty
repository, you may also need to relocate the segment files manually.

A segment starts with a magic number (``BORG_SEG`` as an eight byte ASCII string),
followed by a number of log entries. Each log entry consists of:

* size of the entry
* CRC32 of the entire entry (for a PUT this includes the data)
* entry tag: PUT, DELETE or COMMIT
* PUT and DELETE follow this with the 32 byte key
* PUT follows the key with the data

Those files are strictly append-only and modified only once.

Tag is either ``PUT``, ``DELETE``, or ``COMMIT``. When an object is written to the
repository a ``PUT`` entry is written to the file containing the object id and data.
If an object is deleted a ``DELETE`` entry is appended with the object id. A ``COMMIT``
tag is written when a repository transaction is committed. The segment number of the
segment containing a commit is the **transaction ID**.

When a repository is opened any ``PUT`` or ``DELETE`` operations not followed by a
``COMMIT`` tag are discarded since they are part of a partial/uncommitted transaction.

Index, hints and integrity
~~~~~~~~~~~~~~~~~~~~~~~~~~

The **repository index** is stored in ``index.<TRANSACTION_ID>`` and is used to determine
an object's location in the repository. It is a HashIndex_, a hash table using open
addressing. It maps object keys_ to two unsigned 32-bit integers; the first integer gives
the segment number, the second indicates the offset of the object's entry within the segment.

The **hints file** is a msgpacked file named ``hints.<TRANSACTION_ID>``. It contains:

* version
* list of segments
* compact

The **integrity file** is a msgpacked file named ``integrity.<TRANSACTION_ID>``. It contains
checksums of the index and hints files and is described in the
:ref:`Checksumming data structures ` section below.

If the index or hints are corrupted, they are re-generated automatically. If they are
outdated, segments are replayed from the index state to the currently committed transaction.

Compaction
~~~~~~~~~~

For a given key only the last entry regarding the key, which is called current (all other
entries are called superseded), is relevant: If there is no entry or the last entry is a
DELETE then the key does not exist. Otherwise the last PUT defines the value of the key.

By superseding a PUT (with either another PUT or a DELETE) the log entry becomes obsolete.
A segment containing such obsolete entries is called sparse, while a segment containing no
such entries is called compact.

Since writing a ``DELETE`` tag does not actually delete any data and thus does not free
disk space, any log-based data store will need a compaction strategy (somewhat analogous
to a garbage collector). Borg uses a simple forward compacting algorithm, which avoids
modifying existing segments. Compaction runs when a commit is issued (unless the
:ref:`append_only_mode` is active).
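The log-structured behaviour described above (current vs. superseded entries, discarding
of uncommitted operations) can be illustrated by a small sketch that replays already-parsed
log entries. This is simplified pseudologic for illustration only - Borg's real code works
on the binary segment files and a HashIndex, not on Python dictionaries:

.. code-block:: python

    def replay(entries):
        """Replay (tag, key, value) log entries; return {key: value} of current objects."""
        committed = {}   # state as of the last COMMIT
        pending = {}     # changes of the transaction currently in progress
        for tag, key, value in entries:
            if tag == "PUT":
                pending[key] = value
            elif tag == "DELETE":
                pending[key] = None          # tombstone: key no longer exists
            elif tag == "COMMIT":
                for k, v in pending.items():
                    if v is None:
                        committed.pop(k, None)
                    else:
                        committed[k] = v
                pending.clear()
        return committed                     # trailing uncommitted entries are discarded

    log = [
        ("PUT", b"k1" * 16, b"data-1"),
        ("COMMIT", None, None),
        ("PUT", b"k1" * 16, b"data-2"),      # would supersede the first PUT ...
        ("DELETE", b"k2" * 16, None),        # ... but there is no COMMIT after it
    ]
    assert replay(log) == {b"k1" * 16: b"data-1"}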
One client transaction can manifest as multiple physical transactions, since compaction is transacted, too, and Borg does not distinguish between the two:: Perspective| Time --> -----------+-------------- Client | Begin transaction - Modify Data - Commit | (done) Repository | Begin transaction - Modify Data - Commit | Compact segments - Commit | (done) The compaction algorithm requires two inputs in addition to the segments themselves: (i) Which segments are sparse, to avoid scanning all segments (impractical). Further, Borg uses a conditional compaction strategy: Only those segments that exceed a threshold sparsity are compacted. To implement the threshold condition efficiently, the sparsity has to be stored as well. Therefore, Borg stores a mapping ``(segment id,) -> (number of sparse bytes,)``. The 1.0.x series used a simpler non-conditional algorithm, which only required the list of sparse segments. Thus, it only stored a list, not the mapping described above. (ii) Each segment's reference count, which indicates how many live objects are in a segment. This is not strictly required to perform the algorithm. Rather, it is used to validate that a segment is unused before deleting it. If the algorithm is incorrect, or the reference count was not accounted correctly, then an assertion failure occurs. These two pieces of information are stored in the hints file (`hints.N`) next to the index (`index.N`). When loading a hints file, Borg checks the version contained in the file. The 1.0.x series writes version 1 of the format (with the segments list instead of the mapping, mentioned above). Since Borg 1.0.4, version 2 is read as well. The 1.1.x series writes version 2 of the format and reads either version. When reading a version 1 hints file, Borg 1.1.x will read all sparse segments to determine their sparsity. This process may take some time if a repository is kept in the append-only mode, which causes the number of sparse segments to grow. Repositories not in append-only mode have no sparse segments in 1.0.x, since compaction is unconditional. Compaction processes sparse segments from oldest to newest; sparse segments which don't contain enough deleted data to justify compaction are skipped. This avoids doing e.g. 500 MB of writing current data to a new segment when only a couple kB were deleted in a segment. Segments that are compacted are read in entirety. Current entries are written to a new segment, while superseded entries are omitted. After each segment an intermediary commit is written to the new segment. Then, the old segment is deleted (asserting that the reference count diminished to zero), freeing disk space. A simplified example (excluding conditional compaction and with simpler commit logic) showing the principal operation of compaction: .. figure:: compaction.png :figwidth: 100% :width: 100% (The actual algorithm is more complex to avoid various consistency issues, refer to the ``borg.repository`` module for more comments and documentation on these issues.) .. _internals_storage_quota: Storage quotas ~~~~~~~~~~~~~~ Quotas are implemented at the Repository level. The active quota of a repository is determined by the ``storage_quota`` `config` entry or a run-time override (via :ref:`borg_serve`). The currently used quota is stored in the hints file. Operations (PUT and DELETE) during a transaction modify the currently used quota: - A PUT adds the size of the *log entry* to the quota, i.e. the length of the data plus the 41 byte header. 
- A DELETE subtracts the size of the deleted log entry from the quota, which includes the header. Thus, PUT and DELETE are symmetric and cancel each other out precisely. The quota does not track on-disk size overheads (due to conditional compaction or append-only mode). In normal operation the inclusion of the log entry headers in the quota act as a faithful proxy for index and hints overheads. By tracking effective content size, the client can *always* recover from a full quota by deleting archives. This would not be possible if the quota tracked on-disk size, since journaling DELETEs requires extra disk space before space is freed. Tracking effective size on the other hand accounts DELETEs immediately as freeing quota. .. rubric:: Enforcing the quota The storage quota is meant as a robust mechanism for service providers, therefore :ref:`borg_serve` has to enforce it without loopholes (e.g. modified clients). The following sections refer to using quotas on remotely accessed repositories. For local access, consider *client* and *serve* the same. Accordingly, quotas cannot be enforced with local access, since the quota can be changed in the repository config. The quota is enforcible only if *all* :ref:`borg_serve` versions accessible to clients support quotas (see next section). Further, quota is per repository. Therefore, ensure clients can only access a defined set of repositories with their quotas set, using ``--restrict-to-repository``. If the client exceeds the storage quota the ``StorageQuotaExceeded`` exception is raised. Normally a client could ignore such an exception and just send a ``commit()`` command anyway, circumventing the quota. However, when ``StorageQuotaExceeded`` is raised, it is stored in the ``transaction_doomed`` attribute of the repository. If the transaction is doomed, then commit will re-raise this exception, aborting the commit. The transaction_doomed indicator is reset on a rollback (which erases the quota-exceeding state). .. rubric:: Compatibility with older servers and enabling quota after-the-fact If no quota data is stored in the hints file, Borg assumes zero quota is used. Thus, if a repository with an enabled quota is written to with an older ``borg serve`` version that does not understand quotas, then the quota usage will be erased. The client version is irrelevant to the storage quota and has no part in it. The form of error messages due to exceeding quota varies with client versions. A similar situation arises when upgrading from a Borg release that did not have quotas. Borg will start tracking quota use from the time of the upgrade, starting at zero. If the quota shall be enforced accurately in these cases, either - delete the ``index.N`` and ``hints.N`` files, forcing Borg to rebuild both, re-acquiring quota data in the process, or - edit the msgpacked ``hints.N`` file (not recommended and thus not documented further). The object graph ---------------- On top of the simple key-value store offered by the Repository_, Borg builds a much more sophisticated data structure that is essentially a completely encrypted object graph. Objects, such as archives_, are referenced by their chunk ID, which is cryptographically derived from their contents. More on how this helps security in :ref:`security_structural_auth`. .. figure:: object-graph.png :figwidth: 100% :width: 100% .. _manifest: The manifest ~~~~~~~~~~~~ The manifest is the root of the object hierarchy. It references all archives in a repository, and thus all data in it. 
Since no object references it, it cannot be stored under its ID key. Instead, the manifest has a fixed all-zero key. The manifest is rewritten each time an archive is created, deleted, or modified. It looks like this: .. code-block:: python { b'version': 1, b'timestamp': b'2017-05-05T12:42:23.042864', b'item_keys': [b'acl_access', b'acl_default', ...], b'config': {}, b'archives': { b'2017-05-05-system-backup': { b'id': b'<32 byte binary object ID>', b'time': b'2017-05-05T12:42:22.942864', }, }, b'tam': ..., } The *version* field can be either 1 or 2. The versions differ in the way feature flags are handled, described below. The *timestamp* field is used to avoid logical replay attacks where the server just resets the repository to a previous state. *item_keys* is a list containing all Item_ keys that may be encountered in the repository. It is used by *borg check*, which verifies that all keys in all items are a subset of these keys. Thus, an older version of *borg check* supporting this mechanism can correctly detect keys introduced in later versions. The *tam* key is part of the :ref:`tertiary authentication mechanism ` (formerly known as "tertiary authentication for metadata") and authenticates the manifest, since an ID check is not possible. *config* is a general-purpose location for additional metadata. All versions of Borg preserve its contents (it may have been a better place for *item_keys*, which is not preserved by unaware Borg versions, releases predating 1.0.4). Feature flags +++++++++++++ Feature flags are used to add features to data structures without causing corruption if older versions are used to access or modify them. The main issues to consider for a feature flag oriented design are flag granularity, flag storage, and cache_ invalidation. Feature flags are divided in approximately three categories, detailed below. Due to the nature of ID-based deduplication, write (i.e. creating archives) and read access are not symmetric; it is possible to create archives referencing chunks that are not readable with the current feature set. The third category are operations that require accurate reference counts, for example archive deletion and check. As the manifest is always updated and always read, it is the ideal place to store feature flags, comparable to the super-block of a file system. The only problem is to recover from a lost manifest, i.e. how is it possible to detect which feature flags are enabled, if there is no manifest to tell. This issue is left open at this time, but is not expected to be a major hurdle; it doesn't have to be handled efficiently, it just needs to be handled. Lastly, cache_ invalidation is handled by noting which feature flags were and which were not understood while manipulating a cache. This allows to detect whether the cache needs to be invalidated, i.e. rebuilt from scratch. See `Cache feature flags`_ below. The *config* key stores the feature flags enabled on a repository: .. code-block:: python config = { b'feature_flags': { b'read': { b'mandatory': [b'some_feature'], }, b'check': { b'mandatory': [b'other_feature'], } b'write': ..., b'delete': ... }, } The top-level distinction for feature flags is the operation the client intends to perform, | the *read* operation includes extraction and listing of archives, | the *write* operation includes creating new archives, | the *delete* (archives) operation, | the *check* operation requires full understanding of everything in the repository. 
| These are weakly set-ordered; *check* will include everything required for *delete*, *delete* will likely include *write* and *read*. However, *read* may require more features than *write* (due to ID-based deduplication, *write* does not necessarily require reading/understanding repository contents). Each operation can contain several sets of feature flags. Only one set, the *mandatory* set is currently defined. Upon reading the manifest, the Borg client has already determined which operation should be performed. If feature flags are found in the manifest, the set of feature flags supported by the client is compared to the mandatory set found in the manifest. If any unsupported flags are found (i.e. the mandatory set is not a subset of the features supported by the Borg client used), the operation is aborted with a *MandatoryFeatureUnsupported* error: Unsupported repository feature(s) {'some_feature'}. A newer version of borg is required to access this repository. Older Borg releases do not have this concept and do not perform feature flags checks. These can be locked out with manifest version 2. Thus, the only difference between manifest versions 1 and 2 is that the latter is only accepted by Borg releases implementing feature flags. Therefore, as soon as any mandatory feature flag is enabled in a repository, the manifest version must be switched to version 2 in order to lock out all Borg releases unaware of feature flags. .. _Cache feature flags: .. rubric:: Cache feature flags `The cache`_ does not have its separate set of feature flags. Instead, Borg stores which flags were used to create or modify a cache. All mandatory manifest features from all operations are gathered in one set. Then, two sets of features are computed; - those features that are supported by the client and mandated by the manifest are added to the *mandatory_features* set, - the *ignored_features* set comprised of those features mandated by the manifest, but not supported by the client. Because the client previously checked compliance with the mandatory set of features required for the particular operation it is executing, the *mandatory_features* set will contain all necessary features required for using the cache safely. Conversely, the *ignored_features* set contains only those features which were not relevant to operating the cache. Otherwise, the client would not pass the feature set test against the manifest. When opening a cache and the *mandatory_features* set is not a subset of the features supported by the client, the cache is wiped out and rebuilt, since a client not supporting a mandatory feature that the cache was built with would be unable to update it correctly. The assumption behind this behaviour is that any of the unsupported features could have been reflected in the cache and there is no way for the client to discern whether that is the case. Meanwhile, it may not be practical for every feature to have clients using it track whether the feature had an impact on the cache. Therefore, the cache is wiped. When opening a cache and the intersection of *ignored_features* and the features supported by the client contains any elements, i.e. the client possesses features that the previous client did not have and those new features are enabled in the repository, the cache is wiped out and rebuilt. While the former condition likely requires no tweaks, the latter condition is formulated in an especially conservative way to play it safe. 
It seems likely that specific features might be exempted from the latter condition. .. rubric:: Defined feature flags Currently no feature flags are defined. From currently planned features, some examples follow, these may/may not be implemented and purely serve as examples. - A mandatory *read* feature could be using a different encryption scheme (e.g. session keys). This may not be mandatory for the *write* operation - reading data is not strictly required for creating an archive. - Any additions to the way chunks are referenced (e.g. to support larger archives) would become a mandatory *delete* and *check* feature; *delete* implies knowing correct reference counts, so all object references need to be understood. *check* must discover the entire object graph as well, otherwise the "orphan chunks check" could delete data still in use. .. _archive: Archives ~~~~~~~~ Each archive is an object referenced by the manifest. The archive object itself does not store any of the data contained in the archive it describes. Instead, it contains a list of chunks which form a msgpacked stream of items_. The archive object itself further contains some metadata: * *version* * *name*, which might differ from the name set in the manifest. When :ref:`borg_check` rebuilds the manifest (e.g. if it was corrupted) and finds more than one archive object with the same name, it adds a counter to the name in the manifest, but leaves the *name* field of the archives as it was. * *items*, a list of chunk IDs containing item metadata (size: count * ~34B) * *cmdline*, the command line which was used to create the archive * *hostname* * *username* * *time* and *time_end* are the start and end timestamps, respectively * *comment*, a user-specified archive comment * *chunker_params* are the :ref:`chunker-params ` used for creating the archive. This is used by :ref:`borg_recreate` to determine whether a given archive needs rechunking. * Some other pieces of information related to recreate. .. _archive_limitation: .. rubric:: Note about archive limitations The archive is currently stored as a single object in the repository and thus limited in size to MAX_OBJECT_SIZE (20MiB). As one chunk list entry is ~40B, that means we can reference ~500.000 item metadata stream chunks per archive. Each item metadata stream chunk is ~128kiB (see hardcoded ITEMS_CHUNKER_PARAMS). So that means the whole item metadata stream is limited to ~64GiB chunks. If compression is used, the amount of storable metadata is bigger - by the compression factor. If the medium size of an item entry is 100B (small size file, no ACLs/xattrs), that means a limit of ~640 million files/directories per archive. If the medium size of an item entry is 2kB (~100MB size files or more ACLs/xattrs), the limit will be ~32 million files/directories per archive. If one tries to create an archive object bigger than MAX_OBJECT_SIZE, a fatal IntegrityError will be raised. A workaround is to create multiple archives with fewer items each, see also :issue:`1452`. .. 
_item: Items ~~~~~ Each item represents a file, directory or other file system item and is stored as a dictionary created by the ``Item`` class that contains: * path * list of data chunks (size: count * ~40B) * user * group * uid * gid * mode (item type + permissions) * source (for symlinks, and for hardlinks within one archive) * rdev (for device files) * mtime, atime, ctime in nanoseconds * xattrs * acl (various OS-dependent fields) * bsdflags All items are serialized using msgpack and the resulting byte stream is fed into the same chunker algorithm as used for regular file data and turned into deduplicated chunks. The reference to these chunks is then added to the archive metadata. To achieve a finer granularity on this metadata stream, we use different chunker params for this chunker, which result in smaller chunks. A chunk is stored as an object as well, of course. .. _chunks: .. _chunker_details: Chunks ~~~~~~ The |project_name| chunker uses a rolling hash computed by the Buzhash_ algorithm. It triggers (chunks) when the last HASH_MASK_BITS bits of the hash are zero, producing chunks with a target size of 2^HASH_MASK_BITS Bytes. Buzhash is **only** used for cutting the chunks at places defined by the content, the buzhash value is **not** used as the deduplication criteria (we use a cryptographically strong hash/MAC over the chunk contents for this, the id_hash). ``borg create --chunker-params CHUNK_MIN_EXP,CHUNK_MAX_EXP,HASH_MASK_BITS,HASH_WINDOW_SIZE`` can be used to tune the chunker parameters, the default is: - CHUNK_MIN_EXP = 19 (minimum chunk size = 2^19 B = 512 kiB) - CHUNK_MAX_EXP = 23 (maximum chunk size = 2^23 B = 8 MiB) - HASH_MASK_BITS = 21 (target chunk size ~= 2^21 B = 2 MiB) - HASH_WINDOW_SIZE = 4095 [B] (`0xFFF`) The buzhash table is altered by XORing it with a seed randomly generated once for the repository, and stored encrypted in the keyfile. This is to prevent chunk size based fingerprinting attacks on your encrypted repo contents (to guess what files you have based on a specific set of chunk sizes). For some more general usage hints see also ``--chunker-params``. .. _cache: The cache --------- The **files cache** is stored in ``cache/files`` and is used at backup time to quickly determine whether a given file is unchanged and we have all its chunks. In memory, the files cache is a key -> value mapping (a Python *dict*) and contains: * key: id_hash of the encoded, absolute file path * value: - file inode number - file size - file mtime_ns - age (0 [newest], 1, 2, 3, ..., BORG_FILES_CACHE_TTL - 1) - list of chunk ids representing the file's contents To determine whether a file has not changed, cached values are looked up via the key in the mapping and compared to the current file attribute values. If the file's size, mtime_ns and inode number is still the same, it is considered to not have changed. In that case, we check that all file content chunks are (still) present in the repository (we check that via the chunks cache). If everything is matching and all chunks are present, the file is not read / chunked / hashed again (but still a file metadata item is written to the archive, made from fresh file metadata read from the filesystem). This is what makes borg so fast when processing unchanged files. If there is a mismatch or a chunk is missing, the file is read / chunked / hashed. Chunks already present in repo won't be transferred to repo again. 
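The "unchanged file" test described above can be summarized in a few lines. The following
is a hypothetical sketch (names and structures are made up for illustration; Borg's real
files cache is keyed by id_hash of the encoded absolute path and persisted as msgpacked
tuples):

.. code-block:: python

    import os
    from collections import namedtuple

    FilesCacheEntry = namedtuple("FilesCacheEntry", "inode size mtime_ns age chunk_ids")

    def file_is_unchanged(path, entry, chunk_is_in_repo):
        """Return True if *path* may be taken from the cache without re-chunking it."""
        if entry is None:                              # file was never seen before
            return False
        st = os.stat(path, follow_symlinks=False)
        if (st.st_ino, st.st_size, st.st_mtime_ns) != (entry.inode, entry.size, entry.mtime_ns):
            return False                               # metadata mismatch: read/chunk/hash again
        # metadata matches: additionally check that all content chunks are still present
        return all(chunk_is_in_repo(chunk_id) for chunk_id in entry.chunk_ids)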
The inode number is stored and compared to make sure we distinguish between different files, as a single path may not be unique across different archives in different setups. Not all filesystems have stable inode numbers. If that is the case, borg can be told to ignore the inode number in the check via --ignore-inode. The age value is used for cache management. If a file is "seen" in a backup run, its age is reset to 0, otherwise its age is incremented by one. If a file was not seen in BORG_FILES_CACHE_TTL backups, its cache entry is removed. See also: :ref:`always_chunking` and :ref:`a_status_oddity` The files cache is a python dictionary, storing python objects, which generates a lot of overhead. Borg can also work without using the files cache (saves memory if you have a lot of files or not much RAM free), then all files are assumed to have changed. This is usually much slower than with files cache. The on-disk format of the files cache is a stream of msgpacked tuples (key, value). Loading the files cache involves reading the file, one msgpack object at a time, unpacking it, and msgpacking the value (in an effort to save memory). The **chunks cache** is stored in ``cache/chunks`` and is used to determine whether we already have a specific chunk, to count references to it and also for statistics. The chunks cache is a key -> value mapping and contains: * key: - chunk id_hash * value: - reference count - size - encrypted/compressed size The chunks cache is a HashIndex_. Due to some restrictions of HashIndex, the reference count of each given chunk is limited to a constant, MAX_VALUE (introduced below in HashIndex_), approximately 2**32. If a reference count hits MAX_VALUE, decrementing it yields MAX_VALUE again, i.e. the reference count is pinned to MAX_VALUE. .. _cache-memory-usage: Indexes / Caches memory usage ----------------------------- Here is the estimated memory usage of |project_name| - it's complicated:: chunk_count ~= total_file_size / 2 ^ HASH_MASK_BITS repo_index_usage = chunk_count * 40 chunks_cache_usage = chunk_count * 44 files_cache_usage = total_file_count * 240 + chunk_count * 80 mem_usage ~= repo_index_usage + chunks_cache_usage + files_cache_usage = chunk_count * 164 + total_file_count * 240 Due to the hashtables, the best/usual/worst cases for memory allocation can be estimated like that:: mem_allocation = mem_usage / load_factor # l_f = 0.25 .. 0.75 mem_allocation_peak = mem_allocation * (1 + growth_factor) # g_f = 1.1 .. 2 All units are Bytes. It is assuming every chunk is referenced exactly once (if you have a lot of duplicate chunks, you will have fewer chunks than estimated above). It is also assuming that typical chunk size is 2^HASH_MASK_BITS (if you have a lot of files smaller than this statistical medium chunk size, you will have more chunks than estimated above, because 1 file is at least 1 chunk). If a remote repository is used the repo index will be allocated on the remote side. The chunks cache, files cache and the repo index are all implemented as hash tables. A hash table must have a significant amount of unused entries to be fast - the so-called load factor gives the used/unused elements ratio. When a hash table gets full (load factor getting too high), it needs to be grown (allocate new, bigger hash table, copy all elements over to it, free old hash table) - this will lead to short-time peaks in memory usage each time this happens. Usually does not happen for all hashtables at the same time, though. 
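As a sanity check of the estimation formulas above, a few lines of plain arithmetic
(nothing Borg-specific) reproduce the two example figures quoted just below for backing
up 1 Mi files with a total size of 1 TiB:

.. code-block:: python

    def estimated_mem_usage(total_file_size, total_file_count, hash_mask_bits):
        chunk_count = total_file_size // 2 ** hash_mask_bits
        repo_index_usage = chunk_count * 40
        chunks_cache_usage = chunk_count * 44
        files_cache_usage = total_file_count * 240 + chunk_count * 80
        return repo_index_usage + chunks_cache_usage + files_cache_usage

    GiB = 2 ** 30
    files, size = 2 ** 20, 2 ** 40                                  # 1 Mi files, 1 TiB
    print(round(estimated_mem_usage(size, files, 16) / GiB, 2))     # 2.8  (HASH_MASK_BITS = 16)
    print(round(estimated_mem_usage(size, files, 21) / GiB, 2))     # 0.31 (HASH_MASK_BITS = 21, default)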
For small hash tables, we start with a growth factor of 2, which comes down to ~1.1x for
big hash tables.

E.g. backing up a total count of 1 Mi (IEC binary prefix i.e. 2^20) files with a total
size of 1TiB:

a) with ``create --chunker-params 10,23,16,4095`` (custom, like borg < 1.0 or attic):

   mem_usage = 2.8GiB

b) with ``create --chunker-params 19,23,21,4095`` (default):

   mem_usage = 0.31GiB

.. note:: There is also the ``--files-cache=disabled`` option to disable the files cache.
          You'll save some memory, but it will need to read / chunk all the files as it
          cannot skip unmodified files then.

HashIndex
---------

The chunks cache and the repository index are stored as hash tables, with only one slot
per bucket, spreading hash collisions to the following buckets. As a consequence the hash
is just a start position for a linear search. If a key is looked up that is not in the
table, then the hash table is searched from the start position (the hash) until the first
empty bucket is reached.

This particular mode of operation is open addressing with linear probing.

When the hash table is filled to 75%, its size is grown. When it's emptied to 25%, its
size is shrunk.

Operations on it have a variable complexity between constant and linear with low factor,
and memory overhead varies between 33% and 300%.

If an element is deleted, and the slot behind the deleted element is not empty, then the
element will leave a tombstone, a bucket marked as deleted. Tombstones are only removed
by insertions using the tombstone's bucket, or by resizing the table. They present the
same load to the hash table as a real entry, but do not count towards the regular load
factor. Thus, if the number of empty slots becomes too low (recall that linear probing
for an element not in the index stops at the first empty slot), the hash table is rebuilt.
The maximum *effective* load factor, i.e. including tombstones, is 93%.

Data in a HashIndex is always stored in little-endian format, which increases efficiency
for almost everyone, since basically no one uses big-endian processors any more.

HashIndex does not use a hashing function, because all keys (save manifest) are outputs
of a cryptographic hash or MAC and thus already have excellent distribution. Thus,
HashIndex simply uses the first 32 bits of the key as its "hash".

The format is easy to read and write, because the buckets array has the same layout in
memory and on disk. Only the header formats differ. The on-disk header is
``struct HashHeader``:

- First, the HashIndex magic, the eight byte ASCII string "BORG_IDX".
- Second, the signed 32-bit number of entries (i.e. buckets which are not deleted and not empty).
- Third, the signed 32-bit number of buckets, i.e. the length of the buckets array
  contained in the file, and the modulus for index calculation.
- Fourth, the signed 8-bit length of keys.
- Fifth, the signed 8-bit length of values. This has to be at least four bytes.

All fields are packed.

The HashIndex is *not* a general purpose data structure. The value size must be at least
4 bytes, and these first bytes are used for in-band signalling in the data structure itself.

The constant MAX_VALUE (defined as 2**32-1025 = 4294966271) defines the valid range for
these 4 bytes when interpreted as an uint32_t from 0 to MAX_VALUE (inclusive).
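For illustration, the on-disk header layout just described could be read with a short
Python sketch (field order and widths as per the list above; this is not Borg's actual
C/Cython implementation):

.. code-block:: python

    import struct

    # magic (8 bytes), num_entries (int32), num_buckets (int32),
    # key_size (int8), value_size (int8); little-endian and packed (no padding)
    HASH_HEADER = struct.Struct("<8siibb")

    def read_hash_header(path):
        with open(path, "rb") as f:
            magic, num_entries, num_buckets, key_size, value_size = \
                HASH_HEADER.unpack(f.read(HASH_HEADER.size))
        assert magic == b"BORG_IDX"
        assert value_size >= 4      # the first 4 value bytes are used for in-band signalling
        return num_entries, num_buckets, key_size, value_size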
The following reserved values beyond MAX_VALUE are currently in use (byte order is LE): - 0xffffffff marks empty buckets in the hash table - 0xfffffffe marks deleted buckets in the hash table HashIndex is implemented in C and wrapped with Cython in a class-based interface. The Cython wrapper checks every passed value against these reserved values and raises an AssertionError if they are used. Encryption ---------- .. seealso:: The :ref:`borgcrypto` section for an in-depth review. AES_-256 is used in CTR mode (so no need for padding). A 64 bit initialization vector is used, a MAC is computed on the encrypted chunk and both are stored in the chunk. Encryption and MAC use two different keys. Each chunk consists of ``TYPE(1)`` + ``MAC(32)`` + ``NONCE(8)`` + ``CIPHERTEXT``: .. figure:: encryption.png :figwidth: 100% :width: 100% In AES-CTR mode you can think of the IV as the start value for the counter. The counter itself is incremented by one after each 16 byte block. The IV/counter is not required to be random but it must NEVER be reused. So to accomplish this |project_name| initializes the encryption counter to be higher than any previously used counter value before encrypting new data. To reduce payload size, only 8 bytes of the 16 bytes nonce is saved in the payload, the first 8 bytes are always zeros. This does not affect security but limits the maximum repository capacity to only 295 exabytes (2**64 * 16 bytes). Encryption keys (and other secrets) are kept either in a key file on the client ('keyfile' mode) or in the repository config on the server ('repokey' mode). In both cases, the secrets are generated from random and then encrypted by a key derived from your passphrase (this happens on the client before the key is stored into the keyfile or as repokey). The passphrase is passed through the ``BORG_PASSPHRASE`` environment variable or prompted for interactive usage. .. _key_files: Key files --------- .. seealso:: The :ref:`key_encryption` section for an in-depth review of the key encryption. When initialized with the ``init -e keyfile`` command, |project_name| needs an associated file in ``$HOME/.config/borg/keys`` to read and write the repository. The format is based on msgpack_, base64 encoding and PBKDF2_ SHA256 hashing, which is then encoded again in a msgpack_. The same data structure is also used in the "repokey" modes, which store it in the repository in the configuration file. The internal data structure is as follows: version currently always an integer, 1 repository_id the ``id`` field in the ``config`` ``INI`` file of the repository. enc_key the key used to encrypt data with AES (256 bits) enc_hmac_key the key used to HMAC the encrypted data (256 bits) id_key the key used to HMAC the plaintext chunk data to compute the chunk's id chunk_seed the seed for the buzhash chunking table (signed 32 bit integer) These fields are packed using msgpack_. The utf-8 encoded passphrase is processed with PBKDF2_ (SHA256_, 100000 iterations, random 256 bit salt) to derive a 256 bit key encryption key (KEK). A `HMAC-SHA256`_ checksum of the packed fields is generated with the KEK, then the KEK is also used to encrypt the same packed fields using AES-CTR. 
The result is stored in another msgpack_ formatted as follows:

version
  currently always an integer, 1

salt
  random 256 bits salt used to process the passphrase

iterations
  number of iterations used to process the passphrase (currently 100000)

algorithm
  the hashing algorithm used to process the passphrase and do the HMAC checksum
  (currently the string ``sha256``)

hash
  HMAC-SHA256 of the *plaintext* of the packed fields.

data
  The encrypted, packed fields.

The resulting msgpack_ is then encoded using base64 and written to the key file, wrapped
using the standard ``textwrap`` module with a header. The header is a single line with a
MAGIC string, a space and a hexadecimal representation of the repository id.

Compression
-----------

|project_name| supports the following compression methods:

- none (no compression, pass through data 1:1)
- lz4 (low compression, but super fast)
- zstd (level 1-22 offering a wide range: level 1 is lower compression and high speed,
  level 22 is higher compression and lower speed) - since borg 1.1.4
- zlib (level 0-9, level 0 is no compression [but still adding zlib overhead],
  level 1 is low, level 9 is high compression)
- lzma (level 0-9, level 0 is low, level 9 is high compression).

Speed:       none > lz4 > zlib > lzma, lz4 > zstd
Compression: lzma > zlib > lz4 > none, zstd > lz4

Be careful, higher compression levels might use a lot of resources (CPU/memory).

The overall speed of course also depends on the speed of your target storage. If that is
slow, using a higher compression level might yield better overall performance. You need
to experiment a bit. Maybe just watch your CPU load, if that is relatively low, increase
compression until 1 core is 70-100% loaded.

Even if your target storage is rather fast, you might see interesting effects: while
doing no compression at all (none) is an operation that takes no time, it likely will
need to store more data to the storage compared to using lz4. The time needed to transfer
and store the additional data might be much more than if you had used lz4 (which is super
fast, but still might compress your data about 2:1). This is assuming your data is
compressible (if you back up already compressed data, trying to compress it again at
backup time is usually pointless).

Compression is applied after deduplication, thus using different compression methods in
one repo does not influence deduplication.

See ``borg create --help`` about how to specify the compression level and its default.

Lock files
----------

|project_name| uses locks to get (exclusive or shared) access to the cache and the repository.

The locking system is based on creating a directory `lock.exclusive` (for exclusive locks).
Inside the lock directory, there is a file indicating hostname, process id and thread id of
the lock holder.

There is also a json file `lock.roster` that keeps a directory of all shared and exclusive
lockers.

If the process can create the `lock.exclusive` directory for a resource, it has the lock
for it. If creation fails (because the directory has already been created by some other
process), lock acquisition fails.

The cache lock is usually in `~/.cache/borg/REPOID/lock.*`. The repository lock is in
`repository/lock.*`.

In case you run into trouble with the locks, you can use the ``borg break-lock`` command
after you first have made sure that no |project_name| process is running on any machine
that accesses this resource. Be very careful, the cache or repository might get damaged
if multiple processes use it at the same time.
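The directory-based locking scheme just described relies on ``mkdir`` being an atomic
operation. A minimal sketch of the idea (illustrative only - the actual logic lives in
``borg.locking`` and additionally maintains the ``lock.roster`` file for shared locks):

.. code-block:: python

    import os
    import socket
    import threading

    def try_acquire_exclusive(resource_path):
        """Try to take the exclusive lock for *resource_path*; return True on success."""
        lock_dir = os.path.join(resource_path, "lock.exclusive")
        try:
            os.mkdir(lock_dir)                      # atomic: only one process can succeed
        except FileExistsError:
            return False                            # some other process already holds the lock
        holder = "%s.%d-%d" % (socket.gethostname(), os.getpid(), threading.get_ident())
        open(os.path.join(lock_dir, holder), "w").close()   # record hostname/pid/thread id
        return True

    def release_exclusive(resource_path):
        lock_dir = os.path.join(resource_path, "lock.exclusive")
        for name in os.listdir(lock_dir):
            os.unlink(os.path.join(lock_dir, name))
        os.rmdir(lock_dir)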
Checksumming data structures ---------------------------- As detailed in the previous sections, Borg generates and stores various files containing important meta data, such as the repository index, repository hints, chunks caches and files cache. Data corruption in these files can damage the archive data in a repository, e.g. due to wrong reference counts in the chunks cache. Only some parts of Borg were designed to handle corrupted data structures, so a corrupted files cache may cause crashes or write incorrect archives. Therefore, Borg calculates checksums when writing these files and tests checksums when reading them. Checksums are generally 64-bit XXH64 hashes. The canonical xxHash representation is used, i.e. big-endian. Checksums are stored as hexadecimal ASCII strings. For compatibility, checksums are not required and absent checksums do not trigger errors. The mechanisms have been designed to avoid false-positives when various Borg versions are used alternately on the same repositories. Checksums are a data safety mechanism. They are not a security mechanism. .. rubric:: Choice of algorithm XXH64 has been chosen for its high speed on all platforms, which avoids performance degradation in CPU-limited parts (e.g. cache synchronization). Unlike CRC32, it neither requires hardware support (crc32c or CLMUL) nor vectorized code nor large, cache-unfriendly lookup tables to achieve good performance. This simplifies deployment of it considerably (cf. src/borg/algorithms/crc32...). Further, XXH64 is a non-linear hash function and thus has a "more or less" good chance to detect larger burst errors, unlike linear CRCs where the probability of detection decreases with error size. The 64-bit checksum length is considered sufficient for the file sizes typically checksummed (individual files up to a few GB, usually less). xxHash was expressly designed for data blocks of these sizes. Lower layer — file_integrity ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ To accommodate the different transaction models used for the cache and repository, there is a lower layer (borg.crypto.file_integrity.IntegrityCheckedFile) wrapping a file-like object, performing streaming calculation and comparison of checksums. Checksum errors are signalled by raising an exception (borg.crypto.file_integrity.FileIntegrityError) at the earliest possible moment. .. rubric:: Calculating checksums Before feeding the checksum algorithm any data, the file name (i.e. without any path) is mixed into the checksum, since the name encodes the context of the data for Borg. The various indices used by Borg have separate header and main data parts. IntegrityCheckedFile allows to checksum them independently, which avoids even reading the data when the header is corrupted. When a part is signalled, the length of the part name is mixed into the checksum state first (encoded as an ASCII string via `%10d` printf format), then the name of the part is mixed in as an UTF-8 string. Lastly, the current position (length) in the file is mixed in as well. The checksum state is not reset at part boundaries. A final checksum is always calculated in the same way as the parts described above, after seeking to the end of the file. The final checksum cannot prevent code from processing corrupted data during reading, however, it prevents use of the corrupted data. .. rubric:: Serializing checksums All checksums are compiled into a simple JSON structure called *integrity data*: .. 
code-block:: json

    {
        "algorithm": "XXH64",
        "digests": {
            "HashHeader": "eab6802590ba39e3",
            "final": "e2a7f132fc2e8b24"
        }
    }

The *algorithm* key notes the used algorithm. When reading, integrity data containing an
unknown algorithm is not inspected further.

The *digests* key contains a mapping of part names to their digests.

Integrity data is generally stored by the upper layers, introduced below. An exception is
the DetachedIntegrityCheckedFile, which automatically writes and reads it from a
".integrity" file next to the data file. It is used for archive chunks indexes in
chunks.archive.d.

Upper layer
~~~~~~~~~~~

Storage of integrity data depends on the component using it, since they have different
transaction mechanisms, and integrity data needs to be transacted with the data it is
supposed to protect.

.. rubric:: Main cache files: chunks and files cache

The integrity data of the ``chunks`` and ``files`` caches is stored in the cache
``config``, since all three are transacted together. The ``[integrity]`` section is used:

.. code-block:: ini

    [cache]
    version = 1
    repository = 3c4...e59
    manifest = 10e...21c
    timestamp = 2017-06-01T21:31:39.699514
    key_type = 2
    previous_location = /path/to/repo

    [integrity]
    manifest = 10e...21c
    chunks = {"algorithm": "XXH64", "digests": {"HashHeader": "eab...39e3", "final": "e2a...b24"}}

The manifest ID is duplicated in the integrity section due to the way all Borg versions
handle the config file. Instead of creating a "new" config file from an internal
representation containing only the data understood by Borg, the config file is read in its
entirety (using the Python ConfigParser) and modified. This preserves all sections and
values not understood by the Borg version modifying it.

Thus, if an older version uses a cache with integrity data, it would preserve the
integrity section and its contents. If an integrity-aware Borg version were then to read
this cache, it would incorrectly report checksum errors, since the older version did not
update the checksums. However, by duplicating the manifest ID in the integrity section, it
is easy to tell whether the checksums concern the current state of the cache.

Integrity errors are fatal in these files, terminating the program, and are not
automatically corrected at this time.

.. rubric:: chunks.archive.d

Indices in chunks.archive.d are not transacted and use DetachedIntegrityCheckedFile, which
writes the integrity data to a separate ".integrity" file. Integrity errors result in
deleting the affected index and rebuilding it. This logs a warning and increases the exit
code to WARNING (1).

.. _integrity_repo:

.. rubric:: Repository index and hints

The repository associates index and hints files with a transaction by including the
transaction ID in the file names. Integrity data is stored in a third file
("integrity.<TRANSACTION_ID>"). Like the hints file, it is msgpacked:

.. code-block:: python

    {
        b'version': 2,
        b'hints': b'{"algorithm": "XXH64", "digests": {"final": "411208db2aa13f1a"}}',
        b'index': b'{"algorithm": "XXH64", "digests": {"HashHeader": "846b7315f91b8e48", "final": "cb3e26cadc173e40"}}'
    }

The *version* key started at 2, the same version used for the hints. Since Borg has many
versioned file formats, this keeps the number of different versions in use a bit lower.

The other keys map an auxiliary file, like *index* or *hints*, to their integrity data.
Note that the JSON is stored as-is, and not as part of the msgpack structure.
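A hedged sketch of how this structure could be checked: the JSON strings are parsed and
compared against freshly computed digests. The helper name ``compute_digests`` is made up
here, and error handling is reduced to returning the list of files that need rebuilding:

.. code-block:: python

    import json

    def find_corrupted_files(integrity_msg, compute_digests):
        """integrity_msg: the unpacked msgpack dict shown above, e.g.
        {b'version': 2, b'index': b'{...}', b'hints': b'{...}'}.
        compute_digests(name): freshly calculated digest mapping for that file."""
        corrupted = []
        for name in (b"index", b"hints"):
            data = json.loads(integrity_msg[name])
            if data["algorithm"] != "XXH64":
                continue                    # unknown algorithm: do not inspect further
            if compute_digests(name) != data["digests"]:
                corrupted.append(name)
        return corrupted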
Integrity errors result in deleting the affected file(s) (index/hints) and rebuilding the index, which is the same action taken when corruption is noticed in other ways (e.g. HashIndex can detect most corrupted headers, but not data corruption). A warning is logged as well. The exit code is not influenced, since remote repositories cannot perform that action. Raising the exit code would be possible for local repositories, but is not implemented.

Unlike the cache design this mechanism can have false positives whenever an older version *rewrites* the auxiliary files for a transaction created by a newer version, since that might result in a different index (due to hash-table resizing) or hints file (hash ordering, or the older version 1 format), while not invalidating the integrity file. For example, using 1.1 on a repository, noticing corruption or similar issues and then running ``borg-1.0 check --repair``, which rewrites the index and hints, results in this situation. Borg 1.1 would erroneously report checksum errors in the hints and/or index files and trigger an automatic rebuild of these files.

borgbackup-1.1.15/docs/internals/compaction.vsd0000644000175000017500000056400013771325506021513 0ustar useruser00000000000000
!+5ChMW]u$g>5 L@X!$5 /`Vis_81.chm!#4E"67M `_!b-"#"'?9h:AiA 9 $''2ǥqE0&!$E0&!qP \@6%A]Dc]3V:?Ka!Db1OCK%YEiO{OL@ODJv%YKOO"NBSB53^@M^U_g_y_Rg@14ot'2TALd"2(Ech,@!3qlf1U1 m3 =M!l+TM9M]a6 `($T 6D UF~@xTiC;'h>4bP9 EUF~?L&d2?F\.?Q6 U AM| ;;u` ?*#)3=KU_euo>5 L@X!#$5 /`Vis_Sba.chm!#57_ 2#`$g!-*#*'?9  %&/7 aJ=?Iy+#&Z&P?B#2N贁wNk?@J'q ,A MVq6q4!*#ekab1; ?Eq6 I-BCb:@e2C1%>> OO1EA5~%5]NGxOC9AOI%\O`JOOrn_gPo|'TAFd" (?cp,@&!iA%9 -$b]'/2q`&!gE0f*laUmHE?KlT(]9^]-Ma6 `r (QJE-Os~%_s%/\h}Mf!>zbcrA bb HUr ~L)uE/>F *#DJB dYbbDW']Qo@t^SoCPUFDfP h VTB UYuU??Fxha T,aaUQJ\UF BP(?@?F~?$-?P nL#l]dV^ ] $g]Q"U"Y*"]>"~#<?&#A&Br&C"u` ?PYu"bl""& M&u"u&u"u"u"u" ,u" ,u"4,u"H,66t#\"u"///$u)ar;21'u `Fxu21yB}BAABA"!^hO$,@ZLupO耑 @Ca4R#qt{u*gvXWcZufvvWuTj*uX{u ]ʤ *aGIJKMNOIQ"R$QTܝQVWXYC3333.3>3N3^3n3~33333333@33Vf4b@Vqf0φЬ uZ1[]^s@333'373Gc ƢEt&EAxÎ;BA}@c"3"3."c Ӣp x/3,S/e ~/iRfSIA62ab ZaLxVIǪ *:Jzךת'OgEa@braĠqAQJ`FWO@r ЀiC}F@`I@)@AEBsZeȓ3U ,puӐe(p 4>{ScNV:fcfR:bWKqP?@pA3VUbQw Sio@(h#FtϿF@ qRfa+_i&7=akVQhaRkQclQohaR2brˡF@#I~rW{ 3~TE}O bqO cy`I@Ё`sVRڀfK@ڀbooOlw wKuK`1s*8zK@JqyozI@ eoo /uB s!xx7SlP4;walPe,0kNq @Jq.-8r JqM9-t '-E1*dbt+r sQJQLŌH{M0tb;{O,=CƁS=ԯןƋM2'Kx~{עamzD1-?F|ǓnJ\˟ݟITjU˫ˬ7F:-˫YkIՎ_TF ?BP(!RQ!4 Iz{7 F.!`}~k QhaqY.!(SFxfb@&7! kRka},1ff",1~,1ff ,1WVHf312E1kaB12lQ12a(712,1HfWV3UYkaO} mlQYBaYO(B,1WVHf3ka lQ"(;" FHfWV3Y"]#kaS#" q#lQ]#"]#S#"QoDcjWV3,1HfEFWVCaHf``5d 䰈qGA8rL5k0BAK6 k0L5k5L5|0V!CBL5 = |5QQ5&t?U: =|5vŦ547 P@QzqE4HQ7b&qn24Dp)CTU+0F2o$0jjSRjaRD9Q#eM1sB=yIZR1s^lZLV_4pQL׌HH.5Oa 5.. ^fCmnfv3p` T]`avelShift`$xa_d@a3p`OΠ`fs`m!*_a@@ VV%Sr V:67sGr:2!l7s[r 27.1i$3s 1T$y@a@dvkQXU.q.'2rqty!,0}v`n$qqt7v-x8cʁEl@baa%]1]1!H$rI*J!1LLLDŽkq+MԄ .NAOAAsQRe.S"T/.rq.qqAWVqqFYpRBZ}/b[.)\r].QLQ_1(A`ٓza(.44.;[Rx a;`"[TBf"< 1W`B`sh]`cu g`f&+o]`m`t`l`g`l*?`0qH`.:jx +\T`x`bdvq_v!L5~*ϫ16DP`)_R`Ht%k` 7DVhzڠ2xQBuݿ)/\BanAM_0qx2Ϛ0AP*(O`a`*4dFxgy2?Wʷշۯ-K08&4Fb[mD{ǿDD!s 1ϱPGYϋ(6ѲĢϩȩ5hA`# &AsDEy3 UĻɯAv^2`OnwAQ9)(ĽBT0fAvH`Uta)Yp!3EWLnew@)ncbā Av9H`ER#t6Hl ~)99:'ϟoP$tm-At!BT)ங'7İDZEBs&1)/I4+r/o/Av(* t/% *Pa#/?4Vir4F:?@^?mAiCOOOO6O=nO Z\9?K8/ `v GAAtPT&xJb=rb+ Trt*P@?B#o54trptb T mkqőԁw`3P?squ{tcF~ϿF@ Y%ukp{`?u %,Vrq @V4`+AszSPsT3 )5>%j{ိqi!stgq xŕ2htbrQ? !3'󿂱/0 c{[яJar<`Ѡ 5 %*5}9ᙓ @r^rlA$Q2U lQQ؉uyhU+424r@1Ryɒ^rx@7.?@hzi?wԐH}߈7c6z#o/|UD`a1H_\"p1@&WrBdeKTv7.!ԟ@V) `aGo,o>oPobo1qyo>TooODowQS+F搞R6);=[vrrQv>LUF+q StFUevS߅x$~&qޗ݅ԯ1).UaO|tǢKT*ϰd4)'ߕr|<ߒM2/WkNȏ%rQ|evʭVlߟU[#Իg.gM1xweۿoo1CBSCS/e/w/S~eB]v KcVFO/GXiPdU:Ǽ ڄE-,/wb/cƻ͟E/?'?*&)BOZlcƄdUO7c*&tG);AEOb1ҿ)M_Cb.ϛ]!_R8Vb=x](' _ ݂?z-sT_Na2s/@.g(O]Sa5 _;;L7pwrra2wcCB|WV.gWi gU1gtO@|{럘p  D g,l;;.o@o*aa Oo0Ґjo2kJΆb`b~Mk 2(s.®%)nd\S?l$`-@K ay!*t1K쐴^VQ2CIUdUpς1aanOa7P7߸y!X16BòJa@a WE1!UަQb@72Ce$6N ¸ջVcIpb)L(aO[Ne4 (ga# f8 \ 2d T!bu-(;M_qڋgֿ`4@?F\oA`0ΉjM09 Q(3b8ye/˸?FƣE?Fp5Ys?F5&!Q@Ή( 0_Bԃa2U0*s3?ܵ0Ԃ2 k+ݓ0]'%Qn?Fѡ?FW^bd%?F?Ԋu-Q/c,DJ}?5^I,QIƥ,$~//'% ?F*^Y?F[E?F|{T?c,|гY,vݥ,-?C6*~??b'%OZ`?Fa)P?Fů$_?FPvϟ'͚?c, h"lxp_Qҏ,mV}b!)/L FAOSO+~'%`0DLJ?FNqVUO:Oc,~jt7A`,ףp= ,7d__Kh'% QuU?F`4pF?FLAD?X@t]_c,io?JY8FZ4? 
borgbackup-1.1.15/docs/internals/structure.vsd0000644000175000017500000030300013771325506021406 0ustar useruser00000000000000
[binary Microsoft Visio drawing (sheets "Zeichenblatt-1", "Zeichenblatt-2"); content not representable as text]
borgbackup-1.1.15/docs/internals/security.rst0000644000175000017500000004524013771325506021242 0ustar useruser00000000000000
.. somewhat surprisingly the "bash" highlighter gives nice results with the pseudo-code
   notation used in the "Encryption" section.

.. highlight:: bash

========
Security
========

.. _borgcrypto:

Cryptography in Borg
====================

.. _attack_model:

Attack model
------------

The attack model of Borg is that the environment of the client process
(e.g. ``borg create``) is trusted and the repository (server) is not.
The attacker has any and all access to the repository, including interactive
manipulation (man-in-the-middle) for remote repositories.

Furthermore the client environment is assumed to be persistent across
attacks (practically this means that the security database cannot be
deleted between attacks).

Under these circumstances Borg guarantees that the attacker cannot

1. modify the data of any archive without the client detecting the change
2. rename, remove or add an archive without the client detecting the change
3. recover plain-text data
4. recover definite (heuristics based on access patterns are possible)
   structural information such as the object graph (which archives
   refer to what chunks)

The attacker can always impose a denial of service per definition (he could
forbid connections to the repository, or delete it entirely).

When the above attack model is extended to include multiple clients
independently updating the same repository, then Borg fails to provide
confidentiality (i.e. guarantees 3) and 4) do not apply any more).

.. _security_structural_auth:

Structural Authentication
-------------------------

Borg is fundamentally based on an object graph structure (see :ref:`internals`),
where the root object is called the manifest.

Borg follows the `Horton principle`_, which states that
not only the message must be authenticated, but also its meaning (often
expressed through context), because every object used is referenced by a
parent object through its object ID up to the manifest. The object ID in
Borg is a MAC of the object's plaintext, therefore this ensures that an
attacker cannot change the context of an object without forging the MAC.

In other words, the object ID itself only authenticates the plaintext of the
object and not its context or meaning. The latter is established by a different
object referring to an object ID, thereby assigning a particular meaning to an
object. For example, an archive item contains a list of object IDs that
represent packed file metadata. On their own it's not clear that these objects
would represent what they do, but by the archive item referring to them
in a particular part of its own data structure assigns this meaning.

This results in a directed acyclic graph of authentication from the manifest
to the data chunks of individual files.

.. _tam_description:

.. rubric:: Authenticating the manifest

Since the manifest has a fixed ID (000...000) the aforementioned authentication
does not apply to it, indeed, cannot apply to it; it is impossible to authenticate
the root node of a DAG through its edges, since the root node has no incoming
edges.

With the scheme as described so far an attacker could easily replace the manifest,
therefore Borg includes a tertiary authentication mechanism (TAM) that is applied
to the manifest since version 1.0.9 (see :ref:`tam_vuln`).

TAM works by deriving a separate key through HKDF_ from the other encryption and
authentication keys and calculating the HMAC of the metadata to authenticate [#]_::

    # RANDOM(n) returns n random bytes
    salt = RANDOM(64)

    ikm = id_key || enc_key || enc_hmac_key

    # *context* depends on the operation, for manifest authentication it is
    # the ASCII string "borg-metadata-authentication-manifest".
    tam_key = HKDF-SHA-512(ikm, salt, context)

    # *data* is a dict-like structure
    data[hmac] = zeroes
    packed = pack(data)
    data[hmac] = HMAC(tam_key, packed)
    packed_authenticated = pack(data)

Since an attacker cannot gain access to this key and also cannot make the
client authenticate arbitrary data using this mechanism, the attacker is unable
to forge the authentication.

This effectively 'anchors' the manifest to the key, which is controlled by the
client, thereby anchoring the entire DAG, making it impossible for an attacker
to add, remove or modify any part of the DAG without Borg being able to detect
the tampering.
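The following is a minimal Python sketch of the derive-then-MAC pattern shown in the
pseudo-code above, using only the standard library. It is not Borg's actual
implementation: the function names are invented for illustration, the choice of
SHA-512 for the final HMAC is an assumption of the sketch, and re-packing the data
with the tag embedded (the ``zeroes`` step above) is omitted.

.. code-block:: python

    import hashlib
    import hmac
    import os


    def hkdf_sha512(ikm, salt, info, length=64):
        """Minimal HKDF (RFC 5869) with SHA-512: extract, then expand."""
        prk = hmac.new(salt, ikm, hashlib.sha512).digest()   # extract step
        okm, block, counter = b"", b"", 1
        while len(okm) < length:                              # expand step
            block = hmac.new(prk, block + info + bytes([counter]), hashlib.sha512).digest()
            okm += block
            counter += 1
        return okm[:length]


    def authenticate_manifest(packed_manifest, id_key, enc_key, enc_hmac_key):
        """Derive a TAM key and MAC the serialized manifest (illustrative only)."""
        salt = os.urandom(64)
        ikm = id_key + enc_key + enc_hmac_key
        context = b"borg-metadata-authentication-manifest"
        tam_key = hkdf_sha512(ikm, salt, context)
        tag = hmac.new(tam_key, packed_manifest, hashlib.sha512).digest()
        return salt, tag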
However, interactively a user might not notice this kind of attack immediately, if she assumes that the reason for the absent passphrase prompt is a set BORG_PASSPHRASE. See issue :issue:`2169` for details. .. [#] The reason why the authentication tag is stored in the packed data itself is that older Borg versions can still read the manifest this way, while a changed layout would have broken compatibility. Encryption ---------- Encryption is currently based on the Encrypt-then-MAC construction, which is generally seen as the most robust way to create an authenticated encryption scheme from encryption and message authentication primitives. Every operation (encryption, MAC / authentication, chunk ID derivation) uses independent, random keys generated by `os.urandom`_ [#]_. Borg does not support unauthenticated encryption -- only authenticated encryption schemes are supported. No unauthenticated encryption schemes will be added in the future. Depending on the chosen mode (see :ref:`borg_init`) different primitives are used: - The actual encryption is currently always AES-256 in CTR mode. The counter is added in plaintext, since it is needed for decryption, and is also tracked locally on the client to avoid counter reuse. - The authentication primitive is either HMAC-SHA-256 or BLAKE2b-256 in a keyed mode. HMAC-SHA-256 uses 256 bit keys, while BLAKE2b-256 uses 512 bit keys. The latter is secure not only because BLAKE2b itself is not susceptible to `length extension`_, but also since it truncates the hash output from 512 bits to 256 bits, which would make the construction safe even if BLAKE2b were broken regarding length extension or similar attacks. - The primitive used for authentication is always the same primitive that is used for deriving the chunk ID, but they are always used with independent keys. Encryption:: id = AUTHENTICATOR(id_key, data) compressed = compress(data) iv = reserve_iv() encrypted = AES-256-CTR(enc_key, 8-null-bytes || iv, compressed) authenticated = type-byte || AUTHENTICATOR(enc_hmac_key, encrypted) || iv || encrypted Decryption:: # Given: input *authenticated* data, possibly a *chunk-id* to assert type-byte, mac, iv, encrypted = SPLIT(authenticated) ASSERT(type-byte is correct) ASSERT( CONSTANT-TIME-COMPARISON( mac, AUTHENTICATOR(enc_hmac_key, encrypted) ) ) decrypted = AES-256-CTR(enc_key, 8-null-bytes || iv, encrypted) decompressed = decompress(decrypted) ASSERT( CONSTANT-TIME-COMPARISON( chunk-id, AUTHENTICATOR(id_key, decompressed) ) ) The client needs to track which counter values have been used, since encrypting a chunk requires a starting counter value and no two chunks may have overlapping counter ranges (otherwise the bitwise XOR of the overlapping plaintexts is revealed). The client does not directly track the counter value, because it changes often (with each encrypted chunk), instead it commits a "reservation" to the security database and the repository by taking the current counter value and adding 4 GiB / 16 bytes (the block size) to the counter. Thus the client only needs to commit a new reservation every few gigabytes of encrypted data. This mechanism also avoids reusing counter values in case the client crashes or the connection to the repository is severed, since any reservation would have been committed to both the security database and the repository before any data is encrypted. 
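To make the chunk layout and the counter reservation idea above concrete, here is a minimal, illustrative Python sketch. It is *not* Borg's implementation (Borg uses its own OpenSSL bindings): it assumes a recent version of the third-party ``cryptography`` package, skips compression and the chunk-id authenticator, and all key handling, constants and the in-memory "reservation" below are made up for illustration.

.. code-block:: python

    import hashlib
    import hmac
    import os
    import struct

    from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes

    # Illustrative keys/constants -- not Borg's real key management.
    enc_key = os.urandom(32)             # AES-256 encryption key
    enc_hmac_key = os.urandom(32)        # HMAC-SHA-256 authentication key
    TYPE_BYTE = b"\x02"                  # hypothetical type byte
    RESERVATION = (4 * 1024**3) // 16    # 4 GiB worth of 16-byte AES blocks

    counter = 0          # next free AES block counter
    reserved_until = 0   # highest counter value already committed

    def reserve_iv(num_blocks):
        """Hand out a starting counter value, reserving a new range if needed."""
        global counter, reserved_until
        iv = counter
        counter += num_blocks
        if counter > reserved_until:
            reserved_until = counter + RESERVATION
            # a real implementation would persist reserved_until to the
            # security database and the repository *before* encrypting
        return iv

    def encrypt_chunk(compressed):
        iv = reserve_iv((len(compressed) + 15) // 16)
        nonce = bytes(8) + struct.pack(">Q", iv)         # 8-null-bytes || iv
        enc = Cipher(algorithms.AES(enc_key), modes.CTR(nonce)).encryptor()
        encrypted = enc.update(compressed) + enc.finalize()
        mac = hmac.new(enc_hmac_key, encrypted, hashlib.sha256).digest()
        return TYPE_BYTE + mac + struct.pack(">Q", iv) + encrypted

    def decrypt_chunk(blob):
        type_byte, mac, iv, encrypted = blob[:1], blob[1:33], blob[33:41], blob[41:]
        assert type_byte == TYPE_BYTE
        expected = hmac.new(enc_hmac_key, encrypted, hashlib.sha256).digest()
        if not hmac.compare_digest(mac, expected):       # constant-time comparison
            raise ValueError("MAC check failed, chunk rejected")
        dec = Cipher(algorithms.AES(enc_key), modes.CTR(bytes(8) + iv)).decryptor()
        return dec.update(encrypted) + dec.finalize()

The properties mirrored here are the Encrypt-then-MAC order, the constant-time MAC comparison before anything is decrypted, and that a counter range is reserved (and would be persisted) before any data is encrypted with counters from that range.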
Borg uses its standard mechanism (SaveFile) to ensure that reservations are durable (on most hardware / storage systems), therefore a crash of the client's host would not impact tracking of reservations. However, this design is not infallible, and requires synchronization between clients, which is handled through the repository. Therefore in a multiple-client scenario a repository can trick a client into reusing counter values by ignoring counter reservations and replaying the manifest (which will fail if the client has seen a more recent manifest or has a more recent nonce reservation). If the repository is untrusted, but a trusted synchronization channel exists between clients, the security database could be synchronized between them over said trusted channel. This is not part of Borg's functionality. .. [#] Using the :ref:`borg key migrate-to-repokey ` command a user can convert repositories created using Attic in "passphrase" mode to "repokey" mode. In this case the keys were directly derived from the user's passphrase at some point using PBKDF2. Borg does not support "passphrase" mode otherwise any more. .. _key_encryption: Offline key security -------------------- Borg cannot secure the key material while it is running, because the keys are needed in plaintext to decrypt/encrypt repository objects. For offline storage of the encryption keys they are encrypted with a user-chosen passphrase. A 256 bit key encryption key (KEK) is derived from the passphrase using PBKDF2-HMAC-SHA256 with a random 256 bit salt, which is then used to Encrypt-*and*-MAC (unlike the Encrypt-*then*-MAC approach used otherwise) a packed representation of the keys with AES-256-CTR with a constant initialization vector of 0. An HMAC-SHA256 of the plaintext is generated using the same KEK and is stored alongside the ciphertext, which is converted to base64 in its entirety. This base64 blob (commonly referred to as *keyblob*) is then stored in the key file or in the repository config (keyfile and repokey modes respectively). This scheme, and specifically the use of a constant IV with the CTR mode, is secure because an identical passphrase will result in a different derived KEK for every key encryption due to the salt. The use of Encrypt-and-MAC instead of Encrypt-then-MAC is seen as uncritical (but not ideal) here, since it is combined with AES-CTR mode, which is not vulnerable to padding attacks. .. seealso:: Refer to the :ref:`key_files` section for details on the format. Refer to issue :issue:`747` for suggested improvements of the encryption scheme and password-based key derivation. Implementations used -------------------- We do not implement cryptographic primitives ourselves, but rely on widely used libraries providing them: - AES-CTR and HMAC-SHA-256 from OpenSSL 1.0 / 1.1 are used, which is also linked into the static binaries we provide. We think this is not an additional risk, since we don't ever use OpenSSL's networking, TLS or X.509 code, but only their primitives implemented in libcrypto. - SHA-256 and SHA-512 from Python's hashlib_ standard library module are used. Borg requires a Python built with OpenSSL support (due to PBKDF2), therefore these functions are delegated to OpenSSL by Python. - HMAC, PBKDF2 and a constant-time comparison from Python's hmac_ standard library module are used. While the HMAC implementation is written in Python, the PBKDF2 implementation is provided by OpenSSL. The constant-time comparison (``compare_digest``) is written in C and part of Python.
- BLAKE2b is either provided by the system's libb2, an official implementation, or a bundled copy of the BLAKE2 reference implementation (written in C). Implemented cryptographic constructions are: - Encrypt-then-MAC based on AES-256-CTR and either HMAC-SHA-256 or keyed BLAKE2b-256 as described above under Encryption_. - Encrypt-and-MAC based on AES-256-CTR and HMAC-SHA-256 as described above under `Offline key security`_. - HKDF_-SHA-512 .. _Horton principle: https://en.wikipedia.org/wiki/Horton_Principle .. _HKDF: https://tools.ietf.org/html/rfc5869 .. _length extension: https://en.wikipedia.org/wiki/Length_extension_attack .. _hashlib: https://docs.python.org/3/library/hashlib.html .. _hmac: https://docs.python.org/3/library/hmac.html .. _os.urandom: https://docs.python.org/3/library/os.html#os.urandom Remote RPC protocol security ============================ .. note:: This section could be further expanded / detailed. The RPC protocol is fundamentally based on msgpack'd messages exchanged over an encrypted SSH channel (the system's SSH client is used for this by piping data from/to it). This means that the authorization and transport security properties are inherited from SSH and the configuration of the SSH client and the SSH server -- Borg RPC does not contain *any* networking code. Networking is done by the SSH client running in a separate process, Borg only communicates over the standard pipes (stdout, stderr and stdin) with this process. This also means that Borg doesn't have to directly use an SSH client (or SSH at all). For example, ``sudo`` or ``qrexec`` could be used as an intermediary. By using the system's SSH client and not implementing a (cryptographic) network protocol Borg sidesteps many security issues that would normally impact distributing statically linked / standalone binaries. The remainder of this section will focus on the security of the RPC protocol within Borg. The assumed worst case a server can inflict on a client is a denial of repository service. The situation where a server can create a general DoS on the client should be avoided, but might be possible by e.g. forcing the client to allocate large amounts of memory to decode large messages (or messages that merely indicate a large amount of data follows). The RPC protocol code uses a limited msgpack Unpacker to prohibit this. We believe that other kinds of attacks, especially critical vulnerabilities like remote code execution, are inhibited by the design of the protocol: 1. The server cannot send requests to the client on its own accord, it can only send responses. This avoids "unexpected inversion of control" issues. 2. msgpack serialization does not allow embedding or referencing code that is automatically executed. Incoming messages are unpacked by the msgpack unpacker into native Python data structures (like tuples and dictionaries), which are then passed to the rest of the program. Additional verification of the correct form of the responses could be implemented. 3. Remote errors are presented in two forms: 1. A simple plain-text *stderr* channel. A prefix string indicates the kind of message (e.g. WARNING, INFO, ERROR), which is used to suppress it according to the log level selected in the client. A server can send arbitrary log messages, which may confuse a user. However, log messages are only processed when server requests are in progress, therefore the server cannot interfere with / confuse security critical dialogue like the password prompt. 2.
Server-side exceptions passed over the main data channel. These follow the general pattern of server-sent responses and are sent instead of response data for a request. The msgpack implementation used (msgpack-python) has a good security track record, a large test suite and no issues found by fuzzing. It is based on the msgpack-c implementation, sharing the unpacking engine and some support code. msgpack-c has a good track record as well. Some issues [#]_ in the past were located in code not included in msgpack-python. Borg does not use msgpack-c. .. [#] - `MessagePack fuzzing `_ - `Fixed integer overflow and EXT size problem `_ - `Fixed array and map size overflow `_ Using OpenSSL ============= Borg uses the OpenSSL library for most cryptography (see `Implementations used`_ above). OpenSSL is bundled with static releases, thus the bundled copy is not updated with system updates. OpenSSL is a large and complex piece of software and has had its share of vulnerabilities; however, it is important to note that Borg links against ``libcrypto``, **not** ``libssl``. libcrypto is the low-level cryptography part of OpenSSL, while libssl implements TLS and related protocols. The latter is not used by Borg (cf. `Remote RPC protocol security`_, Borg itself does not implement any network access) and historically contained most vulnerabilities, especially critical ones. The static binaries released by the project contain neither libssl nor the Python ssl/_ssl modules. Compression and Encryption ========================== Combining encryption with compression can be insecure in some contexts (e.g. online protocols). There was some discussion about this in `github issue #1040`_; for Borg, some developers concluded this is no problem at all, while others concluded it is hard and extremely slow to exploit and thus no problem in practice. No matter what, there is always the option not to use compression if you are worried about this. .. _github issue #1040: https://github.com/borgbackup/borg/issues/1040 Fingerprinting ============== Stored chunk sizes ------------------ A borg repository does not hide the size of the chunks it stores (size information is needed to operate the repository). The chunks stored are the (compressed and encrypted) output of the chunker, chunked according to the input data, the chunker's parameters and the secret chunker seed (which all influence the chunk boundary positions). Small files below some specific threshold (default: 512 kiB) result in only one chunk (identical content / size as the original file); bigger files result in multiple chunks. After chunking is done, compression, encryption and authentication are applied, which influence the sizes of the chunks stored into the repository. Within our attack model, an attacker possessing a specific set of files which he assumes the victim also possesses (and backs up into the repository) could try a brute-force fingerprinting attack based on the chunk sizes in the repository to prove his assumption. Stored chunk proximity ---------------------- Borg does not try to obfuscate the order / proximity of files it discovers by recursing through the filesystem. For performance reasons, we sort directory contents in file inode order (not in file name alphabetical order), so order fingerprinting is not useful for an attacker. But, when new files are close to each other (when looking at recursion / scanning order), the resulting chunks will also be stored close to each other in the resulting repository segment file(s).
This might leak additional information for the chunk size fingerprinting attack (see above).
borgbackup-1.1.15/docs/internals/encryption.vsd0000644000175000017500000027700013771325506021552 0ustar useruser00000000000000
borgbackup-1.1.15/docs/internals/structure.png0000644000175000017500000061173213771325506021404 0ustar useruser00000000000000
W ulJ裏h{fGP'"SHdB{5*QU˔1o~_ӱޑg}O3 WW/%WV'ԉ`k@Xo3z}II}\X?~&H,C n2޷K}^-YʸFkǔy 曘sc>}1u9Gո__}yΫ+ꕼ;W`a=a=4`ҽ狼oM.Ws9{О%}P_8+R5b pcֻ+c}zNW=N a=Ǹ㪇+`j j Բv"=3h֋Z|'Ufb(r> W ƿz%w>/-W({U~7l@XϘ1&>b`Zzz hv"(Zw=2^F/΃W7JaTpF @뀰m ݳRM~Ͼ5گf@X^s 0 0AZVۆorP#42fɘv 3*ajzss =zwe߳Rɣp_=O 1_c՘`6 'L~ۓYa0(7s~e JX/w|kyg^MX}??1fm7 0ˀÖW=\'W 0 0Pk@Pkg"l(OK;x o{Hk?q_O7Vm߾oD;ro-VtL?wc"?|ZhbjobޏadWq21ks>DZ^7Wߚ{˰^f%39gh.QsDz?[ܵ}U`'w~=%&7^P\;̙qu-:5]z%=8^j~:jj\5v\{6 0^X+`5 'j'ۻxb)a闺 SL7<ܑ Ss~%VbqCim_s~Jn dSҺ&޿ZD޽{cX(WY5?%%7~K3w(ii--MZJ0Y1u朖Ch93VQϵgsba=z z\pe\1 0@AZV3NgfgjaҀM.x=<vڲu5UPSwGW)Y&D Ֆ=m˅7v*1s[7Src> і17KB[ǿKMclI=u|f9/jg(˖aeMA}N[d. 9E{J !gY3w*5fr)2Ek߹,:z:.y5j 磒k'KB[m8uܚGͫ6ݯZS3/k@X}wq'՝}j 0 a=a=4`I\_s_ nMٵGnR/o&mY 땶mސ֍JSҮ65ҺX*i&քz{‡.{qP@^[hiȱRoYӘ'!V3^u92Laa#m?ޚӦ=q-kayӳwt%s-/%b#ןy-J*jOM1>e=JB^8ʽlH`\j_1k3])vz8o1^{a=zٲ_``ƀրA-+j&2aԪ$Ztذ ;Mڕrawk!*.x]g)}D|\ lz:rۗ\۶o7t31EJVߚ6˵#Vڎ)v#t_5m=o7 i&82sږܪ~9/e58ue|L\϶ լP[[1Ǫb-WA+9\hlqQ2?;zL5l==*7/54նzrL Nzv[rz|*8{ac8z|m]^NY o@Xo>b qu;`ЀdZrAk!ٶzW͍Tmrae0a \?WxZ._qq: 󔎁 A$SmKP Dl"uc0-j^蓵TJH+qonkJr}E?]ܪV;J~ze̜ܒ3KWo[ׂюܼYZx+ ~s>%c6R:2sϵ8+nDM[=}.߳1E/b óW=\'W 0 0Pk@XOX Nwް^ܬmkrݺU)Ydx%R8?^Z|q#>w&t3Gikrs@#IFV]s; mvKZXd5!~s/ Դt~<iέ8Vy}8G뻽mӜ9:q]2J|IJ+[Xɍ=tܼܣXKSۯ57^[7- Omq%+{d$R>aӏ>[bjm*'UW 0 0ee=AD`{fF1h] knҖ\MI.UBZXoO8ajb?`Ti\8so(*Vvz]M^{_MPo`Y>1tb[#_tkue[r>UfjqڒjG} sfame1k*SH:8261 B֬9K޽Һݮ#~^֫yroSϽ/G /\[_#zr}::wu1|z;Ca=Rd`  10M>Z=n- Q:wdUڴ^IbnuޮS7{&@͞} n< V4[r}!w#v?[rOKiߴi.|URL.w#hS~I[RKB{o6;sfq8?THd%\mRK{v#;#jUb`ZZg"l(ZhR7ZL=&kstY^֋}-ϫw!|N+8\xuf^MM{Z {tցJxGjq4X[ӣY /wX/ Lȭwź3zGGz:ZzN~qmWك)O+ƱWWsL```i@XOX MjWXo 핆1r7Wj~{WQJGoBzΞG'(96G"9ysjޛ{eKj;:Ou[uqoER:{Μkũ>Kl绌sͭXR+Ḿ5_ɭ7(c/1 sWZ:ٮ쳊\%QԉאW 0 0pZgr ^a ֋An'mp\ɍі-{V\nF lwt-GnyrCG˚%=5fKKF\+"7Rڟ۵ 8=g{뱽֚Ju^~Z񣡲Tx?u=T?׎{ [-jԣ=yӾ^_)9c䘶) ?g\WSy?Z1߀^ca=^ѽs`c`@&M>iQe1VOjX{7x]G4A#76w-k&kGǽ[tTJW;Jzq}Ęz̵\m@:Z_ 5[c"* z~i򱼥!'onFh'JG^^azz\J.kʞrt^, 6GV:߷wΞ3{r+n=(=k|mP{#ckVlyS0r]]^ֶǜckxZ}:5zr^{|ۯ}+W?G+ƱWWsL```i@XOX MjuX/ZdzaνoYah(65 ޖ\ M}wt{Xz?#E$L0-ŵP2۞ܖsfϰ^B U&x~~rXtz?M_{WܚjcTVخ=aZkYXޟݻjǛ' w qe|0 0w0 5`Pz&;L.zr+$N$Zx۳QPJOsD;guܪT[na=':\mBXo{?j.-BzSȠ4f^+G뗚7Z͋޽[}t2"S2yr=_r=gFy^5kJ93w.Z]r+`VRsF}eiM@oV-{q%cvg˔Wԉ{N_=i\ q$+ 0c`@&q'm}޷=zQ\,ז|VAF{Z(~W"ѱG9TMÎt1X8bnqclk YYon!0kIQn߳{A{>Z=;)+o:爫]bnhY׎X[sT~<1k9>6o箳q1Ǖn+"??յN <π LX'8FN`߀րA-+?pM^a~s7zr+m ֋{WY87U;&R7|Fz~J݄uio _f^+{6Fem1OsΉ&=g Yg]vg^{o駟&mOݮi[ޗz=u>nVi_[m}}:oW( ZNԋ֓/)Ǖ 0  .rWhLNa܍VzR7'VlI+Mjc0Aϛw07'<>T[RN$s?VJWM;5kǘ=Rk\l_[9-mأ%cթ֦K#wu=z_=mpw#Sg=7G{Ӛ~Ӿzo>hy Wګz1p/zQƇWXv,3 0lZAiR 7FmXW;zRbxqCxq5+gޏW5=&5>2[kGXH]jjV=C¹`Nj;{Μ#3}gwgJ];}ŹS8̭,{k[ckz=UXSAɒ}߸-euW/b^a=F7WkZΞ3{r=cra|GZqJm%Վxzm*mޏBc{Ş9gK{ڗΑU 8/=7Wԉ{N_=i\ q$+ 00eeqX2nWX/RU0r[Výn2z9:,a|X/w}ϪhG{zLξ޺%ߞHjgϙz=Ÿ%˭ZSz}!`r+-߳eg~n=r=]$,5}שW1 wi\q5g3 05 'Lϝݱ֋o[+^sFَ/ o '8΅z>rd{XzaGF7Zr׋ii;bΌ6 )؞PRi_ =zϔ_XS+^BC8z]{ mU}fm9%}lmu<;[^ ݯOF'z\9p 0 j ԲU2@^nUέ']ћ)`X:+ptnŊqzϾQfk.F67G^jK`Um w.{V::K* >z>H/sfef~>=W;-~e|G[skZɹɽwAc<~zy-rU[=˽7ν]Il;o_utk9fu w])[j׸Lzyͧ1Ֆ&7uӜs=n5z!Mg@X_m_٭W߹ 0}  10}&Y}qn_n ι{ny#vcOUYGZ57d%n^;#e ymҕahO+kXFh]#VrsrM$棘s?%u3G8^sf.Hg5wS{Z[Y.svX-5s~΍3=9c k+J[ל /xdZ} $sw%c?WqZoׅy/ >xmɗxbe-c_5cf~--jj~?3&z a=zO``րրA-+j'33Va čҰLn%G ԍ)dR*WHd-иCGľJn,[)*є>^7#e ymz%a8loRVx_q#ac6*o=C3[y,{Ұjsy6j[{\*ST_ QA>QSR.cmn+;{dYx?{Υ媸{5>b;㱇Ga=zO``րրA-+j'334{BclDL1R*60nm֋c]a/?[Ҿ D[G4q45##M9:19VY{kj| +m͑%uS[SBSkm ۦקpgΙz[clg]ߒꗽddmy sA{rY-k󹼦krdIM뻨]飌K{BQb]gFҰ^}- o6 1{/OS{O 륮 g]ԻgO 0Pk@XZ3% qU60 0@ozz ha.Nw5vHX/n}$^M8*7}3z<8SHF%ᓭ1:׆76-M `HkS^;j%n PT5mahM7kR5,mQG{֜yFX/|}^9jA}ufX/w9Ur}}Q3sf-}oV|_zZ*&J-ǾoEyr9.SK5۶>acUa=zO``րրA-+j'33ǝ)7J#J [9nmK`|>f-#kzڍԿdG雑L`!u߬ Ըke%@ܚ?=u92W emRtcd:Ιg""T U\9VVK')rq\Q4w7@SzJU9$RZiqG7y{eX/λS?-zю#y{j^M%lZ"W`rc$n^ a=>{㪇+`j  10ډ.#AJ=n>JoB.7nJS`h{!oͧF-W~$s_tiL, 
R(}mwޛPX*G%hc圱t]?k1[Z\:j5p:ޔlZԪ\ 3 *5q\{岯#R-w^%Vwnڀi.]=ǭ# #:ڥ8i{-MkдiYzsO[RIͧ[5}8ΙsyTPuל9_l?7կoWTs0U1cR_Se?=Vu9Zt 845O׎Vѭ!jף[5ma!7VGGכ;CGytNp}Ip&~ 7 wk 2.``;c`@w\E YlF8GX7Ԋpfj ךF2{=v}+ث=OonR?ciٖxjiڗW=\'W 0 0Pk@PkLaZqYn/QC^izJVTOcR^Muf㹧Ԫz޾1Gڷ`9gs+z,4ߏW=\'W 0 0Pk@XOX Nw`ZP]ܠrx̓՟x<b_ xt:kw4k;6w1"O r~gCx>Ùcp4z0+UW 0 0ee=AD`{f`@R+?8= 0?U􄡍W߲ ^H]ڧJ r•}[7R_2wU/4 7f^=^ڠ3 0a@Pk n< 0@jiŦڛscnR۲3J"P\]2w᫘srU#68z+l_3¸GԛT{ʀ{= qÕ}r 0)#g"l  1ޞ^#G9kS 3Z]O}W-姟~Ū#rMzuwZlU:{Ԍq ۷W[a=1 0d@XozI8@ܜU!r "&gu<^@ju= 83 0p加7Q lc 0p'z<(UW 0 c`@]<`E$V~ ҟ}@X,z_N 0t-Z۾Xecaa=zO``րրA-+j'3 0 0 0 0 0pz,(UW 0 c`@]<`````b@X>b`ZZg"l  0 0 0 0 0] âW=\'W 0 0Pk@XOX Nw`````a={X㪇+`j j Բv"=3 0 0 0 0 0w1 bz\pe\1 0@a=a=4P;Ń`````.XaQX+`5 5`Pz&ډ0 0 0 0 0 ŀ=, qÕ}r 0@D`{`````cEa=zO``րրA-+j'3 0 0 0 0 0pz,(UW 0 c`@]<`````b@X>b`ZZg"l  0 0 0 0 0] âW=\'W 0 0Pk@XOX Nw`````a={X㪇+`j j Բv"=3 0 0 0 0 0w1 bz\pe\1 0@a=a=4P;Ń`````.XaQX+`5 5`Pz&ډ0 0 0 0 0 ŀ=, qÕ}r 0@D`{`````cEa=zO``րրA-+j'3 0 0 0 0 0pz,(UW 0 c`@]<`````b@X>b`ZZg"l  0 0 0 0 0] âW=\'W 0 0Pk@XOX Nw`````a={X㪇+`j j Բv"=3 0 0 0 0 0w1 bz\pe\1 0@a=a=4P;Ń`````.XaQX+`5 5`Pz&ډ0 0 0 0 0 ŀ=, qÕ}r 0@D`{`````cEa=zO``րրA-+j'3 0 0 0 0 0pz,(UW 0 c`@]<`````b@X>b`ZZկ? 0 0 0 0 0 0_W>O>S6ԑGud`c# 0 0 0 0 0 0Tac]usWXMq 0 f@Xo?rzVճ  0 0 0 0 0 <[Po{XWhsJ 0 L 0 0 0 0 0 0Cs\H`tߡ`3 0 0 0 0 0 0pakϿ3 0_-)``````ڀ 0 0pCC@g``````Zz֟g`'b 0 0 0 0 0 0 a=``|6``````2 U=`c`````` I.( 0 oP`m0``````ր޵_``/  0 0 0 0 0 0 m@XO@@@`tߡ`3 0 0 0 0 0 0pakϿ3 0VT 1``````^ 0 0px?>MLx 0 0 0 0 0 0Wc*{ 0  1 0 0 0 0 0 0$````7w(6 0 0 0 0 0 0 \k@X 0 0|K``````6 '  0 oP`m0``````ր޵_``+ * 0 0 0 0 0 0/`@XO@@@`|w &<`````` w=e`````````hzn J0 0w00;X t``````5 wmW``Kz% 0 0 0 0 0 0C````7w(6 0 0 0 0 0 0 \k@X 0 0}C``````0 '  0 ?>^;@ 0 0 0 0 0 0Uػʞ 0szz 0 0 0 0 0 0 00a=7%``;M : 0 0 0 0 0 0ֻ? 0 0a=ߒb``````  0 0w00;X t``````5 wmW``z! 0 0 0 0 0 0 ```/i ``````*z]eqc`a=a=``````ڀ 0 0 ``````k ][՟`ҀoI1 0 0 0 0 0 0``;M : 0 0 0 0 0 0ֻ? 0 0`e=AEc``````  0 0w0д 0 0 0 0 0 0pa=1 0܀ 0 0 0 0 0 0 m@XMrA ``~}k 0 0 0 0 0 0? 0 |i@XϷ````````hz 0 0 ``````k ][՟`o1 0 0 0 0 0 0``; qh`c`````ʀ{Ws\```n@XOX``````6 & 0 0pCC@g``````Zz֟g`4 [R 0 0 0 0 0 0 00a=``~}k 0 0 0 0 0 0? 0 XYOP7``````xz 0 0 4m01 0 0 0 0 0 \e@X9.{ 0 007 ' 0 0 0 0 0 0Cs\P`tߡ`3 0 0 0 0 0 0pakϿ3 0_-)``````ڀ 0 0pCC@g``````Zz֟g`'b 0 0 0 0 0 0 a=``|6``````2 U=`c`````` I.( 0 oP`m0``````ր޵_``/  -oo? 0 0 0 0 0 08_XnhA;zjɥZ2 0  /0? 0 0 0 0 0 0XuՍ]a73g 0 00a~_WM9Z `````6||ހ*fU=`)M&q````` a 7G7 0 <@7:ֻ} 0 0 0 0 0>zz݄2F``;Iz&;L.! 0 0 0 0 0zqa==# 0 0pa=a=4p.R 0 0 0 0 0= qÕW=\'W 0 0Pk@PkLa````a={X㪇+`j  10ډ. 0 0 0 0 0w1 bz\pe\1 0@AZV3Ng`````.XaQX+`5 'j'ۻx0 0 0 0 0 ŀ=, qÕ}r 0 jYYDP;؞`````cEa=zO``րl 0 0 0 0 0pz,(UW 0 0ee=AD`{f`````b@X>b`Zzz hv" 0 0 0 0 0] âW=\'W 0 0Pk@PkLa````a={X㪇+`j  10ډ. 0 0 0 0 0w1 bz\pe\1 0@AZV3Ng`````.XaQX+`5 'j'ۻx0 0 0 0 0 ŀ=, qÕ}r 0 jYYDP;؞`````cEa=zO``րl 0 0 0 0 0pz,(UW 0 0ee=AD`{f`````b@X>b`Zzz hv" 0 0 0 0 0] âW=\'W 0 0Pk@PkLa````a={X㪇+`j  10ډ. 0 0 0 0 0w1 bz\pe\1 0@AZV3Ng`````.XaQX+`5 'j'ۻx0 0 0 0 0 ŀ=, qÕ}r 0 jYYDP;؞`````cEa=zO``րl 0 0 0 0 0pz,(UW 0 0ee=AD`{f`````b@X>b`Zzz hv" 0 0 0 0 0] âW=\'W 0 0Pk@PkLa````a={X㪇+`j  10ډ. 
0 0 0 0 0w1 bz\pe\1 0@AZV3Ng`U /y~sw9 0 0S |_gO>{?~Enֻ#jd΍o`c@Xo:3M` D0>o~ӍӍoc1j 0w0?޳|;}h}|8 w 0 ,  !ng`5yP/0}JlQ}I_ `8׾z_*6{gs8ֻO_4.ɳs`c`@&NN1 ,WՋV~m.^x57ϵ>=Z`ڷ?G}s?~}-[3@|fE7j{Gz\p)UW 0 0ee=AD`癉@ܼ󣿟L1p@jU>Kl()pNaw##=x|<0QH]}qnzuosߗmQ,;}Ս qe0 0w0 '0hC\Y~`k'n"+@mԍ+BӜ,p^*0$4tx΍!+aS+[aW;ةµ#>ӏ+-?7vZ ׊Z ׊+b`0ee~dw0 ͷPBPH&#yu.}?=Xtk.4tb\1p%$ [@OuX7|ץ>(iਫ਼ή^]*^YxR'``AZV;hLJ{auݻwC'F=ڞ -C==>˭&l˭V4ke}wtJTNVa=D-ڜzokcvՒW 0 0pzz h6ȥzq:BA?$Yɭ=yE# lj{Gn=w>Z $=V֋t^q\ݦT苚Eյ|#;U"w4*631e ׊Z ׊+b`0ee~dw0 ն-5J_z#nnfccI=z|c.i՛͘c ,%}}\SzLa=.K.o%Exe^a=Fb`ZZg"l,3zXUdOs",O𕺩#hԊ{H\+XYm`V׎|2>``;c`@w\EGXo\`/>ԯz >ɁԜ7["7[y-KkI߿n߷CZKX֦^uԾc|xx3{X㪇+`j j Բv" V'O =˵yH0"Xyy~;οbeL l5E߿ny>Զ{xsVuWYē:1 0}  10g߉ Ź=x'|7"1ǿ3zqc87 ?=jzvjE/NJeWuYv# ܩ|n~rsM|ż6M)GڐtNkJX?z^sgnorLGVJڶ9ZӾkrזJ\Ƽst]ՇblOX %}?]SH͵ytmYpkȓzg-\\[\}'Ok'5z}Fjta=zX㪇+`j j Բv"녇p\2(LA.nOn)=97uTH(sMݥ5~:ҳ_8εuRbZ/=f8HSZ'5? ~kbu|^)q7{xݲBfiojڑr[guxP:nKw)0}@8kK{eY: qSn/hs_[}d'%+QM^"z}{蜯wJjf׍21mk8ث/^Q?=3W<۸kSGz\ 0 0  10;L."wFXomu-$*o%/v~XnsMɹE}hǑZ9սו۝q=ΧלT = Zs[I?"0SHߒ|+Jy${B[mYG]kRG|\Tz~RrNcv5)8KD> ]--W~<ק\\8=ӦhGB*5u %op}PtkéՆf+jc%w/'_j> WzޯZ8z}tz\n1 03 j Բ3IR?5pFX/ږ$xPz&\t[+R$Rzܚ@^+wl{ \=Ko}=7sXSq"lc5a}~_ܣpKr6.JP#A嵀kK1չ6 v_qi8Hhޏs4]ͅ`jؾG-ka\s17TբoQֻV.,u&#mv,W;ǾjC1Jkۮ[{6w7꺱^H+WϐR+ޫ1pasjz5Ηy`i@XOX p9ꗳzՋnTl=z-~ٺṷח\IvC8?=k~n[ "2s,+:N5][ao>g/ڳ?[3%Z<=+M׍2[c0zwԉח}.{wWnaRX+`5 5`Pz&ډ2sVX/* M+uՆ&Go/Ŵ*Jj!7ضb|^7:L}h[*os7s)[ci5g̓>ߺmL߭{NlL}4smͅl -]Ah(foU汭ksѱ;5|o?'5Dro+U2mM0 bI>~5?L@vrI0:5F\ Kw6wằ^tϴ*pVJecr]WoR 1àW=\'W 0 0Pk@XOX N3nQ l{,<=GڞUrS9'Y[9nm󖺉2W걈=>1sr庑{/S[;fE3OGGo5^38zTz\jy 0  10&:>=V &5o}3عm[Zt{۞!\{cg/w͢=ȳum;KkAsk35S]rU*\Ky9s|瞹] [Ԙ>s8$5J1ZɬUz0Zmj^=Rs홷eqO87FGq'w]Y.׍~^_Zjom|p㵇ia=zO``րրA-+j'?L[}/FV޶cշ8߳&~n`@.:yq(=fT?˞uMh(y&##mɭJXjd#p2vGoTZ>w[G1b`Zzz hv".)WkqcMVc V*޶5UߵשvD+~jY٪saJb~j(ܖ7ύ=a’nwopcyq.DXs\Tk|G9%5i>7-~cS}rkL_6ڻcXN׍RKU f9Ҿu>okaDX;`xa=a=4`r~և{.r7;"7'g1#-oh.7Ru^zg)g/*LInu:VJ8Y!Y=zJo:?\ nuiy{wPAm\=8kuٻ"Н~OmVv._۾ڴ蟞8:\ eu-iwE}Vjs}4pq?a? ^.s~SW*իo 0 0pZwe *s%k)I7KZu=hϰ U| eQ5c臵>x7ZT]"5[+~㔚O98r~qTh8#T7}^usu^n\$+q[ZHP7u}( MMm_9ez-_*hw M 5_)َ{JXG6|Xyq5|{޽c!( 0 <ۀ01?{b3zrxmdA74O&z.X7fgo oY~eSP>)=GծZ&]#K6uXm#׽z%sT7}B%7ܷfȫzO.8r[Dy\T5pGZ,;Z=uG紖}%W˾kXo^bnZݰoQ;t8:}!7g!׹*.ߧ>nzkz\ 0 0 j Բ6sxFX/wCikew%ohMy,p7KmZ;ﳏjI?&ڜ]ҶuT{z8 Zj*TPjeڮy^i" _MhsQ\^s!qNm9z.ZQ˾BXo]][!dM߶4pFq&s=5zϾ7cas*zΓu`m@XOX x=3zPVx$J E}ZKWG˴m>s^Sr3Ԥy^zeꚵsQkxkK甚qhmZ} $Ǭոe?1?ӵ Ƶ4pq?{aVc4%~AU]g|zl/UW 0 ֻ9m/g"ka {>5aVKdM7Kmݜ+ ۴k%>8S[b|.W+ -'(=s@V0`c[/5waئy^Z}TV@JkK9遭}wG} zVb阮kֻ{ؖ}*4Z&9ߪ>nzm?QX+c`a=a=4pE]zւz%+m9"f}QJL|i;Դ6 3մLmjuS<{tXڶB"ķu7a}9򈻖mKS'U=a^JvG]-/+؊yhmJq6G}Hռd~n]ǖ}^0V>踟׽e앎%Wsާz鋑ƅW#yv.<3 0\Zi2w%z܌йKB)-[XmZQB=wh{ֻwv"/.]m.O?[r4 HFJcjGY͞csZ"\jp{'RQh>7egXSzq??;07OZǞ1zܵ|v k@Xzoa=^źd`{c`@&{OGep$>8ȦGHք[ZRm S}չgXmݠ}\Q":8*d\q:R7sast^"В }YlrsV \#7+3WzǨ{WES{7-o~[r$(5/l9~״Z}ne8Ιab꿫zq]iob /_[^z-/!w?{jbOǽSX/mq->|*Kcߵ!9q5cΦۿz'a=We``Z75˔7i͙PE R4rA=ayUIͭ2չgX.mY>/90>Z@p+l㜣>[7q[s/rZgյ/e6 XŸݳy [mny{ƜAI*_;zQlӵ`߽sZ,ws'wٚ˶nė\sm(k=K^575-X;rީe >[a\jNn~).Ψyw f-6uuŸcX/>=^k-Ź1t>宛}z^3;_Wt,+w3 0p?zz hd{ɶe}h[˷zG{K.ᅚQ\)ǛnËV`%0[WޥlM7cuvg/̭Mfw m_ ,v5Yi5_H.Ѫ5-opku.<b,O圷J95GWUsԆ㗆ZB1GNu5]k ӵ,[AlֱM9~[i=>e+56kS˾KXo;qmX_.<v.hawmz-u4V;v;׵Ktuv{^;k+w>z8n[`3 5`Pz T>-\{W)I7cq%nυa^z#s^`2vN7ߧcnwi#?%goFڭs k厵u_ߴ99^ץ[>r7{3v5֌ݵOF뭅tidϮJ^ZsH%JIL jҾoQVlzZXs]5=MԺe)W2vB!ܵt^:UX/`Y0%qOۻ\QzkpZS? 
ֻO_4.ɳs`c`@&N%}H՚`N\;Bt[`!|I0bY>XUhM@:&]^+*uS?}ݭmZ̃%D_j{Ni(mrm^3"ز֘J{[HJ>승$W:~Zjp#AkU-CW՜֫[͇s[s-ޑ`d͜9JX7]7J\%g!kcȵz޲Sw_W]wok_SN>8```@PkL.w\Ö7?jnt|ahkڭP֍ 0=n練ޝ^ZXm>^3,7tY|YhgpqJvkTlB%sV~s*زծ#7JHkΞ֮5;v cV;[֦\ܮV?5axO3ї[unW\SbFk쒪_ͼ?ko^omr?!#֦׮=t9t\BT;SxJ`~mږI6z\՚=3 0 0À1Y.BHq㌛%!F=qaanuö&6TOA3]GŶG̞}8f )@+]b)*ƞ48NpQY˛5sđ8vò/bۚOm[9d+:Y瘪92snݖ8˕]{sQ// GRkyIߥ)y?2[{8}YɪzSkjj2umjϷ紞}_{=ч17m-~i4T{P+{M;u* uVZ^z}3?dz-}Z_Xr%`ߋgD[\S+'-mi ۥq~{5,MZ>^%ޗV3-=7j9RcqOԋ?Ӗ}Sz.5Ǟ3̙ߚh˹eivk83̹pV_ꚽF͹t1Ѻ6spL g-E:9z]Iii}A˿o|uŜ;ͷ5ʭ*2GDZc޷GV <#uZ1pajJz%Εw`k@XOX t;} kcݹ} 0@jh}Z}{n 8TX+`5 5`Pz&ډ0 j (\p>w6 0RE(/j@nUWrocEHf8>;Ka=Q[xd`u  ᬰhg` gL 0k ޻w,.<^|y.{b;R_7Ʊ/`@Xu^ qӗ} 0Q;z& v0 ԍ? ghf Jfոa=5.zcR˽_+z,4ߏW=\'W 0 0Pk@XOX Nw`@jE7Oݽc^@*C|ͥ+}iN հa=Z8ZCX+`5 5`Pz&ډ0 n ҉Gq?{x@b^ǣ=?-Wsg{M81 @ZUM]{O |?z\pe\1 0@a=a=4P;Ń` +8wH6:\?Q^V0]kO?[sY|^os/ai{5a=&{㪇+`j Umm_z5L`@ꆟhƶ>f  1̿fϱ9iGz:;M#7hYcqaMX0xj4·i`i@XGw/x7G 071y#s2~` g@X  0pw2#Gy>w}ΗzTz\jy 0  !޳IS1 0pL?jn[}ko^H`LMߵe 1π^ھ[a=^ٿs`c`@&LB_0 0 0 0 0uWYē:1 0} j Բ^AcRR_`````WWv++w3 0pzz h${IV_ `````΀^]*^YxR'``AZV;hLJ 0 0 0 0 0@?zjnzeΝ`c@XOX d3 } 0 0 0 0 ֫_e+O 0 5 5`Pz}II}`````g@X_m_٭W߹ 0}  10}&Y}/`````:zu⫬^zeuI`` jYY1)/ 0 0 0 0 0 +;w`a=a=4`$/ 0 0 0 0 0Pg@X^|KXN< 0 0׀րA-+4&%e`````a~}ez\? 01 'Ld````` Ջz Չ'ub`0eeƤ 0 0 0 0 0 3 ׯVXWg`>ЀI> 0 0 0 0 0@azUV/a:N 0 0@_ZwИԗ`````~ q; 0 ǀ0g````3 WW/%WV'ԉ`k@Pk2 0 0 0 0 0π^ھ[a=^ٿs`c`@&LB_0 0 0 0 0uWYē:1 0} j Բ^AcRR_`````WWv++w3 0pzz h${IV_ `````΀^]*^YxR'``AZV;hLJ 0 0 0 0 0@?zjnzeΝ`c@XOX d3 } 0 0 0 0 ֫_e+O 0 5 5`Pz}II}`````g@X_m_٭W߹ 0}  10}&Y}/`````:zu⫬^zeuI`` jYY1)/ 0 0 0 0 0 +;w`a=a=4`$/ 0 0 0 0 0Pg@X^|KXN< 0 0׀րA-+4&%e`````a~}ez\? 01 7pX_C?j 0 0 0 0 01|>xOqO>7մ}!3 0 b@Xo?~_~G5``````~'7}n =e`#o=rV! 0 0 0 0 0 g?֕7$ҟc 0a=0 0 0 0 0 0 0 ````7w(6 0 0 0 0 0 0 \k@X 0 0|K``````6 '  0 oP`m0``````ր޵_``+ * 0 0 0 0 0 0/`@XO@@@`|w &<`````` w=e`````````hzn J0 0w00;X t``````5 wmW``Kz% 0 0 0 0 0 0C````7w(6 0 0 0 0 0 0 \k@X 0 0}C``````0 '  0 ?>^;@ 0 0 0 0 0 0Uػʞ 0szz 0 0 0 0 0 0 00a=7%``;M : 0 0 0 0 0 0ֻ? 0 0a=ߒb``````  0 0w00;X t``````5 wmW``z! 0 0 0 0 0 0 ```/i ``````*z]eqc`a=a=``````ڀ 0 0 ``````k ][՟`ҀoI1 0 0 0 0 0 0``;M : 0 0 0 0 0 0ֻ? 0 0`e=AEc``````  0 0w0д 0 0 0 0 0 0pa=1 0܀ 0 0 0 0 0 0 m@XMrA ``~}k 0 0 0 0 0 0? 0 |i@XϷ````````hz 0 0 ``````k ][՟`o1 0 0 0 0 0 0``; qh`c`````ʀ{Ws\```n@XOX``````6 & 0 0pCC@g``````Zz֟g`4 [R 0 0 0 0 0 0 00a=``~}k 0 0 0 0 0 0? 0 XYOP7``````xz 0 0 4m01 0 0 0 0 0 \e@X9.{ 0 007 ' 0 0 0 0 0 0Cs\P`tߡ`3 0 0 0 0 0 0pakϿ3 0_-)``````ڀ 0 0pCC@g``````Zz֟g`'b 0 0 0 0 0 0 a=``|6``````2 U=`c`````` I.( 0 oP`m0``````ր޵_``/  0 0 0 0 0 0 m@XO@@@`tߡ`3 0 0 0 0 0 0pakϿ3 0VT 1``````^ 0 0px?>MLx 0 0 0 0 0 0Wc*{ 0  1 0 0 0 0 0 0$````7w(6 0 0 0 0 0 0 \k@X 0 0|K``````6 '  0 oP`m0``````ր޵_``+ * 0 0 0 0 0 0/`@XO@@@`|w &<`````` w=e`````````hzn J0 0w00;X t``````5 wmW``Kz~Ko? 0 0 0 0 0 08?q M5hg@X]-TK```a/~Ԁ`````kwހ+U}`F4 79m}_}7h5D`````^W/s|'zǺꆫWs\```n@Xo?r#v5`````F6zo4)gytn|3 0;_X>0 0 0 0 0 0 ou }߰{ 0 0@w;末9 0 0 0 0 0m 뵩#\?oak{nлn_޾ۿ61 Ѐt_HzF0 0 0 0 0 0n@X1F>ͰޟgA-mq_Ŀx~ֻo7`````a=Ο>Ͱ{A-_@R?Gaw|*_ۑ6y5Ws2ƀo6 0 0 0 0 0 Հes1bnpt ,EP磏>z>S}y5O>ywOZ/-aRXGz8]o5mk( 0pzIֻ39#`````^c?\]]/B^mS\jc??k/w{ kⵥsÑ^smƆmՕ2 7a=U 0 0 0 0 0pԀCG ^}o5a^Ƕ2׹S>oK=u1GJO?GwԖ^z-`Vkm~J8NN01kyj  10ډ. 
[binary PNG image data omitted]
borgbackup-1.1.15/MANIFEST.in0000644000175000017500000000044613771325506015447 0ustar useruser00000000000000# stuff we need to include into the sdist is handled automatically by # setuptools_scm - it includes all git-committed files. # but we want to exclude some committed files/dirs not needed in the sdist: exclude .coafile .editorconfig .gitattributes .gitignore .mailmap Vagrantfile prune .github
borgbackup-1.1.15/setup_xxhash.py0000644000175000017500000000501613771325506017004 0ustar useruser00000000000000# Support code for building a C extension with xxhash files # # Copyright (c) 2016-present, Gregory Szorc (original code for zstd) # 2017-present, Thomas Waldmann (mods to make it more generic, code for blake2) # 2020-present, Gianfranco Costamagna (code for xxhash) # All rights reserved. # # This software may be modified and distributed under the terms # of the BSD license. See the LICENSE file for details.
import os # xxhash files, structure as seen in XXHASH (reference implementation) project repository: xxhash_sources = [ 'xxhash.c', ] xxhash_includes = [ '.', ] def xxhash_system_prefix(prefixes): for prefix in prefixes: filename = os.path.join(prefix, 'include', 'xxhash.h') if os.path.exists(filename): with open(filename, 'rb') as fd: if b'XXH64_digest' in fd.read(): return prefix def xxhash_ext_kwargs(bundled_path, system_prefix=None, system=False, **kwargs): """amend kwargs with xxhash stuff for a distutils.extension.Extension initialization. bundled_path: relative (to this file) path to the bundled library source code files system_prefix: where the system-installed library can be found system: True: use the system-installed shared library, False: use the bundled library code kwargs: distutils.extension.Extension kwargs that should be amended returns: amended kwargs """ def multi_join(paths, *path_segments): """apply os.path.join on a list of paths""" return [os.path.join(*(path_segments + (path, ))) for path in paths] use_system = system and system_prefix is not None sources = kwargs.get('sources', []) if not use_system: sources += multi_join(xxhash_sources, bundled_path) include_dirs = kwargs.get('include_dirs', []) if use_system: include_dirs += multi_join(['include'], system_prefix) else: include_dirs += multi_join(xxhash_includes, bundled_path) library_dirs = kwargs.get('library_dirs', []) if use_system: library_dirs += multi_join(['lib'], system_prefix) libraries = kwargs.get('libraries', []) if use_system: libraries += ['xxhash', ] extra_compile_args = kwargs.get('extra_compile_args', []) if not use_system: extra_compile_args += [] # not used yet ret = dict(**kwargs) ret.update(dict(sources=sources, extra_compile_args=extra_compile_args, include_dirs=include_dirs, library_dirs=library_dirs, libraries=libraries)) return ret borgbackup-1.1.15/src/0000755000175000017500000000000013771325773014502 5ustar useruser00000000000000borgbackup-1.1.15/src/borg/0000755000175000017500000000000013771325773015433 5ustar useruser00000000000000borgbackup-1.1.15/src/borg/item.pyx0000644000175000017500000003563313771325506017137 0ustar useruser00000000000000# cython: language_level=3 import stat from collections import namedtuple from .constants import ITEM_KEYS, ARCHIVE_KEYS from .helpers import safe_encode, safe_decode from .helpers import bigint_to_int, int_to_bigint from .helpers import StableDict API_VERSION = '1.1_03' class PropDict: """ Manage a dictionary via properties. - initialization by giving a dict or kw args - on initialization, normalize dict keys to be str type - access dict via properties, like: x.key_name - membership check via: 'key_name' in x - optionally, encode when setting a value - optionally, decode when getting a value - be safe against typos in key names: check against VALID_KEYS - when setting a value: check type of value When "packing" a dict, ie. you have a dict with some data and want to convert it into an instance, then use eg. Item({'a': 1, ...}). This way all keys in your dictionary are validated. When "unpacking", that is you've read a dictionary with some data from somewhere (eg. msgpack), then use eg. Item(internal_dict={...}). This does not validate the keys, therefore unknown keys are ignored instead of causing an error. 
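
    A minimal subclass sketch (illustrative only - "comment" is just an example key,
    see the real subclasses below for the actual keys and types):

        class MyPropDict(PropDict):
            VALID_KEYS = {'comment'}
            __slots__ = ("_dict", )
            comment = PropDict._make_property('comment', str, encode=str.encode, decode=bytes.decode)

        p = MyPropDict(comment='some text')  # type-checked and encoded on assignment
        assert p.comment == 'some text'      # decoded back to str on access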
""" VALID_KEYS = None # override with in child class __slots__ = ("_dict", ) # avoid setting attributes not supported by properties def __init__(self, data_dict=None, internal_dict=None, **kw): self._dict = {} if internal_dict is None: pass # nothing to do elif isinstance(internal_dict, dict): self.update_internal(internal_dict) else: raise TypeError("internal_dict must be a dict") if data_dict is None: data = kw elif isinstance(data_dict, dict): data = data_dict else: raise TypeError("data_dict must be a dict") if data: self.update(data) def update(self, d): for k, v in d.items(): if isinstance(k, bytes): k = k.decode() setattr(self, self._check_key(k), v) def update_internal(self, d): for k, v in d.items(): if isinstance(k, bytes): k = k.decode() self._dict[k] = v def __eq__(self, other): return self.as_dict() == other.as_dict() def __repr__(self): return '%s(internal_dict=%r)' % (self.__class__.__name__, self._dict) def as_dict(self): """return the internal dictionary""" return StableDict(self._dict) def _check_key(self, key): """make sure key is of type str and known""" if not isinstance(key, str): raise TypeError("key must be str") if key not in self.VALID_KEYS: raise ValueError("key '%s' is not a valid key" % key) return key def __contains__(self, key): """do we have this key?""" return self._check_key(key) in self._dict def get(self, key, default=None): """get value for key, return default if key does not exist""" return getattr(self, self._check_key(key), default) @staticmethod def _make_property(key, value_type, value_type_name=None, encode=None, decode=None): """return a property that deals with self._dict[key]""" assert isinstance(key, str) if value_type_name is None: value_type_name = value_type.__name__ doc = "%s (%s)" % (key, value_type_name) type_error_msg = "%s value must be %s" % (key, value_type_name) attr_error_msg = "attribute %s not found" % key def _get(self): try: value = self._dict[key] except KeyError: raise AttributeError(attr_error_msg) from None if decode is not None: value = decode(value) return value def _set(self, value): if not isinstance(value, value_type): raise TypeError(type_error_msg) if encode is not None: value = encode(value) self._dict[key] = value def _del(self): try: del self._dict[key] except KeyError: raise AttributeError(attr_error_msg) from None return property(_get, _set, _del, doc=doc) ChunkListEntry = namedtuple('ChunkListEntry', 'id size csize') class Item(PropDict): """ Item abstraction that deals with validation and the low-level details internally: Items are created either from msgpack unpacker output, from another dict, from kwargs or built step-by-step by setting attributes. msgpack gives us a dict with bytes-typed keys, just give it to Item(internal_dict=d) and use item.key_name later. msgpack gives us byte-typed values for stuff that should be str, we automatically decode when getting such a property and encode when setting it. If an Item shall be serialized, give as_dict() method output to msgpack packer. A bug in Attic up to and including release 0.13 added a (meaningless) 'acl' key to every item. We must never re-use this key. See test_attic013_acl_bug for details. 
""" VALID_KEYS = ITEM_KEYS | {'deleted', 'nlink', } # str-typed keys __slots__ = ("_dict", ) # avoid setting attributes not supported by properties # properties statically defined, so that IDEs can know their names: path = PropDict._make_property('path', str, 'surrogate-escaped str', encode=safe_encode, decode=safe_decode) source = PropDict._make_property('source', str, 'surrogate-escaped str', encode=safe_encode, decode=safe_decode) user = PropDict._make_property('user', (str, type(None)), 'surrogate-escaped str or None', encode=safe_encode, decode=safe_decode) group = PropDict._make_property('group', (str, type(None)), 'surrogate-escaped str or None', encode=safe_encode, decode=safe_decode) acl_access = PropDict._make_property('acl_access', bytes) acl_default = PropDict._make_property('acl_default', bytes) acl_extended = PropDict._make_property('acl_extended', bytes) acl_nfs4 = PropDict._make_property('acl_nfs4', bytes) mode = PropDict._make_property('mode', int) uid = PropDict._make_property('uid', int) gid = PropDict._make_property('gid', int) rdev = PropDict._make_property('rdev', int) bsdflags = PropDict._make_property('bsdflags', int) # note: we need to keep the bigint conversion for compatibility with borg 1.0 archives. atime = PropDict._make_property('atime', int, 'bigint', encode=int_to_bigint, decode=bigint_to_int) ctime = PropDict._make_property('ctime', int, 'bigint', encode=int_to_bigint, decode=bigint_to_int) mtime = PropDict._make_property('mtime', int, 'bigint', encode=int_to_bigint, decode=bigint_to_int) birthtime = PropDict._make_property('birthtime', int, 'bigint', encode=int_to_bigint, decode=bigint_to_int) # size is only present for items with a chunk list and then it is sum(chunk_sizes) # compatibility note: this is a new feature, in old archives size will be missing. size = PropDict._make_property('size', int) hardlink_master = PropDict._make_property('hardlink_master', bool) chunks = PropDict._make_property('chunks', (list, type(None)), 'list or None') chunks_healthy = PropDict._make_property('chunks_healthy', (list, type(None)), 'list or None') xattrs = PropDict._make_property('xattrs', StableDict) deleted = PropDict._make_property('deleted', bool) nlink = PropDict._make_property('nlink', int) part = PropDict._make_property('part', int) def get_size(self, hardlink_masters=None, memorize=False, compressed=False, from_chunks=False): """ Determine the (uncompressed or compressed) size of this item. For hardlink slaves, the size is computed via the hardlink master's chunk list, if available (otherwise size will be returned as 0). If memorize is True, the computed size value will be stored into the item. """ attr = 'csize' if compressed else 'size' assert not (compressed and memorize), 'Item does not have a csize field.' try: if from_chunks: raise AttributeError size = getattr(self, attr) except AttributeError: if stat.S_ISLNK(self.mode): # get out of here quickly. symlinks have no own chunks, their fs size is the length of the target name. # also, there is the dual-use issue of .source (#2343), so don't confuse it with a hardlink slave. return len(self.source) # no precomputed (c)size value available, compute it: try: chunks = getattr(self, 'chunks') having_chunks = True except AttributeError: having_chunks = False # this item has no (own) chunks list, but if this is a hardlink slave # and we know the master, we can still compute the size. 
if hardlink_masters is None: chunks = None else: try: master = getattr(self, 'source') except AttributeError: # not a hardlink slave, likely a directory or special file w/o chunks chunks = None else: # hardlink slave, try to fetch hardlink master's chunks list # todo: put precomputed size into hardlink_masters' values and use it, if present chunks, _ = hardlink_masters.get(master, (None, None)) if chunks is None: return 0 size = sum(getattr(ChunkListEntry(*chunk), attr) for chunk in chunks) # if requested, memorize the precomputed (c)size for items that have an own chunks list: if memorize and having_chunks: setattr(self, attr, size) return size class EncryptedKey(PropDict): """ EncryptedKey abstraction that deals with validation and the low-level details internally: A EncryptedKey is created either from msgpack unpacker output, from another dict, from kwargs or built step-by-step by setting attributes. msgpack gives us a dict with bytes-typed keys, just give it to EncryptedKey(d) and use enc_key.xxx later. If a EncryptedKey shall be serialized, give as_dict() method output to msgpack packer. """ VALID_KEYS = {'version', 'algorithm', 'iterations', 'salt', 'hash', 'data'} # str-typed keys __slots__ = ("_dict", ) # avoid setting attributes not supported by properties version = PropDict._make_property('version', int) algorithm = PropDict._make_property('algorithm', str, encode=str.encode, decode=bytes.decode) iterations = PropDict._make_property('iterations', int) salt = PropDict._make_property('salt', bytes) hash = PropDict._make_property('hash', bytes) data = PropDict._make_property('data', bytes) class Key(PropDict): """ Key abstraction that deals with validation and the low-level details internally: A Key is created either from msgpack unpacker output, from another dict, from kwargs or built step-by-step by setting attributes. msgpack gives us a dict with bytes-typed keys, just give it to Key(d) and use key.xxx later. If a Key shall be serialized, give as_dict() method output to msgpack packer. """ VALID_KEYS = {'version', 'repository_id', 'enc_key', 'enc_hmac_key', 'id_key', 'chunk_seed', 'tam_required'} # str-typed keys __slots__ = ("_dict", ) # avoid setting attributes not supported by properties version = PropDict._make_property('version', int) repository_id = PropDict._make_property('repository_id', bytes) enc_key = PropDict._make_property('enc_key', bytes) enc_hmac_key = PropDict._make_property('enc_hmac_key', bytes) id_key = PropDict._make_property('id_key', bytes) chunk_seed = PropDict._make_property('chunk_seed', int) tam_required = PropDict._make_property('tam_required', bool) class ArchiveItem(PropDict): """ ArchiveItem abstraction that deals with validation and the low-level details internally: An ArchiveItem is created either from msgpack unpacker output, from another dict, from kwargs or built step-by-step by setting attributes. msgpack gives us a dict with bytes-typed keys, just give it to ArchiveItem(d) and use arch.xxx later. If a ArchiveItem shall be serialized, give as_dict() method output to msgpack packer. 
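
    A small construction sketch (all values here are made up for illustration):

        archive = ArchiveItem(version=1, name='my-archive', items=[], cmdline=['borg', 'create'],
                              hostname='myhost', username='user', time='2020-12-24T12:00:00')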
""" VALID_KEYS = ARCHIVE_KEYS # str-typed keys __slots__ = ("_dict", ) # avoid setting attributes not supported by properties version = PropDict._make_property('version', int) name = PropDict._make_property('name', str, 'surrogate-escaped str', encode=safe_encode, decode=safe_decode) items = PropDict._make_property('items', list) cmdline = PropDict._make_property('cmdline', list) # list of s-e-str hostname = PropDict._make_property('hostname', str, 'surrogate-escaped str', encode=safe_encode, decode=safe_decode) username = PropDict._make_property('username', str, 'surrogate-escaped str', encode=safe_encode, decode=safe_decode) time = PropDict._make_property('time', str, 'surrogate-escaped str', encode=safe_encode, decode=safe_decode) time_end = PropDict._make_property('time_end', str, 'surrogate-escaped str', encode=safe_encode, decode=safe_decode) comment = PropDict._make_property('comment', str, 'surrogate-escaped str', encode=safe_encode, decode=safe_decode) chunker_params = PropDict._make_property('chunker_params', tuple) recreate_cmdline = PropDict._make_property('recreate_cmdline', list) # list of s-e-str # recreate_source_id, recreate_args, recreate_partial_chunks were used in 1.1.0b1 .. b2 recreate_source_id = PropDict._make_property('recreate_source_id', bytes) recreate_args = PropDict._make_property('recreate_args', list) # list of s-e-str recreate_partial_chunks = PropDict._make_property('recreate_partial_chunks', list) # list of tuples class ManifestItem(PropDict): """ ManifestItem abstraction that deals with validation and the low-level details internally: A ManifestItem is created either from msgpack unpacker output, from another dict, from kwargs or built step-by-step by setting attributes. msgpack gives us a dict with bytes-typed keys, just give it to ManifestItem(d) and use manifest.xxx later. If a ManifestItem shall be serialized, give as_dict() method output to msgpack packer. """ VALID_KEYS = {'version', 'archives', 'timestamp', 'config', 'item_keys', } # str-typed keys __slots__ = ("_dict", ) # avoid setting attributes not supported by properties version = PropDict._make_property('version', int) archives = PropDict._make_property('archives', dict) # name -> dict timestamp = PropDict._make_property('timestamp', str, 'surrogate-escaped str', encode=safe_encode, decode=safe_decode) config = PropDict._make_property('config', dict) item_keys = PropDict._make_property('item_keys', tuple) borgbackup-1.1.15/src/borg/item.c0000644000175000017500000236563113771325772016554 0ustar useruser00000000000000/* Generated by Cython 0.29.21 */ #define PY_SSIZE_T_CLEAN #include "Python.h" #ifndef Py_PYTHON_H #error Python headers needed to compile C extensions, please install development version of Python. #elif PY_VERSION_HEX < 0x02060000 || (0x03000000 <= PY_VERSION_HEX && PY_VERSION_HEX < 0x03030000) #error Cython requires Python 2.6+ or Python 3.3+. 
#else #define CYTHON_ABI "0_29_21" #define CYTHON_HEX_VERSION 0x001D15F0 #define CYTHON_FUTURE_DIVISION 1 #include #ifndef offsetof #define offsetof(type, member) ( (size_t) & ((type*)0) -> member ) #endif #if !defined(WIN32) && !defined(MS_WINDOWS) #ifndef __stdcall #define __stdcall #endif #ifndef __cdecl #define __cdecl #endif #ifndef __fastcall #define __fastcall #endif #endif #ifndef DL_IMPORT #define DL_IMPORT(t) t #endif #ifndef DL_EXPORT #define DL_EXPORT(t) t #endif #define __PYX_COMMA , #ifndef HAVE_LONG_LONG #if PY_VERSION_HEX >= 0x02070000 #define HAVE_LONG_LONG #endif #endif #ifndef PY_LONG_LONG #define PY_LONG_LONG LONG_LONG #endif #ifndef Py_HUGE_VAL #define Py_HUGE_VAL HUGE_VAL #endif #ifdef PYPY_VERSION #define CYTHON_COMPILING_IN_PYPY 1 #define CYTHON_COMPILING_IN_PYSTON 0 #define CYTHON_COMPILING_IN_CPYTHON 0 #undef CYTHON_USE_TYPE_SLOTS #define CYTHON_USE_TYPE_SLOTS 0 #undef CYTHON_USE_PYTYPE_LOOKUP #define CYTHON_USE_PYTYPE_LOOKUP 0 #if PY_VERSION_HEX < 0x03050000 #undef CYTHON_USE_ASYNC_SLOTS #define CYTHON_USE_ASYNC_SLOTS 0 #elif !defined(CYTHON_USE_ASYNC_SLOTS) #define CYTHON_USE_ASYNC_SLOTS 1 #endif #undef CYTHON_USE_PYLIST_INTERNALS #define CYTHON_USE_PYLIST_INTERNALS 0 #undef CYTHON_USE_UNICODE_INTERNALS #define CYTHON_USE_UNICODE_INTERNALS 0 #undef CYTHON_USE_UNICODE_WRITER #define CYTHON_USE_UNICODE_WRITER 0 #undef CYTHON_USE_PYLONG_INTERNALS #define CYTHON_USE_PYLONG_INTERNALS 0 #undef CYTHON_AVOID_BORROWED_REFS #define CYTHON_AVOID_BORROWED_REFS 1 #undef CYTHON_ASSUME_SAFE_MACROS #define CYTHON_ASSUME_SAFE_MACROS 0 #undef CYTHON_UNPACK_METHODS #define CYTHON_UNPACK_METHODS 0 #undef CYTHON_FAST_THREAD_STATE #define CYTHON_FAST_THREAD_STATE 0 #undef CYTHON_FAST_PYCALL #define CYTHON_FAST_PYCALL 0 #undef CYTHON_PEP489_MULTI_PHASE_INIT #define CYTHON_PEP489_MULTI_PHASE_INIT 0 #undef CYTHON_USE_TP_FINALIZE #define CYTHON_USE_TP_FINALIZE 0 #undef CYTHON_USE_DICT_VERSIONS #define CYTHON_USE_DICT_VERSIONS 0 #undef CYTHON_USE_EXC_INFO_STACK #define CYTHON_USE_EXC_INFO_STACK 0 #elif defined(PYSTON_VERSION) #define CYTHON_COMPILING_IN_PYPY 0 #define CYTHON_COMPILING_IN_PYSTON 1 #define CYTHON_COMPILING_IN_CPYTHON 0 #ifndef CYTHON_USE_TYPE_SLOTS #define CYTHON_USE_TYPE_SLOTS 1 #endif #undef CYTHON_USE_PYTYPE_LOOKUP #define CYTHON_USE_PYTYPE_LOOKUP 0 #undef CYTHON_USE_ASYNC_SLOTS #define CYTHON_USE_ASYNC_SLOTS 0 #undef CYTHON_USE_PYLIST_INTERNALS #define CYTHON_USE_PYLIST_INTERNALS 0 #ifndef CYTHON_USE_UNICODE_INTERNALS #define CYTHON_USE_UNICODE_INTERNALS 1 #endif #undef CYTHON_USE_UNICODE_WRITER #define CYTHON_USE_UNICODE_WRITER 0 #undef CYTHON_USE_PYLONG_INTERNALS #define CYTHON_USE_PYLONG_INTERNALS 0 #ifndef CYTHON_AVOID_BORROWED_REFS #define CYTHON_AVOID_BORROWED_REFS 0 #endif #ifndef CYTHON_ASSUME_SAFE_MACROS #define CYTHON_ASSUME_SAFE_MACROS 1 #endif #ifndef CYTHON_UNPACK_METHODS #define CYTHON_UNPACK_METHODS 1 #endif #undef CYTHON_FAST_THREAD_STATE #define CYTHON_FAST_THREAD_STATE 0 #undef CYTHON_FAST_PYCALL #define CYTHON_FAST_PYCALL 0 #undef CYTHON_PEP489_MULTI_PHASE_INIT #define CYTHON_PEP489_MULTI_PHASE_INIT 0 #undef CYTHON_USE_TP_FINALIZE #define CYTHON_USE_TP_FINALIZE 0 #undef CYTHON_USE_DICT_VERSIONS #define CYTHON_USE_DICT_VERSIONS 0 #undef CYTHON_USE_EXC_INFO_STACK #define CYTHON_USE_EXC_INFO_STACK 0 #else #define CYTHON_COMPILING_IN_PYPY 0 #define CYTHON_COMPILING_IN_PYSTON 0 #define CYTHON_COMPILING_IN_CPYTHON 1 #ifndef CYTHON_USE_TYPE_SLOTS #define CYTHON_USE_TYPE_SLOTS 1 #endif #if PY_VERSION_HEX < 0x02070000 #undef CYTHON_USE_PYTYPE_LOOKUP #define 
CYTHON_USE_PYTYPE_LOOKUP 0 #elif !defined(CYTHON_USE_PYTYPE_LOOKUP) #define CYTHON_USE_PYTYPE_LOOKUP 1 #endif #if PY_MAJOR_VERSION < 3 #undef CYTHON_USE_ASYNC_SLOTS #define CYTHON_USE_ASYNC_SLOTS 0 #elif !defined(CYTHON_USE_ASYNC_SLOTS) #define CYTHON_USE_ASYNC_SLOTS 1 #endif #if PY_VERSION_HEX < 0x02070000 #undef CYTHON_USE_PYLONG_INTERNALS #define CYTHON_USE_PYLONG_INTERNALS 0 #elif !defined(CYTHON_USE_PYLONG_INTERNALS) #define CYTHON_USE_PYLONG_INTERNALS 1 #endif #ifndef CYTHON_USE_PYLIST_INTERNALS #define CYTHON_USE_PYLIST_INTERNALS 1 #endif #ifndef CYTHON_USE_UNICODE_INTERNALS #define CYTHON_USE_UNICODE_INTERNALS 1 #endif #if PY_VERSION_HEX < 0x030300F0 #undef CYTHON_USE_UNICODE_WRITER #define CYTHON_USE_UNICODE_WRITER 0 #elif !defined(CYTHON_USE_UNICODE_WRITER) #define CYTHON_USE_UNICODE_WRITER 1 #endif #ifndef CYTHON_AVOID_BORROWED_REFS #define CYTHON_AVOID_BORROWED_REFS 0 #endif #ifndef CYTHON_ASSUME_SAFE_MACROS #define CYTHON_ASSUME_SAFE_MACROS 1 #endif #ifndef CYTHON_UNPACK_METHODS #define CYTHON_UNPACK_METHODS 1 #endif #ifndef CYTHON_FAST_THREAD_STATE #define CYTHON_FAST_THREAD_STATE 1 #endif #ifndef CYTHON_FAST_PYCALL #define CYTHON_FAST_PYCALL 1 #endif #ifndef CYTHON_PEP489_MULTI_PHASE_INIT #define CYTHON_PEP489_MULTI_PHASE_INIT (PY_VERSION_HEX >= 0x03050000) #endif #ifndef CYTHON_USE_TP_FINALIZE #define CYTHON_USE_TP_FINALIZE (PY_VERSION_HEX >= 0x030400a1) #endif #ifndef CYTHON_USE_DICT_VERSIONS #define CYTHON_USE_DICT_VERSIONS (PY_VERSION_HEX >= 0x030600B1) #endif #ifndef CYTHON_USE_EXC_INFO_STACK #define CYTHON_USE_EXC_INFO_STACK (PY_VERSION_HEX >= 0x030700A3) #endif #endif #if !defined(CYTHON_FAST_PYCCALL) #define CYTHON_FAST_PYCCALL (CYTHON_FAST_PYCALL && PY_VERSION_HEX >= 0x030600B1) #endif #if CYTHON_USE_PYLONG_INTERNALS #include "longintrepr.h" #undef SHIFT #undef BASE #undef MASK #ifdef SIZEOF_VOID_P enum { __pyx_check_sizeof_voidp = 1 / (int)(SIZEOF_VOID_P == sizeof(void*)) }; #endif #endif #ifndef __has_attribute #define __has_attribute(x) 0 #endif #ifndef __has_cpp_attribute #define __has_cpp_attribute(x) 0 #endif #ifndef CYTHON_RESTRICT #if defined(__GNUC__) #define CYTHON_RESTRICT __restrict__ #elif defined(_MSC_VER) && _MSC_VER >= 1400 #define CYTHON_RESTRICT __restrict #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L #define CYTHON_RESTRICT restrict #else #define CYTHON_RESTRICT #endif #endif #ifndef CYTHON_UNUSED # if defined(__GNUC__) # if !(defined(__cplusplus)) || (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4)) # define CYTHON_UNUSED __attribute__ ((__unused__)) # else # define CYTHON_UNUSED # endif # elif defined(__ICC) || (defined(__INTEL_COMPILER) && !defined(_MSC_VER)) # define CYTHON_UNUSED __attribute__ ((__unused__)) # else # define CYTHON_UNUSED # endif #endif #ifndef CYTHON_MAYBE_UNUSED_VAR # if defined(__cplusplus) template void CYTHON_MAYBE_UNUSED_VAR( const T& ) { } # else # define CYTHON_MAYBE_UNUSED_VAR(x) (void)(x) # endif #endif #ifndef CYTHON_NCP_UNUSED # if CYTHON_COMPILING_IN_CPYTHON # define CYTHON_NCP_UNUSED # else # define CYTHON_NCP_UNUSED CYTHON_UNUSED # endif #endif #define __Pyx_void_to_None(void_result) ((void)(void_result), Py_INCREF(Py_None), Py_None) #ifdef _MSC_VER #ifndef _MSC_STDINT_H_ #if _MSC_VER < 1300 typedef unsigned char uint8_t; typedef unsigned int uint32_t; #else typedef unsigned __int8 uint8_t; typedef unsigned __int32 uint32_t; #endif #endif #else #include #endif #ifndef CYTHON_FALLTHROUGH #if defined(__cplusplus) && __cplusplus >= 201103L #if __has_cpp_attribute(fallthrough) #define 
CYTHON_FALLTHROUGH [[fallthrough]] #elif __has_cpp_attribute(clang::fallthrough) #define CYTHON_FALLTHROUGH [[clang::fallthrough]] #elif __has_cpp_attribute(gnu::fallthrough) #define CYTHON_FALLTHROUGH [[gnu::fallthrough]] #endif #endif #ifndef CYTHON_FALLTHROUGH #if __has_attribute(fallthrough) #define CYTHON_FALLTHROUGH __attribute__((fallthrough)) #else #define CYTHON_FALLTHROUGH #endif #endif #if defined(__clang__ ) && defined(__apple_build_version__) #if __apple_build_version__ < 7000000 #undef CYTHON_FALLTHROUGH #define CYTHON_FALLTHROUGH #endif #endif #endif #ifndef CYTHON_INLINE #if defined(__clang__) #define CYTHON_INLINE __inline__ __attribute__ ((__unused__)) #elif defined(__GNUC__) #define CYTHON_INLINE __inline__ #elif defined(_MSC_VER) #define CYTHON_INLINE __inline #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L #define CYTHON_INLINE inline #else #define CYTHON_INLINE #endif #endif #if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX < 0x02070600 && !defined(Py_OptimizeFlag) #define Py_OptimizeFlag 0 #endif #define __PYX_BUILD_PY_SSIZE_T "n" #define CYTHON_FORMAT_SSIZE_T "z" #if PY_MAJOR_VERSION < 3 #define __Pyx_BUILTIN_MODULE_NAME "__builtin__" #define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\ PyCode_New(a+k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) #define __Pyx_DefaultClassType PyClass_Type #else #define __Pyx_BUILTIN_MODULE_NAME "builtins" #if PY_VERSION_HEX >= 0x030800A4 && PY_VERSION_HEX < 0x030800B2 #define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\ PyCode_New(a, 0, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) #else #define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\ PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) #endif #define __Pyx_DefaultClassType PyType_Type #endif #ifndef Py_TPFLAGS_CHECKTYPES #define Py_TPFLAGS_CHECKTYPES 0 #endif #ifndef Py_TPFLAGS_HAVE_INDEX #define Py_TPFLAGS_HAVE_INDEX 0 #endif #ifndef Py_TPFLAGS_HAVE_NEWBUFFER #define Py_TPFLAGS_HAVE_NEWBUFFER 0 #endif #ifndef Py_TPFLAGS_HAVE_FINALIZE #define Py_TPFLAGS_HAVE_FINALIZE 0 #endif #ifndef METH_STACKLESS #define METH_STACKLESS 0 #endif #if PY_VERSION_HEX <= 0x030700A3 || !defined(METH_FASTCALL) #ifndef METH_FASTCALL #define METH_FASTCALL 0x80 #endif typedef PyObject *(*__Pyx_PyCFunctionFast) (PyObject *self, PyObject *const *args, Py_ssize_t nargs); typedef PyObject *(*__Pyx_PyCFunctionFastWithKeywords) (PyObject *self, PyObject *const *args, Py_ssize_t nargs, PyObject *kwnames); #else #define __Pyx_PyCFunctionFast _PyCFunctionFast #define __Pyx_PyCFunctionFastWithKeywords _PyCFunctionFastWithKeywords #endif #if CYTHON_FAST_PYCCALL #define __Pyx_PyFastCFunction_Check(func)\ ((PyCFunction_Check(func) && (METH_FASTCALL == (PyCFunction_GET_FLAGS(func) & ~(METH_CLASS | METH_STATIC | METH_COEXIST | METH_KEYWORDS | METH_STACKLESS))))) #else #define __Pyx_PyFastCFunction_Check(func) 0 #endif #if CYTHON_COMPILING_IN_PYPY && !defined(PyObject_Malloc) #define PyObject_Malloc(s) PyMem_Malloc(s) #define PyObject_Free(p) PyMem_Free(p) #define PyObject_Realloc(p) PyMem_Realloc(p) #endif #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX < 0x030400A1 #define PyMem_RawMalloc(n) PyMem_Malloc(n) #define PyMem_RawRealloc(p, n) PyMem_Realloc(p, n) #define PyMem_RawFree(p) PyMem_Free(p) #endif #if CYTHON_COMPILING_IN_PYSTON #define __Pyx_PyCode_HasFreeVars(co) PyCode_HasFreeVars(co) #define __Pyx_PyFrame_SetLineNumber(frame, lineno) 
PyFrame_SetLineNumber(frame, lineno) #else #define __Pyx_PyCode_HasFreeVars(co) (PyCode_GetNumFree(co) > 0) #define __Pyx_PyFrame_SetLineNumber(frame, lineno) (frame)->f_lineno = (lineno) #endif #if !CYTHON_FAST_THREAD_STATE || PY_VERSION_HEX < 0x02070000 #define __Pyx_PyThreadState_Current PyThreadState_GET() #elif PY_VERSION_HEX >= 0x03060000 #define __Pyx_PyThreadState_Current _PyThreadState_UncheckedGet() #elif PY_VERSION_HEX >= 0x03000000 #define __Pyx_PyThreadState_Current PyThreadState_GET() #else #define __Pyx_PyThreadState_Current _PyThreadState_Current #endif #if PY_VERSION_HEX < 0x030700A2 && !defined(PyThread_tss_create) && !defined(Py_tss_NEEDS_INIT) #include "pythread.h" #define Py_tss_NEEDS_INIT 0 typedef int Py_tss_t; static CYTHON_INLINE int PyThread_tss_create(Py_tss_t *key) { *key = PyThread_create_key(); return 0; } static CYTHON_INLINE Py_tss_t * PyThread_tss_alloc(void) { Py_tss_t *key = (Py_tss_t *)PyObject_Malloc(sizeof(Py_tss_t)); *key = Py_tss_NEEDS_INIT; return key; } static CYTHON_INLINE void PyThread_tss_free(Py_tss_t *key) { PyObject_Free(key); } static CYTHON_INLINE int PyThread_tss_is_created(Py_tss_t *key) { return *key != Py_tss_NEEDS_INIT; } static CYTHON_INLINE void PyThread_tss_delete(Py_tss_t *key) { PyThread_delete_key(*key); *key = Py_tss_NEEDS_INIT; } static CYTHON_INLINE int PyThread_tss_set(Py_tss_t *key, void *value) { return PyThread_set_key_value(*key, value); } static CYTHON_INLINE void * PyThread_tss_get(Py_tss_t *key) { return PyThread_get_key_value(*key); } #endif #if CYTHON_COMPILING_IN_CPYTHON || defined(_PyDict_NewPresized) #define __Pyx_PyDict_NewPresized(n) ((n <= 8) ? PyDict_New() : _PyDict_NewPresized(n)) #else #define __Pyx_PyDict_NewPresized(n) PyDict_New() #endif #if PY_MAJOR_VERSION >= 3 || CYTHON_FUTURE_DIVISION #define __Pyx_PyNumber_Divide(x,y) PyNumber_TrueDivide(x,y) #define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceTrueDivide(x,y) #else #define __Pyx_PyNumber_Divide(x,y) PyNumber_Divide(x,y) #define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceDivide(x,y) #endif #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030500A1 && CYTHON_USE_UNICODE_INTERNALS #define __Pyx_PyDict_GetItemStr(dict, name) _PyDict_GetItem_KnownHash(dict, name, ((PyASCIIObject *) name)->hash) #else #define __Pyx_PyDict_GetItemStr(dict, name) PyDict_GetItem(dict, name) #endif #if PY_VERSION_HEX > 0x03030000 && defined(PyUnicode_KIND) #define CYTHON_PEP393_ENABLED 1 #define __Pyx_PyUnicode_READY(op) (likely(PyUnicode_IS_READY(op)) ?\ 0 : _PyUnicode_Ready((PyObject *)(op))) #define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_LENGTH(u) #define __Pyx_PyUnicode_READ_CHAR(u, i) PyUnicode_READ_CHAR(u, i) #define __Pyx_PyUnicode_MAX_CHAR_VALUE(u) PyUnicode_MAX_CHAR_VALUE(u) #define __Pyx_PyUnicode_KIND(u) PyUnicode_KIND(u) #define __Pyx_PyUnicode_DATA(u) PyUnicode_DATA(u) #define __Pyx_PyUnicode_READ(k, d, i) PyUnicode_READ(k, d, i) #define __Pyx_PyUnicode_WRITE(k, d, i, ch) PyUnicode_WRITE(k, d, i, ch) #if defined(PyUnicode_IS_READY) && defined(PyUnicode_GET_SIZE) #define __Pyx_PyUnicode_IS_TRUE(u) (0 != (likely(PyUnicode_IS_READY(u)) ? 
PyUnicode_GET_LENGTH(u) : PyUnicode_GET_SIZE(u))) #else #define __Pyx_PyUnicode_IS_TRUE(u) (0 != PyUnicode_GET_LENGTH(u)) #endif #else #define CYTHON_PEP393_ENABLED 0 #define PyUnicode_1BYTE_KIND 1 #define PyUnicode_2BYTE_KIND 2 #define PyUnicode_4BYTE_KIND 4 #define __Pyx_PyUnicode_READY(op) (0) #define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_SIZE(u) #define __Pyx_PyUnicode_READ_CHAR(u, i) ((Py_UCS4)(PyUnicode_AS_UNICODE(u)[i])) #define __Pyx_PyUnicode_MAX_CHAR_VALUE(u) ((sizeof(Py_UNICODE) == 2) ? 65535 : 1114111) #define __Pyx_PyUnicode_KIND(u) (sizeof(Py_UNICODE)) #define __Pyx_PyUnicode_DATA(u) ((void*)PyUnicode_AS_UNICODE(u)) #define __Pyx_PyUnicode_READ(k, d, i) ((void)(k), (Py_UCS4)(((Py_UNICODE*)d)[i])) #define __Pyx_PyUnicode_WRITE(k, d, i, ch) (((void)(k)), ((Py_UNICODE*)d)[i] = ch) #define __Pyx_PyUnicode_IS_TRUE(u) (0 != PyUnicode_GET_SIZE(u)) #endif #if CYTHON_COMPILING_IN_PYPY #define __Pyx_PyUnicode_Concat(a, b) PyNumber_Add(a, b) #define __Pyx_PyUnicode_ConcatSafe(a, b) PyNumber_Add(a, b) #else #define __Pyx_PyUnicode_Concat(a, b) PyUnicode_Concat(a, b) #define __Pyx_PyUnicode_ConcatSafe(a, b) ((unlikely((a) == Py_None) || unlikely((b) == Py_None)) ?\ PyNumber_Add(a, b) : __Pyx_PyUnicode_Concat(a, b)) #endif #if CYTHON_COMPILING_IN_PYPY && !defined(PyUnicode_Contains) #define PyUnicode_Contains(u, s) PySequence_Contains(u, s) #endif #if CYTHON_COMPILING_IN_PYPY && !defined(PyByteArray_Check) #define PyByteArray_Check(obj) PyObject_TypeCheck(obj, &PyByteArray_Type) #endif #if CYTHON_COMPILING_IN_PYPY && !defined(PyObject_Format) #define PyObject_Format(obj, fmt) PyObject_CallMethod(obj, "__format__", "O", fmt) #endif #define __Pyx_PyString_FormatSafe(a, b) ((unlikely((a) == Py_None || (PyString_Check(b) && !PyString_CheckExact(b)))) ? PyNumber_Remainder(a, b) : __Pyx_PyString_Format(a, b)) #define __Pyx_PyUnicode_FormatSafe(a, b) ((unlikely((a) == Py_None || (PyUnicode_Check(b) && !PyUnicode_CheckExact(b)))) ? 
PyNumber_Remainder(a, b) : PyUnicode_Format(a, b)) #if PY_MAJOR_VERSION >= 3 #define __Pyx_PyString_Format(a, b) PyUnicode_Format(a, b) #else #define __Pyx_PyString_Format(a, b) PyString_Format(a, b) #endif #if PY_MAJOR_VERSION < 3 && !defined(PyObject_ASCII) #define PyObject_ASCII(o) PyObject_Repr(o) #endif #if PY_MAJOR_VERSION >= 3 #define PyBaseString_Type PyUnicode_Type #define PyStringObject PyUnicodeObject #define PyString_Type PyUnicode_Type #define PyString_Check PyUnicode_Check #define PyString_CheckExact PyUnicode_CheckExact #ifndef PyObject_Unicode #define PyObject_Unicode PyObject_Str #endif #endif #if PY_MAJOR_VERSION >= 3 #define __Pyx_PyBaseString_Check(obj) PyUnicode_Check(obj) #define __Pyx_PyBaseString_CheckExact(obj) PyUnicode_CheckExact(obj) #else #define __Pyx_PyBaseString_Check(obj) (PyString_Check(obj) || PyUnicode_Check(obj)) #define __Pyx_PyBaseString_CheckExact(obj) (PyString_CheckExact(obj) || PyUnicode_CheckExact(obj)) #endif #ifndef PySet_CheckExact #define PySet_CheckExact(obj) (Py_TYPE(obj) == &PySet_Type) #endif #if PY_VERSION_HEX >= 0x030900A4 #define __Pyx_SET_REFCNT(obj, refcnt) Py_SET_REFCNT(obj, refcnt) #define __Pyx_SET_SIZE(obj, size) Py_SET_SIZE(obj, size) #else #define __Pyx_SET_REFCNT(obj, refcnt) Py_REFCNT(obj) = (refcnt) #define __Pyx_SET_SIZE(obj, size) Py_SIZE(obj) = (size) #endif #if CYTHON_ASSUME_SAFE_MACROS #define __Pyx_PySequence_SIZE(seq) Py_SIZE(seq) #else #define __Pyx_PySequence_SIZE(seq) PySequence_Size(seq) #endif #if PY_MAJOR_VERSION >= 3 #define PyIntObject PyLongObject #define PyInt_Type PyLong_Type #define PyInt_Check(op) PyLong_Check(op) #define PyInt_CheckExact(op) PyLong_CheckExact(op) #define PyInt_FromString PyLong_FromString #define PyInt_FromUnicode PyLong_FromUnicode #define PyInt_FromLong PyLong_FromLong #define PyInt_FromSize_t PyLong_FromSize_t #define PyInt_FromSsize_t PyLong_FromSsize_t #define PyInt_AsLong PyLong_AsLong #define PyInt_AS_LONG PyLong_AS_LONG #define PyInt_AsSsize_t PyLong_AsSsize_t #define PyInt_AsUnsignedLongMask PyLong_AsUnsignedLongMask #define PyInt_AsUnsignedLongLongMask PyLong_AsUnsignedLongLongMask #define PyNumber_Int PyNumber_Long #endif #if PY_MAJOR_VERSION >= 3 #define PyBoolObject PyLongObject #endif #if PY_MAJOR_VERSION >= 3 && CYTHON_COMPILING_IN_PYPY #ifndef PyUnicode_InternFromString #define PyUnicode_InternFromString(s) PyUnicode_FromString(s) #endif #endif #if PY_VERSION_HEX < 0x030200A4 typedef long Py_hash_t; #define __Pyx_PyInt_FromHash_t PyInt_FromLong #define __Pyx_PyInt_AsHash_t PyInt_AsLong #else #define __Pyx_PyInt_FromHash_t PyInt_FromSsize_t #define __Pyx_PyInt_AsHash_t PyInt_AsSsize_t #endif #if PY_MAJOR_VERSION >= 3 #define __Pyx_PyMethod_New(func, self, klass) ((self) ? 
((void)(klass), PyMethod_New(func, self)) : __Pyx_NewRef(func)) #else #define __Pyx_PyMethod_New(func, self, klass) PyMethod_New(func, self, klass) #endif #if CYTHON_USE_ASYNC_SLOTS #if PY_VERSION_HEX >= 0x030500B1 #define __Pyx_PyAsyncMethodsStruct PyAsyncMethods #define __Pyx_PyType_AsAsync(obj) (Py_TYPE(obj)->tp_as_async) #else #define __Pyx_PyType_AsAsync(obj) ((__Pyx_PyAsyncMethodsStruct*) (Py_TYPE(obj)->tp_reserved)) #endif #else #define __Pyx_PyType_AsAsync(obj) NULL #endif #ifndef __Pyx_PyAsyncMethodsStruct typedef struct { unaryfunc am_await; unaryfunc am_aiter; unaryfunc am_anext; } __Pyx_PyAsyncMethodsStruct; #endif #if defined(WIN32) || defined(MS_WINDOWS) #define _USE_MATH_DEFINES #endif #include #ifdef NAN #define __PYX_NAN() ((float) NAN) #else static CYTHON_INLINE float __PYX_NAN() { float value; memset(&value, 0xFF, sizeof(value)); return value; } #endif #if defined(__CYGWIN__) && defined(_LDBL_EQ_DBL) #define __Pyx_truncl trunc #else #define __Pyx_truncl truncl #endif #define __PYX_MARK_ERR_POS(f_index, lineno) \ { __pyx_filename = __pyx_f[f_index]; (void)__pyx_filename; __pyx_lineno = lineno; (void)__pyx_lineno; __pyx_clineno = __LINE__; (void)__pyx_clineno; } #define __PYX_ERR(f_index, lineno, Ln_error) \ { __PYX_MARK_ERR_POS(f_index, lineno) goto Ln_error; } #ifndef __PYX_EXTERN_C #ifdef __cplusplus #define __PYX_EXTERN_C extern "C" #else #define __PYX_EXTERN_C extern #endif #endif #define __PYX_HAVE__borg__item #define __PYX_HAVE_API__borg__item /* Early includes */ #ifdef _OPENMP #include #endif /* _OPENMP */ #if defined(PYREX_WITHOUT_ASSERTIONS) && !defined(CYTHON_WITHOUT_ASSERTIONS) #define CYTHON_WITHOUT_ASSERTIONS #endif typedef struct {PyObject **p; const char *s; const Py_ssize_t n; const char* encoding; const char is_unicode; const char is_str; const char intern; } __Pyx_StringTabEntry; #define __PYX_DEFAULT_STRING_ENCODING_IS_ASCII 0 #define __PYX_DEFAULT_STRING_ENCODING_IS_UTF8 0 #define __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT (PY_MAJOR_VERSION >= 3 && __PYX_DEFAULT_STRING_ENCODING_IS_UTF8) #define __PYX_DEFAULT_STRING_ENCODING "" #define __Pyx_PyObject_FromString __Pyx_PyBytes_FromString #define __Pyx_PyObject_FromStringAndSize __Pyx_PyBytes_FromStringAndSize #define __Pyx_uchar_cast(c) ((unsigned char)c) #define __Pyx_long_cast(x) ((long)x) #define __Pyx_fits_Py_ssize_t(v, type, is_signed) (\ (sizeof(type) < sizeof(Py_ssize_t)) ||\ (sizeof(type) > sizeof(Py_ssize_t) &&\ likely(v < (type)PY_SSIZE_T_MAX ||\ v == (type)PY_SSIZE_T_MAX) &&\ (!is_signed || likely(v > (type)PY_SSIZE_T_MIN ||\ v == (type)PY_SSIZE_T_MIN))) ||\ (sizeof(type) == sizeof(Py_ssize_t) &&\ (is_signed || likely(v < (type)PY_SSIZE_T_MAX ||\ v == (type)PY_SSIZE_T_MAX))) ) static CYTHON_INLINE int __Pyx_is_valid_index(Py_ssize_t i, Py_ssize_t limit) { return (size_t) i < (size_t) limit; } #if defined (__cplusplus) && __cplusplus >= 201103L #include #define __Pyx_sst_abs(value) std::abs(value) #elif SIZEOF_INT >= SIZEOF_SIZE_T #define __Pyx_sst_abs(value) abs(value) #elif SIZEOF_LONG >= SIZEOF_SIZE_T #define __Pyx_sst_abs(value) labs(value) #elif defined (_MSC_VER) #define __Pyx_sst_abs(value) ((Py_ssize_t)_abs64(value)) #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L #define __Pyx_sst_abs(value) llabs(value) #elif defined (__GNUC__) #define __Pyx_sst_abs(value) __builtin_llabs(value) #else #define __Pyx_sst_abs(value) ((value<0) ? 
-value : value) #endif static CYTHON_INLINE const char* __Pyx_PyObject_AsString(PyObject*); static CYTHON_INLINE const char* __Pyx_PyObject_AsStringAndSize(PyObject*, Py_ssize_t* length); #define __Pyx_PyByteArray_FromString(s) PyByteArray_FromStringAndSize((const char*)s, strlen((const char*)s)) #define __Pyx_PyByteArray_FromStringAndSize(s, l) PyByteArray_FromStringAndSize((const char*)s, l) #define __Pyx_PyBytes_FromString PyBytes_FromString #define __Pyx_PyBytes_FromStringAndSize PyBytes_FromStringAndSize static CYTHON_INLINE PyObject* __Pyx_PyUnicode_FromString(const char*); #if PY_MAJOR_VERSION < 3 #define __Pyx_PyStr_FromString __Pyx_PyBytes_FromString #define __Pyx_PyStr_FromStringAndSize __Pyx_PyBytes_FromStringAndSize #else #define __Pyx_PyStr_FromString __Pyx_PyUnicode_FromString #define __Pyx_PyStr_FromStringAndSize __Pyx_PyUnicode_FromStringAndSize #endif #define __Pyx_PyBytes_AsWritableString(s) ((char*) PyBytes_AS_STRING(s)) #define __Pyx_PyBytes_AsWritableSString(s) ((signed char*) PyBytes_AS_STRING(s)) #define __Pyx_PyBytes_AsWritableUString(s) ((unsigned char*) PyBytes_AS_STRING(s)) #define __Pyx_PyBytes_AsString(s) ((const char*) PyBytes_AS_STRING(s)) #define __Pyx_PyBytes_AsSString(s) ((const signed char*) PyBytes_AS_STRING(s)) #define __Pyx_PyBytes_AsUString(s) ((const unsigned char*) PyBytes_AS_STRING(s)) #define __Pyx_PyObject_AsWritableString(s) ((char*) __Pyx_PyObject_AsString(s)) #define __Pyx_PyObject_AsWritableSString(s) ((signed char*) __Pyx_PyObject_AsString(s)) #define __Pyx_PyObject_AsWritableUString(s) ((unsigned char*) __Pyx_PyObject_AsString(s)) #define __Pyx_PyObject_AsSString(s) ((const signed char*) __Pyx_PyObject_AsString(s)) #define __Pyx_PyObject_AsUString(s) ((const unsigned char*) __Pyx_PyObject_AsString(s)) #define __Pyx_PyObject_FromCString(s) __Pyx_PyObject_FromString((const char*)s) #define __Pyx_PyBytes_FromCString(s) __Pyx_PyBytes_FromString((const char*)s) #define __Pyx_PyByteArray_FromCString(s) __Pyx_PyByteArray_FromString((const char*)s) #define __Pyx_PyStr_FromCString(s) __Pyx_PyStr_FromString((const char*)s) #define __Pyx_PyUnicode_FromCString(s) __Pyx_PyUnicode_FromString((const char*)s) static CYTHON_INLINE size_t __Pyx_Py_UNICODE_strlen(const Py_UNICODE *u) { const Py_UNICODE *u_end = u; while (*u_end++) ; return (size_t)(u_end - u - 1); } #define __Pyx_PyUnicode_FromUnicode(u) PyUnicode_FromUnicode(u, __Pyx_Py_UNICODE_strlen(u)) #define __Pyx_PyUnicode_FromUnicodeAndLength PyUnicode_FromUnicode #define __Pyx_PyUnicode_AsUnicode PyUnicode_AsUnicode #define __Pyx_NewRef(obj) (Py_INCREF(obj), obj) #define __Pyx_Owned_Py_None(b) __Pyx_NewRef(Py_None) static CYTHON_INLINE PyObject * __Pyx_PyBool_FromLong(long b); static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject*); static CYTHON_INLINE int __Pyx_PyObject_IsTrueAndDecref(PyObject*); static CYTHON_INLINE PyObject* __Pyx_PyNumber_IntOrLong(PyObject* x); #define __Pyx_PySequence_Tuple(obj)\ (likely(PyTuple_CheckExact(obj)) ? __Pyx_NewRef(obj) : PySequence_Tuple(obj)) static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject*); static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t); #if CYTHON_ASSUME_SAFE_MACROS #define __pyx_PyFloat_AsDouble(x) (PyFloat_CheckExact(x) ? PyFloat_AS_DOUBLE(x) : PyFloat_AsDouble(x)) #else #define __pyx_PyFloat_AsDouble(x) PyFloat_AsDouble(x) #endif #define __pyx_PyFloat_AsFloat(x) ((float) __pyx_PyFloat_AsDouble(x)) #if PY_MAJOR_VERSION >= 3 #define __Pyx_PyNumber_Int(x) (PyLong_CheckExact(x) ? 
__Pyx_NewRef(x) : PyNumber_Long(x)) #else #define __Pyx_PyNumber_Int(x) (PyInt_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Int(x)) #endif #define __Pyx_PyNumber_Float(x) (PyFloat_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Float(x)) #if PY_MAJOR_VERSION < 3 && __PYX_DEFAULT_STRING_ENCODING_IS_ASCII static int __Pyx_sys_getdefaultencoding_not_ascii; static int __Pyx_init_sys_getdefaultencoding_params(void) { PyObject* sys; PyObject* default_encoding = NULL; PyObject* ascii_chars_u = NULL; PyObject* ascii_chars_b = NULL; const char* default_encoding_c; sys = PyImport_ImportModule("sys"); if (!sys) goto bad; default_encoding = PyObject_CallMethod(sys, (char*) "getdefaultencoding", NULL); Py_DECREF(sys); if (!default_encoding) goto bad; default_encoding_c = PyBytes_AsString(default_encoding); if (!default_encoding_c) goto bad; if (strcmp(default_encoding_c, "ascii") == 0) { __Pyx_sys_getdefaultencoding_not_ascii = 0; } else { char ascii_chars[128]; int c; for (c = 0; c < 128; c++) { ascii_chars[c] = c; } __Pyx_sys_getdefaultencoding_not_ascii = 1; ascii_chars_u = PyUnicode_DecodeASCII(ascii_chars, 128, NULL); if (!ascii_chars_u) goto bad; ascii_chars_b = PyUnicode_AsEncodedString(ascii_chars_u, default_encoding_c, NULL); if (!ascii_chars_b || !PyBytes_Check(ascii_chars_b) || memcmp(ascii_chars, PyBytes_AS_STRING(ascii_chars_b), 128) != 0) { PyErr_Format( PyExc_ValueError, "This module compiled with c_string_encoding=ascii, but default encoding '%.200s' is not a superset of ascii.", default_encoding_c); goto bad; } Py_DECREF(ascii_chars_u); Py_DECREF(ascii_chars_b); } Py_DECREF(default_encoding); return 0; bad: Py_XDECREF(default_encoding); Py_XDECREF(ascii_chars_u); Py_XDECREF(ascii_chars_b); return -1; } #endif #if __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT && PY_MAJOR_VERSION >= 3 #define __Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_DecodeUTF8(c_str, size, NULL) #else #define __Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_Decode(c_str, size, __PYX_DEFAULT_STRING_ENCODING, NULL) #if __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT static char* __PYX_DEFAULT_STRING_ENCODING; static int __Pyx_init_sys_getdefaultencoding_params(void) { PyObject* sys; PyObject* default_encoding = NULL; char* default_encoding_c; sys = PyImport_ImportModule("sys"); if (!sys) goto bad; default_encoding = PyObject_CallMethod(sys, (char*) (const char*) "getdefaultencoding", NULL); Py_DECREF(sys); if (!default_encoding) goto bad; default_encoding_c = PyBytes_AsString(default_encoding); if (!default_encoding_c) goto bad; __PYX_DEFAULT_STRING_ENCODING = (char*) malloc(strlen(default_encoding_c) + 1); if (!__PYX_DEFAULT_STRING_ENCODING) goto bad; strcpy(__PYX_DEFAULT_STRING_ENCODING, default_encoding_c); Py_DECREF(default_encoding); return 0; bad: Py_XDECREF(default_encoding); return -1; } #endif #endif /* Test for GCC > 2.95 */ #if defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))) #define likely(x) __builtin_expect(!!(x), 1) #define unlikely(x) __builtin_expect(!!(x), 0) #else /* !__GNUC__ or GCC < 2.95 */ #define likely(x) (x) #define unlikely(x) (x) #endif /* __GNUC__ */ static CYTHON_INLINE void __Pyx_pretend_to_initialize(void* ptr) { (void)ptr; } static PyObject *__pyx_m = NULL; static PyObject *__pyx_d; static PyObject *__pyx_b; static PyObject *__pyx_cython_runtime = NULL; static PyObject *__pyx_empty_tuple; static PyObject *__pyx_empty_bytes; static PyObject *__pyx_empty_unicode; static int __pyx_lineno; static int __pyx_clineno = 0; static const char * __pyx_cfilenm= 
__FILE__; static const char *__pyx_filename; static const char *__pyx_f[] = { "src/borg/item.pyx", }; /*--- Type declarations ---*/ struct __pyx_obj_4borg_4item___pyx_scope_struct___make_property; struct __pyx_obj_4borg_4item___pyx_scope_struct_1_get_size; struct __pyx_obj_4borg_4item___pyx_scope_struct_2_genexpr; /* "borg/item.pyx":94 * * @staticmethod * def _make_property(key, value_type, value_type_name=None, encode=None, decode=None): # <<<<<<<<<<<<<< * """return a property that deals with self._dict[key]""" * assert isinstance(key, str) */ struct __pyx_obj_4borg_4item___pyx_scope_struct___make_property { PyObject_HEAD PyObject *__pyx_v_attr_error_msg; PyObject *__pyx_v_decode; PyObject *__pyx_v_encode; PyObject *__pyx_v_key; PyObject *__pyx_v_type_error_msg; PyObject *__pyx_v_value_type; }; /* "borg/item.pyx":191 * part = PropDict._make_property('part', int) * * def get_size(self, hardlink_masters=None, memorize=False, compressed=False, from_chunks=False): # <<<<<<<<<<<<<< * """ * Determine the (uncompressed or compressed) size of this item. */ struct __pyx_obj_4borg_4item___pyx_scope_struct_1_get_size { PyObject_HEAD PyObject *__pyx_v_attr; PyObject *__pyx_v_chunks; }; /* "borg/item.pyx":233 * if chunks is None: * return 0 * size = sum(getattr(ChunkListEntry(*chunk), attr) for chunk in chunks) # <<<<<<<<<<<<<< * # if requested, memorize the precomputed (c)size for items that have an own chunks list: * if memorize and having_chunks: */ struct __pyx_obj_4borg_4item___pyx_scope_struct_2_genexpr { PyObject_HEAD struct __pyx_obj_4borg_4item___pyx_scope_struct_1_get_size *__pyx_outer_scope; PyObject *__pyx_v_chunk; PyObject *__pyx_t_0; Py_ssize_t __pyx_t_1; PyObject *(*__pyx_t_2)(PyObject *); }; /* --- Runtime support code (head) --- */ /* Refnanny.proto */ #ifndef CYTHON_REFNANNY #define CYTHON_REFNANNY 0 #endif #if CYTHON_REFNANNY typedef struct { void (*INCREF)(void*, PyObject*, int); void (*DECREF)(void*, PyObject*, int); void (*GOTREF)(void*, PyObject*, int); void (*GIVEREF)(void*, PyObject*, int); void* (*SetupContext)(const char*, int, const char*); void (*FinishContext)(void**); } __Pyx_RefNannyAPIStruct; static __Pyx_RefNannyAPIStruct *__Pyx_RefNanny = NULL; static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname); #define __Pyx_RefNannyDeclarations void *__pyx_refnanny = NULL; #ifdef WITH_THREAD #define __Pyx_RefNannySetupContext(name, acquire_gil)\ if (acquire_gil) {\ PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();\ __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__);\ PyGILState_Release(__pyx_gilstate_save);\ } else {\ __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__);\ } #else #define __Pyx_RefNannySetupContext(name, acquire_gil)\ __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__) #endif #define __Pyx_RefNannyFinishContext()\ __Pyx_RefNanny->FinishContext(&__pyx_refnanny) #define __Pyx_INCREF(r) __Pyx_RefNanny->INCREF(__pyx_refnanny, (PyObject *)(r), __LINE__) #define __Pyx_DECREF(r) __Pyx_RefNanny->DECREF(__pyx_refnanny, (PyObject *)(r), __LINE__) #define __Pyx_GOTREF(r) __Pyx_RefNanny->GOTREF(__pyx_refnanny, (PyObject *)(r), __LINE__) #define __Pyx_GIVEREF(r) __Pyx_RefNanny->GIVEREF(__pyx_refnanny, (PyObject *)(r), __LINE__) #define __Pyx_XINCREF(r) do { if((r) != NULL) {__Pyx_INCREF(r); }} while(0) #define __Pyx_XDECREF(r) do { if((r) != NULL) {__Pyx_DECREF(r); }} while(0) #define __Pyx_XGOTREF(r) do { if((r) != NULL) {__Pyx_GOTREF(r); }} while(0) #define __Pyx_XGIVEREF(r) 
do { if((r) != NULL) {__Pyx_GIVEREF(r);}} while(0) #else #define __Pyx_RefNannyDeclarations #define __Pyx_RefNannySetupContext(name, acquire_gil) #define __Pyx_RefNannyFinishContext() #define __Pyx_INCREF(r) Py_INCREF(r) #define __Pyx_DECREF(r) Py_DECREF(r) #define __Pyx_GOTREF(r) #define __Pyx_GIVEREF(r) #define __Pyx_XINCREF(r) Py_XINCREF(r) #define __Pyx_XDECREF(r) Py_XDECREF(r) #define __Pyx_XGOTREF(r) #define __Pyx_XGIVEREF(r) #endif #define __Pyx_XDECREF_SET(r, v) do {\ PyObject *tmp = (PyObject *) r;\ r = v; __Pyx_XDECREF(tmp);\ } while (0) #define __Pyx_DECREF_SET(r, v) do {\ PyObject *tmp = (PyObject *) r;\ r = v; __Pyx_DECREF(tmp);\ } while (0) #define __Pyx_CLEAR(r) do { PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);} while(0) #define __Pyx_XCLEAR(r) do { if((r) != NULL) {PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);}} while(0) /* PyObjectGetAttrStr.proto */ #if CYTHON_USE_TYPE_SLOTS static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStr(PyObject* obj, PyObject* attr_name); #else #define __Pyx_PyObject_GetAttrStr(o,n) PyObject_GetAttr(o,n) #endif /* GetBuiltinName.proto */ static PyObject *__Pyx_GetBuiltinName(PyObject *name); /* RaiseDoubleKeywords.proto */ static void __Pyx_RaiseDoubleKeywordsError(const char* func_name, PyObject* kw_name); /* ParseKeywords.proto */ static int __Pyx_ParseOptionalKeywords(PyObject *kwds, PyObject **argnames[],\ PyObject *kwds2, PyObject *values[], Py_ssize_t num_pos_args,\ const char* function_name); /* RaiseArgTupleInvalid.proto */ static void __Pyx_RaiseArgtupleInvalid(const char* func_name, int exact, Py_ssize_t num_min, Py_ssize_t num_max, Py_ssize_t num_found); /* PyObjectSetAttrStr.proto */ #if CYTHON_USE_TYPE_SLOTS #define __Pyx_PyObject_DelAttrStr(o,n) __Pyx_PyObject_SetAttrStr(o, n, NULL) static CYTHON_INLINE int __Pyx_PyObject_SetAttrStr(PyObject* obj, PyObject* attr_name, PyObject* value); #else #define __Pyx_PyObject_DelAttrStr(o,n) PyObject_DelAttr(o,n) #define __Pyx_PyObject_SetAttrStr(o,n,v) PyObject_SetAttr(o,n,v) #endif /* PyCFunctionFastCall.proto */ #if CYTHON_FAST_PYCCALL static CYTHON_INLINE PyObject *__Pyx_PyCFunction_FastCall(PyObject *func, PyObject **args, Py_ssize_t nargs); #else #define __Pyx_PyCFunction_FastCall(func, args, nargs) (assert(0), NULL) #endif /* PyFunctionFastCall.proto */ #if CYTHON_FAST_PYCALL #define __Pyx_PyFunction_FastCall(func, args, nargs)\ __Pyx_PyFunction_FastCallDict((func), (args), (nargs), NULL) #if 1 || PY_VERSION_HEX < 0x030600B1 static PyObject *__Pyx_PyFunction_FastCallDict(PyObject *func, PyObject **args, Py_ssize_t nargs, PyObject *kwargs); #else #define __Pyx_PyFunction_FastCallDict(func, args, nargs, kwargs) _PyFunction_FastCallDict(func, args, nargs, kwargs) #endif #define __Pyx_BUILD_ASSERT_EXPR(cond)\ (sizeof(char [1 - 2*!(cond)]) - 1) #ifndef Py_MEMBER_SIZE #define Py_MEMBER_SIZE(type, member) sizeof(((type *)0)->member) #endif static size_t __pyx_pyframe_localsplus_offset = 0; #include "frameobject.h" #define __Pxy_PyFrame_Initialize_Offsets()\ ((void)__Pyx_BUILD_ASSERT_EXPR(sizeof(PyFrameObject) == offsetof(PyFrameObject, f_localsplus) + Py_MEMBER_SIZE(PyFrameObject, f_localsplus)),\ (void)(__pyx_pyframe_localsplus_offset = ((size_t)PyFrame_Type.tp_basicsize) - Py_MEMBER_SIZE(PyFrameObject, f_localsplus))) #define __Pyx_PyFrame_GetLocalsplus(frame)\ (assert(__pyx_pyframe_localsplus_offset), (PyObject **)(((char *)(frame)) + __pyx_pyframe_localsplus_offset)) #endif /* PyObjectCall.proto */ #if CYTHON_COMPILING_IN_CPYTHON static CYTHON_INLINE 
PyObject* __Pyx_PyObject_Call(PyObject *func, PyObject *arg, PyObject *kw); #else #define __Pyx_PyObject_Call(func, arg, kw) PyObject_Call(func, arg, kw) #endif /* PyObjectCall2Args.proto */ static CYTHON_UNUSED PyObject* __Pyx_PyObject_Call2Args(PyObject* function, PyObject* arg1, PyObject* arg2); /* PyObjectCallMethO.proto */ #if CYTHON_COMPILING_IN_CPYTHON static CYTHON_INLINE PyObject* __Pyx_PyObject_CallMethO(PyObject *func, PyObject *arg); #endif /* PyObjectCallOneArg.proto */ static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg); /* PyThreadStateGet.proto */ #if CYTHON_FAST_THREAD_STATE #define __Pyx_PyThreadState_declare PyThreadState *__pyx_tstate; #define __Pyx_PyThreadState_assign __pyx_tstate = __Pyx_PyThreadState_Current; #define __Pyx_PyErr_Occurred() __pyx_tstate->curexc_type #else #define __Pyx_PyThreadState_declare #define __Pyx_PyThreadState_assign #define __Pyx_PyErr_Occurred() PyErr_Occurred() #endif /* PyErrFetchRestore.proto */ #if CYTHON_FAST_THREAD_STATE #define __Pyx_PyErr_Clear() __Pyx_ErrRestore(NULL, NULL, NULL) #define __Pyx_ErrRestoreWithState(type, value, tb) __Pyx_ErrRestoreInState(PyThreadState_GET(), type, value, tb) #define __Pyx_ErrFetchWithState(type, value, tb) __Pyx_ErrFetchInState(PyThreadState_GET(), type, value, tb) #define __Pyx_ErrRestore(type, value, tb) __Pyx_ErrRestoreInState(__pyx_tstate, type, value, tb) #define __Pyx_ErrFetch(type, value, tb) __Pyx_ErrFetchInState(__pyx_tstate, type, value, tb) static CYTHON_INLINE void __Pyx_ErrRestoreInState(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb); static CYTHON_INLINE void __Pyx_ErrFetchInState(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb); #if CYTHON_COMPILING_IN_CPYTHON #define __Pyx_PyErr_SetNone(exc) (Py_INCREF(exc), __Pyx_ErrRestore((exc), NULL, NULL)) #else #define __Pyx_PyErr_SetNone(exc) PyErr_SetNone(exc) #endif #else #define __Pyx_PyErr_Clear() PyErr_Clear() #define __Pyx_PyErr_SetNone(exc) PyErr_SetNone(exc) #define __Pyx_ErrRestoreWithState(type, value, tb) PyErr_Restore(type, value, tb) #define __Pyx_ErrFetchWithState(type, value, tb) PyErr_Fetch(type, value, tb) #define __Pyx_ErrRestoreInState(tstate, type, value, tb) PyErr_Restore(type, value, tb) #define __Pyx_ErrFetchInState(tstate, type, value, tb) PyErr_Fetch(type, value, tb) #define __Pyx_ErrRestore(type, value, tb) PyErr_Restore(type, value, tb) #define __Pyx_ErrFetch(type, value, tb) PyErr_Fetch(type, value, tb) #endif /* RaiseException.proto */ static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause); /* IterFinish.proto */ static CYTHON_INLINE int __Pyx_IterFinish(void); /* PyObjectCallNoArg.proto */ #if CYTHON_COMPILING_IN_CPYTHON static CYTHON_INLINE PyObject* __Pyx_PyObject_CallNoArg(PyObject *func); #else #define __Pyx_PyObject_CallNoArg(func) __Pyx_PyObject_Call(func, __pyx_empty_tuple, NULL) #endif /* PyObjectGetMethod.proto */ static int __Pyx_PyObject_GetMethod(PyObject *obj, PyObject *name, PyObject **method); /* PyObjectCallMethod0.proto */ static PyObject* __Pyx_PyObject_CallMethod0(PyObject* obj, PyObject* method_name); /* RaiseNeedMoreValuesToUnpack.proto */ static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index); /* RaiseTooManyValuesToUnpack.proto */ static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected); /* UnpackItemEndCheck.proto */ static int __Pyx_IternextUnpackEndCheck(PyObject *retval, Py_ssize_t expected); /* RaiseNoneIterError.proto */ static 
CYTHON_INLINE void __Pyx_RaiseNoneNotIterableError(void); /* UnpackTupleError.proto */ static void __Pyx_UnpackTupleError(PyObject *, Py_ssize_t index); /* UnpackTuple2.proto */ #define __Pyx_unpack_tuple2(tuple, value1, value2, is_tuple, has_known_size, decref_tuple)\ (likely(is_tuple || PyTuple_Check(tuple)) ?\ (likely(has_known_size || PyTuple_GET_SIZE(tuple) == 2) ?\ __Pyx_unpack_tuple2_exact(tuple, value1, value2, decref_tuple) :\ (__Pyx_UnpackTupleError(tuple, 2), -1)) :\ __Pyx_unpack_tuple2_generic(tuple, value1, value2, has_known_size, decref_tuple)) static CYTHON_INLINE int __Pyx_unpack_tuple2_exact( PyObject* tuple, PyObject** value1, PyObject** value2, int decref_tuple); static int __Pyx_unpack_tuple2_generic( PyObject* tuple, PyObject** value1, PyObject** value2, int has_known_size, int decref_tuple); /* dict_iter.proto */ static CYTHON_INLINE PyObject* __Pyx_dict_iterator(PyObject* dict, int is_dict, PyObject* method_name, Py_ssize_t* p_orig_length, int* p_is_dict); static CYTHON_INLINE int __Pyx_dict_iter_next(PyObject* dict_or_iter, Py_ssize_t orig_length, Py_ssize_t* ppos, PyObject** pkey, PyObject** pvalue, PyObject** pitem, int is_dict); /* PyObjectFormatAndDecref.proto */ static CYTHON_INLINE PyObject* __Pyx_PyObject_FormatSimpleAndDecref(PyObject* s, PyObject* f); static CYTHON_INLINE PyObject* __Pyx_PyObject_FormatAndDecref(PyObject* s, PyObject* f); /* IncludeStringH.proto */ #include /* JoinPyUnicode.proto */ static PyObject* __Pyx_PyUnicode_Join(PyObject* value_tuple, Py_ssize_t value_count, Py_ssize_t result_ulength, Py_UCS4 max_char); /* PyDictVersioning.proto */ #if CYTHON_USE_DICT_VERSIONS && CYTHON_USE_TYPE_SLOTS #define __PYX_DICT_VERSION_INIT ((PY_UINT64_T) -1) #define __PYX_GET_DICT_VERSION(dict) (((PyDictObject*)(dict))->ma_version_tag) #define __PYX_UPDATE_DICT_CACHE(dict, value, cache_var, version_var)\ (version_var) = __PYX_GET_DICT_VERSION(dict);\ (cache_var) = (value); #define __PYX_PY_DICT_LOOKUP_IF_MODIFIED(VAR, DICT, LOOKUP) {\ static PY_UINT64_T __pyx_dict_version = 0;\ static PyObject *__pyx_dict_cached_value = NULL;\ if (likely(__PYX_GET_DICT_VERSION(DICT) == __pyx_dict_version)) {\ (VAR) = __pyx_dict_cached_value;\ } else {\ (VAR) = __pyx_dict_cached_value = (LOOKUP);\ __pyx_dict_version = __PYX_GET_DICT_VERSION(DICT);\ }\ } static CYTHON_INLINE PY_UINT64_T __Pyx_get_tp_dict_version(PyObject *obj); static CYTHON_INLINE PY_UINT64_T __Pyx_get_object_dict_version(PyObject *obj); static CYTHON_INLINE int __Pyx_object_dict_version_matches(PyObject* obj, PY_UINT64_T tp_dict_version, PY_UINT64_T obj_dict_version); #else #define __PYX_GET_DICT_VERSION(dict) (0) #define __PYX_UPDATE_DICT_CACHE(dict, value, cache_var, version_var) #define __PYX_PY_DICT_LOOKUP_IF_MODIFIED(VAR, DICT, LOOKUP) (VAR) = (LOOKUP); #endif /* GetModuleGlobalName.proto */ #if CYTHON_USE_DICT_VERSIONS #define __Pyx_GetModuleGlobalName(var, name) {\ static PY_UINT64_T __pyx_dict_version = 0;\ static PyObject *__pyx_dict_cached_value = NULL;\ (var) = (likely(__pyx_dict_version == __PYX_GET_DICT_VERSION(__pyx_d))) ?\ (likely(__pyx_dict_cached_value) ? 
__Pyx_NewRef(__pyx_dict_cached_value) : __Pyx_GetBuiltinName(name)) :\ __Pyx__GetModuleGlobalName(name, &__pyx_dict_version, &__pyx_dict_cached_value);\ } #define __Pyx_GetModuleGlobalNameUncached(var, name) {\ PY_UINT64_T __pyx_dict_version;\ PyObject *__pyx_dict_cached_value;\ (var) = __Pyx__GetModuleGlobalName(name, &__pyx_dict_version, &__pyx_dict_cached_value);\ } static PyObject *__Pyx__GetModuleGlobalName(PyObject *name, PY_UINT64_T *dict_version, PyObject **dict_cached_value); #else #define __Pyx_GetModuleGlobalName(var, name) (var) = __Pyx__GetModuleGlobalName(name) #define __Pyx_GetModuleGlobalNameUncached(var, name) (var) = __Pyx__GetModuleGlobalName(name) static CYTHON_INLINE PyObject *__Pyx__GetModuleGlobalName(PyObject *name); #endif /* PySequenceContains.proto */ static CYTHON_INLINE int __Pyx_PySequence_ContainsTF(PyObject* item, PyObject* seq, int eq) { int result = PySequence_Contains(seq, item); return unlikely(result < 0) ? result : (result == (eq == Py_EQ)); } /* PyErrExceptionMatches.proto */ #if CYTHON_FAST_THREAD_STATE #define __Pyx_PyErr_ExceptionMatches(err) __Pyx_PyErr_ExceptionMatchesInState(__pyx_tstate, err) static CYTHON_INLINE int __Pyx_PyErr_ExceptionMatchesInState(PyThreadState* tstate, PyObject* err); #else #define __Pyx_PyErr_ExceptionMatches(err) PyErr_ExceptionMatches(err) #endif /* GetAttr.proto */ static CYTHON_INLINE PyObject *__Pyx_GetAttr(PyObject *, PyObject *); /* GetAttr3.proto */ static CYTHON_INLINE PyObject *__Pyx_GetAttr3(PyObject *, PyObject *, PyObject *); /* None.proto */ static CYTHON_INLINE void __Pyx_RaiseClosureNameError(const char *varname); /* GetItemInt.proto */ #define __Pyx_GetItemInt(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck)\ (__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\ __Pyx_GetItemInt_Fast(o, (Py_ssize_t)i, is_list, wraparound, boundscheck) :\ (is_list ? 
(PyErr_SetString(PyExc_IndexError, "list index out of range"), (PyObject*)NULL) :\ __Pyx_GetItemInt_Generic(o, to_py_func(i)))) #define __Pyx_GetItemInt_List(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck)\ (__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\ __Pyx_GetItemInt_List_Fast(o, (Py_ssize_t)i, wraparound, boundscheck) :\ (PyErr_SetString(PyExc_IndexError, "list index out of range"), (PyObject*)NULL)) static CYTHON_INLINE PyObject *__Pyx_GetItemInt_List_Fast(PyObject *o, Py_ssize_t i, int wraparound, int boundscheck); #define __Pyx_GetItemInt_Tuple(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck)\ (__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\ __Pyx_GetItemInt_Tuple_Fast(o, (Py_ssize_t)i, wraparound, boundscheck) :\ (PyErr_SetString(PyExc_IndexError, "tuple index out of range"), (PyObject*)NULL)) static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Tuple_Fast(PyObject *o, Py_ssize_t i, int wraparound, int boundscheck); static PyObject *__Pyx_GetItemInt_Generic(PyObject *o, PyObject* j); static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Fast(PyObject *o, Py_ssize_t i, int is_list, int wraparound, int boundscheck); /* ObjectGetItem.proto */ #if CYTHON_USE_TYPE_SLOTS static CYTHON_INLINE PyObject *__Pyx_PyObject_GetItem(PyObject *obj, PyObject* key); #else #define __Pyx_PyObject_GetItem(obj, key) PyObject_GetItem(obj, key) #endif /* GetTopmostException.proto */ #if CYTHON_USE_EXC_INFO_STACK static _PyErr_StackItem * __Pyx_PyErr_GetTopmostException(PyThreadState *tstate); #endif /* SaveResetException.proto */ #if CYTHON_FAST_THREAD_STATE #define __Pyx_ExceptionSave(type, value, tb) __Pyx__ExceptionSave(__pyx_tstate, type, value, tb) static CYTHON_INLINE void __Pyx__ExceptionSave(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb); #define __Pyx_ExceptionReset(type, value, tb) __Pyx__ExceptionReset(__pyx_tstate, type, value, tb) static CYTHON_INLINE void __Pyx__ExceptionReset(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb); #else #define __Pyx_ExceptionSave(type, value, tb) PyErr_GetExcInfo(type, value, tb) #define __Pyx_ExceptionReset(type, value, tb) PyErr_SetExcInfo(type, value, tb) #endif /* GetException.proto */ #if CYTHON_FAST_THREAD_STATE #define __Pyx_GetException(type, value, tb) __Pyx__GetException(__pyx_tstate, type, value, tb) static int __Pyx__GetException(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb); #else static int __Pyx_GetException(PyObject **type, PyObject **value, PyObject **tb); #endif /* FetchCommonType.proto */ static PyTypeObject* __Pyx_FetchCommonType(PyTypeObject* type); /* CythonFunctionShared.proto */ #define __Pyx_CyFunction_USED 1 #define __Pyx_CYFUNCTION_STATICMETHOD 0x01 #define __Pyx_CYFUNCTION_CLASSMETHOD 0x02 #define __Pyx_CYFUNCTION_CCLASS 0x04 #define __Pyx_CyFunction_GetClosure(f)\ (((__pyx_CyFunctionObject *) (f))->func_closure) #define __Pyx_CyFunction_GetClassObj(f)\ (((__pyx_CyFunctionObject *) (f))->func_classobj) #define __Pyx_CyFunction_Defaults(type, f)\ ((type *)(((__pyx_CyFunctionObject *) (f))->defaults)) #define __Pyx_CyFunction_SetDefaultsGetter(f, g)\ ((__pyx_CyFunctionObject *) (f))->defaults_getter = (g) typedef struct { PyCFunctionObject func; #if PY_VERSION_HEX < 0x030500A0 PyObject *func_weakreflist; #endif PyObject *func_dict; PyObject *func_name; PyObject *func_qualname; PyObject *func_doc; PyObject *func_globals; PyObject *func_code; PyObject *func_closure; PyObject *func_classobj; void *defaults; int defaults_pyobjects; size_t 
defaults_size; // used by FusedFunction for copying defaults int flags; PyObject *defaults_tuple; PyObject *defaults_kwdict; PyObject *(*defaults_getter)(PyObject *); PyObject *func_annotations; } __pyx_CyFunctionObject; static PyTypeObject *__pyx_CyFunctionType = 0; #define __Pyx_CyFunction_Check(obj) (__Pyx_TypeCheck(obj, __pyx_CyFunctionType)) static PyObject *__Pyx_CyFunction_Init(__pyx_CyFunctionObject* op, PyMethodDef *ml, int flags, PyObject* qualname, PyObject *self, PyObject *module, PyObject *globals, PyObject* code); static CYTHON_INLINE void *__Pyx_CyFunction_InitDefaults(PyObject *m, size_t size, int pyobjects); static CYTHON_INLINE void __Pyx_CyFunction_SetDefaultsTuple(PyObject *m, PyObject *tuple); static CYTHON_INLINE void __Pyx_CyFunction_SetDefaultsKwDict(PyObject *m, PyObject *dict); static CYTHON_INLINE void __Pyx_CyFunction_SetAnnotationsDict(PyObject *m, PyObject *dict); static int __pyx_CyFunction_init(void); /* CythonFunction.proto */ static PyObject *__Pyx_CyFunction_New(PyMethodDef *ml, int flags, PyObject* qualname, PyObject *closure, PyObject *module, PyObject *globals, PyObject* code); /* PyObject_GenericGetAttrNoDict.proto */ #if CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP && PY_VERSION_HEX < 0x03070000 static CYTHON_INLINE PyObject* __Pyx_PyObject_GenericGetAttrNoDict(PyObject* obj, PyObject* attr_name); #else #define __Pyx_PyObject_GenericGetAttrNoDict PyObject_GenericGetAttr #endif /* Import.proto */ static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, int level); /* ImportFrom.proto */ static PyObject* __Pyx_ImportFrom(PyObject* module, PyObject* name); /* SetNameInClass.proto */ #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030500A1 #define __Pyx_SetNameInClass(ns, name, value)\ (likely(PyDict_CheckExact(ns)) ? _PyDict_SetItem_KnownHash(ns, name, value, ((PyASCIIObject *) name)->hash) : PyObject_SetItem(ns, name, value)) #elif CYTHON_COMPILING_IN_CPYTHON #define __Pyx_SetNameInClass(ns, name, value)\ (likely(PyDict_CheckExact(ns)) ? PyDict_SetItem(ns, name, value) : PyObject_SetItem(ns, name, value)) #else #define __Pyx_SetNameInClass(ns, name, value) PyObject_SetItem(ns, name, value) #endif /* CalculateMetaclass.proto */ static PyObject *__Pyx_CalculateMetaclass(PyTypeObject *metaclass, PyObject *bases); /* Py3ClassCreate.proto */ static PyObject *__Pyx_Py3MetaclassPrepare(PyObject *metaclass, PyObject *bases, PyObject *name, PyObject *qualname, PyObject *mkw, PyObject *modname, PyObject *doc); static PyObject *__Pyx_Py3ClassCreate(PyObject *metaclass, PyObject *name, PyObject *bases, PyObject *dict, PyObject *mkw, int calculate_metaclass, int allow_py2_metaclass); /* CLineInTraceback.proto */ #ifdef CYTHON_CLINE_IN_TRACEBACK #define __Pyx_CLineForTraceback(tstate, c_line) (((CYTHON_CLINE_IN_TRACEBACK)) ? 
c_line : 0) #else static int __Pyx_CLineForTraceback(PyThreadState *tstate, int c_line); #endif /* CodeObjectCache.proto */ typedef struct { PyCodeObject* code_object; int code_line; } __Pyx_CodeObjectCacheEntry; struct __Pyx_CodeObjectCache { int count; int max_count; __Pyx_CodeObjectCacheEntry* entries; }; static struct __Pyx_CodeObjectCache __pyx_code_cache = {0,0,NULL}; static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line); static PyCodeObject *__pyx_find_code_object(int code_line); static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object); /* AddTraceback.proto */ static void __Pyx_AddTraceback(const char *funcname, int c_line, int py_line, const char *filename); /* CIntToPy.proto */ static CYTHON_INLINE PyObject* __Pyx_PyInt_From_long(long value); /* CIntFromPy.proto */ static CYTHON_INLINE long __Pyx_PyInt_As_long(PyObject *); /* CIntFromPy.proto */ static CYTHON_INLINE int __Pyx_PyInt_As_int(PyObject *); /* FastTypeChecks.proto */ #if CYTHON_COMPILING_IN_CPYTHON #define __Pyx_TypeCheck(obj, type) __Pyx_IsSubtype(Py_TYPE(obj), (PyTypeObject *)type) static CYTHON_INLINE int __Pyx_IsSubtype(PyTypeObject *a, PyTypeObject *b); static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches(PyObject *err, PyObject *type); static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches2(PyObject *err, PyObject *type1, PyObject *type2); #else #define __Pyx_TypeCheck(obj, type) PyObject_TypeCheck(obj, (PyTypeObject *)type) #define __Pyx_PyErr_GivenExceptionMatches(err, type) PyErr_GivenExceptionMatches(err, type) #define __Pyx_PyErr_GivenExceptionMatches2(err, type1, type2) (PyErr_GivenExceptionMatches(err, type1) || PyErr_GivenExceptionMatches(err, type2)) #endif #define __Pyx_PyException_Check(obj) __Pyx_TypeCheck(obj, PyExc_Exception) /* SwapException.proto */ #if CYTHON_FAST_THREAD_STATE #define __Pyx_ExceptionSwap(type, value, tb) __Pyx__ExceptionSwap(__pyx_tstate, type, value, tb) static CYTHON_INLINE void __Pyx__ExceptionSwap(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb); #else static CYTHON_INLINE void __Pyx_ExceptionSwap(PyObject **type, PyObject **value, PyObject **tb); #endif /* PyObjectCallMethod1.proto */ static PyObject* __Pyx_PyObject_CallMethod1(PyObject* obj, PyObject* method_name, PyObject* arg); /* CoroutineBase.proto */ typedef PyObject *(*__pyx_coroutine_body_t)(PyObject *, PyThreadState *, PyObject *); #if CYTHON_USE_EXC_INFO_STACK #define __Pyx_ExcInfoStruct _PyErr_StackItem #else typedef struct { PyObject *exc_type; PyObject *exc_value; PyObject *exc_traceback; } __Pyx_ExcInfoStruct; #endif typedef struct { PyObject_HEAD __pyx_coroutine_body_t body; PyObject *closure; __Pyx_ExcInfoStruct gi_exc_state; PyObject *gi_weakreflist; PyObject *classobj; PyObject *yieldfrom; PyObject *gi_name; PyObject *gi_qualname; PyObject *gi_modulename; PyObject *gi_code; int resume_label; char is_running; } __pyx_CoroutineObject; static __pyx_CoroutineObject *__Pyx__Coroutine_New( PyTypeObject *type, __pyx_coroutine_body_t body, PyObject *code, PyObject *closure, PyObject *name, PyObject *qualname, PyObject *module_name); static __pyx_CoroutineObject *__Pyx__Coroutine_NewInit( __pyx_CoroutineObject *gen, __pyx_coroutine_body_t body, PyObject *code, PyObject *closure, PyObject *name, PyObject *qualname, PyObject *module_name); static CYTHON_INLINE void __Pyx_Coroutine_ExceptionClear(__Pyx_ExcInfoStruct *self); static int __Pyx_Coroutine_clear(PyObject *self); static PyObject *__Pyx_Coroutine_Send(PyObject 
*self, PyObject *value); static PyObject *__Pyx_Coroutine_Close(PyObject *self); static PyObject *__Pyx_Coroutine_Throw(PyObject *gen, PyObject *args); #if CYTHON_USE_EXC_INFO_STACK #define __Pyx_Coroutine_SwapException(self) #define __Pyx_Coroutine_ResetAndClearException(self) __Pyx_Coroutine_ExceptionClear(&(self)->gi_exc_state) #else #define __Pyx_Coroutine_SwapException(self) {\ __Pyx_ExceptionSwap(&(self)->gi_exc_state.exc_type, &(self)->gi_exc_state.exc_value, &(self)->gi_exc_state.exc_traceback);\ __Pyx_Coroutine_ResetFrameBackpointer(&(self)->gi_exc_state);\ } #define __Pyx_Coroutine_ResetAndClearException(self) {\ __Pyx_ExceptionReset((self)->gi_exc_state.exc_type, (self)->gi_exc_state.exc_value, (self)->gi_exc_state.exc_traceback);\ (self)->gi_exc_state.exc_type = (self)->gi_exc_state.exc_value = (self)->gi_exc_state.exc_traceback = NULL;\ } #endif #if CYTHON_FAST_THREAD_STATE #define __Pyx_PyGen_FetchStopIterationValue(pvalue)\ __Pyx_PyGen__FetchStopIterationValue(__pyx_tstate, pvalue) #else #define __Pyx_PyGen_FetchStopIterationValue(pvalue)\ __Pyx_PyGen__FetchStopIterationValue(__Pyx_PyThreadState_Current, pvalue) #endif static int __Pyx_PyGen__FetchStopIterationValue(PyThreadState *tstate, PyObject **pvalue); static CYTHON_INLINE void __Pyx_Coroutine_ResetFrameBackpointer(__Pyx_ExcInfoStruct *exc_state); /* PatchModuleWithCoroutine.proto */ static PyObject* __Pyx_Coroutine_patch_module(PyObject* module, const char* py_code); /* PatchGeneratorABC.proto */ static int __Pyx_patch_abc(void); /* Generator.proto */ #define __Pyx_Generator_USED static PyTypeObject *__pyx_GeneratorType = 0; #define __Pyx_Generator_CheckExact(obj) (Py_TYPE(obj) == __pyx_GeneratorType) #define __Pyx_Generator_New(body, code, closure, name, qualname, module_name)\ __Pyx__Coroutine_New(__pyx_GeneratorType, body, code, closure, name, qualname, module_name) static PyObject *__Pyx_Generator_Next(PyObject *self); static int __pyx_Generator_init(void); /* CheckBinaryVersion.proto */ static int __Pyx_check_binary_version(void); /* InitStrings.proto */ static int __Pyx_InitStrings(__Pyx_StringTabEntry *t); /* Module declarations from 'borg.item' */ static PyTypeObject *__pyx_ptype_4borg_4item___pyx_scope_struct___make_property = 0; static PyTypeObject *__pyx_ptype_4borg_4item___pyx_scope_struct_1_get_size = 0; static PyTypeObject *__pyx_ptype_4borg_4item___pyx_scope_struct_2_genexpr = 0; #define __Pyx_MODULE_NAME "borg.item" extern int __pyx_module_is_main_borg__item; int __pyx_module_is_main_borg__item = 0; /* Implementation of 'borg.item' */ static PyObject *__pyx_builtin_staticmethod; static PyObject *__pyx_builtin_TypeError; static PyObject *__pyx_builtin_ValueError; static PyObject *__pyx_builtin_property; static PyObject *__pyx_builtin_KeyError; static PyObject *__pyx_builtin_AttributeError; static PyObject *__pyx_builtin_sum; static const char __pyx_k_d[] = "d"; static const char __pyx_k_k[] = "k"; static const char __pyx_k_v[] = "v"; static const char __pyx_k__3[] = ")"; static const char __pyx_k__5[] = " ("; static const char __pyx_k_eq[] = "__eq__"; static const char __pyx_k_kw[] = "kw"; static const char __pyx_k_Key[] = "Key"; static const char __pyx_k__38[] = "_"; static const char __pyx_k_del[] = "_del"; static const char __pyx_k_doc[] = "doc"; static const char __pyx_k_get[] = "_get"; static const char __pyx_k_gid[] = "gid"; static const char __pyx_k_key[] = "key"; static const char __pyx_k_set[] = "_set"; static const char __pyx_k_sum[] = "sum"; static const char __pyx_k_uid[] = "uid"; static 
const char __pyx_k_Item[] = "Item"; static const char __pyx_k_args[] = "args"; static const char __pyx_k_attr[] = "attr"; static const char __pyx_k_data[] = "data"; static const char __pyx_k_dict[] = "_dict"; static const char __pyx_k_hash[] = "hash"; static const char __pyx_k_init[] = "__init__"; static const char __pyx_k_main[] = "__main__"; static const char __pyx_k_mode[] = "mode"; static const char __pyx_k_name[] = "__name__"; static const char __pyx_k_part[] = "part"; static const char __pyx_k_path[] = "path"; static const char __pyx_k_rdev[] = "rdev"; static const char __pyx_k_repr[] = "__repr__"; static const char __pyx_k_salt[] = "salt"; static const char __pyx_k_self[] = "self"; static const char __pyx_k_send[] = "send"; static const char __pyx_k_size[] = "size"; static const char __pyx_k_stat[] = "stat"; static const char __pyx_k_test[] = "__test__"; static const char __pyx_k_time[] = "time"; static const char __pyx_k_user[] = "user"; static const char __pyx_k_atime[] = "atime"; static const char __pyx_k_class[] = "__class__"; static const char __pyx_k_close[] = "close"; static const char __pyx_k_csize[] = "csize"; static const char __pyx_k_ctime[] = "ctime"; static const char __pyx_k_doc_2[] = "__doc__"; static const char __pyx_k_get_2[] = "get"; static const char __pyx_k_group[] = "group"; static const char __pyx_k_items[] = "items"; static const char __pyx_k_mtime[] = "mtime"; static const char __pyx_k_nlink[] = "nlink"; static const char __pyx_k_other[] = "other"; static const char __pyx_k_slots[] = "__slots__"; static const char __pyx_k_throw[] = "throw"; static const char __pyx_k_value[] = "value"; static const char __pyx_k_1_1_03[] = "1.1_03"; static const char __pyx_k_bigint[] = "bigint"; static const char __pyx_k_chunks[] = "chunks"; static const char __pyx_k_config[] = "config"; static const char __pyx_k_decode[] = "decode"; static const char __pyx_k_encode[] = "encode"; static const char __pyx_k_id_key[] = "id_key"; static const char __pyx_k_import[] = "__import__"; static const char __pyx_k_master[] = "master"; static const char __pyx_k_module[] = "__module__"; static const char __pyx_k_name_2[] = "name"; static const char __pyx_k_source[] = "source"; static const char __pyx_k_update[] = "update"; static const char __pyx_k_xattrs[] = "xattrs"; static const char __pyx_k_S_ISLNK[] = "S_ISLNK"; static const char __pyx_k_as_dict[] = "as_dict"; static const char __pyx_k_cmdline[] = "cmdline"; static const char __pyx_k_comment[] = "comment"; static const char __pyx_k_default[] = "default"; static const char __pyx_k_deleted[] = "deleted"; static const char __pyx_k_enc_key[] = "enc_key"; static const char __pyx_k_genexpr[] = "genexpr"; static const char __pyx_k_helpers[] = "helpers"; static const char __pyx_k_prepare[] = "__prepare__"; static const char __pyx_k_version[] = "version"; static const char __pyx_k_KeyError[] = "KeyError"; static const char __pyx_k_PropDict[] = "PropDict"; static const char __pyx_k_acl_nfs4[] = "acl_nfs4"; static const char __pyx_k_archives[] = "archives"; static const char __pyx_k_bsdflags[] = "bsdflags"; static const char __pyx_k_contains[] = "__contains__"; static const char __pyx_k_get_size[] = "get_size"; static const char __pyx_k_hostname[] = "hostname"; static const char __pyx_k_memorize[] = "memorize"; static const char __pyx_k_property[] = "property"; static const char __pyx_k_qualname[] = "__qualname__"; static const char __pyx_k_time_end[] = "time_end"; static const char __pyx_k_username[] = "username"; static const char 
__pyx_k_ITEM_KEYS[] = "ITEM_KEYS"; static const char __pyx_k_TypeError[] = "TypeError"; static const char __pyx_k_algorithm[] = "algorithm"; static const char __pyx_k_birthtime[] = "birthtime"; static const char __pyx_k_borg_item[] = "borg.item"; static const char __pyx_k_check_key[] = "_check_key"; static const char __pyx_k_constants[] = "constants"; static const char __pyx_k_data_dict[] = "data_dict"; static const char __pyx_k_item_keys[] = "item_keys"; static const char __pyx_k_metaclass[] = "__metaclass__"; static const char __pyx_k_timestamp[] = "timestamp"; static const char __pyx_k_StableDict[] = "StableDict"; static const char __pyx_k_VALID_KEYS[] = "VALID_KEYS"; static const char __pyx_k_ValueError[] = "ValueError"; static const char __pyx_k_acl_access[] = "acl_access"; static const char __pyx_k_chunk_seed[] = "chunk_seed"; static const char __pyx_k_compressed[] = "compressed"; static const char __pyx_k_iterations[] = "iterations"; static const char __pyx_k_namedtuple[] = "namedtuple"; static const char __pyx_k_value_type[] = "value_type"; static const char __pyx_k_API_VERSION[] = "API_VERSION"; static const char __pyx_k_ArchiveItem[] = "ArchiveItem"; static const char __pyx_k_acl_default[] = "acl_default"; static const char __pyx_k_collections[] = "collections"; static const char __pyx_k_from_chunks[] = "from_chunks"; static const char __pyx_k_safe_decode[] = "safe_decode"; static const char __pyx_k_safe_encode[] = "safe_encode"; static const char __pyx_k_ARCHIVE_KEYS[] = "ARCHIVE_KEYS"; static const char __pyx_k_EncryptedKey[] = "EncryptedKey"; static const char __pyx_k_ManifestItem[] = "ManifestItem"; static const char __pyx_k_PropDict_get[] = "PropDict.get"; static const char __pyx_k_acl_extended[] = "acl_extended"; static const char __pyx_k_enc_hmac_key[] = "enc_hmac_key"; static const char __pyx_k_list_or_None[] = "list or None"; static const char __pyx_k_staticmethod[] = "staticmethod"; static const char __pyx_k_tam_required[] = "tam_required"; static const char __pyx_k_Item_get_size[] = "Item.get_size"; static const char __pyx_k_PropDict___eq[] = "PropDict.__eq__"; static const char __pyx_k_bigint_to_int[] = "bigint_to_int"; static const char __pyx_k_having_chunks[] = "having_chunks"; static const char __pyx_k_id_size_csize[] = "id size csize"; static const char __pyx_k_int_to_bigint[] = "int_to_bigint"; static const char __pyx_k_internal_dict[] = "internal_dict"; static const char __pyx_k_make_property[] = "_make_property"; static const char __pyx_k_recreate_args[] = "recreate_args"; static const char __pyx_k_repository_id[] = "repository_id"; static const char __pyx_k_value_must_be[] = " value must be "; static const char __pyx_k_AttributeError[] = "AttributeError"; static const char __pyx_k_ChunkListEntry[] = "ChunkListEntry"; static const char __pyx_k_attr_error_msg[] = "attr_error_msg"; static const char __pyx_k_chunker_params[] = "chunker_params"; static const char __pyx_k_chunks_healthy[] = "chunks_healthy"; static const char __pyx_k_type_error_msg[] = "type_error_msg"; static const char __pyx_k_PropDict___init[] = "PropDict.__init__"; static const char __pyx_k_PropDict___repr[] = "PropDict.__repr__"; static const char __pyx_k_PropDict_update[] = "PropDict.update"; static const char __pyx_k_hardlink_master[] = "hardlink_master"; static const char __pyx_k_internal_dict_2[] = "(internal_dict="; static const char __pyx_k_key_must_be_str[] = "key must be str"; static const char __pyx_k_update_internal[] = "update_internal"; static const char __pyx_k_value_type_name[] = 
"value_type_name"; static const char __pyx_k_PropDict_as_dict[] = "PropDict.as_dict"; static const char __pyx_k_hardlink_masters[] = "hardlink_masters"; static const char __pyx_k_recreate_cmdline[] = "recreate_cmdline"; static const char __pyx_k_src_borg_item_pyx[] = "src/borg/item.pyx"; static const char __pyx_k_cline_in_traceback[] = "cline_in_traceback"; static const char __pyx_k_recreate_source_id[] = "recreate_source_id"; static const char __pyx_k_PropDict___contains[] = "PropDict.__contains__"; static const char __pyx_k_PropDict__check_key[] = "PropDict._check_key"; static const char __pyx_k_attribute_s_not_found[] = "attribute %s not found"; static const char __pyx_k_surrogate_escaped_str[] = "surrogate-escaped str"; static const char __pyx_k_PropDict__make_property[] = "PropDict._make_property"; static const char __pyx_k_recreate_partial_chunks[] = "recreate_partial_chunks"; static const char __pyx_k_PropDict_update_internal[] = "PropDict.update_internal"; static const char __pyx_k_data_dict_must_be_a_dict[] = "data_dict must be a dict"; static const char __pyx_k_key_s_is_not_a_valid_key[] = "key '%s' is not a valid key"; static const char __pyx_k_Item_get_size_locals_genexpr[] = "Item.get_size..genexpr"; static const char __pyx_k_internal_dict_must_be_a_dict[] = "internal_dict must be a dict"; static const char __pyx_k_surrogate_escaped_str_or_None[] = "surrogate-escaped str or None"; static const char __pyx_k_PropDict__make_property_locals[] = "PropDict._make_property.._get"; static const char __pyx_k_ArchiveItem_abstraction_that_de[] = "\n ArchiveItem abstraction that deals with validation and the low-level details internally:\n\n An ArchiveItem is created either from msgpack unpacker output, from another dict, from kwargs or\n built step-by-step by setting attributes.\n\n msgpack gives us a dict with bytes-typed keys, just give it to ArchiveItem(d) and use arch.xxx later.\n\n If a ArchiveItem shall be serialized, give as_dict() method output to msgpack packer.\n "; static const char __pyx_k_EncryptedKey_abstraction_that_d[] = "\n EncryptedKey abstraction that deals with validation and the low-level details internally:\n\n A EncryptedKey is created either from msgpack unpacker output, from another dict, from kwargs or\n built step-by-step by setting attributes.\n\n msgpack gives us a dict with bytes-typed keys, just give it to EncryptedKey(d) and use enc_key.xxx later.\n\n If a EncryptedKey shall be serialized, give as_dict() method output to msgpack packer.\n "; static const char __pyx_k_Item_abstraction_that_deals_wit[] = "\n Item abstraction that deals with validation and the low-level details internally:\n\n Items are created either from msgpack unpacker output, from another dict, from kwargs or\n built step-by-step by setting attributes.\n\n msgpack gives us a dict with bytes-typed keys, just give it to Item(internal_dict=d) and use item.key_name later.\n msgpack gives us byte-typed values for stuff that should be str, we automatically decode when getting\n such a property and encode when setting it.\n\n If an Item shall be serialized, give as_dict() method output to msgpack packer.\n\n A bug in Attic up to and including release 0.13 added a (meaningless) 'acl' key to every item.\n We must never re-use this key. 
See test_attic013_acl_bug for details.\n "; static const char __pyx_k_Key_abstraction_that_deals_with[] = "\n Key abstraction that deals with validation and the low-level details internally:\n\n A Key is created either from msgpack unpacker output, from another dict, from kwargs or\n built step-by-step by setting attributes.\n\n msgpack gives us a dict with bytes-typed keys, just give it to Key(d) and use key.xxx later.\n\n If a Key shall be serialized, give as_dict() method output to msgpack packer.\n "; static const char __pyx_k_Manage_a_dictionary_via_propert[] = "\n Manage a dictionary via properties.\n\n - initialization by giving a dict or kw args\n - on initialization, normalize dict keys to be str type\n - access dict via properties, like: x.key_name\n - membership check via: 'key_name' in x\n - optionally, encode when setting a value\n - optionally, decode when getting a value\n - be safe against typos in key names: check against VALID_KEYS\n - when setting a value: check type of value\n\n When \"packing\" a dict, ie. you have a dict with some data and want to convert it into an instance,\n then use eg. Item({'a': 1, ...}). This way all keys in your dictionary are validated.\n\n When \"unpacking\", that is you've read a dictionary with some data from somewhere (eg. msgpack),\n then use eg. Item(internal_dict={...}). This does not validate the keys, therefore unknown keys\n are ignored instead of causing an error.\n "; static const char __pyx_k_ManifestItem_abstraction_that_d[] = "\n ManifestItem abstraction that deals with validation and the low-level details internally:\n\n A ManifestItem is created either from msgpack unpacker output, from another dict, from kwargs or\n built step-by-step by setting attributes.\n\n msgpack gives us a dict with bytes-typed keys, just give it to ManifestItem(d) and use manifest.xxx later.\n\n If a ManifestItem shall be serialized, give as_dict() method output to msgpack packer.\n "; static const char __pyx_k_Item_does_not_have_a_csize_field[] = "Item does not have a csize field."; static const char __pyx_k_PropDict__make_property_locals_2[] = "PropDict._make_property.._set"; static const char __pyx_k_PropDict__make_property_locals_3[] = "PropDict._make_property.._del"; static PyObject *__pyx_kp_u_1_1_03; static PyObject *__pyx_n_s_API_VERSION; static PyObject *__pyx_n_s_ARCHIVE_KEYS; static PyObject *__pyx_n_s_ArchiveItem; static PyObject *__pyx_kp_s_ArchiveItem_abstraction_that_de; static PyObject *__pyx_n_s_AttributeError; static PyObject *__pyx_n_s_ChunkListEntry; static PyObject *__pyx_n_u_ChunkListEntry; static PyObject *__pyx_n_s_EncryptedKey; static PyObject *__pyx_kp_s_EncryptedKey_abstraction_that_d; static PyObject *__pyx_n_s_ITEM_KEYS; static PyObject *__pyx_n_s_Item; static PyObject *__pyx_kp_s_Item_abstraction_that_deals_wit; static PyObject *__pyx_kp_u_Item_does_not_have_a_csize_field; static PyObject *__pyx_n_s_Item_get_size; static PyObject *__pyx_n_s_Item_get_size_locals_genexpr; static PyObject *__pyx_n_s_Key; static PyObject *__pyx_n_s_KeyError; static PyObject *__pyx_kp_s_Key_abstraction_that_deals_with; static PyObject *__pyx_kp_s_Manage_a_dictionary_via_propert; static PyObject *__pyx_n_s_ManifestItem; static PyObject *__pyx_kp_s_ManifestItem_abstraction_that_d; static PyObject *__pyx_n_s_PropDict; static PyObject *__pyx_n_s_PropDict___contains; static PyObject *__pyx_n_s_PropDict___eq; static PyObject *__pyx_n_s_PropDict___init; static PyObject *__pyx_n_s_PropDict___repr; static PyObject *__pyx_n_s_PropDict__check_key; 
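/* Illustrative usage sketch (a minimal example derived from the PropDict docstring quoted
 * above; it is not part of the generated module, and the concrete keys/values are assumptions
 * chosen from the item keys declared in this file):
 *
 *     item = Item(path='foo', mode=0o100644)        # "packing": every key goes through _check_key
 *     item = Item(internal_dict={'path': 'foo'})    # "unpacking": no key validation, unknown keys ignored
 *     d = item.as_dict()                            # StableDict copy of the internal dict, for msgpack
 */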
static PyObject *__pyx_n_s_PropDict__make_property; static PyObject *__pyx_n_s_PropDict__make_property_locals; static PyObject *__pyx_n_s_PropDict__make_property_locals_2; static PyObject *__pyx_n_s_PropDict__make_property_locals_3; static PyObject *__pyx_n_s_PropDict_as_dict; static PyObject *__pyx_n_s_PropDict_get; static PyObject *__pyx_n_s_PropDict_update; static PyObject *__pyx_n_s_PropDict_update_internal; static PyObject *__pyx_n_s_S_ISLNK; static PyObject *__pyx_n_s_StableDict; static PyObject *__pyx_n_s_TypeError; static PyObject *__pyx_n_s_VALID_KEYS; static PyObject *__pyx_n_s_ValueError; static PyObject *__pyx_kp_u__3; static PyObject *__pyx_n_s__38; static PyObject *__pyx_kp_u__5; static PyObject *__pyx_n_s_acl_access; static PyObject *__pyx_n_u_acl_access; static PyObject *__pyx_n_s_acl_default; static PyObject *__pyx_n_u_acl_default; static PyObject *__pyx_n_s_acl_extended; static PyObject *__pyx_n_u_acl_extended; static PyObject *__pyx_n_s_acl_nfs4; static PyObject *__pyx_n_u_acl_nfs4; static PyObject *__pyx_n_s_algorithm; static PyObject *__pyx_n_u_algorithm; static PyObject *__pyx_n_s_archives; static PyObject *__pyx_n_u_archives; static PyObject *__pyx_n_s_args; static PyObject *__pyx_n_s_as_dict; static PyObject *__pyx_n_s_atime; static PyObject *__pyx_n_u_atime; static PyObject *__pyx_n_s_attr; static PyObject *__pyx_n_s_attr_error_msg; static PyObject *__pyx_kp_u_attribute_s_not_found; static PyObject *__pyx_n_u_bigint; static PyObject *__pyx_n_s_bigint_to_int; static PyObject *__pyx_n_s_birthtime; static PyObject *__pyx_n_u_birthtime; static PyObject *__pyx_n_s_borg_item; static PyObject *__pyx_n_s_bsdflags; static PyObject *__pyx_n_u_bsdflags; static PyObject *__pyx_n_s_check_key; static PyObject *__pyx_n_s_chunk_seed; static PyObject *__pyx_n_u_chunk_seed; static PyObject *__pyx_n_s_chunker_params; static PyObject *__pyx_n_u_chunker_params; static PyObject *__pyx_n_s_chunks; static PyObject *__pyx_n_u_chunks; static PyObject *__pyx_n_s_chunks_healthy; static PyObject *__pyx_n_u_chunks_healthy; static PyObject *__pyx_n_s_class; static PyObject *__pyx_n_s_cline_in_traceback; static PyObject *__pyx_n_s_close; static PyObject *__pyx_n_s_cmdline; static PyObject *__pyx_n_u_cmdline; static PyObject *__pyx_n_s_collections; static PyObject *__pyx_n_s_comment; static PyObject *__pyx_n_u_comment; static PyObject *__pyx_n_s_compressed; static PyObject *__pyx_n_s_config; static PyObject *__pyx_n_u_config; static PyObject *__pyx_n_s_constants; static PyObject *__pyx_n_s_contains; static PyObject *__pyx_n_u_csize; static PyObject *__pyx_n_s_ctime; static PyObject *__pyx_n_u_ctime; static PyObject *__pyx_n_s_d; static PyObject *__pyx_n_s_data; static PyObject *__pyx_n_u_data; static PyObject *__pyx_n_s_data_dict; static PyObject *__pyx_kp_u_data_dict_must_be_a_dict; static PyObject *__pyx_n_s_decode; static PyObject *__pyx_n_s_default; static PyObject *__pyx_n_s_del; static PyObject *__pyx_n_s_deleted; static PyObject *__pyx_n_u_deleted; static PyObject *__pyx_n_s_dict; static PyObject *__pyx_n_u_dict; static PyObject *__pyx_n_s_doc; static PyObject *__pyx_n_s_doc_2; static PyObject *__pyx_n_s_enc_hmac_key; static PyObject *__pyx_n_u_enc_hmac_key; static PyObject *__pyx_n_s_enc_key; static PyObject *__pyx_n_u_enc_key; static PyObject *__pyx_n_s_encode; static PyObject *__pyx_n_s_eq; static PyObject *__pyx_n_s_from_chunks; static PyObject *__pyx_n_s_genexpr; static PyObject *__pyx_n_s_get; static PyObject *__pyx_n_s_get_2; static PyObject *__pyx_n_s_get_size; static PyObject 
*__pyx_n_s_gid; static PyObject *__pyx_n_u_gid; static PyObject *__pyx_n_s_group; static PyObject *__pyx_n_u_group; static PyObject *__pyx_n_s_hardlink_master; static PyObject *__pyx_n_u_hardlink_master; static PyObject *__pyx_n_s_hardlink_masters; static PyObject *__pyx_n_s_hash; static PyObject *__pyx_n_u_hash; static PyObject *__pyx_n_s_having_chunks; static PyObject *__pyx_n_s_helpers; static PyObject *__pyx_n_s_hostname; static PyObject *__pyx_n_u_hostname; static PyObject *__pyx_n_s_id_key; static PyObject *__pyx_n_u_id_key; static PyObject *__pyx_kp_u_id_size_csize; static PyObject *__pyx_n_s_import; static PyObject *__pyx_n_s_init; static PyObject *__pyx_n_s_int_to_bigint; static PyObject *__pyx_n_s_internal_dict; static PyObject *__pyx_kp_u_internal_dict_2; static PyObject *__pyx_kp_u_internal_dict_must_be_a_dict; static PyObject *__pyx_n_s_item_keys; static PyObject *__pyx_n_u_item_keys; static PyObject *__pyx_n_s_items; static PyObject *__pyx_n_u_items; static PyObject *__pyx_n_s_iterations; static PyObject *__pyx_n_u_iterations; static PyObject *__pyx_n_s_k; static PyObject *__pyx_n_s_key; static PyObject *__pyx_kp_u_key_must_be_str; static PyObject *__pyx_kp_u_key_s_is_not_a_valid_key; static PyObject *__pyx_n_s_kw; static PyObject *__pyx_kp_u_list_or_None; static PyObject *__pyx_n_s_main; static PyObject *__pyx_n_s_make_property; static PyObject *__pyx_n_s_master; static PyObject *__pyx_n_s_memorize; static PyObject *__pyx_n_s_metaclass; static PyObject *__pyx_n_s_mode; static PyObject *__pyx_n_u_mode; static PyObject *__pyx_n_s_module; static PyObject *__pyx_n_s_mtime; static PyObject *__pyx_n_u_mtime; static PyObject *__pyx_n_s_name; static PyObject *__pyx_n_s_name_2; static PyObject *__pyx_n_u_name_2; static PyObject *__pyx_n_s_namedtuple; static PyObject *__pyx_n_s_nlink; static PyObject *__pyx_n_u_nlink; static PyObject *__pyx_n_s_other; static PyObject *__pyx_n_s_part; static PyObject *__pyx_n_u_part; static PyObject *__pyx_n_s_path; static PyObject *__pyx_n_u_path; static PyObject *__pyx_n_s_prepare; static PyObject *__pyx_n_s_property; static PyObject *__pyx_n_s_qualname; static PyObject *__pyx_n_s_rdev; static PyObject *__pyx_n_u_rdev; static PyObject *__pyx_n_s_recreate_args; static PyObject *__pyx_n_u_recreate_args; static PyObject *__pyx_n_s_recreate_cmdline; static PyObject *__pyx_n_u_recreate_cmdline; static PyObject *__pyx_n_s_recreate_partial_chunks; static PyObject *__pyx_n_u_recreate_partial_chunks; static PyObject *__pyx_n_s_recreate_source_id; static PyObject *__pyx_n_u_recreate_source_id; static PyObject *__pyx_n_s_repository_id; static PyObject *__pyx_n_u_repository_id; static PyObject *__pyx_n_s_repr; static PyObject *__pyx_n_s_safe_decode; static PyObject *__pyx_n_s_safe_encode; static PyObject *__pyx_n_s_salt; static PyObject *__pyx_n_u_salt; static PyObject *__pyx_n_s_self; static PyObject *__pyx_n_s_send; static PyObject *__pyx_n_s_set; static PyObject *__pyx_n_s_size; static PyObject *__pyx_n_u_size; static PyObject *__pyx_n_s_slots; static PyObject *__pyx_n_s_source; static PyObject *__pyx_n_u_source; static PyObject *__pyx_kp_s_src_borg_item_pyx; static PyObject *__pyx_n_s_stat; static PyObject *__pyx_n_s_staticmethod; static PyObject *__pyx_n_s_sum; static PyObject *__pyx_kp_u_surrogate_escaped_str; static PyObject *__pyx_kp_u_surrogate_escaped_str_or_None; static PyObject *__pyx_n_s_tam_required; static PyObject *__pyx_n_u_tam_required; static PyObject *__pyx_n_s_test; static PyObject *__pyx_n_s_throw; static PyObject *__pyx_n_s_time; static 
PyObject *__pyx_n_u_time; static PyObject *__pyx_n_s_time_end; static PyObject *__pyx_n_u_time_end; static PyObject *__pyx_n_s_timestamp; static PyObject *__pyx_n_u_timestamp; static PyObject *__pyx_n_s_type_error_msg; static PyObject *__pyx_n_s_uid; static PyObject *__pyx_n_u_uid; static PyObject *__pyx_n_s_update; static PyObject *__pyx_n_s_update_internal; static PyObject *__pyx_n_s_user; static PyObject *__pyx_n_u_user; static PyObject *__pyx_n_s_username; static PyObject *__pyx_n_u_username; static PyObject *__pyx_n_s_v; static PyObject *__pyx_n_s_value; static PyObject *__pyx_kp_u_value_must_be; static PyObject *__pyx_n_s_value_type; static PyObject *__pyx_n_s_value_type_name; static PyObject *__pyx_n_s_version; static PyObject *__pyx_n_u_version; static PyObject *__pyx_n_s_xattrs; static PyObject *__pyx_n_u_xattrs; static PyObject *__pyx_pf_4borg_4item_8PropDict___init__(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_self, PyObject *__pyx_v_data_dict, PyObject *__pyx_v_internal_dict, PyObject *__pyx_v_kw); /* proto */ static PyObject *__pyx_pf_4borg_4item_8PropDict_2update(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_self, PyObject *__pyx_v_d); /* proto */ static PyObject *__pyx_pf_4borg_4item_8PropDict_4update_internal(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_self, PyObject *__pyx_v_d); /* proto */ static PyObject *__pyx_pf_4borg_4item_8PropDict_6__eq__(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_self, PyObject *__pyx_v_other); /* proto */ static PyObject *__pyx_pf_4borg_4item_8PropDict_8__repr__(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_4borg_4item_8PropDict_10as_dict(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_4borg_4item_8PropDict_12_check_key(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_self, PyObject *__pyx_v_key); /* proto */ static PyObject *__pyx_pf_4borg_4item_8PropDict_14__contains__(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_self, PyObject *__pyx_v_key); /* proto */ static PyObject *__pyx_pf_4borg_4item_8PropDict_16get(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_default); /* proto */ static PyObject *__pyx_pf_4borg_4item_8PropDict_14_make_property__get(PyObject *__pyx_self, PyObject *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_4borg_4item_8PropDict_14_make_property_2_set(PyObject *__pyx_self, PyObject *__pyx_v_self, PyObject *__pyx_v_value); /* proto */ static PyObject *__pyx_pf_4borg_4item_8PropDict_14_make_property_4_del(PyObject *__pyx_self, PyObject *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_4borg_4item_8PropDict_18_make_property(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_key, PyObject *__pyx_v_value_type, PyObject *__pyx_v_value_type_name, PyObject *__pyx_v_encode, PyObject *__pyx_v_decode); /* proto */ static PyObject *__pyx_pf_4borg_4item_4Item_8get_size_genexpr(PyObject *__pyx_self); /* proto */ static PyObject *__pyx_pf_4borg_4item_4Item_get_size(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_self, PyObject *__pyx_v_hardlink_masters, PyObject *__pyx_v_memorize, PyObject *__pyx_v_compressed, PyObject *__pyx_v_from_chunks); /* proto */ static PyObject *__pyx_tp_new_4borg_4item___pyx_scope_struct___make_property(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/ static PyObject *__pyx_tp_new_4borg_4item___pyx_scope_struct_1_get_size(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/ static PyObject 
*__pyx_tp_new_4borg_4item___pyx_scope_struct_2_genexpr(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/ static PyObject *__pyx_int_0; static PyObject *__pyx_tuple_; static PyObject *__pyx_tuple__2; static PyObject *__pyx_tuple__4; static PyObject *__pyx_tuple__6; static PyObject *__pyx_tuple__8; static PyObject *__pyx_tuple__10; static PyObject *__pyx_tuple__12; static PyObject *__pyx_tuple__13; static PyObject *__pyx_tuple__14; static PyObject *__pyx_tuple__16; static PyObject *__pyx_tuple__17; static PyObject *__pyx_tuple__19; static PyObject *__pyx_tuple__21; static PyObject *__pyx_tuple__23; static PyObject *__pyx_tuple__25; static PyObject *__pyx_tuple__27; static PyObject *__pyx_tuple__29; static PyObject *__pyx_tuple__31; static PyObject *__pyx_tuple__33; static PyObject *__pyx_tuple__34; static PyObject *__pyx_tuple__36; static PyObject *__pyx_tuple__37; static PyObject *__pyx_tuple__39; static PyObject *__pyx_tuple__41; static PyObject *__pyx_codeobj__7; static PyObject *__pyx_codeobj__9; static PyObject *__pyx_codeobj__11; static PyObject *__pyx_codeobj__15; static PyObject *__pyx_codeobj__18; static PyObject *__pyx_codeobj__20; static PyObject *__pyx_codeobj__22; static PyObject *__pyx_codeobj__24; static PyObject *__pyx_codeobj__26; static PyObject *__pyx_codeobj__28; static PyObject *__pyx_codeobj__30; static PyObject *__pyx_codeobj__32; static PyObject *__pyx_codeobj__35; static PyObject *__pyx_codeobj__40; /* Late includes */ /* "borg/item.pyx":38 * __slots__ = ("_dict", ) # avoid setting attributes not supported by properties * * def __init__(self, data_dict=None, internal_dict=None, **kw): # <<<<<<<<<<<<<< * self._dict = {} * if internal_dict is None: */ /* Python wrapper */ static PyObject *__pyx_pw_4borg_4item_8PropDict_1__init__(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static PyMethodDef __pyx_mdef_4borg_4item_8PropDict_1__init__ = {"__init__", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_4borg_4item_8PropDict_1__init__, METH_VARARGS|METH_KEYWORDS, 0}; static PyObject *__pyx_pw_4borg_4item_8PropDict_1__init__(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyObject *__pyx_v_self = 0; PyObject *__pyx_v_data_dict = 0; PyObject *__pyx_v_internal_dict = 0; PyObject *__pyx_v_kw = 0; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__init__ (wrapper)", 0); __pyx_v_kw = PyDict_New(); if (unlikely(!__pyx_v_kw)) return NULL; __Pyx_GOTREF(__pyx_v_kw); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_self,&__pyx_n_s_data_dict,&__pyx_n_s_internal_dict,0}; PyObject* values[3] = {0,0,0}; values[1] = ((PyObject *)((PyObject *)Py_None)); values[2] = ((PyObject *)((PyObject *)Py_None)); if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); CYTHON_FALLTHROUGH; case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); CYTHON_FALLTHROUGH; case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_self)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: if (kw_args > 0) { PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_data_dict); if (value) { 
values[1] = value; kw_args--; } } CYTHON_FALLTHROUGH; case 2: if (kw_args > 0) { PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_internal_dict); if (value) { values[2] = value; kw_args--; } } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, __pyx_v_kw, values, pos_args, "__init__") < 0)) __PYX_ERR(0, 38, __pyx_L3_error) } } else { switch (PyTuple_GET_SIZE(__pyx_args)) { case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); CYTHON_FALLTHROUGH; case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); CYTHON_FALLTHROUGH; case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); break; default: goto __pyx_L5_argtuple_error; } } __pyx_v_self = values[0]; __pyx_v_data_dict = values[1]; __pyx_v_internal_dict = values[2]; } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("__init__", 0, 1, 3, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 38, __pyx_L3_error) __pyx_L3_error:; __Pyx_DECREF(__pyx_v_kw); __pyx_v_kw = 0; __Pyx_AddTraceback("borg.item.PropDict.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_4borg_4item_8PropDict___init__(__pyx_self, __pyx_v_self, __pyx_v_data_dict, __pyx_v_internal_dict, __pyx_v_kw); /* function exit code */ __Pyx_XDECREF(__pyx_v_kw); __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4borg_4item_8PropDict___init__(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_self, PyObject *__pyx_v_data_dict, PyObject *__pyx_v_internal_dict, PyObject *__pyx_v_kw) { PyObject *__pyx_v_data = NULL; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_t_2; int __pyx_t_3; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__init__", 0); /* "borg/item.pyx":39 * * def __init__(self, data_dict=None, internal_dict=None, **kw): * self._dict = {} # <<<<<<<<<<<<<< * if internal_dict is None: * pass # nothing to do */ __pyx_t_1 = __Pyx_PyDict_NewPresized(0); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 39, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); if (__Pyx_PyObject_SetAttrStr(__pyx_v_self, __pyx_n_s_dict, __pyx_t_1) < 0) __PYX_ERR(0, 39, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "borg/item.pyx":40 * def __init__(self, data_dict=None, internal_dict=None, **kw): * self._dict = {} * if internal_dict is None: # <<<<<<<<<<<<<< * pass # nothing to do * elif isinstance(internal_dict, dict): */ __pyx_t_2 = (__pyx_v_internal_dict == Py_None); __pyx_t_3 = (__pyx_t_2 != 0); if (__pyx_t_3) { goto __pyx_L3; } /* "borg/item.pyx":42 * if internal_dict is None: * pass # nothing to do * elif isinstance(internal_dict, dict): # <<<<<<<<<<<<<< * self.update_internal(internal_dict) * else: */ __pyx_t_3 = PyDict_Check(__pyx_v_internal_dict); __pyx_t_2 = (__pyx_t_3 != 0); if (likely(__pyx_t_2)) { /* "borg/item.pyx":43 * pass # nothing to do * elif isinstance(internal_dict, dict): * self.update_internal(internal_dict) # <<<<<<<<<<<<<< * else: * raise TypeError("internal_dict must be a dict") */ __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_update_internal); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 43, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_5 = NULL; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_4))) { __pyx_t_5 = PyMethod_GET_SELF(__pyx_t_4); if (likely(__pyx_t_5)) { PyObject* function = 
PyMethod_GET_FUNCTION(__pyx_t_4); __Pyx_INCREF(__pyx_t_5); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_4, function); } } __pyx_t_1 = (__pyx_t_5) ? __Pyx_PyObject_Call2Args(__pyx_t_4, __pyx_t_5, __pyx_v_internal_dict) : __Pyx_PyObject_CallOneArg(__pyx_t_4, __pyx_v_internal_dict); __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 43, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "borg/item.pyx":42 * if internal_dict is None: * pass # nothing to do * elif isinstance(internal_dict, dict): # <<<<<<<<<<<<<< * self.update_internal(internal_dict) * else: */ goto __pyx_L3; } /* "borg/item.pyx":45 * self.update_internal(internal_dict) * else: * raise TypeError("internal_dict must be a dict") # <<<<<<<<<<<<<< * if data_dict is None: * data = kw */ /*else*/ { __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple_, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 45, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_Raise(__pyx_t_1, 0, 0, 0); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __PYX_ERR(0, 45, __pyx_L1_error) } __pyx_L3:; /* "borg/item.pyx":46 * else: * raise TypeError("internal_dict must be a dict") * if data_dict is None: # <<<<<<<<<<<<<< * data = kw * elif isinstance(data_dict, dict): */ __pyx_t_2 = (__pyx_v_data_dict == Py_None); __pyx_t_3 = (__pyx_t_2 != 0); if (__pyx_t_3) { /* "borg/item.pyx":47 * raise TypeError("internal_dict must be a dict") * if data_dict is None: * data = kw # <<<<<<<<<<<<<< * elif isinstance(data_dict, dict): * data = data_dict */ __Pyx_INCREF(__pyx_v_kw); __pyx_v_data = __pyx_v_kw; /* "borg/item.pyx":46 * else: * raise TypeError("internal_dict must be a dict") * if data_dict is None: # <<<<<<<<<<<<<< * data = kw * elif isinstance(data_dict, dict): */ goto __pyx_L4; } /* "borg/item.pyx":48 * if data_dict is None: * data = kw * elif isinstance(data_dict, dict): # <<<<<<<<<<<<<< * data = data_dict * else: */ __pyx_t_3 = PyDict_Check(__pyx_v_data_dict); __pyx_t_2 = (__pyx_t_3 != 0); if (likely(__pyx_t_2)) { /* "borg/item.pyx":49 * data = kw * elif isinstance(data_dict, dict): * data = data_dict # <<<<<<<<<<<<<< * else: * raise TypeError("data_dict must be a dict") */ __Pyx_INCREF(__pyx_v_data_dict); __pyx_v_data = __pyx_v_data_dict; /* "borg/item.pyx":48 * if data_dict is None: * data = kw * elif isinstance(data_dict, dict): # <<<<<<<<<<<<<< * data = data_dict * else: */ goto __pyx_L4; } /* "borg/item.pyx":51 * data = data_dict * else: * raise TypeError("data_dict must be a dict") # <<<<<<<<<<<<<< * if data: * self.update(data) */ /*else*/ { __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__2, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 51, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_Raise(__pyx_t_1, 0, 0, 0); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __PYX_ERR(0, 51, __pyx_L1_error) } __pyx_L4:; /* "borg/item.pyx":52 * else: * raise TypeError("data_dict must be a dict") * if data: # <<<<<<<<<<<<<< * self.update(data) * */ __pyx_t_2 = __Pyx_PyObject_IsTrue(__pyx_v_data); if (unlikely(__pyx_t_2 < 0)) __PYX_ERR(0, 52, __pyx_L1_error) if (__pyx_t_2) { /* "borg/item.pyx":53 * raise TypeError("data_dict must be a dict") * if data: * self.update(data) # <<<<<<<<<<<<<< * * def update(self, d): */ __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_update); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 53, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_5 = NULL; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_4))) { 
__pyx_t_5 = PyMethod_GET_SELF(__pyx_t_4); if (likely(__pyx_t_5)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_4); __Pyx_INCREF(__pyx_t_5); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_4, function); } } __pyx_t_1 = (__pyx_t_5) ? __Pyx_PyObject_Call2Args(__pyx_t_4, __pyx_t_5, __pyx_v_data) : __Pyx_PyObject_CallOneArg(__pyx_t_4, __pyx_v_data); __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 53, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "borg/item.pyx":52 * else: * raise TypeError("data_dict must be a dict") * if data: # <<<<<<<<<<<<<< * self.update(data) * */ } /* "borg/item.pyx":38 * __slots__ = ("_dict", ) # avoid setting attributes not supported by properties * * def __init__(self, data_dict=None, internal_dict=None, **kw): # <<<<<<<<<<<<<< * self._dict = {} * if internal_dict is None: */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("borg.item.PropDict.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_data); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "borg/item.pyx":55 * self.update(data) * * def update(self, d): # <<<<<<<<<<<<<< * for k, v in d.items(): * if isinstance(k, bytes): */ /* Python wrapper */ static PyObject *__pyx_pw_4borg_4item_8PropDict_3update(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static PyMethodDef __pyx_mdef_4borg_4item_8PropDict_3update = {"update", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_4borg_4item_8PropDict_3update, METH_VARARGS|METH_KEYWORDS, 0}; static PyObject *__pyx_pw_4borg_4item_8PropDict_3update(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyObject *__pyx_v_self = 0; PyObject *__pyx_v_d = 0; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("update (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_self,&__pyx_n_s_d,0}; PyObject* values[2] = {0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); CYTHON_FALLTHROUGH; case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_self)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_d)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("update", 1, 2, 2, 1); __PYX_ERR(0, 55, __pyx_L3_error) } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "update") < 0)) __PYX_ERR(0, 55, __pyx_L3_error) } } else if (PyTuple_GET_SIZE(__pyx_args) != 2) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); } __pyx_v_self = values[0]; __pyx_v_d = values[1]; } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("update", 1, 2, 2, PyTuple_GET_SIZE(__pyx_args)); 
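/* Summary of the two update paths implemented below (paraphrasing the embedded .pyx source):
 *   - PropDict.update(d): decodes bytes keys to str and assigns each value via
 *     setattr(self, self._check_key(k), v), so keys are validated and values pass
 *     through the generated property setters.
 *   - PropDict.update_internal(d): decodes bytes keys to str and writes the value
 *     directly into self._dict[k], bypassing key validation and the property setters.
 */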
__PYX_ERR(0, 55, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("borg.item.PropDict.update", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_4borg_4item_8PropDict_2update(__pyx_self, __pyx_v_self, __pyx_v_d); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4borg_4item_8PropDict_2update(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_self, PyObject *__pyx_v_d) { PyObject *__pyx_v_k = NULL; PyObject *__pyx_v_v = NULL; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; Py_ssize_t __pyx_t_2; Py_ssize_t __pyx_t_3; int __pyx_t_4; PyObject *__pyx_t_5 = NULL; PyObject *__pyx_t_6 = NULL; int __pyx_t_7; int __pyx_t_8; int __pyx_t_9; PyObject *__pyx_t_10 = NULL; int __pyx_t_11; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("update", 0); /* "borg/item.pyx":56 * * def update(self, d): * for k, v in d.items(): # <<<<<<<<<<<<<< * if isinstance(k, bytes): * k = k.decode() */ __pyx_t_2 = 0; if (unlikely(__pyx_v_d == Py_None)) { PyErr_Format(PyExc_AttributeError, "'NoneType' object has no attribute '%.30s'", "items"); __PYX_ERR(0, 56, __pyx_L1_error) } __pyx_t_5 = __Pyx_dict_iterator(__pyx_v_d, 0, __pyx_n_s_items, (&__pyx_t_3), (&__pyx_t_4)); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 56, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = __pyx_t_5; __pyx_t_5 = 0; while (1) { __pyx_t_7 = __Pyx_dict_iter_next(__pyx_t_1, __pyx_t_3, &__pyx_t_2, &__pyx_t_5, &__pyx_t_6, NULL, __pyx_t_4); if (unlikely(__pyx_t_7 == 0)) break; if (unlikely(__pyx_t_7 == -1)) __PYX_ERR(0, 56, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_GOTREF(__pyx_t_6); __Pyx_XDECREF_SET(__pyx_v_k, __pyx_t_5); __pyx_t_5 = 0; __Pyx_XDECREF_SET(__pyx_v_v, __pyx_t_6); __pyx_t_6 = 0; /* "borg/item.pyx":57 * def update(self, d): * for k, v in d.items(): * if isinstance(k, bytes): # <<<<<<<<<<<<<< * k = k.decode() * setattr(self, self._check_key(k), v) */ __pyx_t_8 = PyBytes_Check(__pyx_v_k); __pyx_t_9 = (__pyx_t_8 != 0); if (__pyx_t_9) { /* "borg/item.pyx":58 * for k, v in d.items(): * if isinstance(k, bytes): * k = k.decode() # <<<<<<<<<<<<<< * setattr(self, self._check_key(k), v) * */ __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_k, __pyx_n_s_decode); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 58, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_10 = NULL; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_5))) { __pyx_t_10 = PyMethod_GET_SELF(__pyx_t_5); if (likely(__pyx_t_10)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_5); __Pyx_INCREF(__pyx_t_10); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_5, function); } } __pyx_t_6 = (__pyx_t_10) ? 
__Pyx_PyObject_CallOneArg(__pyx_t_5, __pyx_t_10) : __Pyx_PyObject_CallNoArg(__pyx_t_5); __Pyx_XDECREF(__pyx_t_10); __pyx_t_10 = 0; if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 58, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF_SET(__pyx_v_k, __pyx_t_6); __pyx_t_6 = 0; /* "borg/item.pyx":57 * def update(self, d): * for k, v in d.items(): * if isinstance(k, bytes): # <<<<<<<<<<<<<< * k = k.decode() * setattr(self, self._check_key(k), v) */ } /* "borg/item.pyx":59 * if isinstance(k, bytes): * k = k.decode() * setattr(self, self._check_key(k), v) # <<<<<<<<<<<<<< * * def update_internal(self, d): */ __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_check_key); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 59, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_10 = NULL; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_5))) { __pyx_t_10 = PyMethod_GET_SELF(__pyx_t_5); if (likely(__pyx_t_10)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_5); __Pyx_INCREF(__pyx_t_10); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_5, function); } } __pyx_t_6 = (__pyx_t_10) ? __Pyx_PyObject_Call2Args(__pyx_t_5, __pyx_t_10, __pyx_v_k) : __Pyx_PyObject_CallOneArg(__pyx_t_5, __pyx_v_k); __Pyx_XDECREF(__pyx_t_10); __pyx_t_10 = 0; if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 59, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_11 = PyObject_SetAttr(__pyx_v_self, __pyx_t_6, __pyx_v_v); if (unlikely(__pyx_t_11 == ((int)-1))) __PYX_ERR(0, 59, __pyx_L1_error) __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; } __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "borg/item.pyx":55 * self.update(data) * * def update(self, d): # <<<<<<<<<<<<<< * for k, v in d.items(): * if isinstance(k, bytes): */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_5); __Pyx_XDECREF(__pyx_t_6); __Pyx_XDECREF(__pyx_t_10); __Pyx_AddTraceback("borg.item.PropDict.update", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_k); __Pyx_XDECREF(__pyx_v_v); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "borg/item.pyx":61 * setattr(self, self._check_key(k), v) * * def update_internal(self, d): # <<<<<<<<<<<<<< * for k, v in d.items(): * if isinstance(k, bytes): */ /* Python wrapper */ static PyObject *__pyx_pw_4borg_4item_8PropDict_5update_internal(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static PyMethodDef __pyx_mdef_4borg_4item_8PropDict_5update_internal = {"update_internal", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_4borg_4item_8PropDict_5update_internal, METH_VARARGS|METH_KEYWORDS, 0}; static PyObject *__pyx_pw_4borg_4item_8PropDict_5update_internal(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyObject *__pyx_v_self = 0; PyObject *__pyx_v_d = 0; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("update_internal (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_self,&__pyx_n_s_d,0}; PyObject* values[2] = {0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); CYTHON_FALLTHROUGH; case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto 
__pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_self)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_d)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("update_internal", 1, 2, 2, 1); __PYX_ERR(0, 61, __pyx_L3_error) } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "update_internal") < 0)) __PYX_ERR(0, 61, __pyx_L3_error) } } else if (PyTuple_GET_SIZE(__pyx_args) != 2) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); } __pyx_v_self = values[0]; __pyx_v_d = values[1]; } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("update_internal", 1, 2, 2, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 61, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("borg.item.PropDict.update_internal", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_4borg_4item_8PropDict_4update_internal(__pyx_self, __pyx_v_self, __pyx_v_d); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4borg_4item_8PropDict_4update_internal(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_self, PyObject *__pyx_v_d) { PyObject *__pyx_v_k = NULL; PyObject *__pyx_v_v = NULL; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; Py_ssize_t __pyx_t_2; Py_ssize_t __pyx_t_3; int __pyx_t_4; PyObject *__pyx_t_5 = NULL; PyObject *__pyx_t_6 = NULL; int __pyx_t_7; int __pyx_t_8; int __pyx_t_9; PyObject *__pyx_t_10 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("update_internal", 0); /* "borg/item.pyx":62 * * def update_internal(self, d): * for k, v in d.items(): # <<<<<<<<<<<<<< * if isinstance(k, bytes): * k = k.decode() */ __pyx_t_2 = 0; if (unlikely(__pyx_v_d == Py_None)) { PyErr_Format(PyExc_AttributeError, "'NoneType' object has no attribute '%.30s'", "items"); __PYX_ERR(0, 62, __pyx_L1_error) } __pyx_t_5 = __Pyx_dict_iterator(__pyx_v_d, 0, __pyx_n_s_items, (&__pyx_t_3), (&__pyx_t_4)); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 62, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = __pyx_t_5; __pyx_t_5 = 0; while (1) { __pyx_t_7 = __Pyx_dict_iter_next(__pyx_t_1, __pyx_t_3, &__pyx_t_2, &__pyx_t_5, &__pyx_t_6, NULL, __pyx_t_4); if (unlikely(__pyx_t_7 == 0)) break; if (unlikely(__pyx_t_7 == -1)) __PYX_ERR(0, 62, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_GOTREF(__pyx_t_6); __Pyx_XDECREF_SET(__pyx_v_k, __pyx_t_5); __pyx_t_5 = 0; __Pyx_XDECREF_SET(__pyx_v_v, __pyx_t_6); __pyx_t_6 = 0; /* "borg/item.pyx":63 * def update_internal(self, d): * for k, v in d.items(): * if isinstance(k, bytes): # <<<<<<<<<<<<<< * k = k.decode() * self._dict[k] = v */ __pyx_t_8 = PyBytes_Check(__pyx_v_k); __pyx_t_9 = (__pyx_t_8 != 0); if (__pyx_t_9) { /* "borg/item.pyx":64 * for k, v in d.items(): * if isinstance(k, bytes): * k = k.decode() # <<<<<<<<<<<<<< * self._dict[k] = v * */ __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_k, __pyx_n_s_decode); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 64, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_10 = NULL; if (CYTHON_UNPACK_METHODS && 
likely(PyMethod_Check(__pyx_t_5))) { __pyx_t_10 = PyMethod_GET_SELF(__pyx_t_5); if (likely(__pyx_t_10)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_5); __Pyx_INCREF(__pyx_t_10); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_5, function); } } __pyx_t_6 = (__pyx_t_10) ? __Pyx_PyObject_CallOneArg(__pyx_t_5, __pyx_t_10) : __Pyx_PyObject_CallNoArg(__pyx_t_5); __Pyx_XDECREF(__pyx_t_10); __pyx_t_10 = 0; if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 64, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF_SET(__pyx_v_k, __pyx_t_6); __pyx_t_6 = 0; /* "borg/item.pyx":63 * def update_internal(self, d): * for k, v in d.items(): * if isinstance(k, bytes): # <<<<<<<<<<<<<< * k = k.decode() * self._dict[k] = v */ } /* "borg/item.pyx":65 * if isinstance(k, bytes): * k = k.decode() * self._dict[k] = v # <<<<<<<<<<<<<< * * def __eq__(self, other): */ __pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_dict); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 65, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); if (unlikely(PyObject_SetItem(__pyx_t_6, __pyx_v_k, __pyx_v_v) < 0)) __PYX_ERR(0, 65, __pyx_L1_error) __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; } __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "borg/item.pyx":61 * setattr(self, self._check_key(k), v) * * def update_internal(self, d): # <<<<<<<<<<<<<< * for k, v in d.items(): * if isinstance(k, bytes): */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_5); __Pyx_XDECREF(__pyx_t_6); __Pyx_XDECREF(__pyx_t_10); __Pyx_AddTraceback("borg.item.PropDict.update_internal", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_k); __Pyx_XDECREF(__pyx_v_v); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "borg/item.pyx":67 * self._dict[k] = v * * def __eq__(self, other): # <<<<<<<<<<<<<< * return self.as_dict() == other.as_dict() * */ /* Python wrapper */ static PyObject *__pyx_pw_4borg_4item_8PropDict_7__eq__(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static PyMethodDef __pyx_mdef_4borg_4item_8PropDict_7__eq__ = {"__eq__", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_4borg_4item_8PropDict_7__eq__, METH_VARARGS|METH_KEYWORDS, 0}; static PyObject *__pyx_pw_4borg_4item_8PropDict_7__eq__(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyObject *__pyx_v_self = 0; PyObject *__pyx_v_other = 0; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__eq__ (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_self,&__pyx_n_s_other,0}; PyObject* values[2] = {0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); CYTHON_FALLTHROUGH; case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_self)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_other)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("__eq__", 1, 2, 2, 1); __PYX_ERR(0, 67, __pyx_L3_error) } } if 
(unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__eq__") < 0)) __PYX_ERR(0, 67, __pyx_L3_error) } } else if (PyTuple_GET_SIZE(__pyx_args) != 2) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); } __pyx_v_self = values[0]; __pyx_v_other = values[1]; } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("__eq__", 1, 2, 2, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 67, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("borg.item.PropDict.__eq__", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_4borg_4item_8PropDict_6__eq__(__pyx_self, __pyx_v_self, __pyx_v_other); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4borg_4item_8PropDict_6__eq__(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_self, PyObject *__pyx_v_other) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__eq__", 0); /* "borg/item.pyx":68 * * def __eq__(self, other): * return self.as_dict() == other.as_dict() # <<<<<<<<<<<<<< * * def __repr__(self): */ __Pyx_XDECREF(__pyx_r); __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_as_dict); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 68, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = NULL; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_2))) { __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_2); if (likely(__pyx_t_3)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2); __Pyx_INCREF(__pyx_t_3); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_2, function); } } __pyx_t_1 = (__pyx_t_3) ? __Pyx_PyObject_CallOneArg(__pyx_t_2, __pyx_t_3) : __Pyx_PyObject_CallNoArg(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 68, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_v_other, __pyx_n_s_as_dict); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 68, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = NULL; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_3))) { __pyx_t_4 = PyMethod_GET_SELF(__pyx_t_3); if (likely(__pyx_t_4)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_3); __Pyx_INCREF(__pyx_t_4); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_3, function); } } __pyx_t_2 = (__pyx_t_4) ? 
__Pyx_PyObject_CallOneArg(__pyx_t_3, __pyx_t_4) : __Pyx_PyObject_CallNoArg(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 68, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = PyObject_RichCompare(__pyx_t_1, __pyx_t_2, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 68, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_r = __pyx_t_3; __pyx_t_3 = 0; goto __pyx_L0; /* "borg/item.pyx":67 * self._dict[k] = v * * def __eq__(self, other): # <<<<<<<<<<<<<< * return self.as_dict() == other.as_dict() * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_AddTraceback("borg.item.PropDict.__eq__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "borg/item.pyx":70 * return self.as_dict() == other.as_dict() * * def __repr__(self): # <<<<<<<<<<<<<< * return '%s(internal_dict=%r)' % (self.__class__.__name__, self._dict) * */ /* Python wrapper */ static PyObject *__pyx_pw_4borg_4item_8PropDict_9__repr__(PyObject *__pyx_self, PyObject *__pyx_v_self); /*proto*/ static PyMethodDef __pyx_mdef_4borg_4item_8PropDict_9__repr__ = {"__repr__", (PyCFunction)__pyx_pw_4borg_4item_8PropDict_9__repr__, METH_O, 0}; static PyObject *__pyx_pw_4borg_4item_8PropDict_9__repr__(PyObject *__pyx_self, PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__repr__ (wrapper)", 0); __pyx_r = __pyx_pf_4borg_4item_8PropDict_8__repr__(__pyx_self, ((PyObject *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4borg_4item_8PropDict_8__repr__(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; Py_ssize_t __pyx_t_2; Py_UCS4 __pyx_t_3; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__repr__", 0); /* "borg/item.pyx":71 * * def __repr__(self): * return '%s(internal_dict=%r)' % (self.__class__.__name__, self._dict) # <<<<<<<<<<<<<< * * def as_dict(self): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = PyTuple_New(4); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 71, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = 0; __pyx_t_3 = 127; __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_class); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 71, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_n_s_name); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 71, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = __Pyx_PyObject_FormatSimpleAndDecref(PyObject_Unicode(__pyx_t_5), __pyx_empty_unicode); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 71, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_3 = (__Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_4) > __pyx_t_3) ? 
__Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_4) : __pyx_t_3; __pyx_t_2 += __Pyx_PyUnicode_GET_LENGTH(__pyx_t_4); __Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_t_4); __pyx_t_4 = 0; __Pyx_INCREF(__pyx_kp_u_internal_dict_2); __pyx_t_2 += 15; __Pyx_GIVEREF(__pyx_kp_u_internal_dict_2); PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_kp_u_internal_dict_2); __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_dict); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 71, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_5 = __Pyx_PyObject_FormatSimpleAndDecref(PyObject_Repr(__pyx_t_4), __pyx_empty_unicode); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 71, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_3 = (__Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_5) > __pyx_t_3) ? __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_5) : __pyx_t_3; __pyx_t_2 += __Pyx_PyUnicode_GET_LENGTH(__pyx_t_5); __Pyx_GIVEREF(__pyx_t_5); PyTuple_SET_ITEM(__pyx_t_1, 2, __pyx_t_5); __pyx_t_5 = 0; __Pyx_INCREF(__pyx_kp_u__3); __pyx_t_2 += 1; __Pyx_GIVEREF(__pyx_kp_u__3); PyTuple_SET_ITEM(__pyx_t_1, 3, __pyx_kp_u__3); __pyx_t_5 = __Pyx_PyUnicode_Join(__pyx_t_1, 4, __pyx_t_2, __pyx_t_3); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 71, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_r = __pyx_t_5; __pyx_t_5 = 0; goto __pyx_L0; /* "borg/item.pyx":70 * return self.as_dict() == other.as_dict() * * def __repr__(self): # <<<<<<<<<<<<<< * return '%s(internal_dict=%r)' % (self.__class__.__name__, self._dict) * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("borg.item.PropDict.__repr__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "borg/item.pyx":73 * return '%s(internal_dict=%r)' % (self.__class__.__name__, self._dict) * * def as_dict(self): # <<<<<<<<<<<<<< * """return the internal dictionary""" * return StableDict(self._dict) */ /* Python wrapper */ static PyObject *__pyx_pw_4borg_4item_8PropDict_11as_dict(PyObject *__pyx_self, PyObject *__pyx_v_self); /*proto*/ static char __pyx_doc_4borg_4item_8PropDict_10as_dict[] = "return the internal dictionary"; static PyMethodDef __pyx_mdef_4borg_4item_8PropDict_11as_dict = {"as_dict", (PyCFunction)__pyx_pw_4borg_4item_8PropDict_11as_dict, METH_O, __pyx_doc_4borg_4item_8PropDict_10as_dict}; static PyObject *__pyx_pw_4borg_4item_8PropDict_11as_dict(PyObject *__pyx_self, PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("as_dict (wrapper)", 0); __pyx_r = __pyx_pf_4borg_4item_8PropDict_10as_dict(__pyx_self, ((PyObject *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4borg_4item_8PropDict_10as_dict(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("as_dict", 0); /* "borg/item.pyx":75 * def as_dict(self): * """return the internal dictionary""" * return StableDict(self._dict) # <<<<<<<<<<<<<< * * def _check_key(self, key): */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_n_s_StableDict); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 75, 
__pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_dict); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 75, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = NULL; if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_2))) { __pyx_t_4 = PyMethod_GET_SELF(__pyx_t_2); if (likely(__pyx_t_4)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2); __Pyx_INCREF(__pyx_t_4); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_2, function); } } __pyx_t_1 = (__pyx_t_4) ? __Pyx_PyObject_Call2Args(__pyx_t_2, __pyx_t_4, __pyx_t_3) : __Pyx_PyObject_CallOneArg(__pyx_t_2, __pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 75, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "borg/item.pyx":73 * return '%s(internal_dict=%r)' % (self.__class__.__name__, self._dict) * * def as_dict(self): # <<<<<<<<<<<<<< * """return the internal dictionary""" * return StableDict(self._dict) */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_AddTraceback("borg.item.PropDict.as_dict", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "borg/item.pyx":77 * return StableDict(self._dict) * * def _check_key(self, key): # <<<<<<<<<<<<<< * """make sure key is of type str and known""" * if not isinstance(key, str): */ /* Python wrapper */ static PyObject *__pyx_pw_4borg_4item_8PropDict_13_check_key(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static char __pyx_doc_4borg_4item_8PropDict_12_check_key[] = "make sure key is of type str and known"; static PyMethodDef __pyx_mdef_4borg_4item_8PropDict_13_check_key = {"_check_key", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_4borg_4item_8PropDict_13_check_key, METH_VARARGS|METH_KEYWORDS, __pyx_doc_4borg_4item_8PropDict_12_check_key}; static PyObject *__pyx_pw_4borg_4item_8PropDict_13_check_key(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyObject *__pyx_v_self = 0; PyObject *__pyx_v_key = 0; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("_check_key (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_self,&__pyx_n_s_key,0}; PyObject* values[2] = {0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); CYTHON_FALLTHROUGH; case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_self)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_key)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("_check_key", 1, 2, 2, 1); __PYX_ERR(0, 77, __pyx_L3_error) } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "_check_key") < 0)) __PYX_ERR(0, 77, __pyx_L3_error) } } else if 
(PyTuple_GET_SIZE(__pyx_args) != 2) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); } __pyx_v_self = values[0]; __pyx_v_key = values[1]; } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("_check_key", 1, 2, 2, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 77, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("borg.item.PropDict._check_key", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_4borg_4item_8PropDict_12_check_key(__pyx_self, __pyx_v_self, __pyx_v_key); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4borg_4item_8PropDict_12_check_key(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_self, PyObject *__pyx_v_key) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("_check_key", 0); /* "borg/item.pyx":79 * def _check_key(self, key): * """make sure key is of type str and known""" * if not isinstance(key, str): # <<<<<<<<<<<<<< * raise TypeError("key must be str") * if key not in self.VALID_KEYS: */ __pyx_t_1 = PyUnicode_Check(__pyx_v_key); __pyx_t_2 = ((!(__pyx_t_1 != 0)) != 0); if (unlikely(__pyx_t_2)) { /* "borg/item.pyx":80 * """make sure key is of type str and known""" * if not isinstance(key, str): * raise TypeError("key must be str") # <<<<<<<<<<<<<< * if key not in self.VALID_KEYS: * raise ValueError("key '%s' is not a valid key" % key) */ __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__4, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 80, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(0, 80, __pyx_L1_error) /* "borg/item.pyx":79 * def _check_key(self, key): * """make sure key is of type str and known""" * if not isinstance(key, str): # <<<<<<<<<<<<<< * raise TypeError("key must be str") * if key not in self.VALID_KEYS: */ } /* "borg/item.pyx":81 * if not isinstance(key, str): * raise TypeError("key must be str") * if key not in self.VALID_KEYS: # <<<<<<<<<<<<<< * raise ValueError("key '%s' is not a valid key" % key) * return key */ __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_VALID_KEYS); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 81, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_2 = (__Pyx_PySequence_ContainsTF(__pyx_v_key, __pyx_t_3, Py_NE)); if (unlikely(__pyx_t_2 < 0)) __PYX_ERR(0, 81, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_1 = (__pyx_t_2 != 0); if (unlikely(__pyx_t_1)) { /* "borg/item.pyx":82 * raise TypeError("key must be str") * if key not in self.VALID_KEYS: * raise ValueError("key '%s' is not a valid key" % key) # <<<<<<<<<<<<<< * return key * */ __pyx_t_3 = __Pyx_PyUnicode_FormatSafe(__pyx_kp_u_key_s_is_not_a_valid_key, __pyx_v_key); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 82, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = __Pyx_PyObject_CallOneArg(__pyx_builtin_ValueError, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 82, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_Raise(__pyx_t_4, 0, 0, 0); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __PYX_ERR(0, 82, __pyx_L1_error) /* "borg/item.pyx":81 * if not isinstance(key, 
str): * raise TypeError("key must be str") * if key not in self.VALID_KEYS: # <<<<<<<<<<<<<< * raise ValueError("key '%s' is not a valid key" % key) * return key */ } /* "borg/item.pyx":83 * if key not in self.VALID_KEYS: * raise ValueError("key '%s' is not a valid key" % key) * return key # <<<<<<<<<<<<<< * * def __contains__(self, key): */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(__pyx_v_key); __pyx_r = __pyx_v_key; goto __pyx_L0; /* "borg/item.pyx":77 * return StableDict(self._dict) * * def _check_key(self, key): # <<<<<<<<<<<<<< * """make sure key is of type str and known""" * if not isinstance(key, str): */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_AddTraceback("borg.item.PropDict._check_key", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "borg/item.pyx":85 * return key * * def __contains__(self, key): # <<<<<<<<<<<<<< * """do we have this key?""" * return self._check_key(key) in self._dict */ /* Python wrapper */ static PyObject *__pyx_pw_4borg_4item_8PropDict_15__contains__(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static char __pyx_doc_4borg_4item_8PropDict_14__contains__[] = "do we have this key?"; static PyMethodDef __pyx_mdef_4borg_4item_8PropDict_15__contains__ = {"__contains__", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_4borg_4item_8PropDict_15__contains__, METH_VARARGS|METH_KEYWORDS, __pyx_doc_4borg_4item_8PropDict_14__contains__}; static PyObject *__pyx_pw_4borg_4item_8PropDict_15__contains__(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyObject *__pyx_v_self = 0; PyObject *__pyx_v_key = 0; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__contains__ (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_self,&__pyx_n_s_key,0}; PyObject* values[2] = {0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); CYTHON_FALLTHROUGH; case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_self)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_key)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("__contains__", 1, 2, 2, 1); __PYX_ERR(0, 85, __pyx_L3_error) } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__contains__") < 0)) __PYX_ERR(0, 85, __pyx_L3_error) } } else if (PyTuple_GET_SIZE(__pyx_args) != 2) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); } __pyx_v_self = values[0]; __pyx_v_key = values[1]; } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("__contains__", 1, 2, 2, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 85, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("borg.item.PropDict.__contains__", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; 
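/* Argument unpacking succeeded: the label below hands (self, key) to the
   implementation, which evaluates `self._check_key(key) in self._dict`
   (see the quoted item.pyx:87 source further down). */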
__pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_4borg_4item_8PropDict_14__contains__(__pyx_self, __pyx_v_self, __pyx_v_key); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4borg_4item_8PropDict_14__contains__(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_self, PyObject *__pyx_v_key) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; int __pyx_t_4; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__contains__", 0); /* "borg/item.pyx":87 * def __contains__(self, key): * """do we have this key?""" * return self._check_key(key) in self._dict # <<<<<<<<<<<<<< * * def get(self, key, default=None): */ __Pyx_XDECREF(__pyx_r); __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_check_key); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 87, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = NULL; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_2))) { __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_2); if (likely(__pyx_t_3)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2); __Pyx_INCREF(__pyx_t_3); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_2, function); } } __pyx_t_1 = (__pyx_t_3) ? __Pyx_PyObject_Call2Args(__pyx_t_2, __pyx_t_3, __pyx_v_key) : __Pyx_PyObject_CallOneArg(__pyx_t_2, __pyx_v_key); __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 87, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_dict); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 87, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_4 = (__Pyx_PySequence_ContainsTF(__pyx_t_1, __pyx_t_2, Py_EQ)); if (unlikely(__pyx_t_4 < 0)) __PYX_ERR(0, 87, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = __Pyx_PyBool_FromLong(__pyx_t_4); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 87, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "borg/item.pyx":85 * return key * * def __contains__(self, key): # <<<<<<<<<<<<<< * """do we have this key?""" * return self._check_key(key) in self._dict */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("borg.item.PropDict.__contains__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "borg/item.pyx":89 * return self._check_key(key) in self._dict * * def get(self, key, default=None): # <<<<<<<<<<<<<< * """get value for key, return default if key does not exist""" * return getattr(self, self._check_key(key), default) */ /* Python wrapper */ static PyObject *__pyx_pw_4borg_4item_8PropDict_17get(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static char __pyx_doc_4borg_4item_8PropDict_16get[] = "get value for key, return default if key does not exist"; static PyMethodDef __pyx_mdef_4borg_4item_8PropDict_17get = {"get", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_4borg_4item_8PropDict_17get, METH_VARARGS|METH_KEYWORDS, __pyx_doc_4borg_4item_8PropDict_16get}; static PyObject *__pyx_pw_4borg_4item_8PropDict_17get(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyObject *__pyx_v_self = 0; PyObject *__pyx_v_key = 0; PyObject 
*__pyx_v_default = 0; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("get (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_self,&__pyx_n_s_key,&__pyx_n_s_default,0}; PyObject* values[3] = {0,0,0}; values[2] = ((PyObject *)((PyObject *)Py_None)); if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); CYTHON_FALLTHROUGH; case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); CYTHON_FALLTHROUGH; case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_self)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_key)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("get", 0, 2, 3, 1); __PYX_ERR(0, 89, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 2: if (kw_args > 0) { PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_default); if (value) { values[2] = value; kw_args--; } } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "get") < 0)) __PYX_ERR(0, 89, __pyx_L3_error) } } else { switch (PyTuple_GET_SIZE(__pyx_args)) { case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); CYTHON_FALLTHROUGH; case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[0] = PyTuple_GET_ITEM(__pyx_args, 0); break; default: goto __pyx_L5_argtuple_error; } } __pyx_v_self = values[0]; __pyx_v_key = values[1]; __pyx_v_default = values[2]; } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("get", 0, 2, 3, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 89, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("borg.item.PropDict.get", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_4borg_4item_8PropDict_16get(__pyx_self, __pyx_v_self, __pyx_v_key, __pyx_v_default); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4borg_4item_8PropDict_16get(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_default) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("get", 0); /* "borg/item.pyx":91 * def get(self, key, default=None): * """get value for key, return default if key does not exist""" * return getattr(self, self._check_key(key), default) # <<<<<<<<<<<<<< * * @staticmethod */ __Pyx_XDECREF(__pyx_r); __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_check_key); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 91, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = NULL; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_2))) { __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_2); if (likely(__pyx_t_3)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2); __Pyx_INCREF(__pyx_t_3); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_2, function); } } __pyx_t_1 = (__pyx_t_3) ? 
__Pyx_PyObject_Call2Args(__pyx_t_2, __pyx_t_3, __pyx_v_key) : __Pyx_PyObject_CallOneArg(__pyx_t_2, __pyx_v_key); __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 91, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = __Pyx_GetAttr3(__pyx_v_self, __pyx_t_1, __pyx_v_default); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 91, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "borg/item.pyx":89 * return self._check_key(key) in self._dict * * def get(self, key, default=None): # <<<<<<<<<<<<<< * """get value for key, return default if key does not exist""" * return getattr(self, self._check_key(key), default) */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("borg.item.PropDict.get", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "borg/item.pyx":94 * * @staticmethod * def _make_property(key, value_type, value_type_name=None, encode=None, decode=None): # <<<<<<<<<<<<<< * """return a property that deals with self._dict[key]""" * assert isinstance(key, str) */ /* Python wrapper */ static PyObject *__pyx_pw_4borg_4item_8PropDict_19_make_property(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static char __pyx_doc_4borg_4item_8PropDict_18_make_property[] = "return a property that deals with self._dict[key]"; static PyMethodDef __pyx_mdef_4borg_4item_8PropDict_19_make_property = {"_make_property", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_4borg_4item_8PropDict_19_make_property, METH_VARARGS|METH_KEYWORDS, __pyx_doc_4borg_4item_8PropDict_18_make_property}; static PyObject *__pyx_pw_4borg_4item_8PropDict_19_make_property(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyObject *__pyx_v_key = 0; PyObject *__pyx_v_value_type = 0; PyObject *__pyx_v_value_type_name = 0; PyObject *__pyx_v_encode = 0; PyObject *__pyx_v_decode = 0; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("_make_property (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_key,&__pyx_n_s_value_type,&__pyx_n_s_value_type_name,&__pyx_n_s_encode,&__pyx_n_s_decode,0}; PyObject* values[5] = {0,0,0,0,0}; values[2] = ((PyObject *)((PyObject *)Py_None)); values[3] = ((PyObject *)((PyObject *)Py_None)); values[4] = ((PyObject *)((PyObject *)Py_None)); if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); CYTHON_FALLTHROUGH; case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); CYTHON_FALLTHROUGH; case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); CYTHON_FALLTHROUGH; case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); CYTHON_FALLTHROUGH; case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_key)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_value_type)) != 0)) kw_args--; else { 
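/* Required argument `value_type` was not supplied: _make_property accepts
   between 2 and 5 arguments (key and value_type are mandatory; the optional
   value_type_name/encode/decode slots were pre-set to None above). */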
__Pyx_RaiseArgtupleInvalid("_make_property", 0, 2, 5, 1); __PYX_ERR(0, 94, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 2: if (kw_args > 0) { PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_value_type_name); if (value) { values[2] = value; kw_args--; } } CYTHON_FALLTHROUGH; case 3: if (kw_args > 0) { PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_encode); if (value) { values[3] = value; kw_args--; } } CYTHON_FALLTHROUGH; case 4: if (kw_args > 0) { PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_decode); if (value) { values[4] = value; kw_args--; } } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "_make_property") < 0)) __PYX_ERR(0, 94, __pyx_L3_error) } } else { switch (PyTuple_GET_SIZE(__pyx_args)) { case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); CYTHON_FALLTHROUGH; case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); CYTHON_FALLTHROUGH; case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); CYTHON_FALLTHROUGH; case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[0] = PyTuple_GET_ITEM(__pyx_args, 0); break; default: goto __pyx_L5_argtuple_error; } } __pyx_v_key = values[0]; __pyx_v_value_type = values[1]; __pyx_v_value_type_name = values[2]; __pyx_v_encode = values[3]; __pyx_v_decode = values[4]; } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("_make_property", 0, 2, 5, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 94, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("borg.item.PropDict._make_property", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_4borg_4item_8PropDict_18_make_property(__pyx_self, __pyx_v_key, __pyx_v_value_type, __pyx_v_value_type_name, __pyx_v_encode, __pyx_v_decode); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "borg/item.pyx":103 * attr_error_msg = "attribute %s not found" % key * * def _get(self): # <<<<<<<<<<<<<< * try: * value = self._dict[key] */ /* Python wrapper */ static PyObject *__pyx_pw_4borg_4item_8PropDict_14_make_property_1_get(PyObject *__pyx_self, PyObject *__pyx_v_self); /*proto*/ static PyMethodDef __pyx_mdef_4borg_4item_8PropDict_14_make_property_1_get = {"_get", (PyCFunction)__pyx_pw_4borg_4item_8PropDict_14_make_property_1_get, METH_O, 0}; static PyObject *__pyx_pw_4borg_4item_8PropDict_14_make_property_1_get(PyObject *__pyx_self, PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("_get (wrapper)", 0); __pyx_r = __pyx_pf_4borg_4item_8PropDict_14_make_property__get(__pyx_self, ((PyObject *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4borg_4item_8PropDict_14_make_property__get(PyObject *__pyx_self, PyObject *__pyx_v_self) { struct __pyx_obj_4borg_4item___pyx_scope_struct___make_property *__pyx_cur_scope; struct __pyx_obj_4borg_4item___pyx_scope_struct___make_property *__pyx_outer_scope; PyObject *__pyx_v_value = NULL; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; int __pyx_t_6; PyObject *__pyx_t_7 = NULL; PyObject *__pyx_t_8 = NULL; int __pyx_t_9; int __pyx_t_10; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("_get", 
0); __pyx_outer_scope = (struct __pyx_obj_4borg_4item___pyx_scope_struct___make_property *) __Pyx_CyFunction_GetClosure(__pyx_self); __pyx_cur_scope = __pyx_outer_scope; /* "borg/item.pyx":104 * * def _get(self): * try: # <<<<<<<<<<<<<< * value = self._dict[key] * except KeyError: */ { __Pyx_PyThreadState_declare __Pyx_PyThreadState_assign __Pyx_ExceptionSave(&__pyx_t_1, &__pyx_t_2, &__pyx_t_3); __Pyx_XGOTREF(__pyx_t_1); __Pyx_XGOTREF(__pyx_t_2); __Pyx_XGOTREF(__pyx_t_3); /*try:*/ { /* "borg/item.pyx":105 * def _get(self): * try: * value = self._dict[key] # <<<<<<<<<<<<<< * except KeyError: * raise AttributeError(attr_error_msg) from None */ __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_dict); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 105, __pyx_L3_error) __Pyx_GOTREF(__pyx_t_4); if (unlikely(!__pyx_cur_scope->__pyx_v_key)) { __Pyx_RaiseClosureNameError("key"); __PYX_ERR(0, 105, __pyx_L3_error) } __pyx_t_5 = __Pyx_PyObject_GetItem(__pyx_t_4, __pyx_cur_scope->__pyx_v_key); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 105, __pyx_L3_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_v_value = __pyx_t_5; __pyx_t_5 = 0; /* "borg/item.pyx":104 * * def _get(self): * try: # <<<<<<<<<<<<<< * value = self._dict[key] * except KeyError: */ } __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; goto __pyx_L8_try_end; __pyx_L3_error:; __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; /* "borg/item.pyx":106 * try: * value = self._dict[key] * except KeyError: # <<<<<<<<<<<<<< * raise AttributeError(attr_error_msg) from None * if decode is not None: */ __pyx_t_6 = __Pyx_PyErr_ExceptionMatches(__pyx_builtin_KeyError); if (__pyx_t_6) { __Pyx_AddTraceback("borg.item.PropDict._make_property._get", __pyx_clineno, __pyx_lineno, __pyx_filename); if (__Pyx_GetException(&__pyx_t_5, &__pyx_t_4, &__pyx_t_7) < 0) __PYX_ERR(0, 106, __pyx_L5_except_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_GOTREF(__pyx_t_4); __Pyx_GOTREF(__pyx_t_7); /* "borg/item.pyx":107 * value = self._dict[key] * except KeyError: * raise AttributeError(attr_error_msg) from None # <<<<<<<<<<<<<< * if decode is not None: * value = decode(value) */ if (unlikely(!__pyx_cur_scope->__pyx_v_attr_error_msg)) { __Pyx_RaiseClosureNameError("attr_error_msg"); __PYX_ERR(0, 107, __pyx_L5_except_error) } __pyx_t_8 = __Pyx_PyObject_CallOneArg(__pyx_builtin_AttributeError, __pyx_cur_scope->__pyx_v_attr_error_msg); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 107, __pyx_L5_except_error) __Pyx_GOTREF(__pyx_t_8); __Pyx_Raise(__pyx_t_8, 0, 0, Py_None); __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; __PYX_ERR(0, 107, __pyx_L5_except_error) } goto __pyx_L5_except_error; __pyx_L5_except_error:; /* "borg/item.pyx":104 * * def _get(self): * try: # <<<<<<<<<<<<<< * value = self._dict[key] * except KeyError: */ __Pyx_XGIVEREF(__pyx_t_1); __Pyx_XGIVEREF(__pyx_t_2); __Pyx_XGIVEREF(__pyx_t_3); __Pyx_ExceptionReset(__pyx_t_1, __pyx_t_2, __pyx_t_3); goto __pyx_L1_error; __pyx_L8_try_end:; } /* "borg/item.pyx":108 * except KeyError: * raise AttributeError(attr_error_msg) from None * if decode is not None: # <<<<<<<<<<<<<< * value = decode(value) * return value */ if (unlikely(!__pyx_cur_scope->__pyx_v_decode)) { __Pyx_RaiseClosureNameError("decode"); __PYX_ERR(0, 108, __pyx_L1_error) } __pyx_t_9 = (__pyx_cur_scope->__pyx_v_decode != Py_None); __pyx_t_10 = (__pyx_t_9 != 0); if (__pyx_t_10) { /* "borg/item.pyx":109 * raise AttributeError(attr_error_msg) from None * if 
decode is not None: * value = decode(value) # <<<<<<<<<<<<<< * return value * */ if (unlikely(!__pyx_cur_scope->__pyx_v_decode)) { __Pyx_RaiseClosureNameError("decode"); __PYX_ERR(0, 109, __pyx_L1_error) } __Pyx_INCREF(__pyx_cur_scope->__pyx_v_decode); __pyx_t_4 = __pyx_cur_scope->__pyx_v_decode; __pyx_t_5 = NULL; if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_4))) { __pyx_t_5 = PyMethod_GET_SELF(__pyx_t_4); if (likely(__pyx_t_5)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_4); __Pyx_INCREF(__pyx_t_5); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_4, function); } } __pyx_t_7 = (__pyx_t_5) ? __Pyx_PyObject_Call2Args(__pyx_t_4, __pyx_t_5, __pyx_v_value) : __Pyx_PyObject_CallOneArg(__pyx_t_4, __pyx_v_value); __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 109, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF_SET(__pyx_v_value, __pyx_t_7); __pyx_t_7 = 0; /* "borg/item.pyx":108 * except KeyError: * raise AttributeError(attr_error_msg) from None * if decode is not None: # <<<<<<<<<<<<<< * value = decode(value) * return value */ } /* "borg/item.pyx":110 * if decode is not None: * value = decode(value) * return value # <<<<<<<<<<<<<< * * def _set(self, value): */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(__pyx_v_value); __pyx_r = __pyx_v_value; goto __pyx_L0; /* "borg/item.pyx":103 * attr_error_msg = "attribute %s not found" % key * * def _get(self): # <<<<<<<<<<<<<< * try: * value = self._dict[key] */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_XDECREF(__pyx_t_7); __Pyx_XDECREF(__pyx_t_8); __Pyx_AddTraceback("borg.item.PropDict._make_property._get", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_value); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "borg/item.pyx":112 * return value * * def _set(self, value): # <<<<<<<<<<<<<< * if not isinstance(value, value_type): * raise TypeError(type_error_msg) */ /* Python wrapper */ static PyObject *__pyx_pw_4borg_4item_8PropDict_14_make_property_3_set(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static PyMethodDef __pyx_mdef_4borg_4item_8PropDict_14_make_property_3_set = {"_set", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_4borg_4item_8PropDict_14_make_property_3_set, METH_VARARGS|METH_KEYWORDS, 0}; static PyObject *__pyx_pw_4borg_4item_8PropDict_14_make_property_3_set(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyObject *__pyx_v_self = 0; PyObject *__pyx_v_value = 0; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("_set (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_self,&__pyx_n_s_value,0}; PyObject* values[2] = {0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); CYTHON_FALLTHROUGH; case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_self)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, 
__pyx_n_s_value)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("_set", 1, 2, 2, 1); __PYX_ERR(0, 112, __pyx_L3_error) } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "_set") < 0)) __PYX_ERR(0, 112, __pyx_L3_error) } } else if (PyTuple_GET_SIZE(__pyx_args) != 2) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); } __pyx_v_self = values[0]; __pyx_v_value = values[1]; } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("_set", 1, 2, 2, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 112, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("borg.item.PropDict._make_property._set", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_4borg_4item_8PropDict_14_make_property_2_set(__pyx_self, __pyx_v_self, __pyx_v_value); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4borg_4item_8PropDict_14_make_property_2_set(PyObject *__pyx_self, PyObject *__pyx_v_self, PyObject *__pyx_v_value) { struct __pyx_obj_4borg_4item___pyx_scope_struct___make_property *__pyx_cur_scope; struct __pyx_obj_4borg_4item___pyx_scope_struct___make_property *__pyx_outer_scope; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_t_2; int __pyx_t_3; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("_set", 0); __pyx_outer_scope = (struct __pyx_obj_4borg_4item___pyx_scope_struct___make_property *) __Pyx_CyFunction_GetClosure(__pyx_self); __pyx_cur_scope = __pyx_outer_scope; __Pyx_INCREF(__pyx_v_value); /* "borg/item.pyx":113 * * def _set(self, value): * if not isinstance(value, value_type): # <<<<<<<<<<<<<< * raise TypeError(type_error_msg) * if encode is not None: */ if (unlikely(!__pyx_cur_scope->__pyx_v_value_type)) { __Pyx_RaiseClosureNameError("value_type"); __PYX_ERR(0, 113, __pyx_L1_error) } __pyx_t_1 = __pyx_cur_scope->__pyx_v_value_type; __Pyx_INCREF(__pyx_t_1); __pyx_t_2 = PyObject_IsInstance(__pyx_v_value, __pyx_t_1); if (unlikely(__pyx_t_2 == ((int)-1))) __PYX_ERR(0, 113, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_3 = ((!(__pyx_t_2 != 0)) != 0); if (unlikely(__pyx_t_3)) { /* "borg/item.pyx":114 * def _set(self, value): * if not isinstance(value, value_type): * raise TypeError(type_error_msg) # <<<<<<<<<<<<<< * if encode is not None: * value = encode(value) */ if (unlikely(!__pyx_cur_scope->__pyx_v_type_error_msg)) { __Pyx_RaiseClosureNameError("type_error_msg"); __PYX_ERR(0, 114, __pyx_L1_error) } __pyx_t_1 = __Pyx_PyObject_CallOneArg(__pyx_builtin_TypeError, __pyx_cur_scope->__pyx_v_type_error_msg); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 114, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_Raise(__pyx_t_1, 0, 0, 0); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __PYX_ERR(0, 114, __pyx_L1_error) /* "borg/item.pyx":113 * * def _set(self, value): * if not isinstance(value, value_type): # <<<<<<<<<<<<<< * raise TypeError(type_error_msg) * if encode is not None: */ } /* "borg/item.pyx":115 * if not isinstance(value, value_type): * raise TypeError(type_error_msg) * if encode is not None: # <<<<<<<<<<<<<< * value = encode(value) * self._dict[key] = value */ if (unlikely(!__pyx_cur_scope->__pyx_v_encode)) { 
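/* The closure cell for `encode` must be populated before the
   `if encode is not None:` test below can be evaluated. */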
__Pyx_RaiseClosureNameError("encode"); __PYX_ERR(0, 115, __pyx_L1_error) } __pyx_t_3 = (__pyx_cur_scope->__pyx_v_encode != Py_None); __pyx_t_2 = (__pyx_t_3 != 0); if (__pyx_t_2) { /* "borg/item.pyx":116 * raise TypeError(type_error_msg) * if encode is not None: * value = encode(value) # <<<<<<<<<<<<<< * self._dict[key] = value * */ if (unlikely(!__pyx_cur_scope->__pyx_v_encode)) { __Pyx_RaiseClosureNameError("encode"); __PYX_ERR(0, 116, __pyx_L1_error) } __Pyx_INCREF(__pyx_cur_scope->__pyx_v_encode); __pyx_t_4 = __pyx_cur_scope->__pyx_v_encode; __pyx_t_5 = NULL; if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_4))) { __pyx_t_5 = PyMethod_GET_SELF(__pyx_t_4); if (likely(__pyx_t_5)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_4); __Pyx_INCREF(__pyx_t_5); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_4, function); } } __pyx_t_1 = (__pyx_t_5) ? __Pyx_PyObject_Call2Args(__pyx_t_4, __pyx_t_5, __pyx_v_value) : __Pyx_PyObject_CallOneArg(__pyx_t_4, __pyx_v_value); __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 116, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF_SET(__pyx_v_value, __pyx_t_1); __pyx_t_1 = 0; /* "borg/item.pyx":115 * if not isinstance(value, value_type): * raise TypeError(type_error_msg) * if encode is not None: # <<<<<<<<<<<<<< * value = encode(value) * self._dict[key] = value */ } /* "borg/item.pyx":117 * if encode is not None: * value = encode(value) * self._dict[key] = value # <<<<<<<<<<<<<< * * def _del(self): */ __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_dict); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 117, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); if (unlikely(!__pyx_cur_scope->__pyx_v_key)) { __Pyx_RaiseClosureNameError("key"); __PYX_ERR(0, 117, __pyx_L1_error) } if (unlikely(PyObject_SetItem(__pyx_t_1, __pyx_cur_scope->__pyx_v_key, __pyx_v_value) < 0)) __PYX_ERR(0, 117, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "borg/item.pyx":112 * return value * * def _set(self, value): # <<<<<<<<<<<<<< * if not isinstance(value, value_type): * raise TypeError(type_error_msg) */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("borg.item.PropDict._make_property._set", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_value); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "borg/item.pyx":119 * self._dict[key] = value * * def _del(self): # <<<<<<<<<<<<<< * try: * del self._dict[key] */ /* Python wrapper */ static PyObject *__pyx_pw_4borg_4item_8PropDict_14_make_property_5_del(PyObject *__pyx_self, PyObject *__pyx_v_self); /*proto*/ static PyMethodDef __pyx_mdef_4borg_4item_8PropDict_14_make_property_5_del = {"_del", (PyCFunction)__pyx_pw_4borg_4item_8PropDict_14_make_property_5_del, METH_O, 0}; static PyObject *__pyx_pw_4borg_4item_8PropDict_14_make_property_5_del(PyObject *__pyx_self, PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("_del (wrapper)", 0); __pyx_r = __pyx_pf_4borg_4item_8PropDict_14_make_property_4_del(__pyx_self, ((PyObject *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4borg_4item_8PropDict_14_make_property_4_del(PyObject *__pyx_self, PyObject *__pyx_v_self) { struct 
__pyx_obj_4borg_4item___pyx_scope_struct___make_property *__pyx_cur_scope; struct __pyx_obj_4borg_4item___pyx_scope_struct___make_property *__pyx_outer_scope; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; int __pyx_t_5; PyObject *__pyx_t_6 = NULL; PyObject *__pyx_t_7 = NULL; PyObject *__pyx_t_8 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("_del", 0); __pyx_outer_scope = (struct __pyx_obj_4borg_4item___pyx_scope_struct___make_property *) __Pyx_CyFunction_GetClosure(__pyx_self); __pyx_cur_scope = __pyx_outer_scope; /* "borg/item.pyx":120 * * def _del(self): * try: # <<<<<<<<<<<<<< * del self._dict[key] * except KeyError: */ { __Pyx_PyThreadState_declare __Pyx_PyThreadState_assign __Pyx_ExceptionSave(&__pyx_t_1, &__pyx_t_2, &__pyx_t_3); __Pyx_XGOTREF(__pyx_t_1); __Pyx_XGOTREF(__pyx_t_2); __Pyx_XGOTREF(__pyx_t_3); /*try:*/ { /* "borg/item.pyx":121 * def _del(self): * try: * del self._dict[key] # <<<<<<<<<<<<<< * except KeyError: * raise AttributeError(attr_error_msg) from None */ __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_dict); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 121, __pyx_L3_error) __Pyx_GOTREF(__pyx_t_4); if (unlikely(!__pyx_cur_scope->__pyx_v_key)) { __Pyx_RaiseClosureNameError("key"); __PYX_ERR(0, 121, __pyx_L3_error) } if (unlikely(PyObject_DelItem(__pyx_t_4, __pyx_cur_scope->__pyx_v_key) < 0)) __PYX_ERR(0, 121, __pyx_L3_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; /* "borg/item.pyx":120 * * def _del(self): * try: # <<<<<<<<<<<<<< * del self._dict[key] * except KeyError: */ } __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; goto __pyx_L8_try_end; __pyx_L3_error:; __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; /* "borg/item.pyx":122 * try: * del self._dict[key] * except KeyError: # <<<<<<<<<<<<<< * raise AttributeError(attr_error_msg) from None * */ __pyx_t_5 = __Pyx_PyErr_ExceptionMatches(__pyx_builtin_KeyError); if (__pyx_t_5) { __Pyx_AddTraceback("borg.item.PropDict._make_property._del", __pyx_clineno, __pyx_lineno, __pyx_filename); if (__Pyx_GetException(&__pyx_t_4, &__pyx_t_6, &__pyx_t_7) < 0) __PYX_ERR(0, 122, __pyx_L5_except_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_GOTREF(__pyx_t_6); __Pyx_GOTREF(__pyx_t_7); /* "borg/item.pyx":123 * del self._dict[key] * except KeyError: * raise AttributeError(attr_error_msg) from None # <<<<<<<<<<<<<< * * return property(_get, _set, _del, doc=doc) */ if (unlikely(!__pyx_cur_scope->__pyx_v_attr_error_msg)) { __Pyx_RaiseClosureNameError("attr_error_msg"); __PYX_ERR(0, 123, __pyx_L5_except_error) } __pyx_t_8 = __Pyx_PyObject_CallOneArg(__pyx_builtin_AttributeError, __pyx_cur_scope->__pyx_v_attr_error_msg); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 123, __pyx_L5_except_error) __Pyx_GOTREF(__pyx_t_8); __Pyx_Raise(__pyx_t_8, 0, 0, Py_None); __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; __PYX_ERR(0, 123, __pyx_L5_except_error) } goto __pyx_L5_except_error; __pyx_L5_except_error:; /* "borg/item.pyx":120 * * def _del(self): * try: # <<<<<<<<<<<<<< * del self._dict[key] * except KeyError: */ __Pyx_XGIVEREF(__pyx_t_1); __Pyx_XGIVEREF(__pyx_t_2); __Pyx_XGIVEREF(__pyx_t_3); __Pyx_ExceptionReset(__pyx_t_1, __pyx_t_2, __pyx_t_3); goto __pyx_L1_error; __pyx_L8_try_end:; } /* "borg/item.pyx":119 * self._dict[key] = value * * def _del(self): # <<<<<<<<<<<<<< * try: * del self._dict[key] */ /* 
function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_6); __Pyx_XDECREF(__pyx_t_7); __Pyx_XDECREF(__pyx_t_8); __Pyx_AddTraceback("borg.item.PropDict._make_property._del", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "borg/item.pyx":94 * * @staticmethod * def _make_property(key, value_type, value_type_name=None, encode=None, decode=None): # <<<<<<<<<<<<<< * """return a property that deals with self._dict[key]""" * assert isinstance(key, str) */ static PyObject *__pyx_pf_4borg_4item_8PropDict_18_make_property(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_key, PyObject *__pyx_v_value_type, PyObject *__pyx_v_value_type_name, PyObject *__pyx_v_encode, PyObject *__pyx_v_decode) { struct __pyx_obj_4borg_4item___pyx_scope_struct___make_property *__pyx_cur_scope; PyObject *__pyx_v_doc = NULL; PyObject *__pyx_v__get = 0; PyObject *__pyx_v__set = 0; PyObject *__pyx_v__del = 0; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_t_2; int __pyx_t_3; Py_ssize_t __pyx_t_4; Py_UCS4 __pyx_t_5; PyObject *__pyx_t_6 = NULL; PyObject *__pyx_t_7 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("_make_property", 0); __pyx_cur_scope = (struct __pyx_obj_4borg_4item___pyx_scope_struct___make_property *)__pyx_tp_new_4borg_4item___pyx_scope_struct___make_property(__pyx_ptype_4borg_4item___pyx_scope_struct___make_property, __pyx_empty_tuple, NULL); if (unlikely(!__pyx_cur_scope)) { __pyx_cur_scope = ((struct __pyx_obj_4borg_4item___pyx_scope_struct___make_property *)Py_None); __Pyx_INCREF(Py_None); __PYX_ERR(0, 94, __pyx_L1_error) } else { __Pyx_GOTREF(__pyx_cur_scope); } __pyx_cur_scope->__pyx_v_key = __pyx_v_key; __Pyx_INCREF(__pyx_cur_scope->__pyx_v_key); __Pyx_GIVEREF(__pyx_cur_scope->__pyx_v_key); __pyx_cur_scope->__pyx_v_value_type = __pyx_v_value_type; __Pyx_INCREF(__pyx_cur_scope->__pyx_v_value_type); __Pyx_GIVEREF(__pyx_cur_scope->__pyx_v_value_type); __pyx_cur_scope->__pyx_v_encode = __pyx_v_encode; __Pyx_INCREF(__pyx_cur_scope->__pyx_v_encode); __Pyx_GIVEREF(__pyx_cur_scope->__pyx_v_encode); __pyx_cur_scope->__pyx_v_decode = __pyx_v_decode; __Pyx_INCREF(__pyx_cur_scope->__pyx_v_decode); __Pyx_GIVEREF(__pyx_cur_scope->__pyx_v_decode); __Pyx_INCREF(__pyx_v_value_type_name); /* "borg/item.pyx":96 * def _make_property(key, value_type, value_type_name=None, encode=None, decode=None): * """return a property that deals with self._dict[key]""" * assert isinstance(key, str) # <<<<<<<<<<<<<< * if value_type_name is None: * value_type_name = value_type.__name__ */ #ifndef CYTHON_WITHOUT_ASSERTIONS if (unlikely(!Py_OptimizeFlag)) { __pyx_t_1 = __pyx_cur_scope->__pyx_v_key; __Pyx_INCREF(__pyx_t_1); __pyx_t_2 = PyUnicode_Check(__pyx_t_1); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; if (unlikely(!(__pyx_t_2 != 0))) { PyErr_SetNone(PyExc_AssertionError); __PYX_ERR(0, 96, __pyx_L1_error) } } #endif /* "borg/item.pyx":97 * """return a property that deals with self._dict[key]""" * assert isinstance(key, str) * if value_type_name is None: # <<<<<<<<<<<<<< * value_type_name = value_type.__name__ * doc = "%s (%s)" % (key, value_type_name) */ __pyx_t_2 = (__pyx_v_value_type_name == Py_None); __pyx_t_3 = (__pyx_t_2 != 0); if (__pyx_t_3) { /* "borg/item.pyx":98 * assert isinstance(key, str) * if value_type_name is None: * 
value_type_name = value_type.__name__ # <<<<<<<<<<<<<< * doc = "%s (%s)" % (key, value_type_name) * type_error_msg = "%s value must be %s" % (key, value_type_name) */ __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_cur_scope->__pyx_v_value_type, __pyx_n_s_name); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 98, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF_SET(__pyx_v_value_type_name, __pyx_t_1); __pyx_t_1 = 0; /* "borg/item.pyx":97 * """return a property that deals with self._dict[key]""" * assert isinstance(key, str) * if value_type_name is None: # <<<<<<<<<<<<<< * value_type_name = value_type.__name__ * doc = "%s (%s)" % (key, value_type_name) */ } /* "borg/item.pyx":99 * if value_type_name is None: * value_type_name = value_type.__name__ * doc = "%s (%s)" % (key, value_type_name) # <<<<<<<<<<<<<< * type_error_msg = "%s value must be %s" % (key, value_type_name) * attr_error_msg = "attribute %s not found" % key */ __pyx_t_1 = PyTuple_New(4); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 99, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_4 = 0; __pyx_t_5 = 127; __pyx_t_6 = __Pyx_PyObject_FormatSimpleAndDecref(PyObject_Unicode(__pyx_cur_scope->__pyx_v_key), __pyx_empty_unicode); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 99, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __pyx_t_5 = (__Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_6) > __pyx_t_5) ? __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_6) : __pyx_t_5; __pyx_t_4 += __Pyx_PyUnicode_GET_LENGTH(__pyx_t_6); __Pyx_GIVEREF(__pyx_t_6); PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_t_6); __pyx_t_6 = 0; __Pyx_INCREF(__pyx_kp_u__5); __pyx_t_4 += 2; __Pyx_GIVEREF(__pyx_kp_u__5); PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_kp_u__5); __pyx_t_6 = __Pyx_PyObject_FormatSimpleAndDecref(PyObject_Unicode(__pyx_v_value_type_name), __pyx_empty_unicode); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 99, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __pyx_t_5 = (__Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_6) > __pyx_t_5) ? __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_6) : __pyx_t_5; __pyx_t_4 += __Pyx_PyUnicode_GET_LENGTH(__pyx_t_6); __Pyx_GIVEREF(__pyx_t_6); PyTuple_SET_ITEM(__pyx_t_1, 2, __pyx_t_6); __pyx_t_6 = 0; __Pyx_INCREF(__pyx_kp_u__3); __pyx_t_4 += 1; __Pyx_GIVEREF(__pyx_kp_u__3); PyTuple_SET_ITEM(__pyx_t_1, 3, __pyx_kp_u__3); __pyx_t_6 = __Pyx_PyUnicode_Join(__pyx_t_1, 4, __pyx_t_4, __pyx_t_5); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 99, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_v_doc = ((PyObject*)__pyx_t_6); __pyx_t_6 = 0; /* "borg/item.pyx":100 * value_type_name = value_type.__name__ * doc = "%s (%s)" % (key, value_type_name) * type_error_msg = "%s value must be %s" % (key, value_type_name) # <<<<<<<<<<<<<< * attr_error_msg = "attribute %s not found" % key * */ __pyx_t_6 = PyTuple_New(3); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 100, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __pyx_t_4 = 0; __pyx_t_5 = 127; __pyx_t_1 = __Pyx_PyObject_FormatSimpleAndDecref(PyObject_Unicode(__pyx_cur_scope->__pyx_v_key), __pyx_empty_unicode); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 100, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_5 = (__Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_1) > __pyx_t_5) ? 
__Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_1) : __pyx_t_5; __pyx_t_4 += __Pyx_PyUnicode_GET_LENGTH(__pyx_t_1); __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_6, 0, __pyx_t_1); __pyx_t_1 = 0; __Pyx_INCREF(__pyx_kp_u_value_must_be); __pyx_t_4 += 15; __Pyx_GIVEREF(__pyx_kp_u_value_must_be); PyTuple_SET_ITEM(__pyx_t_6, 1, __pyx_kp_u_value_must_be); __pyx_t_1 = __Pyx_PyObject_FormatSimpleAndDecref(PyObject_Unicode(__pyx_v_value_type_name), __pyx_empty_unicode); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 100, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_5 = (__Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_1) > __pyx_t_5) ? __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_1) : __pyx_t_5; __pyx_t_4 += __Pyx_PyUnicode_GET_LENGTH(__pyx_t_1); __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_6, 2, __pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = __Pyx_PyUnicode_Join(__pyx_t_6, 3, __pyx_t_4, __pyx_t_5); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 100, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_GIVEREF(__pyx_t_1); __pyx_cur_scope->__pyx_v_type_error_msg = ((PyObject*)__pyx_t_1); __pyx_t_1 = 0; /* "borg/item.pyx":101 * doc = "%s (%s)" % (key, value_type_name) * type_error_msg = "%s value must be %s" % (key, value_type_name) * attr_error_msg = "attribute %s not found" % key # <<<<<<<<<<<<<< * * def _get(self): */ __pyx_t_1 = __Pyx_PyUnicode_FormatSafe(__pyx_kp_u_attribute_s_not_found, __pyx_cur_scope->__pyx_v_key); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 101, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_GIVEREF(__pyx_t_1); __pyx_cur_scope->__pyx_v_attr_error_msg = ((PyObject*)__pyx_t_1); __pyx_t_1 = 0; /* "borg/item.pyx":103 * attr_error_msg = "attribute %s not found" % key * * def _get(self): # <<<<<<<<<<<<<< * try: * value = self._dict[key] */ __pyx_t_1 = __Pyx_CyFunction_New(&__pyx_mdef_4borg_4item_8PropDict_14_make_property_1_get, 0, __pyx_n_s_PropDict__make_property_locals, ((PyObject*)__pyx_cur_scope), __pyx_n_s_borg_item, __pyx_d, ((PyObject *)__pyx_codeobj__7)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 103, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_v__get = __pyx_t_1; __pyx_t_1 = 0; /* "borg/item.pyx":112 * return value * * def _set(self, value): # <<<<<<<<<<<<<< * if not isinstance(value, value_type): * raise TypeError(type_error_msg) */ __pyx_t_1 = __Pyx_CyFunction_New(&__pyx_mdef_4borg_4item_8PropDict_14_make_property_3_set, 0, __pyx_n_s_PropDict__make_property_locals_2, ((PyObject*)__pyx_cur_scope), __pyx_n_s_borg_item, __pyx_d, ((PyObject *)__pyx_codeobj__9)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 112, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_v__set = __pyx_t_1; __pyx_t_1 = 0; /* "borg/item.pyx":119 * self._dict[key] = value * * def _del(self): # <<<<<<<<<<<<<< * try: * del self._dict[key] */ __pyx_t_1 = __Pyx_CyFunction_New(&__pyx_mdef_4borg_4item_8PropDict_14_make_property_5_del, 0, __pyx_n_s_PropDict__make_property_locals_3, ((PyObject*)__pyx_cur_scope), __pyx_n_s_borg_item, __pyx_d, ((PyObject *)__pyx_codeobj__11)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 119, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_v__del = __pyx_t_1; __pyx_t_1 = 0; /* "borg/item.pyx":125 * raise AttributeError(attr_error_msg) from None * * return property(_get, _set, _del, doc=doc) # <<<<<<<<<<<<<< * * */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = PyTuple_New(3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 125, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_INCREF(__pyx_v__get); __Pyx_GIVEREF(__pyx_v__get); PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_v__get); __Pyx_INCREF(__pyx_v__set); 
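/* Build the positional tuple (_get, _set, _del) plus the {'doc': doc} keyword
   dict for the `property(...)` call at item.pyx:125.  For illustration only:
   the quoted source further below defines
       part = PropDict._make_property('part', int)
   so that attribute access is routed through these _get/_set/_del closures
   and the value is stored under `key` in self._dict. */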
__Pyx_GIVEREF(__pyx_v__set); PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_v__set); __Pyx_INCREF(__pyx_v__del); __Pyx_GIVEREF(__pyx_v__del); PyTuple_SET_ITEM(__pyx_t_1, 2, __pyx_v__del); __pyx_t_6 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 125, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); if (PyDict_SetItem(__pyx_t_6, __pyx_n_s_doc, __pyx_v_doc) < 0) __PYX_ERR(0, 125, __pyx_L1_error) __pyx_t_7 = __Pyx_PyObject_Call(__pyx_builtin_property, __pyx_t_1, __pyx_t_6); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 125, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __pyx_r = __pyx_t_7; __pyx_t_7 = 0; goto __pyx_L0; /* "borg/item.pyx":94 * * @staticmethod * def _make_property(key, value_type, value_type_name=None, encode=None, decode=None): # <<<<<<<<<<<<<< * """return a property that deals with self._dict[key]""" * assert isinstance(key, str) */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_6); __Pyx_XDECREF(__pyx_t_7); __Pyx_AddTraceback("borg.item.PropDict._make_property", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_doc); __Pyx_XDECREF(__pyx_v__get); __Pyx_XDECREF(__pyx_v__set); __Pyx_XDECREF(__pyx_v__del); __Pyx_XDECREF(__pyx_v_value_type_name); __Pyx_DECREF(((PyObject *)__pyx_cur_scope)); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "borg/item.pyx":191 * part = PropDict._make_property('part', int) * * def get_size(self, hardlink_masters=None, memorize=False, compressed=False, from_chunks=False): # <<<<<<<<<<<<<< * """ * Determine the (uncompressed or compressed) size of this item. */ /* Python wrapper */ static PyObject *__pyx_pw_4borg_4item_4Item_1get_size(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static char __pyx_doc_4borg_4item_4Item_get_size[] = "\n Determine the (uncompressed or compressed) size of this item.\n\n For hardlink slaves, the size is computed via the hardlink master's\n chunk list, if available (otherwise size will be returned as 0).\n\n If memorize is True, the computed size value will be stored into the item.\n "; static PyMethodDef __pyx_mdef_4borg_4item_4Item_1get_size = {"get_size", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_4borg_4item_4Item_1get_size, METH_VARARGS|METH_KEYWORDS, __pyx_doc_4borg_4item_4Item_get_size}; static PyObject *__pyx_pw_4borg_4item_4Item_1get_size(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyObject *__pyx_v_self = 0; PyObject *__pyx_v_hardlink_masters = 0; PyObject *__pyx_v_memorize = 0; PyObject *__pyx_v_compressed = 0; PyObject *__pyx_v_from_chunks = 0; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("get_size (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_self,&__pyx_n_s_hardlink_masters,&__pyx_n_s_memorize,&__pyx_n_s_compressed,&__pyx_n_s_from_chunks,0}; PyObject* values[5] = {0,0,0,0,0}; values[1] = ((PyObject *)((PyObject *)Py_None)); values[2] = ((PyObject *)((PyObject *)Py_False)); values[3] = ((PyObject *)((PyObject *)Py_False)); values[4] = ((PyObject *)((PyObject *)Py_False)); if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); CYTHON_FALLTHROUGH; case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); 
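/* get_size() defaults were pre-seeded into values[] above
   (hardlink_masters=None, memorize/compressed/from_chunks=False); the
   fall-through cases only overwrite the slots that were actually passed. */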
CYTHON_FALLTHROUGH; case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); CYTHON_FALLTHROUGH; case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); CYTHON_FALLTHROUGH; case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_self)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: if (kw_args > 0) { PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_hardlink_masters); if (value) { values[1] = value; kw_args--; } } CYTHON_FALLTHROUGH; case 2: if (kw_args > 0) { PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_memorize); if (value) { values[2] = value; kw_args--; } } CYTHON_FALLTHROUGH; case 3: if (kw_args > 0) { PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_compressed); if (value) { values[3] = value; kw_args--; } } CYTHON_FALLTHROUGH; case 4: if (kw_args > 0) { PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_from_chunks); if (value) { values[4] = value; kw_args--; } } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "get_size") < 0)) __PYX_ERR(0, 191, __pyx_L3_error) } } else { switch (PyTuple_GET_SIZE(__pyx_args)) { case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); CYTHON_FALLTHROUGH; case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); CYTHON_FALLTHROUGH; case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); CYTHON_FALLTHROUGH; case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); CYTHON_FALLTHROUGH; case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); break; default: goto __pyx_L5_argtuple_error; } } __pyx_v_self = values[0]; __pyx_v_hardlink_masters = values[1]; __pyx_v_memorize = values[2]; __pyx_v_compressed = values[3]; __pyx_v_from_chunks = values[4]; } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("get_size", 0, 1, 5, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 191, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("borg.item.Item.get_size", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_4borg_4item_4Item_get_size(__pyx_self, __pyx_v_self, __pyx_v_hardlink_masters, __pyx_v_memorize, __pyx_v_compressed, __pyx_v_from_chunks); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_gb_4borg_4item_4Item_8get_size_2generator(__pyx_CoroutineObject *__pyx_generator, CYTHON_UNUSED PyThreadState *__pyx_tstate, PyObject *__pyx_sent_value); /* proto */ /* "borg/item.pyx":233 * if chunks is None: * return 0 * size = sum(getattr(ChunkListEntry(*chunk), attr) for chunk in chunks) # <<<<<<<<<<<<<< * # if requested, memorize the precomputed (c)size for items that have an own chunks list: * if memorize and having_chunks: */ static PyObject *__pyx_pf_4borg_4item_4Item_8get_size_genexpr(PyObject *__pyx_self) { struct __pyx_obj_4borg_4item___pyx_scope_struct_2_genexpr *__pyx_cur_scope; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("genexpr", 0); __pyx_cur_scope = (struct __pyx_obj_4borg_4item___pyx_scope_struct_2_genexpr *)__pyx_tp_new_4borg_4item___pyx_scope_struct_2_genexpr(__pyx_ptype_4borg_4item___pyx_scope_struct_2_genexpr, 
__pyx_empty_tuple, NULL); if (unlikely(!__pyx_cur_scope)) { __pyx_cur_scope = ((struct __pyx_obj_4borg_4item___pyx_scope_struct_2_genexpr *)Py_None); __Pyx_INCREF(Py_None); __PYX_ERR(0, 233, __pyx_L1_error) } else { __Pyx_GOTREF(__pyx_cur_scope); } __pyx_cur_scope->__pyx_outer_scope = (struct __pyx_obj_4borg_4item___pyx_scope_struct_1_get_size *) __pyx_self; __Pyx_INCREF(((PyObject *)__pyx_cur_scope->__pyx_outer_scope)); __Pyx_GIVEREF(__pyx_cur_scope->__pyx_outer_scope); { __pyx_CoroutineObject *gen = __Pyx_Generator_New((__pyx_coroutine_body_t) __pyx_gb_4borg_4item_4Item_8get_size_2generator, NULL, (PyObject *) __pyx_cur_scope, __pyx_n_s_genexpr, __pyx_n_s_Item_get_size_locals_genexpr, __pyx_n_s_borg_item); if (unlikely(!gen)) __PYX_ERR(0, 233, __pyx_L1_error) __Pyx_DECREF(__pyx_cur_scope); __Pyx_RefNannyFinishContext(); return (PyObject *) gen; } /* function exit code */ __pyx_L1_error:; __Pyx_AddTraceback("borg.item.Item.get_size.genexpr", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __Pyx_DECREF(((PyObject *)__pyx_cur_scope)); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_gb_4borg_4item_4Item_8get_size_2generator(__pyx_CoroutineObject *__pyx_generator, CYTHON_UNUSED PyThreadState *__pyx_tstate, PyObject *__pyx_sent_value) /* generator body */ { struct __pyx_obj_4borg_4item___pyx_scope_struct_2_genexpr *__pyx_cur_scope = ((struct __pyx_obj_4borg_4item___pyx_scope_struct_2_genexpr *)__pyx_generator->closure); PyObject *__pyx_r = NULL; PyObject *__pyx_t_1 = NULL; Py_ssize_t __pyx_t_2; PyObject *(*__pyx_t_3)(PyObject *); PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; PyObject *__pyx_t_6 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("genexpr", 0); switch (__pyx_generator->resume_label) { case 0: goto __pyx_L3_first_run; case 1: goto __pyx_L6_resume_from_yield; default: /* CPython raises the right error here */ __Pyx_RefNannyFinishContext(); return NULL; } __pyx_L3_first_run:; if (unlikely(!__pyx_sent_value)) __PYX_ERR(0, 233, __pyx_L1_error) if (unlikely(!__pyx_cur_scope->__pyx_outer_scope->__pyx_v_chunks)) { __Pyx_RaiseClosureNameError("chunks"); __PYX_ERR(0, 233, __pyx_L1_error) } if (likely(PyList_CheckExact(__pyx_cur_scope->__pyx_outer_scope->__pyx_v_chunks)) || PyTuple_CheckExact(__pyx_cur_scope->__pyx_outer_scope->__pyx_v_chunks)) { __pyx_t_1 = __pyx_cur_scope->__pyx_outer_scope->__pyx_v_chunks; __Pyx_INCREF(__pyx_t_1); __pyx_t_2 = 0; __pyx_t_3 = NULL; } else { __pyx_t_2 = -1; __pyx_t_1 = PyObject_GetIter(__pyx_cur_scope->__pyx_outer_scope->__pyx_v_chunks); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 233, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_3 = Py_TYPE(__pyx_t_1)->tp_iternext; if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 233, __pyx_L1_error) } for (;;) { if (likely(!__pyx_t_3)) { if (likely(PyList_CheckExact(__pyx_t_1))) { if (__pyx_t_2 >= PyList_GET_SIZE(__pyx_t_1)) break; #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_4 = PyList_GET_ITEM(__pyx_t_1, __pyx_t_2); __Pyx_INCREF(__pyx_t_4); __pyx_t_2++; if (unlikely(0 < 0)) __PYX_ERR(0, 233, __pyx_L1_error) #else __pyx_t_4 = PySequence_ITEM(__pyx_t_1, __pyx_t_2); __pyx_t_2++; if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 233, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); #endif } else { if (__pyx_t_2 >= PyTuple_GET_SIZE(__pyx_t_1)) break; #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_4 = PyTuple_GET_ITEM(__pyx_t_1, __pyx_t_2); 
__Pyx_INCREF(__pyx_t_4); __pyx_t_2++; if (unlikely(0 < 0)) __PYX_ERR(0, 233, __pyx_L1_error) #else __pyx_t_4 = PySequence_ITEM(__pyx_t_1, __pyx_t_2); __pyx_t_2++; if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 233, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); #endif } } else { __pyx_t_4 = __pyx_t_3(__pyx_t_1); if (unlikely(!__pyx_t_4)) { PyObject* exc_type = PyErr_Occurred(); if (exc_type) { if (likely(__Pyx_PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear(); else __PYX_ERR(0, 233, __pyx_L1_error) } break; } __Pyx_GOTREF(__pyx_t_4); } __Pyx_XGOTREF(__pyx_cur_scope->__pyx_v_chunk); __Pyx_XDECREF_SET(__pyx_cur_scope->__pyx_v_chunk, __pyx_t_4); __Pyx_GIVEREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_n_s_ChunkListEntry); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 233, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_5 = __Pyx_PySequence_Tuple(__pyx_cur_scope->__pyx_v_chunk); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 233, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_6 = __Pyx_PyObject_Call(__pyx_t_4, __pyx_t_5, NULL); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 233, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (unlikely(!__pyx_cur_scope->__pyx_outer_scope->__pyx_v_attr)) { __Pyx_RaiseClosureNameError("attr"); __PYX_ERR(0, 233, __pyx_L1_error) } __pyx_t_5 = __pyx_cur_scope->__pyx_outer_scope->__pyx_v_attr; __Pyx_INCREF(__pyx_t_5); __pyx_t_4 = __Pyx_GetAttr(__pyx_t_6, __pyx_t_5); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 233, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_r = __pyx_t_4; __pyx_t_4 = 0; __Pyx_XGIVEREF(__pyx_t_1); __pyx_cur_scope->__pyx_t_0 = __pyx_t_1; __pyx_cur_scope->__pyx_t_1 = __pyx_t_2; __pyx_cur_scope->__pyx_t_2 = __pyx_t_3; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); __Pyx_Coroutine_ResetAndClearException(__pyx_generator); /* return from generator, yielding value */ __pyx_generator->resume_label = 1; return __pyx_r; __pyx_L6_resume_from_yield:; __pyx_t_1 = __pyx_cur_scope->__pyx_t_0; __pyx_cur_scope->__pyx_t_0 = 0; __Pyx_XGOTREF(__pyx_t_1); __pyx_t_2 = __pyx_cur_scope->__pyx_t_1; __pyx_t_3 = __pyx_cur_scope->__pyx_t_2; if (unlikely(!__pyx_sent_value)) __PYX_ERR(0, 233, __pyx_L1_error) } __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; CYTHON_MAYBE_UNUSED_VAR(__pyx_cur_scope); /* function exit code */ PyErr_SetNone(PyExc_StopIteration); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_XDECREF(__pyx_t_6); __Pyx_AddTraceback("genexpr", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_L0:; __Pyx_XDECREF(__pyx_r); __pyx_r = 0; #if !CYTHON_USE_EXC_INFO_STACK __Pyx_Coroutine_ResetAndClearException(__pyx_generator); #endif __pyx_generator->resume_label = -1; __Pyx_Coroutine_clear((PyObject*)__pyx_generator); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "borg/item.pyx":191 * part = PropDict._make_property('part', int) * * def get_size(self, hardlink_masters=None, memorize=False, compressed=False, from_chunks=False): # <<<<<<<<<<<<<< * """ * Determine the (uncompressed or compressed) size of this item. 
*/ static PyObject *__pyx_pf_4borg_4item_4Item_get_size(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_self, PyObject *__pyx_v_hardlink_masters, PyObject *__pyx_v_memorize, PyObject *__pyx_v_compressed, PyObject *__pyx_v_from_chunks) { struct __pyx_obj_4borg_4item___pyx_scope_struct_1_get_size *__pyx_cur_scope; PyObject *__pyx_v_size = NULL; int __pyx_v_having_chunks; PyObject *__pyx_v_master = NULL; CYTHON_UNUSED PyObject *__pyx_v__ = NULL; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_t_2; int __pyx_t_3; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; PyObject *__pyx_t_6 = NULL; PyObject *__pyx_t_7 = NULL; int __pyx_t_8; PyObject *__pyx_t_9 = NULL; PyObject *__pyx_t_10 = NULL; PyObject *__pyx_t_11 = NULL; PyObject *__pyx_t_12 = NULL; PyObject *__pyx_t_13 = NULL; Py_ssize_t __pyx_t_14; PyObject *__pyx_t_15 = NULL; PyObject *__pyx_t_16 = NULL; PyObject *__pyx_t_17 = NULL; PyObject *__pyx_t_18 = NULL; PyObject *__pyx_t_19 = NULL; PyObject *__pyx_t_20 = NULL; PyObject *__pyx_t_21 = NULL; PyObject *__pyx_t_22 = NULL; PyObject *__pyx_t_23 = NULL; PyObject *(*__pyx_t_24)(PyObject *); int __pyx_t_25; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("get_size", 0); __pyx_cur_scope = (struct __pyx_obj_4borg_4item___pyx_scope_struct_1_get_size *)__pyx_tp_new_4borg_4item___pyx_scope_struct_1_get_size(__pyx_ptype_4borg_4item___pyx_scope_struct_1_get_size, __pyx_empty_tuple, NULL); if (unlikely(!__pyx_cur_scope)) { __pyx_cur_scope = ((struct __pyx_obj_4borg_4item___pyx_scope_struct_1_get_size *)Py_None); __Pyx_INCREF(Py_None); __PYX_ERR(0, 191, __pyx_L1_error) } else { __Pyx_GOTREF(__pyx_cur_scope); } /* "borg/item.pyx":200 * If memorize is True, the computed size value will be stored into the item. * """ * attr = 'csize' if compressed else 'size' # <<<<<<<<<<<<<< * assert not (compressed and memorize), 'Item does not have a csize field.' * try: */ __pyx_t_2 = __Pyx_PyObject_IsTrue(__pyx_v_compressed); if (unlikely(__pyx_t_2 < 0)) __PYX_ERR(0, 200, __pyx_L1_error) if (__pyx_t_2) { __Pyx_INCREF(__pyx_n_u_csize); __pyx_t_1 = __pyx_n_u_csize; } else { __Pyx_INCREF(__pyx_n_u_size); __pyx_t_1 = __pyx_n_u_size; } __Pyx_GIVEREF(__pyx_t_1); __pyx_cur_scope->__pyx_v_attr = ((PyObject*)__pyx_t_1); __pyx_t_1 = 0; /* "borg/item.pyx":201 * """ * attr = 'csize' if compressed else 'size' * assert not (compressed and memorize), 'Item does not have a csize field.' # <<<<<<<<<<<<<< * try: * if from_chunks: */ #ifndef CYTHON_WITHOUT_ASSERTIONS if (unlikely(!Py_OptimizeFlag)) { __pyx_t_3 = __Pyx_PyObject_IsTrue(__pyx_v_compressed); if (unlikely(__pyx_t_3 < 0)) __PYX_ERR(0, 201, __pyx_L1_error) if (__pyx_t_3) { } else { __pyx_t_2 = __pyx_t_3; goto __pyx_L3_bool_binop_done; } __pyx_t_3 = __Pyx_PyObject_IsTrue(__pyx_v_memorize); if (unlikely(__pyx_t_3 < 0)) __PYX_ERR(0, 201, __pyx_L1_error) __pyx_t_2 = __pyx_t_3; __pyx_L3_bool_binop_done:; if (unlikely(!((!__pyx_t_2) != 0))) { PyErr_SetObject(PyExc_AssertionError, __pyx_kp_u_Item_does_not_have_a_csize_field); __PYX_ERR(0, 201, __pyx_L1_error) } } #endif /* "borg/item.pyx":202 * attr = 'csize' if compressed else 'size' * assert not (compressed and memorize), 'Item does not have a csize field.' 
* try: # <<<<<<<<<<<<<< * if from_chunks: * raise AttributeError */ { __Pyx_PyThreadState_declare __Pyx_PyThreadState_assign __Pyx_ExceptionSave(&__pyx_t_4, &__pyx_t_5, &__pyx_t_6); __Pyx_XGOTREF(__pyx_t_4); __Pyx_XGOTREF(__pyx_t_5); __Pyx_XGOTREF(__pyx_t_6); /*try:*/ { /* "borg/item.pyx":203 * assert not (compressed and memorize), 'Item does not have a csize field.' * try: * if from_chunks: # <<<<<<<<<<<<<< * raise AttributeError * size = getattr(self, attr) */ __pyx_t_2 = __Pyx_PyObject_IsTrue(__pyx_v_from_chunks); if (unlikely(__pyx_t_2 < 0)) __PYX_ERR(0, 203, __pyx_L5_error) if (unlikely(__pyx_t_2)) { /* "borg/item.pyx":204 * try: * if from_chunks: * raise AttributeError # <<<<<<<<<<<<<< * size = getattr(self, attr) * except AttributeError: */ __Pyx_Raise(__pyx_builtin_AttributeError, 0, 0, 0); __PYX_ERR(0, 204, __pyx_L5_error) /* "borg/item.pyx":203 * assert not (compressed and memorize), 'Item does not have a csize field.' * try: * if from_chunks: # <<<<<<<<<<<<<< * raise AttributeError * size = getattr(self, attr) */ } /* "borg/item.pyx":205 * if from_chunks: * raise AttributeError * size = getattr(self, attr) # <<<<<<<<<<<<<< * except AttributeError: * if stat.S_ISLNK(self.mode): */ __pyx_t_1 = __pyx_cur_scope->__pyx_v_attr; __Pyx_INCREF(__pyx_t_1); __pyx_t_7 = __Pyx_GetAttr(__pyx_v_self, __pyx_t_1); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 205, __pyx_L5_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_v_size = __pyx_t_7; __pyx_t_7 = 0; /* "borg/item.pyx":202 * attr = 'csize' if compressed else 'size' * assert not (compressed and memorize), 'Item does not have a csize field.' * try: # <<<<<<<<<<<<<< * if from_chunks: * raise AttributeError */ } __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; goto __pyx_L10_try_end; __pyx_L5_error:; __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; /* "borg/item.pyx":206 * raise AttributeError * size = getattr(self, attr) * except AttributeError: # <<<<<<<<<<<<<< * if stat.S_ISLNK(self.mode): * # get out of here quickly. symlinks have no own chunks, their fs size is the length of the target name. */ __pyx_t_8 = __Pyx_PyErr_ExceptionMatches(__pyx_builtin_AttributeError); if (__pyx_t_8) { __Pyx_AddTraceback("borg.item.Item.get_size", __pyx_clineno, __pyx_lineno, __pyx_filename); if (__Pyx_GetException(&__pyx_t_7, &__pyx_t_1, &__pyx_t_9) < 0) __PYX_ERR(0, 206, __pyx_L7_except_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_GOTREF(__pyx_t_1); __Pyx_GOTREF(__pyx_t_9); /* "borg/item.pyx":207 * size = getattr(self, attr) * except AttributeError: * if stat.S_ISLNK(self.mode): # <<<<<<<<<<<<<< * # get out of here quickly. symlinks have no own chunks, their fs size is the length of the target name. * # also, there is the dual-use issue of .source (#2343), so don't confuse it with a hardlink slave. 
*/ __Pyx_GetModuleGlobalName(__pyx_t_11, __pyx_n_s_stat); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 207, __pyx_L7_except_error) __Pyx_GOTREF(__pyx_t_11); __pyx_t_12 = __Pyx_PyObject_GetAttrStr(__pyx_t_11, __pyx_n_s_S_ISLNK); if (unlikely(!__pyx_t_12)) __PYX_ERR(0, 207, __pyx_L7_except_error) __Pyx_GOTREF(__pyx_t_12); __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0; __pyx_t_11 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_mode); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 207, __pyx_L7_except_error) __Pyx_GOTREF(__pyx_t_11); __pyx_t_13 = NULL; if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_12))) { __pyx_t_13 = PyMethod_GET_SELF(__pyx_t_12); if (likely(__pyx_t_13)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_12); __Pyx_INCREF(__pyx_t_13); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_12, function); } } __pyx_t_10 = (__pyx_t_13) ? __Pyx_PyObject_Call2Args(__pyx_t_12, __pyx_t_13, __pyx_t_11) : __Pyx_PyObject_CallOneArg(__pyx_t_12, __pyx_t_11); __Pyx_XDECREF(__pyx_t_13); __pyx_t_13 = 0; __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0; if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 207, __pyx_L7_except_error) __Pyx_GOTREF(__pyx_t_10); __Pyx_DECREF(__pyx_t_12); __pyx_t_12 = 0; __pyx_t_2 = __Pyx_PyObject_IsTrue(__pyx_t_10); if (unlikely(__pyx_t_2 < 0)) __PYX_ERR(0, 207, __pyx_L7_except_error) __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; if (__pyx_t_2) { /* "borg/item.pyx":210 * # get out of here quickly. symlinks have no own chunks, their fs size is the length of the target name. * # also, there is the dual-use issue of .source (#2343), so don't confuse it with a hardlink slave. * return len(self.source) # <<<<<<<<<<<<<< * # no precomputed (c)size value available, compute it: * try: */ __Pyx_XDECREF(__pyx_r); __pyx_t_10 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_source); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 210, __pyx_L7_except_error) __Pyx_GOTREF(__pyx_t_10); __pyx_t_14 = PyObject_Length(__pyx_t_10); if (unlikely(__pyx_t_14 == ((Py_ssize_t)-1))) __PYX_ERR(0, 210, __pyx_L7_except_error) __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; __pyx_t_10 = PyInt_FromSsize_t(__pyx_t_14); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 210, __pyx_L7_except_error) __Pyx_GOTREF(__pyx_t_10); __pyx_r = __pyx_t_10; __pyx_t_10 = 0; __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; goto __pyx_L8_except_return; /* "borg/item.pyx":207 * size = getattr(self, attr) * except AttributeError: * if stat.S_ISLNK(self.mode): # <<<<<<<<<<<<<< * # get out of here quickly. symlinks have no own chunks, their fs size is the length of the target name. * # also, there is the dual-use issue of .source (#2343), so don't confuse it with a hardlink slave. 
*/ } /* "borg/item.pyx":212 * return len(self.source) * # no precomputed (c)size value available, compute it: * try: # <<<<<<<<<<<<<< * chunks = getattr(self, 'chunks') * having_chunks = True */ { __Pyx_PyThreadState_declare __Pyx_PyThreadState_assign __Pyx_ExceptionSave(&__pyx_t_15, &__pyx_t_16, &__pyx_t_17); __Pyx_XGOTREF(__pyx_t_15); __Pyx_XGOTREF(__pyx_t_16); __Pyx_XGOTREF(__pyx_t_17); /*try:*/ { /* "borg/item.pyx":213 * # no precomputed (c)size value available, compute it: * try: * chunks = getattr(self, 'chunks') # <<<<<<<<<<<<<< * having_chunks = True * except AttributeError: */ __pyx_t_10 = __Pyx_GetAttr(__pyx_v_self, __pyx_n_u_chunks); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 213, __pyx_L15_error) __Pyx_GOTREF(__pyx_t_10); __Pyx_GIVEREF(__pyx_t_10); __pyx_cur_scope->__pyx_v_chunks = __pyx_t_10; __pyx_t_10 = 0; /* "borg/item.pyx":214 * try: * chunks = getattr(self, 'chunks') * having_chunks = True # <<<<<<<<<<<<<< * except AttributeError: * having_chunks = False */ __pyx_v_having_chunks = 1; /* "borg/item.pyx":212 * return len(self.source) * # no precomputed (c)size value available, compute it: * try: # <<<<<<<<<<<<<< * chunks = getattr(self, 'chunks') * having_chunks = True */ } __Pyx_XDECREF(__pyx_t_15); __pyx_t_15 = 0; __Pyx_XDECREF(__pyx_t_16); __pyx_t_16 = 0; __Pyx_XDECREF(__pyx_t_17); __pyx_t_17 = 0; goto __pyx_L22_try_end; __pyx_L15_error:; __Pyx_XDECREF(__pyx_t_10); __pyx_t_10 = 0; __Pyx_XDECREF(__pyx_t_11); __pyx_t_11 = 0; __Pyx_XDECREF(__pyx_t_12); __pyx_t_12 = 0; __Pyx_XDECREF(__pyx_t_13); __pyx_t_13 = 0; /* "borg/item.pyx":215 * chunks = getattr(self, 'chunks') * having_chunks = True * except AttributeError: # <<<<<<<<<<<<<< * having_chunks = False * # this item has no (own) chunks list, but if this is a hardlink slave */ __pyx_t_8 = __Pyx_PyErr_ExceptionMatches(__pyx_builtin_AttributeError); if (__pyx_t_8) { __Pyx_AddTraceback("borg.item.Item.get_size", __pyx_clineno, __pyx_lineno, __pyx_filename); if (__Pyx_GetException(&__pyx_t_10, &__pyx_t_12, &__pyx_t_11) < 0) __PYX_ERR(0, 215, __pyx_L17_except_error) __Pyx_GOTREF(__pyx_t_10); __Pyx_GOTREF(__pyx_t_12); __Pyx_GOTREF(__pyx_t_11); /* "borg/item.pyx":216 * having_chunks = True * except AttributeError: * having_chunks = False # <<<<<<<<<<<<<< * # this item has no (own) chunks list, but if this is a hardlink slave * # and we know the master, we can still compute the size. */ __pyx_v_having_chunks = 0; /* "borg/item.pyx":219 * # this item has no (own) chunks list, but if this is a hardlink slave * # and we know the master, we can still compute the size. * if hardlink_masters is None: # <<<<<<<<<<<<<< * chunks = None * else: */ __pyx_t_2 = (__pyx_v_hardlink_masters == Py_None); __pyx_t_3 = (__pyx_t_2 != 0); if (__pyx_t_3) { /* "borg/item.pyx":220 * # and we know the master, we can still compute the size. * if hardlink_masters is None: * chunks = None # <<<<<<<<<<<<<< * else: * try: */ __Pyx_INCREF(Py_None); __Pyx_XGOTREF(__pyx_cur_scope->__pyx_v_chunks); __Pyx_XDECREF_SET(__pyx_cur_scope->__pyx_v_chunks, Py_None); __Pyx_GIVEREF(Py_None); /* "borg/item.pyx":219 * # this item has no (own) chunks list, but if this is a hardlink slave * # and we know the master, we can still compute the size. 
* if hardlink_masters is None: # <<<<<<<<<<<<<< * chunks = None * else: */ goto __pyx_L25; } /* "borg/item.pyx":222 * chunks = None * else: * try: # <<<<<<<<<<<<<< * master = getattr(self, 'source') * except AttributeError: */ /*else*/ { { __Pyx_PyThreadState_declare __Pyx_PyThreadState_assign __Pyx_ExceptionSave(&__pyx_t_18, &__pyx_t_19, &__pyx_t_20); __Pyx_XGOTREF(__pyx_t_18); __Pyx_XGOTREF(__pyx_t_19); __Pyx_XGOTREF(__pyx_t_20); /*try:*/ { /* "borg/item.pyx":223 * else: * try: * master = getattr(self, 'source') # <<<<<<<<<<<<<< * except AttributeError: * # not a hardlink slave, likely a directory or special file w/o chunks */ __pyx_t_13 = __Pyx_GetAttr(__pyx_v_self, __pyx_n_u_source); if (unlikely(!__pyx_t_13)) __PYX_ERR(0, 223, __pyx_L26_error) __Pyx_GOTREF(__pyx_t_13); __pyx_v_master = __pyx_t_13; __pyx_t_13 = 0; /* "borg/item.pyx":222 * chunks = None * else: * try: # <<<<<<<<<<<<<< * master = getattr(self, 'source') * except AttributeError: */ } /* "borg/item.pyx":230 * # hardlink slave, try to fetch hardlink master's chunks list * # todo: put precomputed size into hardlink_masters' values and use it, if present * chunks, _ = hardlink_masters.get(master, (None, None)) # <<<<<<<<<<<<<< * if chunks is None: * return 0 */ /*else:*/ { __pyx_t_21 = __Pyx_PyObject_GetAttrStr(__pyx_v_hardlink_masters, __pyx_n_s_get_2); if (unlikely(!__pyx_t_21)) __PYX_ERR(0, 230, __pyx_L28_except_error) __Pyx_GOTREF(__pyx_t_21); __pyx_t_22 = NULL; __pyx_t_8 = 0; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_21))) { __pyx_t_22 = PyMethod_GET_SELF(__pyx_t_21); if (likely(__pyx_t_22)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_21); __Pyx_INCREF(__pyx_t_22); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_21, function); __pyx_t_8 = 1; } } #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_21)) { PyObject *__pyx_temp[3] = {__pyx_t_22, __pyx_v_master, __pyx_tuple__12}; __pyx_t_13 = __Pyx_PyFunction_FastCall(__pyx_t_21, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_13)) __PYX_ERR(0, 230, __pyx_L28_except_error) __Pyx_XDECREF(__pyx_t_22); __pyx_t_22 = 0; __Pyx_GOTREF(__pyx_t_13); } else #endif #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_21)) { PyObject *__pyx_temp[3] = {__pyx_t_22, __pyx_v_master, __pyx_tuple__12}; __pyx_t_13 = __Pyx_PyCFunction_FastCall(__pyx_t_21, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_13)) __PYX_ERR(0, 230, __pyx_L28_except_error) __Pyx_XDECREF(__pyx_t_22); __pyx_t_22 = 0; __Pyx_GOTREF(__pyx_t_13); } else #endif { __pyx_t_23 = PyTuple_New(2+__pyx_t_8); if (unlikely(!__pyx_t_23)) __PYX_ERR(0, 230, __pyx_L28_except_error) __Pyx_GOTREF(__pyx_t_23); if (__pyx_t_22) { __Pyx_GIVEREF(__pyx_t_22); PyTuple_SET_ITEM(__pyx_t_23, 0, __pyx_t_22); __pyx_t_22 = NULL; } __Pyx_INCREF(__pyx_v_master); __Pyx_GIVEREF(__pyx_v_master); PyTuple_SET_ITEM(__pyx_t_23, 0+__pyx_t_8, __pyx_v_master); __Pyx_INCREF(__pyx_tuple__12); __Pyx_GIVEREF(__pyx_tuple__12); PyTuple_SET_ITEM(__pyx_t_23, 1+__pyx_t_8, __pyx_tuple__12); __pyx_t_13 = __Pyx_PyObject_Call(__pyx_t_21, __pyx_t_23, NULL); if (unlikely(!__pyx_t_13)) __PYX_ERR(0, 230, __pyx_L28_except_error) __Pyx_GOTREF(__pyx_t_13); __Pyx_DECREF(__pyx_t_23); __pyx_t_23 = 0; } __Pyx_DECREF(__pyx_t_21); __pyx_t_21 = 0; if ((likely(PyTuple_CheckExact(__pyx_t_13))) || (PyList_CheckExact(__pyx_t_13))) { PyObject* sequence = __pyx_t_13; Py_ssize_t size = __Pyx_PySequence_SIZE(sequence); if (unlikely(size != 2)) { if (size > 2) __Pyx_RaiseTooManyValuesError(2); else if (size >= 0) 
__Pyx_RaiseNeedMoreValuesError(size); __PYX_ERR(0, 230, __pyx_L28_except_error) } #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS if (likely(PyTuple_CheckExact(sequence))) { __pyx_t_21 = PyTuple_GET_ITEM(sequence, 0); __pyx_t_23 = PyTuple_GET_ITEM(sequence, 1); } else { __pyx_t_21 = PyList_GET_ITEM(sequence, 0); __pyx_t_23 = PyList_GET_ITEM(sequence, 1); } __Pyx_INCREF(__pyx_t_21); __Pyx_INCREF(__pyx_t_23); #else __pyx_t_21 = PySequence_ITEM(sequence, 0); if (unlikely(!__pyx_t_21)) __PYX_ERR(0, 230, __pyx_L28_except_error) __Pyx_GOTREF(__pyx_t_21); __pyx_t_23 = PySequence_ITEM(sequence, 1); if (unlikely(!__pyx_t_23)) __PYX_ERR(0, 230, __pyx_L28_except_error) __Pyx_GOTREF(__pyx_t_23); #endif __Pyx_DECREF(__pyx_t_13); __pyx_t_13 = 0; } else { Py_ssize_t index = -1; __pyx_t_22 = PyObject_GetIter(__pyx_t_13); if (unlikely(!__pyx_t_22)) __PYX_ERR(0, 230, __pyx_L28_except_error) __Pyx_GOTREF(__pyx_t_22); __Pyx_DECREF(__pyx_t_13); __pyx_t_13 = 0; __pyx_t_24 = Py_TYPE(__pyx_t_22)->tp_iternext; index = 0; __pyx_t_21 = __pyx_t_24(__pyx_t_22); if (unlikely(!__pyx_t_21)) goto __pyx_L34_unpacking_failed; __Pyx_GOTREF(__pyx_t_21); index = 1; __pyx_t_23 = __pyx_t_24(__pyx_t_22); if (unlikely(!__pyx_t_23)) goto __pyx_L34_unpacking_failed; __Pyx_GOTREF(__pyx_t_23); if (__Pyx_IternextUnpackEndCheck(__pyx_t_24(__pyx_t_22), 2) < 0) __PYX_ERR(0, 230, __pyx_L28_except_error) __pyx_t_24 = NULL; __Pyx_DECREF(__pyx_t_22); __pyx_t_22 = 0; goto __pyx_L35_unpacking_done; __pyx_L34_unpacking_failed:; __Pyx_DECREF(__pyx_t_22); __pyx_t_22 = 0; __pyx_t_24 = NULL; if (__Pyx_IterFinish() == 0) __Pyx_RaiseNeedMoreValuesError(index); __PYX_ERR(0, 230, __pyx_L28_except_error) __pyx_L35_unpacking_done:; } __Pyx_XGOTREF(__pyx_cur_scope->__pyx_v_chunks); __Pyx_XDECREF_SET(__pyx_cur_scope->__pyx_v_chunks, __pyx_t_21); __Pyx_GIVEREF(__pyx_t_21); __pyx_t_21 = 0; __pyx_v__ = __pyx_t_23; __pyx_t_23 = 0; } __Pyx_XDECREF(__pyx_t_18); __pyx_t_18 = 0; __Pyx_XDECREF(__pyx_t_19); __pyx_t_19 = 0; __Pyx_XDECREF(__pyx_t_20); __pyx_t_20 = 0; goto __pyx_L33_try_end; __pyx_L26_error:; __Pyx_XDECREF(__pyx_t_13); __pyx_t_13 = 0; /* "borg/item.pyx":224 * try: * master = getattr(self, 'source') * except AttributeError: # <<<<<<<<<<<<<< * # not a hardlink slave, likely a directory or special file w/o chunks * chunks = None */ __pyx_t_8 = __Pyx_PyErr_ExceptionMatches(__pyx_builtin_AttributeError); if (__pyx_t_8) { __Pyx_AddTraceback("borg.item.Item.get_size", __pyx_clineno, __pyx_lineno, __pyx_filename); if (__Pyx_GetException(&__pyx_t_13, &__pyx_t_23, &__pyx_t_21) < 0) __PYX_ERR(0, 224, __pyx_L28_except_error) __Pyx_GOTREF(__pyx_t_13); __Pyx_GOTREF(__pyx_t_23); __Pyx_GOTREF(__pyx_t_21); /* "borg/item.pyx":226 * except AttributeError: * # not a hardlink slave, likely a directory or special file w/o chunks * chunks = None # <<<<<<<<<<<<<< * else: * # hardlink slave, try to fetch hardlink master's chunks list */ __Pyx_INCREF(Py_None); __Pyx_XGOTREF(__pyx_cur_scope->__pyx_v_chunks); __Pyx_XDECREF_SET(__pyx_cur_scope->__pyx_v_chunks, Py_None); __Pyx_GIVEREF(Py_None); __Pyx_XDECREF(__pyx_t_13); __pyx_t_13 = 0; __Pyx_XDECREF(__pyx_t_23); __pyx_t_23 = 0; __Pyx_XDECREF(__pyx_t_21); __pyx_t_21 = 0; goto __pyx_L27_exception_handled; } goto __pyx_L28_except_error; __pyx_L28_except_error:; /* "borg/item.pyx":222 * chunks = None * else: * try: # <<<<<<<<<<<<<< * master = getattr(self, 'source') * except AttributeError: */ __Pyx_XGIVEREF(__pyx_t_18); __Pyx_XGIVEREF(__pyx_t_19); __Pyx_XGIVEREF(__pyx_t_20); __Pyx_ExceptionReset(__pyx_t_18, __pyx_t_19, 
__pyx_t_20); goto __pyx_L17_except_error; __pyx_L27_exception_handled:; __Pyx_XGIVEREF(__pyx_t_18); __Pyx_XGIVEREF(__pyx_t_19); __Pyx_XGIVEREF(__pyx_t_20); __Pyx_ExceptionReset(__pyx_t_18, __pyx_t_19, __pyx_t_20); __pyx_L33_try_end:; } } __pyx_L25:; /* "borg/item.pyx":231 * # todo: put precomputed size into hardlink_masters' values and use it, if present * chunks, _ = hardlink_masters.get(master, (None, None)) * if chunks is None: # <<<<<<<<<<<<<< * return 0 * size = sum(getattr(ChunkListEntry(*chunk), attr) for chunk in chunks) */ __pyx_t_3 = (__pyx_cur_scope->__pyx_v_chunks == Py_None); __pyx_t_2 = (__pyx_t_3 != 0); if (__pyx_t_2) { /* "borg/item.pyx":232 * chunks, _ = hardlink_masters.get(master, (None, None)) * if chunks is None: * return 0 # <<<<<<<<<<<<<< * size = sum(getattr(ChunkListEntry(*chunk), attr) for chunk in chunks) * # if requested, memorize the precomputed (c)size for items that have an own chunks list: */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(__pyx_int_0); __pyx_r = __pyx_int_0; __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0; __Pyx_DECREF(__pyx_t_12); __pyx_t_12 = 0; goto __pyx_L18_except_return; /* "borg/item.pyx":231 * # todo: put precomputed size into hardlink_masters' values and use it, if present * chunks, _ = hardlink_masters.get(master, (None, None)) * if chunks is None: # <<<<<<<<<<<<<< * return 0 * size = sum(getattr(ChunkListEntry(*chunk), attr) for chunk in chunks) */ } __Pyx_XDECREF(__pyx_t_10); __pyx_t_10 = 0; __Pyx_XDECREF(__pyx_t_12); __pyx_t_12 = 0; __Pyx_XDECREF(__pyx_t_11); __pyx_t_11 = 0; goto __pyx_L16_exception_handled; } goto __pyx_L17_except_error; __pyx_L17_except_error:; /* "borg/item.pyx":212 * return len(self.source) * # no precomputed (c)size value available, compute it: * try: # <<<<<<<<<<<<<< * chunks = getattr(self, 'chunks') * having_chunks = True */ __Pyx_XGIVEREF(__pyx_t_15); __Pyx_XGIVEREF(__pyx_t_16); __Pyx_XGIVEREF(__pyx_t_17); __Pyx_ExceptionReset(__pyx_t_15, __pyx_t_16, __pyx_t_17); goto __pyx_L7_except_error; __pyx_L18_except_return:; __Pyx_XGIVEREF(__pyx_t_15); __Pyx_XGIVEREF(__pyx_t_16); __Pyx_XGIVEREF(__pyx_t_17); __Pyx_ExceptionReset(__pyx_t_15, __pyx_t_16, __pyx_t_17); goto __pyx_L8_except_return; __pyx_L16_exception_handled:; __Pyx_XGIVEREF(__pyx_t_15); __Pyx_XGIVEREF(__pyx_t_16); __Pyx_XGIVEREF(__pyx_t_17); __Pyx_ExceptionReset(__pyx_t_15, __pyx_t_16, __pyx_t_17); __pyx_L22_try_end:; } /* "borg/item.pyx":233 * if chunks is None: * return 0 * size = sum(getattr(ChunkListEntry(*chunk), attr) for chunk in chunks) # <<<<<<<<<<<<<< * # if requested, memorize the precomputed (c)size for items that have an own chunks list: * if memorize and having_chunks: */ __pyx_t_11 = __pyx_pf_4borg_4item_4Item_8get_size_genexpr(((PyObject*)__pyx_cur_scope)); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 233, __pyx_L7_except_error) __Pyx_GOTREF(__pyx_t_11); __pyx_t_12 = __Pyx_PyObject_CallOneArg(__pyx_builtin_sum, __pyx_t_11); if (unlikely(!__pyx_t_12)) __PYX_ERR(0, 233, __pyx_L7_except_error) __Pyx_GOTREF(__pyx_t_12); __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0; __Pyx_XDECREF_SET(__pyx_v_size, __pyx_t_12); __pyx_t_12 = 0; /* "borg/item.pyx":235 * size = sum(getattr(ChunkListEntry(*chunk), attr) for chunk in chunks) * # if requested, memorize the precomputed (c)size for items that have an own chunks list: * if memorize and having_chunks: # <<<<<<<<<<<<<< * setattr(self, attr, size) * return size */ 
__pyx_t_3 = __Pyx_PyObject_IsTrue(__pyx_v_memorize); if (unlikely(__pyx_t_3 < 0)) __PYX_ERR(0, 235, __pyx_L7_except_error) if (__pyx_t_3) { } else { __pyx_t_2 = __pyx_t_3; goto __pyx_L40_bool_binop_done; } __pyx_t_3 = (__pyx_v_having_chunks != 0); __pyx_t_2 = __pyx_t_3; __pyx_L40_bool_binop_done:; if (__pyx_t_2) { /* "borg/item.pyx":236 * # if requested, memorize the precomputed (c)size for items that have an own chunks list: * if memorize and having_chunks: * setattr(self, attr, size) # <<<<<<<<<<<<<< * return size * */ __pyx_t_12 = __pyx_cur_scope->__pyx_v_attr; __Pyx_INCREF(__pyx_t_12); __pyx_t_25 = PyObject_SetAttr(__pyx_v_self, __pyx_t_12, __pyx_v_size); if (unlikely(__pyx_t_25 == ((int)-1))) __PYX_ERR(0, 236, __pyx_L7_except_error) __Pyx_DECREF(__pyx_t_12); __pyx_t_12 = 0; /* "borg/item.pyx":235 * size = sum(getattr(ChunkListEntry(*chunk), attr) for chunk in chunks) * # if requested, memorize the precomputed (c)size for items that have an own chunks list: * if memorize and having_chunks: # <<<<<<<<<<<<<< * setattr(self, attr, size) * return size */ } __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_XDECREF(__pyx_t_9); __pyx_t_9 = 0; goto __pyx_L6_exception_handled; } goto __pyx_L7_except_error; __pyx_L7_except_error:; /* "borg/item.pyx":202 * attr = 'csize' if compressed else 'size' * assert not (compressed and memorize), 'Item does not have a csize field.' * try: # <<<<<<<<<<<<<< * if from_chunks: * raise AttributeError */ __Pyx_XGIVEREF(__pyx_t_4); __Pyx_XGIVEREF(__pyx_t_5); __Pyx_XGIVEREF(__pyx_t_6); __Pyx_ExceptionReset(__pyx_t_4, __pyx_t_5, __pyx_t_6); goto __pyx_L1_error; __pyx_L8_except_return:; __Pyx_XGIVEREF(__pyx_t_4); __Pyx_XGIVEREF(__pyx_t_5); __Pyx_XGIVEREF(__pyx_t_6); __Pyx_ExceptionReset(__pyx_t_4, __pyx_t_5, __pyx_t_6); goto __pyx_L0; __pyx_L6_exception_handled:; __Pyx_XGIVEREF(__pyx_t_4); __Pyx_XGIVEREF(__pyx_t_5); __Pyx_XGIVEREF(__pyx_t_6); __Pyx_ExceptionReset(__pyx_t_4, __pyx_t_5, __pyx_t_6); __pyx_L10_try_end:; } /* "borg/item.pyx":237 * if memorize and having_chunks: * setattr(self, attr, size) * return size # <<<<<<<<<<<<<< * * */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(__pyx_v_size); __pyx_r = __pyx_v_size; goto __pyx_L0; /* "borg/item.pyx":191 * part = PropDict._make_property('part', int) * * def get_size(self, hardlink_masters=None, memorize=False, compressed=False, from_chunks=False): # <<<<<<<<<<<<<< * """ * Determine the (uncompressed or compressed) size of this item. 
*/ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_7); __Pyx_XDECREF(__pyx_t_9); __Pyx_XDECREF(__pyx_t_10); __Pyx_XDECREF(__pyx_t_11); __Pyx_XDECREF(__pyx_t_12); __Pyx_XDECREF(__pyx_t_13); __Pyx_XDECREF(__pyx_t_21); __Pyx_XDECREF(__pyx_t_22); __Pyx_XDECREF(__pyx_t_23); __Pyx_AddTraceback("borg.item.Item.get_size", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_size); __Pyx_XDECREF(__pyx_v_master); __Pyx_XDECREF(__pyx_v__); __Pyx_DECREF(((PyObject *)__pyx_cur_scope)); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } static struct __pyx_obj_4borg_4item___pyx_scope_struct___make_property *__pyx_freelist_4borg_4item___pyx_scope_struct___make_property[8]; static int __pyx_freecount_4borg_4item___pyx_scope_struct___make_property = 0; static PyObject *__pyx_tp_new_4borg_4item___pyx_scope_struct___make_property(PyTypeObject *t, CYTHON_UNUSED PyObject *a, CYTHON_UNUSED PyObject *k) { PyObject *o; if (CYTHON_COMPILING_IN_CPYTHON && likely((__pyx_freecount_4borg_4item___pyx_scope_struct___make_property > 0) & (t->tp_basicsize == sizeof(struct __pyx_obj_4borg_4item___pyx_scope_struct___make_property)))) { o = (PyObject*)__pyx_freelist_4borg_4item___pyx_scope_struct___make_property[--__pyx_freecount_4borg_4item___pyx_scope_struct___make_property]; memset(o, 0, sizeof(struct __pyx_obj_4borg_4item___pyx_scope_struct___make_property)); (void) PyObject_INIT(o, t); PyObject_GC_Track(o); } else { o = (*t->tp_alloc)(t, 0); if (unlikely(!o)) return 0; } return o; } static void __pyx_tp_dealloc_4borg_4item___pyx_scope_struct___make_property(PyObject *o) { struct __pyx_obj_4borg_4item___pyx_scope_struct___make_property *p = (struct __pyx_obj_4borg_4item___pyx_scope_struct___make_property *)o; PyObject_GC_UnTrack(o); Py_CLEAR(p->__pyx_v_attr_error_msg); Py_CLEAR(p->__pyx_v_decode); Py_CLEAR(p->__pyx_v_encode); Py_CLEAR(p->__pyx_v_key); Py_CLEAR(p->__pyx_v_type_error_msg); Py_CLEAR(p->__pyx_v_value_type); if (CYTHON_COMPILING_IN_CPYTHON && ((__pyx_freecount_4borg_4item___pyx_scope_struct___make_property < 8) & (Py_TYPE(o)->tp_basicsize == sizeof(struct __pyx_obj_4borg_4item___pyx_scope_struct___make_property)))) { __pyx_freelist_4borg_4item___pyx_scope_struct___make_property[__pyx_freecount_4borg_4item___pyx_scope_struct___make_property++] = ((struct __pyx_obj_4borg_4item___pyx_scope_struct___make_property *)o); } else { (*Py_TYPE(o)->tp_free)(o); } } static int __pyx_tp_traverse_4borg_4item___pyx_scope_struct___make_property(PyObject *o, visitproc v, void *a) { int e; struct __pyx_obj_4borg_4item___pyx_scope_struct___make_property *p = (struct __pyx_obj_4borg_4item___pyx_scope_struct___make_property *)o; if (p->__pyx_v_decode) { e = (*v)(p->__pyx_v_decode, a); if (e) return e; } if (p->__pyx_v_encode) { e = (*v)(p->__pyx_v_encode, a); if (e) return e; } if (p->__pyx_v_key) { e = (*v)(p->__pyx_v_key, a); if (e) return e; } if (p->__pyx_v_value_type) { e = (*v)(p->__pyx_v_value_type, a); if (e) return e; } return 0; } static int __pyx_tp_clear_4borg_4item___pyx_scope_struct___make_property(PyObject *o) { PyObject* tmp; struct __pyx_obj_4borg_4item___pyx_scope_struct___make_property *p = (struct __pyx_obj_4borg_4item___pyx_scope_struct___make_property *)o; tmp = ((PyObject*)p->__pyx_v_decode); p->__pyx_v_decode = Py_None; Py_INCREF(Py_None); Py_XDECREF(tmp); tmp = ((PyObject*)p->__pyx_v_encode); p->__pyx_v_encode = Py_None; Py_INCREF(Py_None); Py_XDECREF(tmp); tmp = ((PyObject*)p->__pyx_v_key); 
p->__pyx_v_key = Py_None; Py_INCREF(Py_None); Py_XDECREF(tmp); tmp = ((PyObject*)p->__pyx_v_value_type); p->__pyx_v_value_type = Py_None; Py_INCREF(Py_None); Py_XDECREF(tmp); return 0; } static PyTypeObject __pyx_type_4borg_4item___pyx_scope_struct___make_property = { PyVarObject_HEAD_INIT(0, 0) "borg.item.__pyx_scope_struct___make_property", /*tp_name*/ sizeof(struct __pyx_obj_4borg_4item___pyx_scope_struct___make_property), /*tp_basicsize*/ 0, /*tp_itemsize*/ __pyx_tp_dealloc_4borg_4item___pyx_scope_struct___make_property, /*tp_dealloc*/ #if PY_VERSION_HEX < 0x030800b4 0, /*tp_print*/ #endif #if PY_VERSION_HEX >= 0x030800b4 0, /*tp_vectorcall_offset*/ #endif 0, /*tp_getattr*/ 0, /*tp_setattr*/ #if PY_MAJOR_VERSION < 3 0, /*tp_compare*/ #endif #if PY_MAJOR_VERSION >= 3 0, /*tp_as_async*/ #endif 0, /*tp_repr*/ 0, /*tp_as_number*/ 0, /*tp_as_sequence*/ 0, /*tp_as_mapping*/ 0, /*tp_hash*/ 0, /*tp_call*/ 0, /*tp_str*/ 0, /*tp_getattro*/ 0, /*tp_setattro*/ 0, /*tp_as_buffer*/ Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_HAVE_GC, /*tp_flags*/ 0, /*tp_doc*/ __pyx_tp_traverse_4borg_4item___pyx_scope_struct___make_property, /*tp_traverse*/ __pyx_tp_clear_4borg_4item___pyx_scope_struct___make_property, /*tp_clear*/ 0, /*tp_richcompare*/ 0, /*tp_weaklistoffset*/ 0, /*tp_iter*/ 0, /*tp_iternext*/ 0, /*tp_methods*/ 0, /*tp_members*/ 0, /*tp_getset*/ 0, /*tp_base*/ 0, /*tp_dict*/ 0, /*tp_descr_get*/ 0, /*tp_descr_set*/ 0, /*tp_dictoffset*/ 0, /*tp_init*/ 0, /*tp_alloc*/ __pyx_tp_new_4borg_4item___pyx_scope_struct___make_property, /*tp_new*/ 0, /*tp_free*/ 0, /*tp_is_gc*/ 0, /*tp_bases*/ 0, /*tp_mro*/ 0, /*tp_cache*/ 0, /*tp_subclasses*/ 0, /*tp_weaklist*/ 0, /*tp_del*/ 0, /*tp_version_tag*/ #if PY_VERSION_HEX >= 0x030400a1 0, /*tp_finalize*/ #endif #if PY_VERSION_HEX >= 0x030800b1 0, /*tp_vectorcall*/ #endif #if PY_VERSION_HEX >= 0x030800b4 && PY_VERSION_HEX < 0x03090000 0, /*tp_print*/ #endif }; static struct __pyx_obj_4borg_4item___pyx_scope_struct_1_get_size *__pyx_freelist_4borg_4item___pyx_scope_struct_1_get_size[8]; static int __pyx_freecount_4borg_4item___pyx_scope_struct_1_get_size = 0; static PyObject *__pyx_tp_new_4borg_4item___pyx_scope_struct_1_get_size(PyTypeObject *t, CYTHON_UNUSED PyObject *a, CYTHON_UNUSED PyObject *k) { PyObject *o; if (CYTHON_COMPILING_IN_CPYTHON && likely((__pyx_freecount_4borg_4item___pyx_scope_struct_1_get_size > 0) & (t->tp_basicsize == sizeof(struct __pyx_obj_4borg_4item___pyx_scope_struct_1_get_size)))) { o = (PyObject*)__pyx_freelist_4borg_4item___pyx_scope_struct_1_get_size[--__pyx_freecount_4borg_4item___pyx_scope_struct_1_get_size]; memset(o, 0, sizeof(struct __pyx_obj_4borg_4item___pyx_scope_struct_1_get_size)); (void) PyObject_INIT(o, t); PyObject_GC_Track(o); } else { o = (*t->tp_alloc)(t, 0); if (unlikely(!o)) return 0; } return o; } static void __pyx_tp_dealloc_4borg_4item___pyx_scope_struct_1_get_size(PyObject *o) { struct __pyx_obj_4borg_4item___pyx_scope_struct_1_get_size *p = (struct __pyx_obj_4borg_4item___pyx_scope_struct_1_get_size *)o; PyObject_GC_UnTrack(o); Py_CLEAR(p->__pyx_v_attr); Py_CLEAR(p->__pyx_v_chunks); if (CYTHON_COMPILING_IN_CPYTHON && ((__pyx_freecount_4borg_4item___pyx_scope_struct_1_get_size < 8) & (Py_TYPE(o)->tp_basicsize == sizeof(struct __pyx_obj_4borg_4item___pyx_scope_struct_1_get_size)))) { __pyx_freelist_4borg_4item___pyx_scope_struct_1_get_size[__pyx_freecount_4borg_4item___pyx_scope_struct_1_get_size++] = ((struct 
__pyx_obj_4borg_4item___pyx_scope_struct_1_get_size *)o); } else { (*Py_TYPE(o)->tp_free)(o); } } static int __pyx_tp_traverse_4borg_4item___pyx_scope_struct_1_get_size(PyObject *o, visitproc v, void *a) { int e; struct __pyx_obj_4borg_4item___pyx_scope_struct_1_get_size *p = (struct __pyx_obj_4borg_4item___pyx_scope_struct_1_get_size *)o; if (p->__pyx_v_chunks) { e = (*v)(p->__pyx_v_chunks, a); if (e) return e; } return 0; } static int __pyx_tp_clear_4borg_4item___pyx_scope_struct_1_get_size(PyObject *o) { PyObject* tmp; struct __pyx_obj_4borg_4item___pyx_scope_struct_1_get_size *p = (struct __pyx_obj_4borg_4item___pyx_scope_struct_1_get_size *)o; tmp = ((PyObject*)p->__pyx_v_chunks); p->__pyx_v_chunks = Py_None; Py_INCREF(Py_None); Py_XDECREF(tmp); return 0; } static PyTypeObject __pyx_type_4borg_4item___pyx_scope_struct_1_get_size = { PyVarObject_HEAD_INIT(0, 0) "borg.item.__pyx_scope_struct_1_get_size", /*tp_name*/ sizeof(struct __pyx_obj_4borg_4item___pyx_scope_struct_1_get_size), /*tp_basicsize*/ 0, /*tp_itemsize*/ __pyx_tp_dealloc_4borg_4item___pyx_scope_struct_1_get_size, /*tp_dealloc*/ #if PY_VERSION_HEX < 0x030800b4 0, /*tp_print*/ #endif #if PY_VERSION_HEX >= 0x030800b4 0, /*tp_vectorcall_offset*/ #endif 0, /*tp_getattr*/ 0, /*tp_setattr*/ #if PY_MAJOR_VERSION < 3 0, /*tp_compare*/ #endif #if PY_MAJOR_VERSION >= 3 0, /*tp_as_async*/ #endif 0, /*tp_repr*/ 0, /*tp_as_number*/ 0, /*tp_as_sequence*/ 0, /*tp_as_mapping*/ 0, /*tp_hash*/ 0, /*tp_call*/ 0, /*tp_str*/ 0, /*tp_getattro*/ 0, /*tp_setattro*/ 0, /*tp_as_buffer*/ Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_HAVE_GC, /*tp_flags*/ 0, /*tp_doc*/ __pyx_tp_traverse_4borg_4item___pyx_scope_struct_1_get_size, /*tp_traverse*/ __pyx_tp_clear_4borg_4item___pyx_scope_struct_1_get_size, /*tp_clear*/ 0, /*tp_richcompare*/ 0, /*tp_weaklistoffset*/ 0, /*tp_iter*/ 0, /*tp_iternext*/ 0, /*tp_methods*/ 0, /*tp_members*/ 0, /*tp_getset*/ 0, /*tp_base*/ 0, /*tp_dict*/ 0, /*tp_descr_get*/ 0, /*tp_descr_set*/ 0, /*tp_dictoffset*/ 0, /*tp_init*/ 0, /*tp_alloc*/ __pyx_tp_new_4borg_4item___pyx_scope_struct_1_get_size, /*tp_new*/ 0, /*tp_free*/ 0, /*tp_is_gc*/ 0, /*tp_bases*/ 0, /*tp_mro*/ 0, /*tp_cache*/ 0, /*tp_subclasses*/ 0, /*tp_weaklist*/ 0, /*tp_del*/ 0, /*tp_version_tag*/ #if PY_VERSION_HEX >= 0x030400a1 0, /*tp_finalize*/ #endif #if PY_VERSION_HEX >= 0x030800b1 0, /*tp_vectorcall*/ #endif #if PY_VERSION_HEX >= 0x030800b4 && PY_VERSION_HEX < 0x03090000 0, /*tp_print*/ #endif }; static struct __pyx_obj_4borg_4item___pyx_scope_struct_2_genexpr *__pyx_freelist_4borg_4item___pyx_scope_struct_2_genexpr[8]; static int __pyx_freecount_4borg_4item___pyx_scope_struct_2_genexpr = 0; static PyObject *__pyx_tp_new_4borg_4item___pyx_scope_struct_2_genexpr(PyTypeObject *t, CYTHON_UNUSED PyObject *a, CYTHON_UNUSED PyObject *k) { PyObject *o; if (CYTHON_COMPILING_IN_CPYTHON && likely((__pyx_freecount_4borg_4item___pyx_scope_struct_2_genexpr > 0) & (t->tp_basicsize == sizeof(struct __pyx_obj_4borg_4item___pyx_scope_struct_2_genexpr)))) { o = (PyObject*)__pyx_freelist_4borg_4item___pyx_scope_struct_2_genexpr[--__pyx_freecount_4borg_4item___pyx_scope_struct_2_genexpr]; memset(o, 0, sizeof(struct __pyx_obj_4borg_4item___pyx_scope_struct_2_genexpr)); (void) PyObject_INIT(o, t); PyObject_GC_Track(o); } else { o = (*t->tp_alloc)(t, 0); if (unlikely(!o)) return 0; } return o; } static void __pyx_tp_dealloc_4borg_4item___pyx_scope_struct_2_genexpr(PyObject *o) { struct 
__pyx_obj_4borg_4item___pyx_scope_struct_2_genexpr *p = (struct __pyx_obj_4borg_4item___pyx_scope_struct_2_genexpr *)o; PyObject_GC_UnTrack(o); Py_CLEAR(p->__pyx_outer_scope); Py_CLEAR(p->__pyx_v_chunk); Py_CLEAR(p->__pyx_t_0); if (CYTHON_COMPILING_IN_CPYTHON && ((__pyx_freecount_4borg_4item___pyx_scope_struct_2_genexpr < 8) & (Py_TYPE(o)->tp_basicsize == sizeof(struct __pyx_obj_4borg_4item___pyx_scope_struct_2_genexpr)))) { __pyx_freelist_4borg_4item___pyx_scope_struct_2_genexpr[__pyx_freecount_4borg_4item___pyx_scope_struct_2_genexpr++] = ((struct __pyx_obj_4borg_4item___pyx_scope_struct_2_genexpr *)o); } else { (*Py_TYPE(o)->tp_free)(o); } } static int __pyx_tp_traverse_4borg_4item___pyx_scope_struct_2_genexpr(PyObject *o, visitproc v, void *a) { int e; struct __pyx_obj_4borg_4item___pyx_scope_struct_2_genexpr *p = (struct __pyx_obj_4borg_4item___pyx_scope_struct_2_genexpr *)o; if (p->__pyx_outer_scope) { e = (*v)(((PyObject *)p->__pyx_outer_scope), a); if (e) return e; } if (p->__pyx_v_chunk) { e = (*v)(p->__pyx_v_chunk, a); if (e) return e; } if (p->__pyx_t_0) { e = (*v)(p->__pyx_t_0, a); if (e) return e; } return 0; } static PyTypeObject __pyx_type_4borg_4item___pyx_scope_struct_2_genexpr = { PyVarObject_HEAD_INIT(0, 0) "borg.item.__pyx_scope_struct_2_genexpr", /*tp_name*/ sizeof(struct __pyx_obj_4borg_4item___pyx_scope_struct_2_genexpr), /*tp_basicsize*/ 0, /*tp_itemsize*/ __pyx_tp_dealloc_4borg_4item___pyx_scope_struct_2_genexpr, /*tp_dealloc*/ #if PY_VERSION_HEX < 0x030800b4 0, /*tp_print*/ #endif #if PY_VERSION_HEX >= 0x030800b4 0, /*tp_vectorcall_offset*/ #endif 0, /*tp_getattr*/ 0, /*tp_setattr*/ #if PY_MAJOR_VERSION < 3 0, /*tp_compare*/ #endif #if PY_MAJOR_VERSION >= 3 0, /*tp_as_async*/ #endif 0, /*tp_repr*/ 0, /*tp_as_number*/ 0, /*tp_as_sequence*/ 0, /*tp_as_mapping*/ 0, /*tp_hash*/ 0, /*tp_call*/ 0, /*tp_str*/ 0, /*tp_getattro*/ 0, /*tp_setattro*/ 0, /*tp_as_buffer*/ Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_HAVE_GC, /*tp_flags*/ 0, /*tp_doc*/ __pyx_tp_traverse_4borg_4item___pyx_scope_struct_2_genexpr, /*tp_traverse*/ 0, /*tp_clear*/ 0, /*tp_richcompare*/ 0, /*tp_weaklistoffset*/ 0, /*tp_iter*/ 0, /*tp_iternext*/ 0, /*tp_methods*/ 0, /*tp_members*/ 0, /*tp_getset*/ 0, /*tp_base*/ 0, /*tp_dict*/ 0, /*tp_descr_get*/ 0, /*tp_descr_set*/ 0, /*tp_dictoffset*/ 0, /*tp_init*/ 0, /*tp_alloc*/ __pyx_tp_new_4borg_4item___pyx_scope_struct_2_genexpr, /*tp_new*/ 0, /*tp_free*/ 0, /*tp_is_gc*/ 0, /*tp_bases*/ 0, /*tp_mro*/ 0, /*tp_cache*/ 0, /*tp_subclasses*/ 0, /*tp_weaklist*/ 0, /*tp_del*/ 0, /*tp_version_tag*/ #if PY_VERSION_HEX >= 0x030400a1 0, /*tp_finalize*/ #endif #if PY_VERSION_HEX >= 0x030800b1 0, /*tp_vectorcall*/ #endif #if PY_VERSION_HEX >= 0x030800b4 && PY_VERSION_HEX < 0x03090000 0, /*tp_print*/ #endif }; static PyMethodDef __pyx_methods[] = { {0, 0, 0, 0} }; #if PY_MAJOR_VERSION >= 3 #if CYTHON_PEP489_MULTI_PHASE_INIT static PyObject* __pyx_pymod_create(PyObject *spec, PyModuleDef *def); /*proto*/ static int __pyx_pymod_exec_item(PyObject* module); /*proto*/ static PyModuleDef_Slot __pyx_moduledef_slots[] = { {Py_mod_create, (void*)__pyx_pymod_create}, {Py_mod_exec, (void*)__pyx_pymod_exec_item}, {0, NULL} }; #endif static struct PyModuleDef __pyx_moduledef = { PyModuleDef_HEAD_INIT, "item", 0, /* m_doc */ #if CYTHON_PEP489_MULTI_PHASE_INIT 0, /* m_size */ #else -1, /* m_size */ #endif __pyx_methods /* m_methods */, #if CYTHON_PEP489_MULTI_PHASE_INIT __pyx_moduledef_slots, /* m_slots */ #else NULL, /* 
m_reload */ #endif NULL, /* m_traverse */ NULL, /* m_clear */ NULL /* m_free */ }; #endif #ifndef CYTHON_SMALL_CODE #if defined(__clang__) #define CYTHON_SMALL_CODE #elif defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 3)) #define CYTHON_SMALL_CODE __attribute__((cold)) #else #define CYTHON_SMALL_CODE #endif #endif static __Pyx_StringTabEntry __pyx_string_tab[] = { {&__pyx_kp_u_1_1_03, __pyx_k_1_1_03, sizeof(__pyx_k_1_1_03), 0, 1, 0, 0}, {&__pyx_n_s_API_VERSION, __pyx_k_API_VERSION, sizeof(__pyx_k_API_VERSION), 0, 0, 1, 1}, {&__pyx_n_s_ARCHIVE_KEYS, __pyx_k_ARCHIVE_KEYS, sizeof(__pyx_k_ARCHIVE_KEYS), 0, 0, 1, 1}, {&__pyx_n_s_ArchiveItem, __pyx_k_ArchiveItem, sizeof(__pyx_k_ArchiveItem), 0, 0, 1, 1}, {&__pyx_kp_s_ArchiveItem_abstraction_that_de, __pyx_k_ArchiveItem_abstraction_that_de, sizeof(__pyx_k_ArchiveItem_abstraction_that_de), 0, 0, 1, 0}, {&__pyx_n_s_AttributeError, __pyx_k_AttributeError, sizeof(__pyx_k_AttributeError), 0, 0, 1, 1}, {&__pyx_n_s_ChunkListEntry, __pyx_k_ChunkListEntry, sizeof(__pyx_k_ChunkListEntry), 0, 0, 1, 1}, {&__pyx_n_u_ChunkListEntry, __pyx_k_ChunkListEntry, sizeof(__pyx_k_ChunkListEntry), 0, 1, 0, 1}, {&__pyx_n_s_EncryptedKey, __pyx_k_EncryptedKey, sizeof(__pyx_k_EncryptedKey), 0, 0, 1, 1}, {&__pyx_kp_s_EncryptedKey_abstraction_that_d, __pyx_k_EncryptedKey_abstraction_that_d, sizeof(__pyx_k_EncryptedKey_abstraction_that_d), 0, 0, 1, 0}, {&__pyx_n_s_ITEM_KEYS, __pyx_k_ITEM_KEYS, sizeof(__pyx_k_ITEM_KEYS), 0, 0, 1, 1}, {&__pyx_n_s_Item, __pyx_k_Item, sizeof(__pyx_k_Item), 0, 0, 1, 1}, {&__pyx_kp_s_Item_abstraction_that_deals_wit, __pyx_k_Item_abstraction_that_deals_wit, sizeof(__pyx_k_Item_abstraction_that_deals_wit), 0, 0, 1, 0}, {&__pyx_kp_u_Item_does_not_have_a_csize_field, __pyx_k_Item_does_not_have_a_csize_field, sizeof(__pyx_k_Item_does_not_have_a_csize_field), 0, 1, 0, 0}, {&__pyx_n_s_Item_get_size, __pyx_k_Item_get_size, sizeof(__pyx_k_Item_get_size), 0, 0, 1, 1}, {&__pyx_n_s_Item_get_size_locals_genexpr, __pyx_k_Item_get_size_locals_genexpr, sizeof(__pyx_k_Item_get_size_locals_genexpr), 0, 0, 1, 1}, {&__pyx_n_s_Key, __pyx_k_Key, sizeof(__pyx_k_Key), 0, 0, 1, 1}, {&__pyx_n_s_KeyError, __pyx_k_KeyError, sizeof(__pyx_k_KeyError), 0, 0, 1, 1}, {&__pyx_kp_s_Key_abstraction_that_deals_with, __pyx_k_Key_abstraction_that_deals_with, sizeof(__pyx_k_Key_abstraction_that_deals_with), 0, 0, 1, 0}, {&__pyx_kp_s_Manage_a_dictionary_via_propert, __pyx_k_Manage_a_dictionary_via_propert, sizeof(__pyx_k_Manage_a_dictionary_via_propert), 0, 0, 1, 0}, {&__pyx_n_s_ManifestItem, __pyx_k_ManifestItem, sizeof(__pyx_k_ManifestItem), 0, 0, 1, 1}, {&__pyx_kp_s_ManifestItem_abstraction_that_d, __pyx_k_ManifestItem_abstraction_that_d, sizeof(__pyx_k_ManifestItem_abstraction_that_d), 0, 0, 1, 0}, {&__pyx_n_s_PropDict, __pyx_k_PropDict, sizeof(__pyx_k_PropDict), 0, 0, 1, 1}, {&__pyx_n_s_PropDict___contains, __pyx_k_PropDict___contains, sizeof(__pyx_k_PropDict___contains), 0, 0, 1, 1}, {&__pyx_n_s_PropDict___eq, __pyx_k_PropDict___eq, sizeof(__pyx_k_PropDict___eq), 0, 0, 1, 1}, {&__pyx_n_s_PropDict___init, __pyx_k_PropDict___init, sizeof(__pyx_k_PropDict___init), 0, 0, 1, 1}, {&__pyx_n_s_PropDict___repr, __pyx_k_PropDict___repr, sizeof(__pyx_k_PropDict___repr), 0, 0, 1, 1}, {&__pyx_n_s_PropDict__check_key, __pyx_k_PropDict__check_key, sizeof(__pyx_k_PropDict__check_key), 0, 0, 1, 1}, {&__pyx_n_s_PropDict__make_property, __pyx_k_PropDict__make_property, sizeof(__pyx_k_PropDict__make_property), 0, 0, 1, 1}, {&__pyx_n_s_PropDict__make_property_locals, 
__pyx_k_PropDict__make_property_locals, sizeof(__pyx_k_PropDict__make_property_locals), 0, 0, 1, 1}, {&__pyx_n_s_PropDict__make_property_locals_2, __pyx_k_PropDict__make_property_locals_2, sizeof(__pyx_k_PropDict__make_property_locals_2), 0, 0, 1, 1}, {&__pyx_n_s_PropDict__make_property_locals_3, __pyx_k_PropDict__make_property_locals_3, sizeof(__pyx_k_PropDict__make_property_locals_3), 0, 0, 1, 1}, {&__pyx_n_s_PropDict_as_dict, __pyx_k_PropDict_as_dict, sizeof(__pyx_k_PropDict_as_dict), 0, 0, 1, 1}, {&__pyx_n_s_PropDict_get, __pyx_k_PropDict_get, sizeof(__pyx_k_PropDict_get), 0, 0, 1, 1}, {&__pyx_n_s_PropDict_update, __pyx_k_PropDict_update, sizeof(__pyx_k_PropDict_update), 0, 0, 1, 1}, {&__pyx_n_s_PropDict_update_internal, __pyx_k_PropDict_update_internal, sizeof(__pyx_k_PropDict_update_internal), 0, 0, 1, 1}, {&__pyx_n_s_S_ISLNK, __pyx_k_S_ISLNK, sizeof(__pyx_k_S_ISLNK), 0, 0, 1, 1}, {&__pyx_n_s_StableDict, __pyx_k_StableDict, sizeof(__pyx_k_StableDict), 0, 0, 1, 1}, {&__pyx_n_s_TypeError, __pyx_k_TypeError, sizeof(__pyx_k_TypeError), 0, 0, 1, 1}, {&__pyx_n_s_VALID_KEYS, __pyx_k_VALID_KEYS, sizeof(__pyx_k_VALID_KEYS), 0, 0, 1, 1}, {&__pyx_n_s_ValueError, __pyx_k_ValueError, sizeof(__pyx_k_ValueError), 0, 0, 1, 1}, {&__pyx_kp_u__3, __pyx_k__3, sizeof(__pyx_k__3), 0, 1, 0, 0}, {&__pyx_n_s__38, __pyx_k__38, sizeof(__pyx_k__38), 0, 0, 1, 1}, {&__pyx_kp_u__5, __pyx_k__5, sizeof(__pyx_k__5), 0, 1, 0, 0}, {&__pyx_n_s_acl_access, __pyx_k_acl_access, sizeof(__pyx_k_acl_access), 0, 0, 1, 1}, {&__pyx_n_u_acl_access, __pyx_k_acl_access, sizeof(__pyx_k_acl_access), 0, 1, 0, 1}, {&__pyx_n_s_acl_default, __pyx_k_acl_default, sizeof(__pyx_k_acl_default), 0, 0, 1, 1}, {&__pyx_n_u_acl_default, __pyx_k_acl_default, sizeof(__pyx_k_acl_default), 0, 1, 0, 1}, {&__pyx_n_s_acl_extended, __pyx_k_acl_extended, sizeof(__pyx_k_acl_extended), 0, 0, 1, 1}, {&__pyx_n_u_acl_extended, __pyx_k_acl_extended, sizeof(__pyx_k_acl_extended), 0, 1, 0, 1}, {&__pyx_n_s_acl_nfs4, __pyx_k_acl_nfs4, sizeof(__pyx_k_acl_nfs4), 0, 0, 1, 1}, {&__pyx_n_u_acl_nfs4, __pyx_k_acl_nfs4, sizeof(__pyx_k_acl_nfs4), 0, 1, 0, 1}, {&__pyx_n_s_algorithm, __pyx_k_algorithm, sizeof(__pyx_k_algorithm), 0, 0, 1, 1}, {&__pyx_n_u_algorithm, __pyx_k_algorithm, sizeof(__pyx_k_algorithm), 0, 1, 0, 1}, {&__pyx_n_s_archives, __pyx_k_archives, sizeof(__pyx_k_archives), 0, 0, 1, 1}, {&__pyx_n_u_archives, __pyx_k_archives, sizeof(__pyx_k_archives), 0, 1, 0, 1}, {&__pyx_n_s_args, __pyx_k_args, sizeof(__pyx_k_args), 0, 0, 1, 1}, {&__pyx_n_s_as_dict, __pyx_k_as_dict, sizeof(__pyx_k_as_dict), 0, 0, 1, 1}, {&__pyx_n_s_atime, __pyx_k_atime, sizeof(__pyx_k_atime), 0, 0, 1, 1}, {&__pyx_n_u_atime, __pyx_k_atime, sizeof(__pyx_k_atime), 0, 1, 0, 1}, {&__pyx_n_s_attr, __pyx_k_attr, sizeof(__pyx_k_attr), 0, 0, 1, 1}, {&__pyx_n_s_attr_error_msg, __pyx_k_attr_error_msg, sizeof(__pyx_k_attr_error_msg), 0, 0, 1, 1}, {&__pyx_kp_u_attribute_s_not_found, __pyx_k_attribute_s_not_found, sizeof(__pyx_k_attribute_s_not_found), 0, 1, 0, 0}, {&__pyx_n_u_bigint, __pyx_k_bigint, sizeof(__pyx_k_bigint), 0, 1, 0, 1}, {&__pyx_n_s_bigint_to_int, __pyx_k_bigint_to_int, sizeof(__pyx_k_bigint_to_int), 0, 0, 1, 1}, {&__pyx_n_s_birthtime, __pyx_k_birthtime, sizeof(__pyx_k_birthtime), 0, 0, 1, 1}, {&__pyx_n_u_birthtime, __pyx_k_birthtime, sizeof(__pyx_k_birthtime), 0, 1, 0, 1}, {&__pyx_n_s_borg_item, __pyx_k_borg_item, sizeof(__pyx_k_borg_item), 0, 0, 1, 1}, {&__pyx_n_s_bsdflags, __pyx_k_bsdflags, sizeof(__pyx_k_bsdflags), 0, 0, 1, 1}, {&__pyx_n_u_bsdflags, __pyx_k_bsdflags, 
sizeof(__pyx_k_bsdflags), 0, 1, 0, 1}, {&__pyx_n_s_check_key, __pyx_k_check_key, sizeof(__pyx_k_check_key), 0, 0, 1, 1}, {&__pyx_n_s_chunk_seed, __pyx_k_chunk_seed, sizeof(__pyx_k_chunk_seed), 0, 0, 1, 1}, {&__pyx_n_u_chunk_seed, __pyx_k_chunk_seed, sizeof(__pyx_k_chunk_seed), 0, 1, 0, 1}, {&__pyx_n_s_chunker_params, __pyx_k_chunker_params, sizeof(__pyx_k_chunker_params), 0, 0, 1, 1}, {&__pyx_n_u_chunker_params, __pyx_k_chunker_params, sizeof(__pyx_k_chunker_params), 0, 1, 0, 1}, {&__pyx_n_s_chunks, __pyx_k_chunks, sizeof(__pyx_k_chunks), 0, 0, 1, 1}, {&__pyx_n_u_chunks, __pyx_k_chunks, sizeof(__pyx_k_chunks), 0, 1, 0, 1}, {&__pyx_n_s_chunks_healthy, __pyx_k_chunks_healthy, sizeof(__pyx_k_chunks_healthy), 0, 0, 1, 1}, {&__pyx_n_u_chunks_healthy, __pyx_k_chunks_healthy, sizeof(__pyx_k_chunks_healthy), 0, 1, 0, 1}, {&__pyx_n_s_class, __pyx_k_class, sizeof(__pyx_k_class), 0, 0, 1, 1}, {&__pyx_n_s_cline_in_traceback, __pyx_k_cline_in_traceback, sizeof(__pyx_k_cline_in_traceback), 0, 0, 1, 1}, {&__pyx_n_s_close, __pyx_k_close, sizeof(__pyx_k_close), 0, 0, 1, 1}, {&__pyx_n_s_cmdline, __pyx_k_cmdline, sizeof(__pyx_k_cmdline), 0, 0, 1, 1}, {&__pyx_n_u_cmdline, __pyx_k_cmdline, sizeof(__pyx_k_cmdline), 0, 1, 0, 1}, {&__pyx_n_s_collections, __pyx_k_collections, sizeof(__pyx_k_collections), 0, 0, 1, 1}, {&__pyx_n_s_comment, __pyx_k_comment, sizeof(__pyx_k_comment), 0, 0, 1, 1}, {&__pyx_n_u_comment, __pyx_k_comment, sizeof(__pyx_k_comment), 0, 1, 0, 1}, {&__pyx_n_s_compressed, __pyx_k_compressed, sizeof(__pyx_k_compressed), 0, 0, 1, 1}, {&__pyx_n_s_config, __pyx_k_config, sizeof(__pyx_k_config), 0, 0, 1, 1}, {&__pyx_n_u_config, __pyx_k_config, sizeof(__pyx_k_config), 0, 1, 0, 1}, {&__pyx_n_s_constants, __pyx_k_constants, sizeof(__pyx_k_constants), 0, 0, 1, 1}, {&__pyx_n_s_contains, __pyx_k_contains, sizeof(__pyx_k_contains), 0, 0, 1, 1}, {&__pyx_n_u_csize, __pyx_k_csize, sizeof(__pyx_k_csize), 0, 1, 0, 1}, {&__pyx_n_s_ctime, __pyx_k_ctime, sizeof(__pyx_k_ctime), 0, 0, 1, 1}, {&__pyx_n_u_ctime, __pyx_k_ctime, sizeof(__pyx_k_ctime), 0, 1, 0, 1}, {&__pyx_n_s_d, __pyx_k_d, sizeof(__pyx_k_d), 0, 0, 1, 1}, {&__pyx_n_s_data, __pyx_k_data, sizeof(__pyx_k_data), 0, 0, 1, 1}, {&__pyx_n_u_data, __pyx_k_data, sizeof(__pyx_k_data), 0, 1, 0, 1}, {&__pyx_n_s_data_dict, __pyx_k_data_dict, sizeof(__pyx_k_data_dict), 0, 0, 1, 1}, {&__pyx_kp_u_data_dict_must_be_a_dict, __pyx_k_data_dict_must_be_a_dict, sizeof(__pyx_k_data_dict_must_be_a_dict), 0, 1, 0, 0}, {&__pyx_n_s_decode, __pyx_k_decode, sizeof(__pyx_k_decode), 0, 0, 1, 1}, {&__pyx_n_s_default, __pyx_k_default, sizeof(__pyx_k_default), 0, 0, 1, 1}, {&__pyx_n_s_del, __pyx_k_del, sizeof(__pyx_k_del), 0, 0, 1, 1}, {&__pyx_n_s_deleted, __pyx_k_deleted, sizeof(__pyx_k_deleted), 0, 0, 1, 1}, {&__pyx_n_u_deleted, __pyx_k_deleted, sizeof(__pyx_k_deleted), 0, 1, 0, 1}, {&__pyx_n_s_dict, __pyx_k_dict, sizeof(__pyx_k_dict), 0, 0, 1, 1}, {&__pyx_n_u_dict, __pyx_k_dict, sizeof(__pyx_k_dict), 0, 1, 0, 1}, {&__pyx_n_s_doc, __pyx_k_doc, sizeof(__pyx_k_doc), 0, 0, 1, 1}, {&__pyx_n_s_doc_2, __pyx_k_doc_2, sizeof(__pyx_k_doc_2), 0, 0, 1, 1}, {&__pyx_n_s_enc_hmac_key, __pyx_k_enc_hmac_key, sizeof(__pyx_k_enc_hmac_key), 0, 0, 1, 1}, {&__pyx_n_u_enc_hmac_key, __pyx_k_enc_hmac_key, sizeof(__pyx_k_enc_hmac_key), 0, 1, 0, 1}, {&__pyx_n_s_enc_key, __pyx_k_enc_key, sizeof(__pyx_k_enc_key), 0, 0, 1, 1}, {&__pyx_n_u_enc_key, __pyx_k_enc_key, sizeof(__pyx_k_enc_key), 0, 1, 0, 1}, {&__pyx_n_s_encode, __pyx_k_encode, sizeof(__pyx_k_encode), 0, 0, 1, 1}, {&__pyx_n_s_eq, __pyx_k_eq, 
sizeof(__pyx_k_eq), 0, 0, 1, 1}, {&__pyx_n_s_from_chunks, __pyx_k_from_chunks, sizeof(__pyx_k_from_chunks), 0, 0, 1, 1}, {&__pyx_n_s_genexpr, __pyx_k_genexpr, sizeof(__pyx_k_genexpr), 0, 0, 1, 1}, {&__pyx_n_s_get, __pyx_k_get, sizeof(__pyx_k_get), 0, 0, 1, 1}, {&__pyx_n_s_get_2, __pyx_k_get_2, sizeof(__pyx_k_get_2), 0, 0, 1, 1}, {&__pyx_n_s_get_size, __pyx_k_get_size, sizeof(__pyx_k_get_size), 0, 0, 1, 1}, {&__pyx_n_s_gid, __pyx_k_gid, sizeof(__pyx_k_gid), 0, 0, 1, 1}, {&__pyx_n_u_gid, __pyx_k_gid, sizeof(__pyx_k_gid), 0, 1, 0, 1}, {&__pyx_n_s_group, __pyx_k_group, sizeof(__pyx_k_group), 0, 0, 1, 1}, {&__pyx_n_u_group, __pyx_k_group, sizeof(__pyx_k_group), 0, 1, 0, 1}, {&__pyx_n_s_hardlink_master, __pyx_k_hardlink_master, sizeof(__pyx_k_hardlink_master), 0, 0, 1, 1}, {&__pyx_n_u_hardlink_master, __pyx_k_hardlink_master, sizeof(__pyx_k_hardlink_master), 0, 1, 0, 1}, {&__pyx_n_s_hardlink_masters, __pyx_k_hardlink_masters, sizeof(__pyx_k_hardlink_masters), 0, 0, 1, 1}, {&__pyx_n_s_hash, __pyx_k_hash, sizeof(__pyx_k_hash), 0, 0, 1, 1}, {&__pyx_n_u_hash, __pyx_k_hash, sizeof(__pyx_k_hash), 0, 1, 0, 1}, {&__pyx_n_s_having_chunks, __pyx_k_having_chunks, sizeof(__pyx_k_having_chunks), 0, 0, 1, 1}, {&__pyx_n_s_helpers, __pyx_k_helpers, sizeof(__pyx_k_helpers), 0, 0, 1, 1}, {&__pyx_n_s_hostname, __pyx_k_hostname, sizeof(__pyx_k_hostname), 0, 0, 1, 1}, {&__pyx_n_u_hostname, __pyx_k_hostname, sizeof(__pyx_k_hostname), 0, 1, 0, 1}, {&__pyx_n_s_id_key, __pyx_k_id_key, sizeof(__pyx_k_id_key), 0, 0, 1, 1}, {&__pyx_n_u_id_key, __pyx_k_id_key, sizeof(__pyx_k_id_key), 0, 1, 0, 1}, {&__pyx_kp_u_id_size_csize, __pyx_k_id_size_csize, sizeof(__pyx_k_id_size_csize), 0, 1, 0, 0}, {&__pyx_n_s_import, __pyx_k_import, sizeof(__pyx_k_import), 0, 0, 1, 1}, {&__pyx_n_s_init, __pyx_k_init, sizeof(__pyx_k_init), 0, 0, 1, 1}, {&__pyx_n_s_int_to_bigint, __pyx_k_int_to_bigint, sizeof(__pyx_k_int_to_bigint), 0, 0, 1, 1}, {&__pyx_n_s_internal_dict, __pyx_k_internal_dict, sizeof(__pyx_k_internal_dict), 0, 0, 1, 1}, {&__pyx_kp_u_internal_dict_2, __pyx_k_internal_dict_2, sizeof(__pyx_k_internal_dict_2), 0, 1, 0, 0}, {&__pyx_kp_u_internal_dict_must_be_a_dict, __pyx_k_internal_dict_must_be_a_dict, sizeof(__pyx_k_internal_dict_must_be_a_dict), 0, 1, 0, 0}, {&__pyx_n_s_item_keys, __pyx_k_item_keys, sizeof(__pyx_k_item_keys), 0, 0, 1, 1}, {&__pyx_n_u_item_keys, __pyx_k_item_keys, sizeof(__pyx_k_item_keys), 0, 1, 0, 1}, {&__pyx_n_s_items, __pyx_k_items, sizeof(__pyx_k_items), 0, 0, 1, 1}, {&__pyx_n_u_items, __pyx_k_items, sizeof(__pyx_k_items), 0, 1, 0, 1}, {&__pyx_n_s_iterations, __pyx_k_iterations, sizeof(__pyx_k_iterations), 0, 0, 1, 1}, {&__pyx_n_u_iterations, __pyx_k_iterations, sizeof(__pyx_k_iterations), 0, 1, 0, 1}, {&__pyx_n_s_k, __pyx_k_k, sizeof(__pyx_k_k), 0, 0, 1, 1}, {&__pyx_n_s_key, __pyx_k_key, sizeof(__pyx_k_key), 0, 0, 1, 1}, {&__pyx_kp_u_key_must_be_str, __pyx_k_key_must_be_str, sizeof(__pyx_k_key_must_be_str), 0, 1, 0, 0}, {&__pyx_kp_u_key_s_is_not_a_valid_key, __pyx_k_key_s_is_not_a_valid_key, sizeof(__pyx_k_key_s_is_not_a_valid_key), 0, 1, 0, 0}, {&__pyx_n_s_kw, __pyx_k_kw, sizeof(__pyx_k_kw), 0, 0, 1, 1}, {&__pyx_kp_u_list_or_None, __pyx_k_list_or_None, sizeof(__pyx_k_list_or_None), 0, 1, 0, 0}, {&__pyx_n_s_main, __pyx_k_main, sizeof(__pyx_k_main), 0, 0, 1, 1}, {&__pyx_n_s_make_property, __pyx_k_make_property, sizeof(__pyx_k_make_property), 0, 0, 1, 1}, {&__pyx_n_s_master, __pyx_k_master, sizeof(__pyx_k_master), 0, 0, 1, 1}, {&__pyx_n_s_memorize, __pyx_k_memorize, sizeof(__pyx_k_memorize), 0, 0, 1, 1}, 
{&__pyx_n_s_metaclass, __pyx_k_metaclass, sizeof(__pyx_k_metaclass), 0, 0, 1, 1}, {&__pyx_n_s_mode, __pyx_k_mode, sizeof(__pyx_k_mode), 0, 0, 1, 1}, {&__pyx_n_u_mode, __pyx_k_mode, sizeof(__pyx_k_mode), 0, 1, 0, 1}, {&__pyx_n_s_module, __pyx_k_module, sizeof(__pyx_k_module), 0, 0, 1, 1}, {&__pyx_n_s_mtime, __pyx_k_mtime, sizeof(__pyx_k_mtime), 0, 0, 1, 1}, {&__pyx_n_u_mtime, __pyx_k_mtime, sizeof(__pyx_k_mtime), 0, 1, 0, 1}, {&__pyx_n_s_name, __pyx_k_name, sizeof(__pyx_k_name), 0, 0, 1, 1}, {&__pyx_n_s_name_2, __pyx_k_name_2, sizeof(__pyx_k_name_2), 0, 0, 1, 1}, {&__pyx_n_u_name_2, __pyx_k_name_2, sizeof(__pyx_k_name_2), 0, 1, 0, 1}, {&__pyx_n_s_namedtuple, __pyx_k_namedtuple, sizeof(__pyx_k_namedtuple), 0, 0, 1, 1}, {&__pyx_n_s_nlink, __pyx_k_nlink, sizeof(__pyx_k_nlink), 0, 0, 1, 1}, {&__pyx_n_u_nlink, __pyx_k_nlink, sizeof(__pyx_k_nlink), 0, 1, 0, 1}, {&__pyx_n_s_other, __pyx_k_other, sizeof(__pyx_k_other), 0, 0, 1, 1}, {&__pyx_n_s_part, __pyx_k_part, sizeof(__pyx_k_part), 0, 0, 1, 1}, {&__pyx_n_u_part, __pyx_k_part, sizeof(__pyx_k_part), 0, 1, 0, 1}, {&__pyx_n_s_path, __pyx_k_path, sizeof(__pyx_k_path), 0, 0, 1, 1}, {&__pyx_n_u_path, __pyx_k_path, sizeof(__pyx_k_path), 0, 1, 0, 1}, {&__pyx_n_s_prepare, __pyx_k_prepare, sizeof(__pyx_k_prepare), 0, 0, 1, 1}, {&__pyx_n_s_property, __pyx_k_property, sizeof(__pyx_k_property), 0, 0, 1, 1}, {&__pyx_n_s_qualname, __pyx_k_qualname, sizeof(__pyx_k_qualname), 0, 0, 1, 1}, {&__pyx_n_s_rdev, __pyx_k_rdev, sizeof(__pyx_k_rdev), 0, 0, 1, 1}, {&__pyx_n_u_rdev, __pyx_k_rdev, sizeof(__pyx_k_rdev), 0, 1, 0, 1}, {&__pyx_n_s_recreate_args, __pyx_k_recreate_args, sizeof(__pyx_k_recreate_args), 0, 0, 1, 1}, {&__pyx_n_u_recreate_args, __pyx_k_recreate_args, sizeof(__pyx_k_recreate_args), 0, 1, 0, 1}, {&__pyx_n_s_recreate_cmdline, __pyx_k_recreate_cmdline, sizeof(__pyx_k_recreate_cmdline), 0, 0, 1, 1}, {&__pyx_n_u_recreate_cmdline, __pyx_k_recreate_cmdline, sizeof(__pyx_k_recreate_cmdline), 0, 1, 0, 1}, {&__pyx_n_s_recreate_partial_chunks, __pyx_k_recreate_partial_chunks, sizeof(__pyx_k_recreate_partial_chunks), 0, 0, 1, 1}, {&__pyx_n_u_recreate_partial_chunks, __pyx_k_recreate_partial_chunks, sizeof(__pyx_k_recreate_partial_chunks), 0, 1, 0, 1}, {&__pyx_n_s_recreate_source_id, __pyx_k_recreate_source_id, sizeof(__pyx_k_recreate_source_id), 0, 0, 1, 1}, {&__pyx_n_u_recreate_source_id, __pyx_k_recreate_source_id, sizeof(__pyx_k_recreate_source_id), 0, 1, 0, 1}, {&__pyx_n_s_repository_id, __pyx_k_repository_id, sizeof(__pyx_k_repository_id), 0, 0, 1, 1}, {&__pyx_n_u_repository_id, __pyx_k_repository_id, sizeof(__pyx_k_repository_id), 0, 1, 0, 1}, {&__pyx_n_s_repr, __pyx_k_repr, sizeof(__pyx_k_repr), 0, 0, 1, 1}, {&__pyx_n_s_safe_decode, __pyx_k_safe_decode, sizeof(__pyx_k_safe_decode), 0, 0, 1, 1}, {&__pyx_n_s_safe_encode, __pyx_k_safe_encode, sizeof(__pyx_k_safe_encode), 0, 0, 1, 1}, {&__pyx_n_s_salt, __pyx_k_salt, sizeof(__pyx_k_salt), 0, 0, 1, 1}, {&__pyx_n_u_salt, __pyx_k_salt, sizeof(__pyx_k_salt), 0, 1, 0, 1}, {&__pyx_n_s_self, __pyx_k_self, sizeof(__pyx_k_self), 0, 0, 1, 1}, {&__pyx_n_s_send, __pyx_k_send, sizeof(__pyx_k_send), 0, 0, 1, 1}, {&__pyx_n_s_set, __pyx_k_set, sizeof(__pyx_k_set), 0, 0, 1, 1}, {&__pyx_n_s_size, __pyx_k_size, sizeof(__pyx_k_size), 0, 0, 1, 1}, {&__pyx_n_u_size, __pyx_k_size, sizeof(__pyx_k_size), 0, 1, 0, 1}, {&__pyx_n_s_slots, __pyx_k_slots, sizeof(__pyx_k_slots), 0, 0, 1, 1}, {&__pyx_n_s_source, __pyx_k_source, sizeof(__pyx_k_source), 0, 0, 1, 1}, {&__pyx_n_u_source, __pyx_k_source, sizeof(__pyx_k_source), 0, 1, 0, 1}, 
{&__pyx_kp_s_src_borg_item_pyx, __pyx_k_src_borg_item_pyx, sizeof(__pyx_k_src_borg_item_pyx), 0, 0, 1, 0}, {&__pyx_n_s_stat, __pyx_k_stat, sizeof(__pyx_k_stat), 0, 0, 1, 1}, {&__pyx_n_s_staticmethod, __pyx_k_staticmethod, sizeof(__pyx_k_staticmethod), 0, 0, 1, 1}, {&__pyx_n_s_sum, __pyx_k_sum, sizeof(__pyx_k_sum), 0, 0, 1, 1}, {&__pyx_kp_u_surrogate_escaped_str, __pyx_k_surrogate_escaped_str, sizeof(__pyx_k_surrogate_escaped_str), 0, 1, 0, 0}, {&__pyx_kp_u_surrogate_escaped_str_or_None, __pyx_k_surrogate_escaped_str_or_None, sizeof(__pyx_k_surrogate_escaped_str_or_None), 0, 1, 0, 0}, {&__pyx_n_s_tam_required, __pyx_k_tam_required, sizeof(__pyx_k_tam_required), 0, 0, 1, 1}, {&__pyx_n_u_tam_required, __pyx_k_tam_required, sizeof(__pyx_k_tam_required), 0, 1, 0, 1}, {&__pyx_n_s_test, __pyx_k_test, sizeof(__pyx_k_test), 0, 0, 1, 1}, {&__pyx_n_s_throw, __pyx_k_throw, sizeof(__pyx_k_throw), 0, 0, 1, 1}, {&__pyx_n_s_time, __pyx_k_time, sizeof(__pyx_k_time), 0, 0, 1, 1}, {&__pyx_n_u_time, __pyx_k_time, sizeof(__pyx_k_time), 0, 1, 0, 1}, {&__pyx_n_s_time_end, __pyx_k_time_end, sizeof(__pyx_k_time_end), 0, 0, 1, 1}, {&__pyx_n_u_time_end, __pyx_k_time_end, sizeof(__pyx_k_time_end), 0, 1, 0, 1}, {&__pyx_n_s_timestamp, __pyx_k_timestamp, sizeof(__pyx_k_timestamp), 0, 0, 1, 1}, {&__pyx_n_u_timestamp, __pyx_k_timestamp, sizeof(__pyx_k_timestamp), 0, 1, 0, 1}, {&__pyx_n_s_type_error_msg, __pyx_k_type_error_msg, sizeof(__pyx_k_type_error_msg), 0, 0, 1, 1}, {&__pyx_n_s_uid, __pyx_k_uid, sizeof(__pyx_k_uid), 0, 0, 1, 1}, {&__pyx_n_u_uid, __pyx_k_uid, sizeof(__pyx_k_uid), 0, 1, 0, 1}, {&__pyx_n_s_update, __pyx_k_update, sizeof(__pyx_k_update), 0, 0, 1, 1}, {&__pyx_n_s_update_internal, __pyx_k_update_internal, sizeof(__pyx_k_update_internal), 0, 0, 1, 1}, {&__pyx_n_s_user, __pyx_k_user, sizeof(__pyx_k_user), 0, 0, 1, 1}, {&__pyx_n_u_user, __pyx_k_user, sizeof(__pyx_k_user), 0, 1, 0, 1}, {&__pyx_n_s_username, __pyx_k_username, sizeof(__pyx_k_username), 0, 0, 1, 1}, {&__pyx_n_u_username, __pyx_k_username, sizeof(__pyx_k_username), 0, 1, 0, 1}, {&__pyx_n_s_v, __pyx_k_v, sizeof(__pyx_k_v), 0, 0, 1, 1}, {&__pyx_n_s_value, __pyx_k_value, sizeof(__pyx_k_value), 0, 0, 1, 1}, {&__pyx_kp_u_value_must_be, __pyx_k_value_must_be, sizeof(__pyx_k_value_must_be), 0, 1, 0, 0}, {&__pyx_n_s_value_type, __pyx_k_value_type, sizeof(__pyx_k_value_type), 0, 0, 1, 1}, {&__pyx_n_s_value_type_name, __pyx_k_value_type_name, sizeof(__pyx_k_value_type_name), 0, 0, 1, 1}, {&__pyx_n_s_version, __pyx_k_version, sizeof(__pyx_k_version), 0, 0, 1, 1}, {&__pyx_n_u_version, __pyx_k_version, sizeof(__pyx_k_version), 0, 1, 0, 1}, {&__pyx_n_s_xattrs, __pyx_k_xattrs, sizeof(__pyx_k_xattrs), 0, 0, 1, 1}, {&__pyx_n_u_xattrs, __pyx_k_xattrs, sizeof(__pyx_k_xattrs), 0, 1, 0, 1}, {0, 0, 0, 0, 0, 0, 0} }; static CYTHON_SMALL_CODE int __Pyx_InitCachedBuiltins(void) { __pyx_builtin_staticmethod = __Pyx_GetBuiltinName(__pyx_n_s_staticmethod); if (!__pyx_builtin_staticmethod) __PYX_ERR(0, 93, __pyx_L1_error) __pyx_builtin_TypeError = __Pyx_GetBuiltinName(__pyx_n_s_TypeError); if (!__pyx_builtin_TypeError) __PYX_ERR(0, 45, __pyx_L1_error) __pyx_builtin_ValueError = __Pyx_GetBuiltinName(__pyx_n_s_ValueError); if (!__pyx_builtin_ValueError) __PYX_ERR(0, 82, __pyx_L1_error) __pyx_builtin_property = __Pyx_GetBuiltinName(__pyx_n_s_property); if (!__pyx_builtin_property) __PYX_ERR(0, 125, __pyx_L1_error) __pyx_builtin_KeyError = __Pyx_GetBuiltinName(__pyx_n_s_KeyError); if (!__pyx_builtin_KeyError) __PYX_ERR(0, 106, __pyx_L1_error) __pyx_builtin_AttributeError = 
__Pyx_GetBuiltinName(__pyx_n_s_AttributeError); if (!__pyx_builtin_AttributeError) __PYX_ERR(0, 107, __pyx_L1_error) __pyx_builtin_sum = __Pyx_GetBuiltinName(__pyx_n_s_sum); if (!__pyx_builtin_sum) __PYX_ERR(0, 233, __pyx_L1_error) return 0; __pyx_L1_error:; return -1; } static CYTHON_SMALL_CODE int __Pyx_InitCachedConstants(void) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__Pyx_InitCachedConstants", 0); /* "borg/item.pyx":45 * self.update_internal(internal_dict) * else: * raise TypeError("internal_dict must be a dict") # <<<<<<<<<<<<<< * if data_dict is None: * data = kw */ __pyx_tuple_ = PyTuple_Pack(1, __pyx_kp_u_internal_dict_must_be_a_dict); if (unlikely(!__pyx_tuple_)) __PYX_ERR(0, 45, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple_); __Pyx_GIVEREF(__pyx_tuple_); /* "borg/item.pyx":51 * data = data_dict * else: * raise TypeError("data_dict must be a dict") # <<<<<<<<<<<<<< * if data: * self.update(data) */ __pyx_tuple__2 = PyTuple_Pack(1, __pyx_kp_u_data_dict_must_be_a_dict); if (unlikely(!__pyx_tuple__2)) __PYX_ERR(0, 51, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__2); __Pyx_GIVEREF(__pyx_tuple__2); /* "borg/item.pyx":80 * """make sure key is of type str and known""" * if not isinstance(key, str): * raise TypeError("key must be str") # <<<<<<<<<<<<<< * if key not in self.VALID_KEYS: * raise ValueError("key '%s' is not a valid key" % key) */ __pyx_tuple__4 = PyTuple_Pack(1, __pyx_kp_u_key_must_be_str); if (unlikely(!__pyx_tuple__4)) __PYX_ERR(0, 80, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__4); __Pyx_GIVEREF(__pyx_tuple__4); /* "borg/item.pyx":103 * attr_error_msg = "attribute %s not found" % key * * def _get(self): # <<<<<<<<<<<<<< * try: * value = self._dict[key] */ __pyx_tuple__6 = PyTuple_Pack(2, __pyx_n_s_self, __pyx_n_s_value); if (unlikely(!__pyx_tuple__6)) __PYX_ERR(0, 103, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__6); __Pyx_GIVEREF(__pyx_tuple__6); __pyx_codeobj__7 = (PyObject*)__Pyx_PyCode_New(1, 0, 2, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__6, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_src_borg_item_pyx, __pyx_n_s_get, 103, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__7)) __PYX_ERR(0, 103, __pyx_L1_error) /* "borg/item.pyx":112 * return value * * def _set(self, value): # <<<<<<<<<<<<<< * if not isinstance(value, value_type): * raise TypeError(type_error_msg) */ __pyx_tuple__8 = PyTuple_Pack(2, __pyx_n_s_self, __pyx_n_s_value); if (unlikely(!__pyx_tuple__8)) __PYX_ERR(0, 112, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__8); __Pyx_GIVEREF(__pyx_tuple__8); __pyx_codeobj__9 = (PyObject*)__Pyx_PyCode_New(2, 0, 2, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__8, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_src_borg_item_pyx, __pyx_n_s_set, 112, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__9)) __PYX_ERR(0, 112, __pyx_L1_error) /* "borg/item.pyx":119 * self._dict[key] = value * * def _del(self): # <<<<<<<<<<<<<< * try: * del self._dict[key] */ __pyx_tuple__10 = PyTuple_Pack(1, __pyx_n_s_self); if (unlikely(!__pyx_tuple__10)) __PYX_ERR(0, 119, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__10); __Pyx_GIVEREF(__pyx_tuple__10); __pyx_codeobj__11 = (PyObject*)__Pyx_PyCode_New(1, 0, 1, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__10, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_src_borg_item_pyx, __pyx_n_s_del, 119, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__11)) __PYX_ERR(0, 119, 
__pyx_L1_error) /* "borg/item.pyx":230 * # hardlink slave, try to fetch hardlink master's chunks list * # todo: put precomputed size into hardlink_masters' values and use it, if present * chunks, _ = hardlink_masters.get(master, (None, None)) # <<<<<<<<<<<<<< * if chunks is None: * return 0 */ __pyx_tuple__12 = PyTuple_Pack(2, Py_None, Py_None); if (unlikely(!__pyx_tuple__12)) __PYX_ERR(0, 230, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__12); __Pyx_GIVEREF(__pyx_tuple__12); /* "borg/item.pyx":36 * VALID_KEYS = None # override with in child class * * __slots__ = ("_dict", ) # avoid setting attributes not supported by properties # <<<<<<<<<<<<<< * * def __init__(self, data_dict=None, internal_dict=None, **kw): */ __pyx_tuple__13 = PyTuple_Pack(1, __pyx_n_u_dict); if (unlikely(!__pyx_tuple__13)) __PYX_ERR(0, 36, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__13); __Pyx_GIVEREF(__pyx_tuple__13); /* "borg/item.pyx":38 * __slots__ = ("_dict", ) # avoid setting attributes not supported by properties * * def __init__(self, data_dict=None, internal_dict=None, **kw): # <<<<<<<<<<<<<< * self._dict = {} * if internal_dict is None: */ __pyx_tuple__14 = PyTuple_Pack(5, __pyx_n_s_self, __pyx_n_s_data_dict, __pyx_n_s_internal_dict, __pyx_n_s_kw, __pyx_n_s_data); if (unlikely(!__pyx_tuple__14)) __PYX_ERR(0, 38, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__14); __Pyx_GIVEREF(__pyx_tuple__14); __pyx_codeobj__15 = (PyObject*)__Pyx_PyCode_New(3, 0, 5, 0, CO_OPTIMIZED|CO_NEWLOCALS|CO_VARKEYWORDS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__14, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_src_borg_item_pyx, __pyx_n_s_init, 38, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__15)) __PYX_ERR(0, 38, __pyx_L1_error) __pyx_tuple__16 = PyTuple_Pack(2, ((PyObject *)Py_None), ((PyObject *)Py_None)); if (unlikely(!__pyx_tuple__16)) __PYX_ERR(0, 38, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__16); __Pyx_GIVEREF(__pyx_tuple__16); /* "borg/item.pyx":55 * self.update(data) * * def update(self, d): # <<<<<<<<<<<<<< * for k, v in d.items(): * if isinstance(k, bytes): */ __pyx_tuple__17 = PyTuple_Pack(4, __pyx_n_s_self, __pyx_n_s_d, __pyx_n_s_k, __pyx_n_s_v); if (unlikely(!__pyx_tuple__17)) __PYX_ERR(0, 55, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__17); __Pyx_GIVEREF(__pyx_tuple__17); __pyx_codeobj__18 = (PyObject*)__Pyx_PyCode_New(2, 0, 4, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__17, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_src_borg_item_pyx, __pyx_n_s_update, 55, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__18)) __PYX_ERR(0, 55, __pyx_L1_error) /* "borg/item.pyx":61 * setattr(self, self._check_key(k), v) * * def update_internal(self, d): # <<<<<<<<<<<<<< * for k, v in d.items(): * if isinstance(k, bytes): */ __pyx_tuple__19 = PyTuple_Pack(4, __pyx_n_s_self, __pyx_n_s_d, __pyx_n_s_k, __pyx_n_s_v); if (unlikely(!__pyx_tuple__19)) __PYX_ERR(0, 61, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__19); __Pyx_GIVEREF(__pyx_tuple__19); __pyx_codeobj__20 = (PyObject*)__Pyx_PyCode_New(2, 0, 4, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__19, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_src_borg_item_pyx, __pyx_n_s_update_internal, 61, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__20)) __PYX_ERR(0, 61, __pyx_L1_error) /* "borg/item.pyx":67 * self._dict[k] = v * * def __eq__(self, other): # <<<<<<<<<<<<<< * return self.as_dict() == other.as_dict() * */ __pyx_tuple__21 = PyTuple_Pack(2, 
__pyx_n_s_self, __pyx_n_s_other); if (unlikely(!__pyx_tuple__21)) __PYX_ERR(0, 67, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__21); __Pyx_GIVEREF(__pyx_tuple__21); __pyx_codeobj__22 = (PyObject*)__Pyx_PyCode_New(2, 0, 2, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__21, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_src_borg_item_pyx, __pyx_n_s_eq, 67, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__22)) __PYX_ERR(0, 67, __pyx_L1_error) /* "borg/item.pyx":70 * return self.as_dict() == other.as_dict() * * def __repr__(self): # <<<<<<<<<<<<<< * return '%s(internal_dict=%r)' % (self.__class__.__name__, self._dict) * */ __pyx_tuple__23 = PyTuple_Pack(1, __pyx_n_s_self); if (unlikely(!__pyx_tuple__23)) __PYX_ERR(0, 70, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__23); __Pyx_GIVEREF(__pyx_tuple__23); __pyx_codeobj__24 = (PyObject*)__Pyx_PyCode_New(1, 0, 1, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__23, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_src_borg_item_pyx, __pyx_n_s_repr, 70, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__24)) __PYX_ERR(0, 70, __pyx_L1_error) /* "borg/item.pyx":73 * return '%s(internal_dict=%r)' % (self.__class__.__name__, self._dict) * * def as_dict(self): # <<<<<<<<<<<<<< * """return the internal dictionary""" * return StableDict(self._dict) */ __pyx_tuple__25 = PyTuple_Pack(1, __pyx_n_s_self); if (unlikely(!__pyx_tuple__25)) __PYX_ERR(0, 73, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__25); __Pyx_GIVEREF(__pyx_tuple__25); __pyx_codeobj__26 = (PyObject*)__Pyx_PyCode_New(1, 0, 1, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__25, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_src_borg_item_pyx, __pyx_n_s_as_dict, 73, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__26)) __PYX_ERR(0, 73, __pyx_L1_error) /* "borg/item.pyx":77 * return StableDict(self._dict) * * def _check_key(self, key): # <<<<<<<<<<<<<< * """make sure key is of type str and known""" * if not isinstance(key, str): */ __pyx_tuple__27 = PyTuple_Pack(2, __pyx_n_s_self, __pyx_n_s_key); if (unlikely(!__pyx_tuple__27)) __PYX_ERR(0, 77, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__27); __Pyx_GIVEREF(__pyx_tuple__27); __pyx_codeobj__28 = (PyObject*)__Pyx_PyCode_New(2, 0, 2, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__27, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_src_borg_item_pyx, __pyx_n_s_check_key, 77, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__28)) __PYX_ERR(0, 77, __pyx_L1_error) /* "borg/item.pyx":85 * return key * * def __contains__(self, key): # <<<<<<<<<<<<<< * """do we have this key?""" * return self._check_key(key) in self._dict */ __pyx_tuple__29 = PyTuple_Pack(2, __pyx_n_s_self, __pyx_n_s_key); if (unlikely(!__pyx_tuple__29)) __PYX_ERR(0, 85, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__29); __Pyx_GIVEREF(__pyx_tuple__29); __pyx_codeobj__30 = (PyObject*)__Pyx_PyCode_New(2, 0, 2, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__29, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_src_borg_item_pyx, __pyx_n_s_contains, 85, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__30)) __PYX_ERR(0, 85, __pyx_L1_error) /* "borg/item.pyx":89 * return self._check_key(key) in self._dict * * def get(self, key, default=None): # <<<<<<<<<<<<<< * """get value for key, return default if key does not exist""" * return getattr(self, 
self._check_key(key), default) */ __pyx_tuple__31 = PyTuple_Pack(3, __pyx_n_s_self, __pyx_n_s_key, __pyx_n_s_default); if (unlikely(!__pyx_tuple__31)) __PYX_ERR(0, 89, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__31); __Pyx_GIVEREF(__pyx_tuple__31); __pyx_codeobj__32 = (PyObject*)__Pyx_PyCode_New(3, 0, 3, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__31, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_src_borg_item_pyx, __pyx_n_s_get_2, 89, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__32)) __PYX_ERR(0, 89, __pyx_L1_error) __pyx_tuple__33 = PyTuple_Pack(1, ((PyObject *)Py_None)); if (unlikely(!__pyx_tuple__33)) __PYX_ERR(0, 89, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__33); __Pyx_GIVEREF(__pyx_tuple__33); /* "borg/item.pyx":94 * * @staticmethod * def _make_property(key, value_type, value_type_name=None, encode=None, decode=None): # <<<<<<<<<<<<<< * """return a property that deals with self._dict[key]""" * assert isinstance(key, str) */ __pyx_tuple__34 = PyTuple_Pack(14, __pyx_n_s_key, __pyx_n_s_value_type, __pyx_n_s_value_type_name, __pyx_n_s_encode, __pyx_n_s_decode, __pyx_n_s_doc, __pyx_n_s_type_error_msg, __pyx_n_s_attr_error_msg, __pyx_n_s_get, __pyx_n_s_get, __pyx_n_s_set, __pyx_n_s_set, __pyx_n_s_del, __pyx_n_s_del); if (unlikely(!__pyx_tuple__34)) __PYX_ERR(0, 94, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__34); __Pyx_GIVEREF(__pyx_tuple__34); __pyx_codeobj__35 = (PyObject*)__Pyx_PyCode_New(5, 0, 14, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__34, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_src_borg_item_pyx, __pyx_n_s_make_property, 94, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__35)) __PYX_ERR(0, 94, __pyx_L1_error) __pyx_tuple__36 = PyTuple_Pack(3, ((PyObject *)Py_None), ((PyObject *)Py_None), ((PyObject *)Py_None)); if (unlikely(!__pyx_tuple__36)) __PYX_ERR(0, 94, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__36); __Pyx_GIVEREF(__pyx_tuple__36); /* "borg/item.pyx":128 * * * ChunkListEntry = namedtuple('ChunkListEntry', 'id size csize') # <<<<<<<<<<<<<< * * class Item(PropDict): */ __pyx_tuple__37 = PyTuple_Pack(2, __pyx_n_u_ChunkListEntry, __pyx_kp_u_id_size_csize); if (unlikely(!__pyx_tuple__37)) __PYX_ERR(0, 128, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__37); __Pyx_GIVEREF(__pyx_tuple__37); /* "borg/item.pyx":191 * part = PropDict._make_property('part', int) * * def get_size(self, hardlink_masters=None, memorize=False, compressed=False, from_chunks=False): # <<<<<<<<<<<<<< * """ * Determine the (uncompressed or compressed) size of this item. 
*/ __pyx_tuple__39 = PyTuple_Pack(13, __pyx_n_s_self, __pyx_n_s_hardlink_masters, __pyx_n_s_memorize, __pyx_n_s_compressed, __pyx_n_s_from_chunks, __pyx_n_s_attr, __pyx_n_s_size, __pyx_n_s_chunks, __pyx_n_s_having_chunks, __pyx_n_s_master, __pyx_n_s__38, __pyx_n_s_genexpr, __pyx_n_s_genexpr); if (unlikely(!__pyx_tuple__39)) __PYX_ERR(0, 191, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__39); __Pyx_GIVEREF(__pyx_tuple__39); __pyx_codeobj__40 = (PyObject*)__Pyx_PyCode_New(5, 0, 13, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__39, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_src_borg_item_pyx, __pyx_n_s_get_size, 191, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__40)) __PYX_ERR(0, 191, __pyx_L1_error) __pyx_tuple__41 = PyTuple_Pack(4, ((PyObject *)Py_None), ((PyObject *)Py_False), ((PyObject *)Py_False), ((PyObject *)Py_False)); if (unlikely(!__pyx_tuple__41)) __PYX_ERR(0, 191, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__41); __Pyx_GIVEREF(__pyx_tuple__41); __Pyx_RefNannyFinishContext(); return 0; __pyx_L1_error:; __Pyx_RefNannyFinishContext(); return -1; } static CYTHON_SMALL_CODE int __Pyx_InitGlobals(void) { if (__Pyx_InitStrings(__pyx_string_tab) < 0) __PYX_ERR(0, 1, __pyx_L1_error); __pyx_int_0 = PyInt_FromLong(0); if (unlikely(!__pyx_int_0)) __PYX_ERR(0, 1, __pyx_L1_error) return 0; __pyx_L1_error:; return -1; } static CYTHON_SMALL_CODE int __Pyx_modinit_global_init_code(void); /*proto*/ static CYTHON_SMALL_CODE int __Pyx_modinit_variable_export_code(void); /*proto*/ static CYTHON_SMALL_CODE int __Pyx_modinit_function_export_code(void); /*proto*/ static CYTHON_SMALL_CODE int __Pyx_modinit_type_init_code(void); /*proto*/ static CYTHON_SMALL_CODE int __Pyx_modinit_type_import_code(void); /*proto*/ static CYTHON_SMALL_CODE int __Pyx_modinit_variable_import_code(void); /*proto*/ static CYTHON_SMALL_CODE int __Pyx_modinit_function_import_code(void); /*proto*/ static int __Pyx_modinit_global_init_code(void) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__Pyx_modinit_global_init_code", 0); /*--- Global init code ---*/ __Pyx_RefNannyFinishContext(); return 0; } static int __Pyx_modinit_variable_export_code(void) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__Pyx_modinit_variable_export_code", 0); /*--- Variable export code ---*/ __Pyx_RefNannyFinishContext(); return 0; } static int __Pyx_modinit_function_export_code(void) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__Pyx_modinit_function_export_code", 0); /*--- Function export code ---*/ __Pyx_RefNannyFinishContext(); return 0; } static int __Pyx_modinit_type_init_code(void) { __Pyx_RefNannyDeclarations int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__Pyx_modinit_type_init_code", 0); /*--- Type init code ---*/ if (PyType_Ready(&__pyx_type_4borg_4item___pyx_scope_struct___make_property) < 0) __PYX_ERR(0, 94, __pyx_L1_error) #if PY_VERSION_HEX < 0x030800B1 __pyx_type_4borg_4item___pyx_scope_struct___make_property.tp_print = 0; #endif if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_type_4borg_4item___pyx_scope_struct___make_property.tp_dictoffset && __pyx_type_4borg_4item___pyx_scope_struct___make_property.tp_getattro == PyObject_GenericGetAttr)) { __pyx_type_4borg_4item___pyx_scope_struct___make_property.tp_getattro = __Pyx_PyObject_GenericGetAttrNoDict; } __pyx_ptype_4borg_4item___pyx_scope_struct___make_property = 
&__pyx_type_4borg_4item___pyx_scope_struct___make_property; if (PyType_Ready(&__pyx_type_4borg_4item___pyx_scope_struct_1_get_size) < 0) __PYX_ERR(0, 191, __pyx_L1_error) #if PY_VERSION_HEX < 0x030800B1 __pyx_type_4borg_4item___pyx_scope_struct_1_get_size.tp_print = 0; #endif if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_type_4borg_4item___pyx_scope_struct_1_get_size.tp_dictoffset && __pyx_type_4borg_4item___pyx_scope_struct_1_get_size.tp_getattro == PyObject_GenericGetAttr)) { __pyx_type_4borg_4item___pyx_scope_struct_1_get_size.tp_getattro = __Pyx_PyObject_GenericGetAttrNoDict; } __pyx_ptype_4borg_4item___pyx_scope_struct_1_get_size = &__pyx_type_4borg_4item___pyx_scope_struct_1_get_size; if (PyType_Ready(&__pyx_type_4borg_4item___pyx_scope_struct_2_genexpr) < 0) __PYX_ERR(0, 233, __pyx_L1_error) #if PY_VERSION_HEX < 0x030800B1 __pyx_type_4borg_4item___pyx_scope_struct_2_genexpr.tp_print = 0; #endif if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_type_4borg_4item___pyx_scope_struct_2_genexpr.tp_dictoffset && __pyx_type_4borg_4item___pyx_scope_struct_2_genexpr.tp_getattro == PyObject_GenericGetAttr)) { __pyx_type_4borg_4item___pyx_scope_struct_2_genexpr.tp_getattro = __Pyx_PyObject_GenericGetAttrNoDict; } __pyx_ptype_4borg_4item___pyx_scope_struct_2_genexpr = &__pyx_type_4borg_4item___pyx_scope_struct_2_genexpr; __Pyx_RefNannyFinishContext(); return 0; __pyx_L1_error:; __Pyx_RefNannyFinishContext(); return -1; } static int __Pyx_modinit_type_import_code(void) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__Pyx_modinit_type_import_code", 0); /*--- Type import code ---*/ __Pyx_RefNannyFinishContext(); return 0; } static int __Pyx_modinit_variable_import_code(void) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__Pyx_modinit_variable_import_code", 0); /*--- Variable import code ---*/ __Pyx_RefNannyFinishContext(); return 0; } static int __Pyx_modinit_function_import_code(void) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__Pyx_modinit_function_import_code", 0); /*--- Function import code ---*/ __Pyx_RefNannyFinishContext(); return 0; } #ifndef CYTHON_NO_PYINIT_EXPORT #define __Pyx_PyMODINIT_FUNC PyMODINIT_FUNC #elif PY_MAJOR_VERSION < 3 #ifdef __cplusplus #define __Pyx_PyMODINIT_FUNC extern "C" void #else #define __Pyx_PyMODINIT_FUNC void #endif #else #ifdef __cplusplus #define __Pyx_PyMODINIT_FUNC extern "C" PyObject * #else #define __Pyx_PyMODINIT_FUNC PyObject * #endif #endif #if PY_MAJOR_VERSION < 3 __Pyx_PyMODINIT_FUNC inititem(void) CYTHON_SMALL_CODE; /*proto*/ __Pyx_PyMODINIT_FUNC inititem(void) #else __Pyx_PyMODINIT_FUNC PyInit_item(void) CYTHON_SMALL_CODE; /*proto*/ __Pyx_PyMODINIT_FUNC PyInit_item(void) #if CYTHON_PEP489_MULTI_PHASE_INIT { return PyModuleDef_Init(&__pyx_moduledef); } static CYTHON_SMALL_CODE int __Pyx_check_single_interpreter(void) { #if PY_VERSION_HEX >= 0x030700A1 static PY_INT64_T main_interpreter_id = -1; PY_INT64_T current_id = PyInterpreterState_GetID(PyThreadState_Get()->interp); if (main_interpreter_id == -1) { main_interpreter_id = current_id; return (unlikely(current_id == -1)) ? 
-1 : 0; } else if (unlikely(main_interpreter_id != current_id)) #else static PyInterpreterState *main_interpreter = NULL; PyInterpreterState *current_interpreter = PyThreadState_Get()->interp; if (!main_interpreter) { main_interpreter = current_interpreter; } else if (unlikely(main_interpreter != current_interpreter)) #endif { PyErr_SetString( PyExc_ImportError, "Interpreter change detected - this module can only be loaded into one interpreter per process."); return -1; } return 0; } static CYTHON_SMALL_CODE int __Pyx_copy_spec_to_module(PyObject *spec, PyObject *moddict, const char* from_name, const char* to_name, int allow_none) { PyObject *value = PyObject_GetAttrString(spec, from_name); int result = 0; if (likely(value)) { if (allow_none || value != Py_None) { result = PyDict_SetItemString(moddict, to_name, value); } Py_DECREF(value); } else if (PyErr_ExceptionMatches(PyExc_AttributeError)) { PyErr_Clear(); } else { result = -1; } return result; } static CYTHON_SMALL_CODE PyObject* __pyx_pymod_create(PyObject *spec, CYTHON_UNUSED PyModuleDef *def) { PyObject *module = NULL, *moddict, *modname; if (__Pyx_check_single_interpreter()) return NULL; if (__pyx_m) return __Pyx_NewRef(__pyx_m); modname = PyObject_GetAttrString(spec, "name"); if (unlikely(!modname)) goto bad; module = PyModule_NewObject(modname); Py_DECREF(modname); if (unlikely(!module)) goto bad; moddict = PyModule_GetDict(module); if (unlikely(!moddict)) goto bad; if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "loader", "__loader__", 1) < 0)) goto bad; if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "origin", "__file__", 1) < 0)) goto bad; if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "parent", "__package__", 1) < 0)) goto bad; if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "submodule_search_locations", "__path__", 0) < 0)) goto bad; return module; bad: Py_XDECREF(module); return NULL; } static CYTHON_SMALL_CODE int __pyx_pymod_exec_item(PyObject *__pyx_pyinit_module) #endif #endif { PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; PyObject *__pyx_t_6 = NULL; PyObject *__pyx_t_7 = NULL; int __pyx_t_8; PyObject *__pyx_t_9 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannyDeclarations #if CYTHON_PEP489_MULTI_PHASE_INIT if (__pyx_m) { if (__pyx_m == __pyx_pyinit_module) return 0; PyErr_SetString(PyExc_RuntimeError, "Module 'item' has already been imported. 
Re-initialisation is not supported."); return -1; } #elif PY_MAJOR_VERSION >= 3 if (__pyx_m) return __Pyx_NewRef(__pyx_m); #endif #if CYTHON_REFNANNY __Pyx_RefNanny = __Pyx_RefNannyImportAPI("refnanny"); if (!__Pyx_RefNanny) { PyErr_Clear(); __Pyx_RefNanny = __Pyx_RefNannyImportAPI("Cython.Runtime.refnanny"); if (!__Pyx_RefNanny) Py_FatalError("failed to import 'refnanny' module"); } #endif __Pyx_RefNannySetupContext("__Pyx_PyMODINIT_FUNC PyInit_item(void)", 0); if (__Pyx_check_binary_version() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #ifdef __Pxy_PyFrame_Initialize_Offsets __Pxy_PyFrame_Initialize_Offsets(); #endif __pyx_empty_tuple = PyTuple_New(0); if (unlikely(!__pyx_empty_tuple)) __PYX_ERR(0, 1, __pyx_L1_error) __pyx_empty_bytes = PyBytes_FromStringAndSize("", 0); if (unlikely(!__pyx_empty_bytes)) __PYX_ERR(0, 1, __pyx_L1_error) __pyx_empty_unicode = PyUnicode_FromStringAndSize("", 0); if (unlikely(!__pyx_empty_unicode)) __PYX_ERR(0, 1, __pyx_L1_error) #ifdef __Pyx_CyFunction_USED if (__pyx_CyFunction_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif #ifdef __Pyx_FusedFunction_USED if (__pyx_FusedFunction_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif #ifdef __Pyx_Coroutine_USED if (__pyx_Coroutine_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif #ifdef __Pyx_Generator_USED if (__pyx_Generator_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif #ifdef __Pyx_AsyncGen_USED if (__pyx_AsyncGen_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif #ifdef __Pyx_StopAsyncIteration_USED if (__pyx_StopAsyncIteration_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif /*--- Library function declarations ---*/ /*--- Threads initialization code ---*/ #if defined(__PYX_FORCE_INIT_THREADS) && __PYX_FORCE_INIT_THREADS #ifdef WITH_THREAD /* Python build with threading support? */ PyEval_InitThreads(); #endif #endif /*--- Module creation code ---*/ #if CYTHON_PEP489_MULTI_PHASE_INIT __pyx_m = __pyx_pyinit_module; Py_INCREF(__pyx_m); #else #if PY_MAJOR_VERSION < 3 __pyx_m = Py_InitModule4("item", __pyx_methods, 0, 0, PYTHON_API_VERSION); Py_XINCREF(__pyx_m); #else __pyx_m = PyModule_Create(&__pyx_moduledef); #endif if (unlikely(!__pyx_m)) __PYX_ERR(0, 1, __pyx_L1_error) #endif __pyx_d = PyModule_GetDict(__pyx_m); if (unlikely(!__pyx_d)) __PYX_ERR(0, 1, __pyx_L1_error) Py_INCREF(__pyx_d); __pyx_b = PyImport_AddModule(__Pyx_BUILTIN_MODULE_NAME); if (unlikely(!__pyx_b)) __PYX_ERR(0, 1, __pyx_L1_error) Py_INCREF(__pyx_b); __pyx_cython_runtime = PyImport_AddModule((char *) "cython_runtime"); if (unlikely(!__pyx_cython_runtime)) __PYX_ERR(0, 1, __pyx_L1_error) Py_INCREF(__pyx_cython_runtime); if (PyObject_SetAttrString(__pyx_m, "__builtins__", __pyx_b) < 0) __PYX_ERR(0, 1, __pyx_L1_error); /*--- Initialize various global constants etc. 
---*/ if (__Pyx_InitGlobals() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #if PY_MAJOR_VERSION < 3 && (__PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT) if (__Pyx_init_sys_getdefaultencoding_params() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif if (__pyx_module_is_main_borg__item) { if (PyObject_SetAttr(__pyx_m, __pyx_n_s_name, __pyx_n_s_main) < 0) __PYX_ERR(0, 1, __pyx_L1_error) } #if PY_MAJOR_VERSION >= 3 { PyObject *modules = PyImport_GetModuleDict(); if (unlikely(!modules)) __PYX_ERR(0, 1, __pyx_L1_error) if (!PyDict_GetItemString(modules, "borg.item")) { if (unlikely(PyDict_SetItemString(modules, "borg.item", __pyx_m) < 0)) __PYX_ERR(0, 1, __pyx_L1_error) } } #endif /*--- Builtin init code ---*/ if (__Pyx_InitCachedBuiltins() < 0) __PYX_ERR(0, 1, __pyx_L1_error) /*--- Constants init code ---*/ if (__Pyx_InitCachedConstants() < 0) __PYX_ERR(0, 1, __pyx_L1_error) /*--- Global type/function init code ---*/ (void)__Pyx_modinit_global_init_code(); (void)__Pyx_modinit_variable_export_code(); (void)__Pyx_modinit_function_export_code(); if (unlikely(__Pyx_modinit_type_init_code() < 0)) __PYX_ERR(0, 1, __pyx_L1_error) (void)__Pyx_modinit_type_import_code(); (void)__Pyx_modinit_variable_import_code(); (void)__Pyx_modinit_function_import_code(); /*--- Execution code ---*/ #if defined(__Pyx_Generator_USED) || defined(__Pyx_Coroutine_USED) if (__Pyx_patch_abc() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif /* "borg/item.pyx":3 * # cython: language_level=3 * * import stat # <<<<<<<<<<<<<< * from collections import namedtuple * */ __pyx_t_1 = __Pyx_Import(__pyx_n_s_stat, 0, 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); if (PyDict_SetItem(__pyx_d, __pyx_n_s_stat, __pyx_t_1) < 0) __PYX_ERR(0, 3, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "borg/item.pyx":4 * * import stat * from collections import namedtuple # <<<<<<<<<<<<<< * * from .constants import ITEM_KEYS, ARCHIVE_KEYS */ __pyx_t_1 = PyList_New(1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_INCREF(__pyx_n_s_namedtuple); __Pyx_GIVEREF(__pyx_n_s_namedtuple); PyList_SET_ITEM(__pyx_t_1, 0, __pyx_n_s_namedtuple); __pyx_t_2 = __Pyx_Import(__pyx_n_s_collections, __pyx_t_1, 0); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 4, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = __Pyx_ImportFrom(__pyx_t_2, __pyx_n_s_namedtuple); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); if (PyDict_SetItem(__pyx_d, __pyx_n_s_namedtuple, __pyx_t_1) < 0) __PYX_ERR(0, 4, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "borg/item.pyx":6 * from collections import namedtuple * * from .constants import ITEM_KEYS, ARCHIVE_KEYS # <<<<<<<<<<<<<< * from .helpers import safe_encode, safe_decode * from .helpers import bigint_to_int, int_to_bigint */ __pyx_t_2 = PyList_New(2); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 6, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_INCREF(__pyx_n_s_ITEM_KEYS); __Pyx_GIVEREF(__pyx_n_s_ITEM_KEYS); PyList_SET_ITEM(__pyx_t_2, 0, __pyx_n_s_ITEM_KEYS); __Pyx_INCREF(__pyx_n_s_ARCHIVE_KEYS); __Pyx_GIVEREF(__pyx_n_s_ARCHIVE_KEYS); PyList_SET_ITEM(__pyx_t_2, 1, __pyx_n_s_ARCHIVE_KEYS); __pyx_t_1 = __Pyx_Import(__pyx_n_s_constants, __pyx_t_2, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 6, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = __Pyx_ImportFrom(__pyx_t_1, 
__pyx_n_s_ITEM_KEYS); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 6, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); if (PyDict_SetItem(__pyx_d, __pyx_n_s_ITEM_KEYS, __pyx_t_2) < 0) __PYX_ERR(0, 6, __pyx_L1_error) __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = __Pyx_ImportFrom(__pyx_t_1, __pyx_n_s_ARCHIVE_KEYS); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 6, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); if (PyDict_SetItem(__pyx_d, __pyx_n_s_ARCHIVE_KEYS, __pyx_t_2) < 0) __PYX_ERR(0, 6, __pyx_L1_error) __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "borg/item.pyx":7 * * from .constants import ITEM_KEYS, ARCHIVE_KEYS * from .helpers import safe_encode, safe_decode # <<<<<<<<<<<<<< * from .helpers import bigint_to_int, int_to_bigint * from .helpers import StableDict */ __pyx_t_1 = PyList_New(2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 7, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_INCREF(__pyx_n_s_safe_encode); __Pyx_GIVEREF(__pyx_n_s_safe_encode); PyList_SET_ITEM(__pyx_t_1, 0, __pyx_n_s_safe_encode); __Pyx_INCREF(__pyx_n_s_safe_decode); __Pyx_GIVEREF(__pyx_n_s_safe_decode); PyList_SET_ITEM(__pyx_t_1, 1, __pyx_n_s_safe_decode); __pyx_t_2 = __Pyx_Import(__pyx_n_s_helpers, __pyx_t_1, 1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 7, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = __Pyx_ImportFrom(__pyx_t_2, __pyx_n_s_safe_encode); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 7, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); if (PyDict_SetItem(__pyx_d, __pyx_n_s_safe_encode, __pyx_t_1) < 0) __PYX_ERR(0, 7, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = __Pyx_ImportFrom(__pyx_t_2, __pyx_n_s_safe_decode); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 7, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); if (PyDict_SetItem(__pyx_d, __pyx_n_s_safe_decode, __pyx_t_1) < 0) __PYX_ERR(0, 7, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "borg/item.pyx":8 * from .constants import ITEM_KEYS, ARCHIVE_KEYS * from .helpers import safe_encode, safe_decode * from .helpers import bigint_to_int, int_to_bigint # <<<<<<<<<<<<<< * from .helpers import StableDict * */ __pyx_t_2 = PyList_New(2); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 8, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_INCREF(__pyx_n_s_bigint_to_int); __Pyx_GIVEREF(__pyx_n_s_bigint_to_int); PyList_SET_ITEM(__pyx_t_2, 0, __pyx_n_s_bigint_to_int); __Pyx_INCREF(__pyx_n_s_int_to_bigint); __Pyx_GIVEREF(__pyx_n_s_int_to_bigint); PyList_SET_ITEM(__pyx_t_2, 1, __pyx_n_s_int_to_bigint); __pyx_t_1 = __Pyx_Import(__pyx_n_s_helpers, __pyx_t_2, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 8, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = __Pyx_ImportFrom(__pyx_t_1, __pyx_n_s_bigint_to_int); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 8, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); if (PyDict_SetItem(__pyx_d, __pyx_n_s_bigint_to_int, __pyx_t_2) < 0) __PYX_ERR(0, 8, __pyx_L1_error) __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = __Pyx_ImportFrom(__pyx_t_1, __pyx_n_s_int_to_bigint); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 8, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); if (PyDict_SetItem(__pyx_d, __pyx_n_s_int_to_bigint, __pyx_t_2) < 0) __PYX_ERR(0, 8, __pyx_L1_error) __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "borg/item.pyx":9 * from .helpers import safe_encode, safe_decode * from .helpers import bigint_to_int, int_to_bigint * from .helpers import StableDict # <<<<<<<<<<<<<< * * API_VERSION 
= '1.1_03' */ __pyx_t_1 = PyList_New(1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 9, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_INCREF(__pyx_n_s_StableDict); __Pyx_GIVEREF(__pyx_n_s_StableDict); PyList_SET_ITEM(__pyx_t_1, 0, __pyx_n_s_StableDict); __pyx_t_2 = __Pyx_Import(__pyx_n_s_helpers, __pyx_t_1, 1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 9, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = __Pyx_ImportFrom(__pyx_t_2, __pyx_n_s_StableDict); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 9, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); if (PyDict_SetItem(__pyx_d, __pyx_n_s_StableDict, __pyx_t_1) < 0) __PYX_ERR(0, 9, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "borg/item.pyx":11 * from .helpers import StableDict * * API_VERSION = '1.1_03' # <<<<<<<<<<<<<< * * */ if (PyDict_SetItem(__pyx_d, __pyx_n_s_API_VERSION, __pyx_kp_u_1_1_03) < 0) __PYX_ERR(0, 11, __pyx_L1_error) /* "borg/item.pyx":14 * * * class PropDict: # <<<<<<<<<<<<<< * """ * Manage a dictionary via properties. */ __pyx_t_2 = __Pyx_Py3MetaclassPrepare((PyObject *) NULL, __pyx_empty_tuple, __pyx_n_s_PropDict, __pyx_n_s_PropDict, (PyObject *) NULL, __pyx_n_s_borg_item, __pyx_kp_s_Manage_a_dictionary_via_propert); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 14, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); /* "borg/item.pyx":34 * are ignored instead of causing an error. * """ * VALID_KEYS = None # override with in child class # <<<<<<<<<<<<<< * * __slots__ = ("_dict", ) # avoid setting attributes not supported by properties */ if (__Pyx_SetNameInClass(__pyx_t_2, __pyx_n_s_VALID_KEYS, Py_None) < 0) __PYX_ERR(0, 34, __pyx_L1_error) /* "borg/item.pyx":36 * VALID_KEYS = None # override with in child class * * __slots__ = ("_dict", ) # avoid setting attributes not supported by properties # <<<<<<<<<<<<<< * * def __init__(self, data_dict=None, internal_dict=None, **kw): */ if (__Pyx_SetNameInClass(__pyx_t_2, __pyx_n_s_slots, __pyx_tuple__13) < 0) __PYX_ERR(0, 36, __pyx_L1_error) /* "borg/item.pyx":38 * __slots__ = ("_dict", ) # avoid setting attributes not supported by properties * * def __init__(self, data_dict=None, internal_dict=None, **kw): # <<<<<<<<<<<<<< * self._dict = {} * if internal_dict is None: */ __pyx_t_1 = __Pyx_CyFunction_New(&__pyx_mdef_4borg_4item_8PropDict_1__init__, 0, __pyx_n_s_PropDict___init, NULL, __pyx_n_s_borg_item, __pyx_d, ((PyObject *)__pyx_codeobj__15)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 38, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_CyFunction_SetDefaultsTuple(__pyx_t_1, __pyx_tuple__16); if (__Pyx_SetNameInClass(__pyx_t_2, __pyx_n_s_init, __pyx_t_1) < 0) __PYX_ERR(0, 38, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "borg/item.pyx":55 * self.update(data) * * def update(self, d): # <<<<<<<<<<<<<< * for k, v in d.items(): * if isinstance(k, bytes): */ __pyx_t_1 = __Pyx_CyFunction_New(&__pyx_mdef_4borg_4item_8PropDict_3update, 0, __pyx_n_s_PropDict_update, NULL, __pyx_n_s_borg_item, __pyx_d, ((PyObject *)__pyx_codeobj__18)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 55, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); if (__Pyx_SetNameInClass(__pyx_t_2, __pyx_n_s_update, __pyx_t_1) < 0) __PYX_ERR(0, 55, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "borg/item.pyx":61 * setattr(self, self._check_key(k), v) * * def update_internal(self, d): # <<<<<<<<<<<<<< * for k, v in d.items(): * if isinstance(k, bytes): */ __pyx_t_1 = __Pyx_CyFunction_New(&__pyx_mdef_4borg_4item_8PropDict_5update_internal, 0, 
__pyx_n_s_PropDict_update_internal, NULL, __pyx_n_s_borg_item, __pyx_d, ((PyObject *)__pyx_codeobj__20)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 61, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); if (__Pyx_SetNameInClass(__pyx_t_2, __pyx_n_s_update_internal, __pyx_t_1) < 0) __PYX_ERR(0, 61, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "borg/item.pyx":67 * self._dict[k] = v * * def __eq__(self, other): # <<<<<<<<<<<<<< * return self.as_dict() == other.as_dict() * */ __pyx_t_1 = __Pyx_CyFunction_New(&__pyx_mdef_4borg_4item_8PropDict_7__eq__, 0, __pyx_n_s_PropDict___eq, NULL, __pyx_n_s_borg_item, __pyx_d, ((PyObject *)__pyx_codeobj__22)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 67, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); if (__Pyx_SetNameInClass(__pyx_t_2, __pyx_n_s_eq, __pyx_t_1) < 0) __PYX_ERR(0, 67, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "borg/item.pyx":70 * return self.as_dict() == other.as_dict() * * def __repr__(self): # <<<<<<<<<<<<<< * return '%s(internal_dict=%r)' % (self.__class__.__name__, self._dict) * */ __pyx_t_1 = __Pyx_CyFunction_New(&__pyx_mdef_4borg_4item_8PropDict_9__repr__, 0, __pyx_n_s_PropDict___repr, NULL, __pyx_n_s_borg_item, __pyx_d, ((PyObject *)__pyx_codeobj__24)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 70, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); if (__Pyx_SetNameInClass(__pyx_t_2, __pyx_n_s_repr, __pyx_t_1) < 0) __PYX_ERR(0, 70, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "borg/item.pyx":73 * return '%s(internal_dict=%r)' % (self.__class__.__name__, self._dict) * * def as_dict(self): # <<<<<<<<<<<<<< * """return the internal dictionary""" * return StableDict(self._dict) */ __pyx_t_1 = __Pyx_CyFunction_New(&__pyx_mdef_4borg_4item_8PropDict_11as_dict, 0, __pyx_n_s_PropDict_as_dict, NULL, __pyx_n_s_borg_item, __pyx_d, ((PyObject *)__pyx_codeobj__26)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 73, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); if (__Pyx_SetNameInClass(__pyx_t_2, __pyx_n_s_as_dict, __pyx_t_1) < 0) __PYX_ERR(0, 73, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "borg/item.pyx":77 * return StableDict(self._dict) * * def _check_key(self, key): # <<<<<<<<<<<<<< * """make sure key is of type str and known""" * if not isinstance(key, str): */ __pyx_t_1 = __Pyx_CyFunction_New(&__pyx_mdef_4borg_4item_8PropDict_13_check_key, 0, __pyx_n_s_PropDict__check_key, NULL, __pyx_n_s_borg_item, __pyx_d, ((PyObject *)__pyx_codeobj__28)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 77, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); if (__Pyx_SetNameInClass(__pyx_t_2, __pyx_n_s_check_key, __pyx_t_1) < 0) __PYX_ERR(0, 77, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "borg/item.pyx":85 * return key * * def __contains__(self, key): # <<<<<<<<<<<<<< * """do we have this key?""" * return self._check_key(key) in self._dict */ __pyx_t_1 = __Pyx_CyFunction_New(&__pyx_mdef_4borg_4item_8PropDict_15__contains__, 0, __pyx_n_s_PropDict___contains, NULL, __pyx_n_s_borg_item, __pyx_d, ((PyObject *)__pyx_codeobj__30)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 85, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); if (__Pyx_SetNameInClass(__pyx_t_2, __pyx_n_s_contains, __pyx_t_1) < 0) __PYX_ERR(0, 85, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "borg/item.pyx":89 * return self._check_key(key) in self._dict * * def get(self, key, default=None): # <<<<<<<<<<<<<< * """get value for key, return default if key does not exist""" * return getattr(self, self._check_key(key), default) */ __pyx_t_1 = 
__Pyx_CyFunction_New(&__pyx_mdef_4borg_4item_8PropDict_17get, 0, __pyx_n_s_PropDict_get, NULL, __pyx_n_s_borg_item, __pyx_d, ((PyObject *)__pyx_codeobj__32)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 89, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_CyFunction_SetDefaultsTuple(__pyx_t_1, __pyx_tuple__33); if (__Pyx_SetNameInClass(__pyx_t_2, __pyx_n_s_get_2, __pyx_t_1) < 0) __PYX_ERR(0, 89, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "borg/item.pyx":94 * * @staticmethod * def _make_property(key, value_type, value_type_name=None, encode=None, decode=None): # <<<<<<<<<<<<<< * """return a property that deals with self._dict[key]""" * assert isinstance(key, str) */ __pyx_t_1 = __Pyx_CyFunction_New(&__pyx_mdef_4borg_4item_8PropDict_19_make_property, __Pyx_CYFUNCTION_STATICMETHOD, __pyx_n_s_PropDict__make_property, NULL, __pyx_n_s_borg_item, __pyx_d, ((PyObject *)__pyx_codeobj__35)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 94, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_CyFunction_SetDefaultsTuple(__pyx_t_1, __pyx_tuple__36); /* "borg/item.pyx":93 * return getattr(self, self._check_key(key), default) * * @staticmethod # <<<<<<<<<<<<<< * def _make_property(key, value_type, value_type_name=None, encode=None, decode=None): * """return a property that deals with self._dict[key]""" */ __pyx_t_3 = __Pyx_PyObject_CallOneArg(__pyx_builtin_staticmethod, __pyx_t_1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 93, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; if (__Pyx_SetNameInClass(__pyx_t_2, __pyx_n_s_make_property, __pyx_t_3) < 0) __PYX_ERR(0, 94, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; /* "borg/item.pyx":14 * * * class PropDict: # <<<<<<<<<<<<<< * """ * Manage a dictionary via properties. */ __pyx_t_3 = __Pyx_Py3ClassCreate(((PyObject*)&__Pyx_DefaultClassType), __pyx_n_s_PropDict, __pyx_empty_tuple, __pyx_t_2, NULL, 0, 0); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 14, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); if (PyDict_SetItem(__pyx_d, __pyx_n_s_PropDict, __pyx_t_3) < 0) __PYX_ERR(0, 14, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "borg/item.pyx":128 * * * ChunkListEntry = namedtuple('ChunkListEntry', 'id size csize') # <<<<<<<<<<<<<< * * class Item(PropDict): */ __Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_n_s_namedtuple); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 128, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = __Pyx_PyObject_Call(__pyx_t_2, __pyx_tuple__37, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 128, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; if (PyDict_SetItem(__pyx_d, __pyx_n_s_ChunkListEntry, __pyx_t_3) < 0) __PYX_ERR(0, 128, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; /* "borg/item.pyx":130 * ChunkListEntry = namedtuple('ChunkListEntry', 'id size csize') * * class Item(PropDict): # <<<<<<<<<<<<<< * """ * Item abstraction that deals with validation and the low-level details internally: */ __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_n_s_PropDict); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 130, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 130, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = __Pyx_CalculateMetaclass(NULL, __pyx_t_2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 130, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_1 = __Pyx_Py3MetaclassPrepare(__pyx_t_3, __pyx_t_2, 
__pyx_n_s_Item, __pyx_n_s_Item, (PyObject *) NULL, __pyx_n_s_borg_item, __pyx_kp_s_Item_abstraction_that_deals_wit); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 130, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); /* "borg/item.pyx":147 * """ * * VALID_KEYS = ITEM_KEYS | {'deleted', 'nlink', } # str-typed keys # <<<<<<<<<<<<<< * * __slots__ = ("_dict", ) # avoid setting attributes not supported by properties */ __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_n_s_ITEM_KEYS); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 147, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_5 = PySet_New(0); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 147, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); if (PySet_Add(__pyx_t_5, __pyx_n_u_deleted) < 0) __PYX_ERR(0, 147, __pyx_L1_error) if (PySet_Add(__pyx_t_5, __pyx_n_u_nlink) < 0) __PYX_ERR(0, 147, __pyx_L1_error) __pyx_t_6 = PyNumber_Or(__pyx_t_4, __pyx_t_5); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 147, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (__Pyx_SetNameInClass(__pyx_t_1, __pyx_n_s_VALID_KEYS, __pyx_t_6) < 0) __PYX_ERR(0, 147, __pyx_L1_error) __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; /* "borg/item.pyx":149 * VALID_KEYS = ITEM_KEYS | {'deleted', 'nlink', } # str-typed keys * * __slots__ = ("_dict", ) # avoid setting attributes not supported by properties # <<<<<<<<<<<<<< * * # properties statically defined, so that IDEs can know their names: */ if (__Pyx_SetNameInClass(__pyx_t_1, __pyx_n_s_slots, __pyx_tuple__13) < 0) __PYX_ERR(0, 149, __pyx_L1_error) /* "borg/item.pyx":153 * # properties statically defined, so that IDEs can know their names: * * path = PropDict._make_property('path', str, 'surrogate-escaped str', encode=safe_encode, decode=safe_decode) # <<<<<<<<<<<<<< * source = PropDict._make_property('source', str, 'surrogate-escaped str', encode=safe_encode, decode=safe_decode) * user = PropDict._make_property('user', (str, type(None)), 'surrogate-escaped str or None', encode=safe_encode, decode=safe_decode) */ __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_n_s_PropDict); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 153, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_n_s_make_property); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 153, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __pyx_t_6 = PyTuple_New(3); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 153, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_INCREF(__pyx_n_u_path); __Pyx_GIVEREF(__pyx_n_u_path); PyTuple_SET_ITEM(__pyx_t_6, 0, __pyx_n_u_path); __Pyx_INCREF(((PyObject *)(&PyUnicode_Type))); __Pyx_GIVEREF(((PyObject *)(&PyUnicode_Type))); PyTuple_SET_ITEM(__pyx_t_6, 1, ((PyObject *)(&PyUnicode_Type))); __Pyx_INCREF(__pyx_kp_u_surrogate_escaped_str); __Pyx_GIVEREF(__pyx_kp_u_surrogate_escaped_str); PyTuple_SET_ITEM(__pyx_t_6, 2, __pyx_kp_u_surrogate_escaped_str); __pyx_t_4 = __Pyx_PyDict_NewPresized(2); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 153, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_GetModuleGlobalName(__pyx_t_7, __pyx_n_s_safe_encode); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 153, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); if (PyDict_SetItem(__pyx_t_4, __pyx_n_s_encode, __pyx_t_7) < 0) __PYX_ERR(0, 153, __pyx_L1_error) __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_GetModuleGlobalName(__pyx_t_7, __pyx_n_s_safe_decode); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 153, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); if (PyDict_SetItem(__pyx_t_4, __pyx_n_s_decode, __pyx_t_7) < 0) __PYX_ERR(0, 
153, __pyx_L1_error) __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; __pyx_t_7 = __Pyx_PyObject_Call(__pyx_t_5, __pyx_t_6, __pyx_t_4); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 153, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (__Pyx_SetNameInClass(__pyx_t_1, __pyx_n_s_path, __pyx_t_7) < 0) __PYX_ERR(0, 153, __pyx_L1_error) __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; /* "borg/item.pyx":154 * * path = PropDict._make_property('path', str, 'surrogate-escaped str', encode=safe_encode, decode=safe_decode) * source = PropDict._make_property('source', str, 'surrogate-escaped str', encode=safe_encode, decode=safe_decode) # <<<<<<<<<<<<<< * user = PropDict._make_property('user', (str, type(None)), 'surrogate-escaped str or None', encode=safe_encode, decode=safe_decode) * group = PropDict._make_property('group', (str, type(None)), 'surrogate-escaped str or None', encode=safe_encode, decode=safe_decode) */ __Pyx_GetModuleGlobalName(__pyx_t_7, __pyx_n_s_PropDict); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 154, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_7, __pyx_n_s_make_property); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 154, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; __pyx_t_7 = PyTuple_New(3); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 154, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_INCREF(__pyx_n_u_source); __Pyx_GIVEREF(__pyx_n_u_source); PyTuple_SET_ITEM(__pyx_t_7, 0, __pyx_n_u_source); __Pyx_INCREF(((PyObject *)(&PyUnicode_Type))); __Pyx_GIVEREF(((PyObject *)(&PyUnicode_Type))); PyTuple_SET_ITEM(__pyx_t_7, 1, ((PyObject *)(&PyUnicode_Type))); __Pyx_INCREF(__pyx_kp_u_surrogate_escaped_str); __Pyx_GIVEREF(__pyx_kp_u_surrogate_escaped_str); PyTuple_SET_ITEM(__pyx_t_7, 2, __pyx_kp_u_surrogate_escaped_str); __pyx_t_6 = __Pyx_PyDict_NewPresized(2); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 154, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_n_s_safe_encode); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 154, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); if (PyDict_SetItem(__pyx_t_6, __pyx_n_s_encode, __pyx_t_5) < 0) __PYX_ERR(0, 154, __pyx_L1_error) __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_n_s_safe_decode); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 154, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); if (PyDict_SetItem(__pyx_t_6, __pyx_n_s_decode, __pyx_t_5) < 0) __PYX_ERR(0, 154, __pyx_L1_error) __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_5 = __Pyx_PyObject_Call(__pyx_t_4, __pyx_t_7, __pyx_t_6); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 154, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; if (__Pyx_SetNameInClass(__pyx_t_1, __pyx_n_s_source, __pyx_t_5) < 0) __PYX_ERR(0, 154, __pyx_L1_error) __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; /* "borg/item.pyx":155 * path = PropDict._make_property('path', str, 'surrogate-escaped str', encode=safe_encode, decode=safe_decode) * source = PropDict._make_property('source', str, 'surrogate-escaped str', encode=safe_encode, decode=safe_decode) * user = PropDict._make_property('user', (str, type(None)), 'surrogate-escaped str or None', encode=safe_encode, decode=safe_decode) # <<<<<<<<<<<<<< * group = PropDict._make_property('group', (str, type(None)), 'surrogate-escaped str or None', encode=safe_encode, decode=safe_decode) * */ 
__Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_n_s_PropDict); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 155, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_t_5, __pyx_n_s_make_property); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 155, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_5 = PyTuple_New(2); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 155, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_INCREF(((PyObject *)(&PyUnicode_Type))); __Pyx_GIVEREF(((PyObject *)(&PyUnicode_Type))); PyTuple_SET_ITEM(__pyx_t_5, 0, ((PyObject *)(&PyUnicode_Type))); __Pyx_INCREF(((PyObject *)Py_TYPE(Py_None))); __Pyx_GIVEREF(((PyObject *)Py_TYPE(Py_None))); PyTuple_SET_ITEM(__pyx_t_5, 1, ((PyObject *)Py_TYPE(Py_None))); __pyx_t_7 = PyTuple_New(3); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 155, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_INCREF(__pyx_n_u_user); __Pyx_GIVEREF(__pyx_n_u_user); PyTuple_SET_ITEM(__pyx_t_7, 0, __pyx_n_u_user); __Pyx_GIVEREF(__pyx_t_5); PyTuple_SET_ITEM(__pyx_t_7, 1, __pyx_t_5); __Pyx_INCREF(__pyx_kp_u_surrogate_escaped_str_or_None); __Pyx_GIVEREF(__pyx_kp_u_surrogate_escaped_str_or_None); PyTuple_SET_ITEM(__pyx_t_7, 2, __pyx_kp_u_surrogate_escaped_str_or_None); __pyx_t_5 = 0; __pyx_t_5 = __Pyx_PyDict_NewPresized(2); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 155, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_n_s_safe_encode); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 155, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); if (PyDict_SetItem(__pyx_t_5, __pyx_n_s_encode, __pyx_t_4) < 0) __PYX_ERR(0, 155, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_n_s_safe_decode); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 155, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); if (PyDict_SetItem(__pyx_t_5, __pyx_n_s_decode, __pyx_t_4) < 0) __PYX_ERR(0, 155, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = __Pyx_PyObject_Call(__pyx_t_6, __pyx_t_7, __pyx_t_5); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 155, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (__Pyx_SetNameInClass(__pyx_t_1, __pyx_n_s_user, __pyx_t_4) < 0) __PYX_ERR(0, 155, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; /* "borg/item.pyx":156 * source = PropDict._make_property('source', str, 'surrogate-escaped str', encode=safe_encode, decode=safe_decode) * user = PropDict._make_property('user', (str, type(None)), 'surrogate-escaped str or None', encode=safe_encode, decode=safe_decode) * group = PropDict._make_property('group', (str, type(None)), 'surrogate-escaped str or None', encode=safe_encode, decode=safe_decode) # <<<<<<<<<<<<<< * * acl_access = PropDict._make_property('acl_access', bytes) */ __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_n_s_PropDict); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 156, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_n_s_make_property); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 156, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = PyTuple_New(2); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 156, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_INCREF(((PyObject *)(&PyUnicode_Type))); __Pyx_GIVEREF(((PyObject *)(&PyUnicode_Type))); PyTuple_SET_ITEM(__pyx_t_4, 0, ((PyObject *)(&PyUnicode_Type))); __Pyx_INCREF(((PyObject *)Py_TYPE(Py_None))); __Pyx_GIVEREF(((PyObject 
*)Py_TYPE(Py_None))); PyTuple_SET_ITEM(__pyx_t_4, 1, ((PyObject *)Py_TYPE(Py_None))); __pyx_t_7 = PyTuple_New(3); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 156, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_INCREF(__pyx_n_u_group); __Pyx_GIVEREF(__pyx_n_u_group); PyTuple_SET_ITEM(__pyx_t_7, 0, __pyx_n_u_group); __Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_7, 1, __pyx_t_4); __Pyx_INCREF(__pyx_kp_u_surrogate_escaped_str_or_None); __Pyx_GIVEREF(__pyx_kp_u_surrogate_escaped_str_or_None); PyTuple_SET_ITEM(__pyx_t_7, 2, __pyx_kp_u_surrogate_escaped_str_or_None); __pyx_t_4 = 0; __pyx_t_4 = __Pyx_PyDict_NewPresized(2); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 156, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_n_s_safe_encode); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 156, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); if (PyDict_SetItem(__pyx_t_4, __pyx_n_s_encode, __pyx_t_6) < 0) __PYX_ERR(0, 156, __pyx_L1_error) __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_n_s_safe_decode); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 156, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); if (PyDict_SetItem(__pyx_t_4, __pyx_n_s_decode, __pyx_t_6) < 0) __PYX_ERR(0, 156, __pyx_L1_error) __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __pyx_t_6 = __Pyx_PyObject_Call(__pyx_t_5, __pyx_t_7, __pyx_t_4); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 156, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (__Pyx_SetNameInClass(__pyx_t_1, __pyx_n_s_group, __pyx_t_6) < 0) __PYX_ERR(0, 156, __pyx_L1_error) __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; /* "borg/item.pyx":158 * group = PropDict._make_property('group', (str, type(None)), 'surrogate-escaped str or None', encode=safe_encode, decode=safe_decode) * * acl_access = PropDict._make_property('acl_access', bytes) # <<<<<<<<<<<<<< * acl_default = PropDict._make_property('acl_default', bytes) * acl_extended = PropDict._make_property('acl_extended', bytes) */ __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_n_s_PropDict); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 158, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_n_s_make_property); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 158, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = NULL; __pyx_t_8 = 0; if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_7))) { __pyx_t_4 = PyMethod_GET_SELF(__pyx_t_7); if (likely(__pyx_t_4)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_7); __Pyx_INCREF(__pyx_t_4); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_7, function); __pyx_t_8 = 1; } } #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_7)) { PyObject *__pyx_temp[3] = {__pyx_t_4, __pyx_n_u_acl_access, ((PyObject *)(&PyBytes_Type))}; __pyx_t_6 = __Pyx_PyFunction_FastCall(__pyx_t_7, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 158, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_GOTREF(__pyx_t_6); } else #endif #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_7)) { PyObject *__pyx_temp[3] = {__pyx_t_4, __pyx_n_u_acl_access, ((PyObject *)(&PyBytes_Type))}; __pyx_t_6 = __Pyx_PyCFunction_FastCall(__pyx_t_7, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 158, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_GOTREF(__pyx_t_6); } else #endif { __pyx_t_5 = PyTuple_New(2+__pyx_t_8); if 
(unlikely(!__pyx_t_5)) __PYX_ERR(0, 158, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); if (__pyx_t_4) { __Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_4); __pyx_t_4 = NULL; } __Pyx_INCREF(__pyx_n_u_acl_access); __Pyx_GIVEREF(__pyx_n_u_acl_access); PyTuple_SET_ITEM(__pyx_t_5, 0+__pyx_t_8, __pyx_n_u_acl_access); __Pyx_INCREF(((PyObject *)(&PyBytes_Type))); __Pyx_GIVEREF(((PyObject *)(&PyBytes_Type))); PyTuple_SET_ITEM(__pyx_t_5, 1+__pyx_t_8, ((PyObject *)(&PyBytes_Type))); __pyx_t_6 = __Pyx_PyObject_Call(__pyx_t_7, __pyx_t_5, NULL); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 158, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; } __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; if (__Pyx_SetNameInClass(__pyx_t_1, __pyx_n_s_acl_access, __pyx_t_6) < 0) __PYX_ERR(0, 158, __pyx_L1_error) __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; /* "borg/item.pyx":159 * * acl_access = PropDict._make_property('acl_access', bytes) * acl_default = PropDict._make_property('acl_default', bytes) # <<<<<<<<<<<<<< * acl_extended = PropDict._make_property('acl_extended', bytes) * acl_nfs4 = PropDict._make_property('acl_nfs4', bytes) */ __Pyx_GetModuleGlobalName(__pyx_t_7, __pyx_n_s_PropDict); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 159, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_7, __pyx_n_s_make_property); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 159, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; __pyx_t_7 = NULL; __pyx_t_8 = 0; if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_5))) { __pyx_t_7 = PyMethod_GET_SELF(__pyx_t_5); if (likely(__pyx_t_7)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_5); __Pyx_INCREF(__pyx_t_7); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_5, function); __pyx_t_8 = 1; } } #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_5)) { PyObject *__pyx_temp[3] = {__pyx_t_7, __pyx_n_u_acl_default, ((PyObject *)(&PyBytes_Type))}; __pyx_t_6 = __Pyx_PyFunction_FastCall(__pyx_t_5, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 159, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_GOTREF(__pyx_t_6); } else #endif #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_5)) { PyObject *__pyx_temp[3] = {__pyx_t_7, __pyx_n_u_acl_default, ((PyObject *)(&PyBytes_Type))}; __pyx_t_6 = __Pyx_PyCFunction_FastCall(__pyx_t_5, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 159, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_GOTREF(__pyx_t_6); } else #endif { __pyx_t_4 = PyTuple_New(2+__pyx_t_8); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 159, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); if (__pyx_t_7) { __Pyx_GIVEREF(__pyx_t_7); PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_7); __pyx_t_7 = NULL; } __Pyx_INCREF(__pyx_n_u_acl_default); __Pyx_GIVEREF(__pyx_n_u_acl_default); PyTuple_SET_ITEM(__pyx_t_4, 0+__pyx_t_8, __pyx_n_u_acl_default); __Pyx_INCREF(((PyObject *)(&PyBytes_Type))); __Pyx_GIVEREF(((PyObject *)(&PyBytes_Type))); PyTuple_SET_ITEM(__pyx_t_4, 1+__pyx_t_8, ((PyObject *)(&PyBytes_Type))); __pyx_t_6 = __Pyx_PyObject_Call(__pyx_t_5, __pyx_t_4, NULL); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 159, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; } __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (__Pyx_SetNameInClass(__pyx_t_1, __pyx_n_s_acl_default, __pyx_t_6) < 0) __PYX_ERR(0, 159, __pyx_L1_error) __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; /* "borg/item.pyx":160 * acl_access = 
PropDict._make_property('acl_access', bytes) * acl_default = PropDict._make_property('acl_default', bytes) * acl_extended = PropDict._make_property('acl_extended', bytes) # <<<<<<<<<<<<<< * acl_nfs4 = PropDict._make_property('acl_nfs4', bytes) * */ __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_n_s_PropDict); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 160, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_5, __pyx_n_s_make_property); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 160, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_5 = NULL; __pyx_t_8 = 0; if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_4))) { __pyx_t_5 = PyMethod_GET_SELF(__pyx_t_4); if (likely(__pyx_t_5)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_4); __Pyx_INCREF(__pyx_t_5); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_4, function); __pyx_t_8 = 1; } } #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_4)) { PyObject *__pyx_temp[3] = {__pyx_t_5, __pyx_n_u_acl_extended, ((PyObject *)(&PyBytes_Type))}; __pyx_t_6 = __Pyx_PyFunction_FastCall(__pyx_t_4, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 160, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_GOTREF(__pyx_t_6); } else #endif #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_4)) { PyObject *__pyx_temp[3] = {__pyx_t_5, __pyx_n_u_acl_extended, ((PyObject *)(&PyBytes_Type))}; __pyx_t_6 = __Pyx_PyCFunction_FastCall(__pyx_t_4, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 160, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_GOTREF(__pyx_t_6); } else #endif { __pyx_t_7 = PyTuple_New(2+__pyx_t_8); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 160, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); if (__pyx_t_5) { __Pyx_GIVEREF(__pyx_t_5); PyTuple_SET_ITEM(__pyx_t_7, 0, __pyx_t_5); __pyx_t_5 = NULL; } __Pyx_INCREF(__pyx_n_u_acl_extended); __Pyx_GIVEREF(__pyx_n_u_acl_extended); PyTuple_SET_ITEM(__pyx_t_7, 0+__pyx_t_8, __pyx_n_u_acl_extended); __Pyx_INCREF(((PyObject *)(&PyBytes_Type))); __Pyx_GIVEREF(((PyObject *)(&PyBytes_Type))); PyTuple_SET_ITEM(__pyx_t_7, 1+__pyx_t_8, ((PyObject *)(&PyBytes_Type))); __pyx_t_6 = __Pyx_PyObject_Call(__pyx_t_4, __pyx_t_7, NULL); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 160, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; } __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (__Pyx_SetNameInClass(__pyx_t_1, __pyx_n_s_acl_extended, __pyx_t_6) < 0) __PYX_ERR(0, 160, __pyx_L1_error) __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; /* "borg/item.pyx":161 * acl_default = PropDict._make_property('acl_default', bytes) * acl_extended = PropDict._make_property('acl_extended', bytes) * acl_nfs4 = PropDict._make_property('acl_nfs4', bytes) # <<<<<<<<<<<<<< * * mode = PropDict._make_property('mode', int) */ __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_n_s_PropDict); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 161, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_n_s_make_property); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 161, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = NULL; __pyx_t_8 = 0; if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_7))) { __pyx_t_4 = PyMethod_GET_SELF(__pyx_t_7); if (likely(__pyx_t_4)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_7); __Pyx_INCREF(__pyx_t_4); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_7, function); __pyx_t_8 = 1; } } 
#if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_7)) { PyObject *__pyx_temp[3] = {__pyx_t_4, __pyx_n_u_acl_nfs4, ((PyObject *)(&PyBytes_Type))}; __pyx_t_6 = __Pyx_PyFunction_FastCall(__pyx_t_7, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 161, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_GOTREF(__pyx_t_6); } else #endif #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_7)) { PyObject *__pyx_temp[3] = {__pyx_t_4, __pyx_n_u_acl_nfs4, ((PyObject *)(&PyBytes_Type))}; __pyx_t_6 = __Pyx_PyCFunction_FastCall(__pyx_t_7, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 161, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_GOTREF(__pyx_t_6); } else #endif { __pyx_t_5 = PyTuple_New(2+__pyx_t_8); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 161, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); if (__pyx_t_4) { __Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_4); __pyx_t_4 = NULL; } __Pyx_INCREF(__pyx_n_u_acl_nfs4); __Pyx_GIVEREF(__pyx_n_u_acl_nfs4); PyTuple_SET_ITEM(__pyx_t_5, 0+__pyx_t_8, __pyx_n_u_acl_nfs4); __Pyx_INCREF(((PyObject *)(&PyBytes_Type))); __Pyx_GIVEREF(((PyObject *)(&PyBytes_Type))); PyTuple_SET_ITEM(__pyx_t_5, 1+__pyx_t_8, ((PyObject *)(&PyBytes_Type))); __pyx_t_6 = __Pyx_PyObject_Call(__pyx_t_7, __pyx_t_5, NULL); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 161, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; } __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; if (__Pyx_SetNameInClass(__pyx_t_1, __pyx_n_s_acl_nfs4, __pyx_t_6) < 0) __PYX_ERR(0, 161, __pyx_L1_error) __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; /* "borg/item.pyx":163 * acl_nfs4 = PropDict._make_property('acl_nfs4', bytes) * * mode = PropDict._make_property('mode', int) # <<<<<<<<<<<<<< * uid = PropDict._make_property('uid', int) * gid = PropDict._make_property('gid', int) */ __Pyx_GetModuleGlobalName(__pyx_t_7, __pyx_n_s_PropDict); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 163, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_7, __pyx_n_s_make_property); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 163, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; __pyx_t_7 = NULL; __pyx_t_8 = 0; if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_5))) { __pyx_t_7 = PyMethod_GET_SELF(__pyx_t_5); if (likely(__pyx_t_7)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_5); __Pyx_INCREF(__pyx_t_7); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_5, function); __pyx_t_8 = 1; } } #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_5)) { PyObject *__pyx_temp[3] = {__pyx_t_7, __pyx_n_u_mode, ((PyObject *)(&PyInt_Type))}; __pyx_t_6 = __Pyx_PyFunction_FastCall(__pyx_t_5, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 163, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_GOTREF(__pyx_t_6); } else #endif #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_5)) { PyObject *__pyx_temp[3] = {__pyx_t_7, __pyx_n_u_mode, ((PyObject *)(&PyInt_Type))}; __pyx_t_6 = __Pyx_PyCFunction_FastCall(__pyx_t_5, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 163, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_GOTREF(__pyx_t_6); } else #endif { __pyx_t_4 = PyTuple_New(2+__pyx_t_8); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 163, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); if (__pyx_t_7) { __Pyx_GIVEREF(__pyx_t_7); PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_7); __pyx_t_7 = NULL; } 
__Pyx_INCREF(__pyx_n_u_mode); __Pyx_GIVEREF(__pyx_n_u_mode); PyTuple_SET_ITEM(__pyx_t_4, 0+__pyx_t_8, __pyx_n_u_mode); __Pyx_INCREF(((PyObject *)(&PyInt_Type))); __Pyx_GIVEREF(((PyObject *)(&PyInt_Type))); PyTuple_SET_ITEM(__pyx_t_4, 1+__pyx_t_8, ((PyObject *)(&PyInt_Type))); __pyx_t_6 = __Pyx_PyObject_Call(__pyx_t_5, __pyx_t_4, NULL); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 163, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; } __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (__Pyx_SetNameInClass(__pyx_t_1, __pyx_n_s_mode, __pyx_t_6) < 0) __PYX_ERR(0, 163, __pyx_L1_error) __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; /* "borg/item.pyx":164 * * mode = PropDict._make_property('mode', int) * uid = PropDict._make_property('uid', int) # <<<<<<<<<<<<<< * gid = PropDict._make_property('gid', int) * rdev = PropDict._make_property('rdev', int) */ __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_n_s_PropDict); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 164, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_5, __pyx_n_s_make_property); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 164, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_5 = NULL; __pyx_t_8 = 0; if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_4))) { __pyx_t_5 = PyMethod_GET_SELF(__pyx_t_4); if (likely(__pyx_t_5)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_4); __Pyx_INCREF(__pyx_t_5); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_4, function); __pyx_t_8 = 1; } } #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_4)) { PyObject *__pyx_temp[3] = {__pyx_t_5, __pyx_n_u_uid, ((PyObject *)(&PyInt_Type))}; __pyx_t_6 = __Pyx_PyFunction_FastCall(__pyx_t_4, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 164, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_GOTREF(__pyx_t_6); } else #endif #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_4)) { PyObject *__pyx_temp[3] = {__pyx_t_5, __pyx_n_u_uid, ((PyObject *)(&PyInt_Type))}; __pyx_t_6 = __Pyx_PyCFunction_FastCall(__pyx_t_4, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 164, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_GOTREF(__pyx_t_6); } else #endif { __pyx_t_7 = PyTuple_New(2+__pyx_t_8); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 164, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); if (__pyx_t_5) { __Pyx_GIVEREF(__pyx_t_5); PyTuple_SET_ITEM(__pyx_t_7, 0, __pyx_t_5); __pyx_t_5 = NULL; } __Pyx_INCREF(__pyx_n_u_uid); __Pyx_GIVEREF(__pyx_n_u_uid); PyTuple_SET_ITEM(__pyx_t_7, 0+__pyx_t_8, __pyx_n_u_uid); __Pyx_INCREF(((PyObject *)(&PyInt_Type))); __Pyx_GIVEREF(((PyObject *)(&PyInt_Type))); PyTuple_SET_ITEM(__pyx_t_7, 1+__pyx_t_8, ((PyObject *)(&PyInt_Type))); __pyx_t_6 = __Pyx_PyObject_Call(__pyx_t_4, __pyx_t_7, NULL); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 164, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; } __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (__Pyx_SetNameInClass(__pyx_t_1, __pyx_n_s_uid, __pyx_t_6) < 0) __PYX_ERR(0, 164, __pyx_L1_error) __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; /* "borg/item.pyx":165 * mode = PropDict._make_property('mode', int) * uid = PropDict._make_property('uid', int) * gid = PropDict._make_property('gid', int) # <<<<<<<<<<<<<< * rdev = PropDict._make_property('rdev', int) * bsdflags = PropDict._make_property('bsdflags', int) */ __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_n_s_PropDict); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 165, 
__pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_n_s_make_property); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 165, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = NULL; __pyx_t_8 = 0; if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_7))) { __pyx_t_4 = PyMethod_GET_SELF(__pyx_t_7); if (likely(__pyx_t_4)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_7); __Pyx_INCREF(__pyx_t_4); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_7, function); __pyx_t_8 = 1; } } #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_7)) { PyObject *__pyx_temp[3] = {__pyx_t_4, __pyx_n_u_gid, ((PyObject *)(&PyInt_Type))}; __pyx_t_6 = __Pyx_PyFunction_FastCall(__pyx_t_7, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 165, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_GOTREF(__pyx_t_6); } else #endif #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_7)) { PyObject *__pyx_temp[3] = {__pyx_t_4, __pyx_n_u_gid, ((PyObject *)(&PyInt_Type))}; __pyx_t_6 = __Pyx_PyCFunction_FastCall(__pyx_t_7, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 165, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_GOTREF(__pyx_t_6); } else #endif { __pyx_t_5 = PyTuple_New(2+__pyx_t_8); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 165, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); if (__pyx_t_4) { __Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_4); __pyx_t_4 = NULL; } __Pyx_INCREF(__pyx_n_u_gid); __Pyx_GIVEREF(__pyx_n_u_gid); PyTuple_SET_ITEM(__pyx_t_5, 0+__pyx_t_8, __pyx_n_u_gid); __Pyx_INCREF(((PyObject *)(&PyInt_Type))); __Pyx_GIVEREF(((PyObject *)(&PyInt_Type))); PyTuple_SET_ITEM(__pyx_t_5, 1+__pyx_t_8, ((PyObject *)(&PyInt_Type))); __pyx_t_6 = __Pyx_PyObject_Call(__pyx_t_7, __pyx_t_5, NULL); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 165, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; } __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; if (__Pyx_SetNameInClass(__pyx_t_1, __pyx_n_s_gid, __pyx_t_6) < 0) __PYX_ERR(0, 165, __pyx_L1_error) __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; /* "borg/item.pyx":166 * uid = PropDict._make_property('uid', int) * gid = PropDict._make_property('gid', int) * rdev = PropDict._make_property('rdev', int) # <<<<<<<<<<<<<< * bsdflags = PropDict._make_property('bsdflags', int) * */ __Pyx_GetModuleGlobalName(__pyx_t_7, __pyx_n_s_PropDict); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 166, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_7, __pyx_n_s_make_property); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 166, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; __pyx_t_7 = NULL; __pyx_t_8 = 0; if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_5))) { __pyx_t_7 = PyMethod_GET_SELF(__pyx_t_5); if (likely(__pyx_t_7)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_5); __Pyx_INCREF(__pyx_t_7); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_5, function); __pyx_t_8 = 1; } } #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_5)) { PyObject *__pyx_temp[3] = {__pyx_t_7, __pyx_n_u_rdev, ((PyObject *)(&PyInt_Type))}; __pyx_t_6 = __Pyx_PyFunction_FastCall(__pyx_t_5, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 166, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_GOTREF(__pyx_t_6); } else #endif #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_5)) { PyObject 
*__pyx_temp[3] = {__pyx_t_7, __pyx_n_u_rdev, ((PyObject *)(&PyInt_Type))}; __pyx_t_6 = __Pyx_PyCFunction_FastCall(__pyx_t_5, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 166, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_GOTREF(__pyx_t_6); } else #endif { __pyx_t_4 = PyTuple_New(2+__pyx_t_8); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 166, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); if (__pyx_t_7) { __Pyx_GIVEREF(__pyx_t_7); PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_7); __pyx_t_7 = NULL; } __Pyx_INCREF(__pyx_n_u_rdev); __Pyx_GIVEREF(__pyx_n_u_rdev); PyTuple_SET_ITEM(__pyx_t_4, 0+__pyx_t_8, __pyx_n_u_rdev); __Pyx_INCREF(((PyObject *)(&PyInt_Type))); __Pyx_GIVEREF(((PyObject *)(&PyInt_Type))); PyTuple_SET_ITEM(__pyx_t_4, 1+__pyx_t_8, ((PyObject *)(&PyInt_Type))); __pyx_t_6 = __Pyx_PyObject_Call(__pyx_t_5, __pyx_t_4, NULL); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 166, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; } __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (__Pyx_SetNameInClass(__pyx_t_1, __pyx_n_s_rdev, __pyx_t_6) < 0) __PYX_ERR(0, 166, __pyx_L1_error) __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; /* "borg/item.pyx":167 * gid = PropDict._make_property('gid', int) * rdev = PropDict._make_property('rdev', int) * bsdflags = PropDict._make_property('bsdflags', int) # <<<<<<<<<<<<<< * * # note: we need to keep the bigint conversion for compatibility with borg 1.0 archives. */ __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_n_s_PropDict); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 167, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_5, __pyx_n_s_make_property); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 167, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_5 = NULL; __pyx_t_8 = 0; if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_4))) { __pyx_t_5 = PyMethod_GET_SELF(__pyx_t_4); if (likely(__pyx_t_5)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_4); __Pyx_INCREF(__pyx_t_5); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_4, function); __pyx_t_8 = 1; } } #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_4)) { PyObject *__pyx_temp[3] = {__pyx_t_5, __pyx_n_u_bsdflags, ((PyObject *)(&PyInt_Type))}; __pyx_t_6 = __Pyx_PyFunction_FastCall(__pyx_t_4, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 167, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_GOTREF(__pyx_t_6); } else #endif #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_4)) { PyObject *__pyx_temp[3] = {__pyx_t_5, __pyx_n_u_bsdflags, ((PyObject *)(&PyInt_Type))}; __pyx_t_6 = __Pyx_PyCFunction_FastCall(__pyx_t_4, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 167, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_GOTREF(__pyx_t_6); } else #endif { __pyx_t_7 = PyTuple_New(2+__pyx_t_8); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 167, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); if (__pyx_t_5) { __Pyx_GIVEREF(__pyx_t_5); PyTuple_SET_ITEM(__pyx_t_7, 0, __pyx_t_5); __pyx_t_5 = NULL; } __Pyx_INCREF(__pyx_n_u_bsdflags); __Pyx_GIVEREF(__pyx_n_u_bsdflags); PyTuple_SET_ITEM(__pyx_t_7, 0+__pyx_t_8, __pyx_n_u_bsdflags); __Pyx_INCREF(((PyObject *)(&PyInt_Type))); __Pyx_GIVEREF(((PyObject *)(&PyInt_Type))); PyTuple_SET_ITEM(__pyx_t_7, 1+__pyx_t_8, ((PyObject *)(&PyInt_Type))); __pyx_t_6 = __Pyx_PyObject_Call(__pyx_t_4, __pyx_t_7, NULL); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 167, __pyx_L1_error) 
__Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; } __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (__Pyx_SetNameInClass(__pyx_t_1, __pyx_n_s_bsdflags, __pyx_t_6) < 0) __PYX_ERR(0, 167, __pyx_L1_error) __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; /* "borg/item.pyx":170 * * # note: we need to keep the bigint conversion for compatibility with borg 1.0 archives. * atime = PropDict._make_property('atime', int, 'bigint', encode=int_to_bigint, decode=bigint_to_int) # <<<<<<<<<<<<<< * ctime = PropDict._make_property('ctime', int, 'bigint', encode=int_to_bigint, decode=bigint_to_int) * mtime = PropDict._make_property('mtime', int, 'bigint', encode=int_to_bigint, decode=bigint_to_int) */ __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_n_s_PropDict); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 170, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_n_s_make_property); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 170, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __pyx_t_6 = PyTuple_New(3); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 170, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_INCREF(__pyx_n_u_atime); __Pyx_GIVEREF(__pyx_n_u_atime); PyTuple_SET_ITEM(__pyx_t_6, 0, __pyx_n_u_atime); __Pyx_INCREF(((PyObject *)(&PyInt_Type))); __Pyx_GIVEREF(((PyObject *)(&PyInt_Type))); PyTuple_SET_ITEM(__pyx_t_6, 1, ((PyObject *)(&PyInt_Type))); __Pyx_INCREF(__pyx_n_u_bigint); __Pyx_GIVEREF(__pyx_n_u_bigint); PyTuple_SET_ITEM(__pyx_t_6, 2, __pyx_n_u_bigint); __pyx_t_7 = __Pyx_PyDict_NewPresized(2); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 170, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_n_s_int_to_bigint); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 170, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); if (PyDict_SetItem(__pyx_t_7, __pyx_n_s_encode, __pyx_t_5) < 0) __PYX_ERR(0, 170, __pyx_L1_error) __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_n_s_bigint_to_int); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 170, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); if (PyDict_SetItem(__pyx_t_7, __pyx_n_s_decode, __pyx_t_5) < 0) __PYX_ERR(0, 170, __pyx_L1_error) __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_5 = __Pyx_PyObject_Call(__pyx_t_4, __pyx_t_6, __pyx_t_7); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 170, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; if (__Pyx_SetNameInClass(__pyx_t_1, __pyx_n_s_atime, __pyx_t_5) < 0) __PYX_ERR(0, 170, __pyx_L1_error) __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; /* "borg/item.pyx":171 * # note: we need to keep the bigint conversion for compatibility with borg 1.0 archives. 
* atime = PropDict._make_property('atime', int, 'bigint', encode=int_to_bigint, decode=bigint_to_int) * ctime = PropDict._make_property('ctime', int, 'bigint', encode=int_to_bigint, decode=bigint_to_int) # <<<<<<<<<<<<<< * mtime = PropDict._make_property('mtime', int, 'bigint', encode=int_to_bigint, decode=bigint_to_int) * birthtime = PropDict._make_property('birthtime', int, 'bigint', encode=int_to_bigint, decode=bigint_to_int) */ __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_n_s_PropDict); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 171, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_5, __pyx_n_s_make_property); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 171, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_5 = PyTuple_New(3); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 171, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_INCREF(__pyx_n_u_ctime); __Pyx_GIVEREF(__pyx_n_u_ctime); PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_n_u_ctime); __Pyx_INCREF(((PyObject *)(&PyInt_Type))); __Pyx_GIVEREF(((PyObject *)(&PyInt_Type))); PyTuple_SET_ITEM(__pyx_t_5, 1, ((PyObject *)(&PyInt_Type))); __Pyx_INCREF(__pyx_n_u_bigint); __Pyx_GIVEREF(__pyx_n_u_bigint); PyTuple_SET_ITEM(__pyx_t_5, 2, __pyx_n_u_bigint); __pyx_t_6 = __Pyx_PyDict_NewPresized(2); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 171, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_n_s_int_to_bigint); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 171, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); if (PyDict_SetItem(__pyx_t_6, __pyx_n_s_encode, __pyx_t_4) < 0) __PYX_ERR(0, 171, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_n_s_bigint_to_int); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 171, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); if (PyDict_SetItem(__pyx_t_6, __pyx_n_s_decode, __pyx_t_4) < 0) __PYX_ERR(0, 171, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = __Pyx_PyObject_Call(__pyx_t_7, __pyx_t_5, __pyx_t_6); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 171, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; if (__Pyx_SetNameInClass(__pyx_t_1, __pyx_n_s_ctime, __pyx_t_4) < 0) __PYX_ERR(0, 171, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; /* "borg/item.pyx":172 * atime = PropDict._make_property('atime', int, 'bigint', encode=int_to_bigint, decode=bigint_to_int) * ctime = PropDict._make_property('ctime', int, 'bigint', encode=int_to_bigint, decode=bigint_to_int) * mtime = PropDict._make_property('mtime', int, 'bigint', encode=int_to_bigint, decode=bigint_to_int) # <<<<<<<<<<<<<< * birthtime = PropDict._make_property('birthtime', int, 'bigint', encode=int_to_bigint, decode=bigint_to_int) * */ __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_n_s_PropDict); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 172, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_n_s_make_property); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 172, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = PyTuple_New(3); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 172, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_INCREF(__pyx_n_u_mtime); __Pyx_GIVEREF(__pyx_n_u_mtime); PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_n_u_mtime); __Pyx_INCREF(((PyObject *)(&PyInt_Type))); __Pyx_GIVEREF(((PyObject *)(&PyInt_Type))); PyTuple_SET_ITEM(__pyx_t_4, 1, ((PyObject 
*)(&PyInt_Type))); __Pyx_INCREF(__pyx_n_u_bigint); __Pyx_GIVEREF(__pyx_n_u_bigint); PyTuple_SET_ITEM(__pyx_t_4, 2, __pyx_n_u_bigint); __pyx_t_5 = __Pyx_PyDict_NewPresized(2); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 172, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_GetModuleGlobalName(__pyx_t_7, __pyx_n_s_int_to_bigint); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 172, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); if (PyDict_SetItem(__pyx_t_5, __pyx_n_s_encode, __pyx_t_7) < 0) __PYX_ERR(0, 172, __pyx_L1_error) __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_GetModuleGlobalName(__pyx_t_7, __pyx_n_s_bigint_to_int); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 172, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); if (PyDict_SetItem(__pyx_t_5, __pyx_n_s_decode, __pyx_t_7) < 0) __PYX_ERR(0, 172, __pyx_L1_error) __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; __pyx_t_7 = __Pyx_PyObject_Call(__pyx_t_6, __pyx_t_4, __pyx_t_5); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 172, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (__Pyx_SetNameInClass(__pyx_t_1, __pyx_n_s_mtime, __pyx_t_7) < 0) __PYX_ERR(0, 172, __pyx_L1_error) __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; /* "borg/item.pyx":173 * ctime = PropDict._make_property('ctime', int, 'bigint', encode=int_to_bigint, decode=bigint_to_int) * mtime = PropDict._make_property('mtime', int, 'bigint', encode=int_to_bigint, decode=bigint_to_int) * birthtime = PropDict._make_property('birthtime', int, 'bigint', encode=int_to_bigint, decode=bigint_to_int) # <<<<<<<<<<<<<< * * # size is only present for items with a chunk list and then it is sum(chunk_sizes) */ __Pyx_GetModuleGlobalName(__pyx_t_7, __pyx_n_s_PropDict); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 173, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_7, __pyx_n_s_make_property); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 173, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; __pyx_t_7 = PyTuple_New(3); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 173, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_INCREF(__pyx_n_u_birthtime); __Pyx_GIVEREF(__pyx_n_u_birthtime); PyTuple_SET_ITEM(__pyx_t_7, 0, __pyx_n_u_birthtime); __Pyx_INCREF(((PyObject *)(&PyInt_Type))); __Pyx_GIVEREF(((PyObject *)(&PyInt_Type))); PyTuple_SET_ITEM(__pyx_t_7, 1, ((PyObject *)(&PyInt_Type))); __Pyx_INCREF(__pyx_n_u_bigint); __Pyx_GIVEREF(__pyx_n_u_bigint); PyTuple_SET_ITEM(__pyx_t_7, 2, __pyx_n_u_bigint); __pyx_t_4 = __Pyx_PyDict_NewPresized(2); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 173, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_n_s_int_to_bigint); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 173, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); if (PyDict_SetItem(__pyx_t_4, __pyx_n_s_encode, __pyx_t_6) < 0) __PYX_ERR(0, 173, __pyx_L1_error) __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_n_s_bigint_to_int); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 173, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); if (PyDict_SetItem(__pyx_t_4, __pyx_n_s_decode, __pyx_t_6) < 0) __PYX_ERR(0, 173, __pyx_L1_error) __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __pyx_t_6 = __Pyx_PyObject_Call(__pyx_t_5, __pyx_t_7, __pyx_t_4); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 173, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (__Pyx_SetNameInClass(__pyx_t_1, 
__pyx_n_s_birthtime, __pyx_t_6) < 0) __PYX_ERR(0, 173, __pyx_L1_error) __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; /* "borg/item.pyx":177 * # size is only present for items with a chunk list and then it is sum(chunk_sizes) * # compatibility note: this is a new feature, in old archives size will be missing. * size = PropDict._make_property('size', int) # <<<<<<<<<<<<<< * * hardlink_master = PropDict._make_property('hardlink_master', bool) */ __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_n_s_PropDict); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 177, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_n_s_make_property); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 177, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = NULL; __pyx_t_8 = 0; if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_7))) { __pyx_t_4 = PyMethod_GET_SELF(__pyx_t_7); if (likely(__pyx_t_4)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_7); __Pyx_INCREF(__pyx_t_4); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_7, function); __pyx_t_8 = 1; } } #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_7)) { PyObject *__pyx_temp[3] = {__pyx_t_4, __pyx_n_u_size, ((PyObject *)(&PyInt_Type))}; __pyx_t_6 = __Pyx_PyFunction_FastCall(__pyx_t_7, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 177, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_GOTREF(__pyx_t_6); } else #endif #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_7)) { PyObject *__pyx_temp[3] = {__pyx_t_4, __pyx_n_u_size, ((PyObject *)(&PyInt_Type))}; __pyx_t_6 = __Pyx_PyCFunction_FastCall(__pyx_t_7, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 177, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_GOTREF(__pyx_t_6); } else #endif { __pyx_t_5 = PyTuple_New(2+__pyx_t_8); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 177, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); if (__pyx_t_4) { __Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_4); __pyx_t_4 = NULL; } __Pyx_INCREF(__pyx_n_u_size); __Pyx_GIVEREF(__pyx_n_u_size); PyTuple_SET_ITEM(__pyx_t_5, 0+__pyx_t_8, __pyx_n_u_size); __Pyx_INCREF(((PyObject *)(&PyInt_Type))); __Pyx_GIVEREF(((PyObject *)(&PyInt_Type))); PyTuple_SET_ITEM(__pyx_t_5, 1+__pyx_t_8, ((PyObject *)(&PyInt_Type))); __pyx_t_6 = __Pyx_PyObject_Call(__pyx_t_7, __pyx_t_5, NULL); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 177, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; } __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; if (__Pyx_SetNameInClass(__pyx_t_1, __pyx_n_s_size, __pyx_t_6) < 0) __PYX_ERR(0, 177, __pyx_L1_error) __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; /* "borg/item.pyx":179 * size = PropDict._make_property('size', int) * * hardlink_master = PropDict._make_property('hardlink_master', bool) # <<<<<<<<<<<<<< * * chunks = PropDict._make_property('chunks', (list, type(None)), 'list or None') */ __Pyx_GetModuleGlobalName(__pyx_t_7, __pyx_n_s_PropDict); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 179, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_7, __pyx_n_s_make_property); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 179, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; __pyx_t_7 = NULL; __pyx_t_8 = 0; if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_5))) { __pyx_t_7 = PyMethod_GET_SELF(__pyx_t_5); if (likely(__pyx_t_7)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_5); 
__Pyx_INCREF(__pyx_t_7); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_5, function); __pyx_t_8 = 1; } } #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_5)) { PyObject *__pyx_temp[3] = {__pyx_t_7, __pyx_n_u_hardlink_master, ((PyObject*)&PyBool_Type)}; __pyx_t_6 = __Pyx_PyFunction_FastCall(__pyx_t_5, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 179, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_GOTREF(__pyx_t_6); } else #endif #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_5)) { PyObject *__pyx_temp[3] = {__pyx_t_7, __pyx_n_u_hardlink_master, ((PyObject*)&PyBool_Type)}; __pyx_t_6 = __Pyx_PyCFunction_FastCall(__pyx_t_5, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 179, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_GOTREF(__pyx_t_6); } else #endif { __pyx_t_4 = PyTuple_New(2+__pyx_t_8); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 179, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); if (__pyx_t_7) { __Pyx_GIVEREF(__pyx_t_7); PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_7); __pyx_t_7 = NULL; } __Pyx_INCREF(__pyx_n_u_hardlink_master); __Pyx_GIVEREF(__pyx_n_u_hardlink_master); PyTuple_SET_ITEM(__pyx_t_4, 0+__pyx_t_8, __pyx_n_u_hardlink_master); __Pyx_INCREF(((PyObject*)&PyBool_Type)); __Pyx_GIVEREF(((PyObject*)&PyBool_Type)); PyTuple_SET_ITEM(__pyx_t_4, 1+__pyx_t_8, ((PyObject*)&PyBool_Type)); __pyx_t_6 = __Pyx_PyObject_Call(__pyx_t_5, __pyx_t_4, NULL); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 179, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; } __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (__Pyx_SetNameInClass(__pyx_t_1, __pyx_n_s_hardlink_master, __pyx_t_6) < 0) __PYX_ERR(0, 179, __pyx_L1_error) __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; /* "borg/item.pyx":181 * hardlink_master = PropDict._make_property('hardlink_master', bool) * * chunks = PropDict._make_property('chunks', (list, type(None)), 'list or None') # <<<<<<<<<<<<<< * chunks_healthy = PropDict._make_property('chunks_healthy', (list, type(None)), 'list or None') * */ __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_n_s_PropDict); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 181, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_5, __pyx_n_s_make_property); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 181, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_5 = PyTuple_New(2); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 181, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_INCREF(((PyObject *)(&PyList_Type))); __Pyx_GIVEREF(((PyObject *)(&PyList_Type))); PyTuple_SET_ITEM(__pyx_t_5, 0, ((PyObject *)(&PyList_Type))); __Pyx_INCREF(((PyObject *)Py_TYPE(Py_None))); __Pyx_GIVEREF(((PyObject *)Py_TYPE(Py_None))); PyTuple_SET_ITEM(__pyx_t_5, 1, ((PyObject *)Py_TYPE(Py_None))); __pyx_t_7 = NULL; __pyx_t_8 = 0; if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_4))) { __pyx_t_7 = PyMethod_GET_SELF(__pyx_t_4); if (likely(__pyx_t_7)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_4); __Pyx_INCREF(__pyx_t_7); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_4, function); __pyx_t_8 = 1; } } #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_4)) { PyObject *__pyx_temp[4] = {__pyx_t_7, __pyx_n_u_chunks, __pyx_t_5, __pyx_kp_u_list_or_None}; __pyx_t_6 = __Pyx_PyFunction_FastCall(__pyx_t_4, __pyx_temp+1-__pyx_t_8, 3+__pyx_t_8); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 181, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_GOTREF(__pyx_t_6); 
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; } else #endif #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_4)) { PyObject *__pyx_temp[4] = {__pyx_t_7, __pyx_n_u_chunks, __pyx_t_5, __pyx_kp_u_list_or_None}; __pyx_t_6 = __Pyx_PyCFunction_FastCall(__pyx_t_4, __pyx_temp+1-__pyx_t_8, 3+__pyx_t_8); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 181, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; } else #endif { __pyx_t_9 = PyTuple_New(3+__pyx_t_8); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 181, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); if (__pyx_t_7) { __Pyx_GIVEREF(__pyx_t_7); PyTuple_SET_ITEM(__pyx_t_9, 0, __pyx_t_7); __pyx_t_7 = NULL; } __Pyx_INCREF(__pyx_n_u_chunks); __Pyx_GIVEREF(__pyx_n_u_chunks); PyTuple_SET_ITEM(__pyx_t_9, 0+__pyx_t_8, __pyx_n_u_chunks); __Pyx_GIVEREF(__pyx_t_5); PyTuple_SET_ITEM(__pyx_t_9, 1+__pyx_t_8, __pyx_t_5); __Pyx_INCREF(__pyx_kp_u_list_or_None); __Pyx_GIVEREF(__pyx_kp_u_list_or_None); PyTuple_SET_ITEM(__pyx_t_9, 2+__pyx_t_8, __pyx_kp_u_list_or_None); __pyx_t_5 = 0; __pyx_t_6 = __Pyx_PyObject_Call(__pyx_t_4, __pyx_t_9, NULL); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 181, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; } __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (__Pyx_SetNameInClass(__pyx_t_1, __pyx_n_s_chunks, __pyx_t_6) < 0) __PYX_ERR(0, 181, __pyx_L1_error) __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; /* "borg/item.pyx":182 * * chunks = PropDict._make_property('chunks', (list, type(None)), 'list or None') * chunks_healthy = PropDict._make_property('chunks_healthy', (list, type(None)), 'list or None') # <<<<<<<<<<<<<< * * xattrs = PropDict._make_property('xattrs', StableDict) */ __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_n_s_PropDict); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 182, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_n_s_make_property); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 182, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = PyTuple_New(2); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 182, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_INCREF(((PyObject *)(&PyList_Type))); __Pyx_GIVEREF(((PyObject *)(&PyList_Type))); PyTuple_SET_ITEM(__pyx_t_4, 0, ((PyObject *)(&PyList_Type))); __Pyx_INCREF(((PyObject *)Py_TYPE(Py_None))); __Pyx_GIVEREF(((PyObject *)Py_TYPE(Py_None))); PyTuple_SET_ITEM(__pyx_t_4, 1, ((PyObject *)Py_TYPE(Py_None))); __pyx_t_5 = NULL; __pyx_t_8 = 0; if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_9))) { __pyx_t_5 = PyMethod_GET_SELF(__pyx_t_9); if (likely(__pyx_t_5)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_9); __Pyx_INCREF(__pyx_t_5); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_9, function); __pyx_t_8 = 1; } } #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_9)) { PyObject *__pyx_temp[4] = {__pyx_t_5, __pyx_n_u_chunks_healthy, __pyx_t_4, __pyx_kp_u_list_or_None}; __pyx_t_6 = __Pyx_PyFunction_FastCall(__pyx_t_9, __pyx_temp+1-__pyx_t_8, 3+__pyx_t_8); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 182, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; } else #endif #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_9)) { PyObject *__pyx_temp[4] = {__pyx_t_5, __pyx_n_u_chunks_healthy, __pyx_t_4, __pyx_kp_u_list_or_None}; __pyx_t_6 = __Pyx_PyCFunction_FastCall(__pyx_t_9, __pyx_temp+1-__pyx_t_8, 3+__pyx_t_8); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 
182, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; } else #endif { __pyx_t_7 = PyTuple_New(3+__pyx_t_8); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 182, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); if (__pyx_t_5) { __Pyx_GIVEREF(__pyx_t_5); PyTuple_SET_ITEM(__pyx_t_7, 0, __pyx_t_5); __pyx_t_5 = NULL; } __Pyx_INCREF(__pyx_n_u_chunks_healthy); __Pyx_GIVEREF(__pyx_n_u_chunks_healthy); PyTuple_SET_ITEM(__pyx_t_7, 0+__pyx_t_8, __pyx_n_u_chunks_healthy); __Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_7, 1+__pyx_t_8, __pyx_t_4); __Pyx_INCREF(__pyx_kp_u_list_or_None); __Pyx_GIVEREF(__pyx_kp_u_list_or_None); PyTuple_SET_ITEM(__pyx_t_7, 2+__pyx_t_8, __pyx_kp_u_list_or_None); __pyx_t_4 = 0; __pyx_t_6 = __Pyx_PyObject_Call(__pyx_t_9, __pyx_t_7, NULL); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 182, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; } __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; if (__Pyx_SetNameInClass(__pyx_t_1, __pyx_n_s_chunks_healthy, __pyx_t_6) < 0) __PYX_ERR(0, 182, __pyx_L1_error) __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; /* "borg/item.pyx":184 * chunks_healthy = PropDict._make_property('chunks_healthy', (list, type(None)), 'list or None') * * xattrs = PropDict._make_property('xattrs', StableDict) # <<<<<<<<<<<<<< * * deleted = PropDict._make_property('deleted', bool) */ __Pyx_GetModuleGlobalName(__pyx_t_9, __pyx_n_s_PropDict); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 184, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_9, __pyx_n_s_make_property); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 184, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __Pyx_GetModuleGlobalName(__pyx_t_9, __pyx_n_s_StableDict); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 184, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __pyx_t_4 = NULL; __pyx_t_8 = 0; if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_7))) { __pyx_t_4 = PyMethod_GET_SELF(__pyx_t_7); if (likely(__pyx_t_4)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_7); __Pyx_INCREF(__pyx_t_4); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_7, function); __pyx_t_8 = 1; } } #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_7)) { PyObject *__pyx_temp[3] = {__pyx_t_4, __pyx_n_u_xattrs, __pyx_t_9}; __pyx_t_6 = __Pyx_PyFunction_FastCall(__pyx_t_7, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 184, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; } else #endif #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_7)) { PyObject *__pyx_temp[3] = {__pyx_t_4, __pyx_n_u_xattrs, __pyx_t_9}; __pyx_t_6 = __Pyx_PyCFunction_FastCall(__pyx_t_7, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 184, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; } else #endif { __pyx_t_5 = PyTuple_New(2+__pyx_t_8); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 184, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); if (__pyx_t_4) { __Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_4); __pyx_t_4 = NULL; } __Pyx_INCREF(__pyx_n_u_xattrs); __Pyx_GIVEREF(__pyx_n_u_xattrs); PyTuple_SET_ITEM(__pyx_t_5, 0+__pyx_t_8, __pyx_n_u_xattrs); __Pyx_GIVEREF(__pyx_t_9); PyTuple_SET_ITEM(__pyx_t_5, 1+__pyx_t_8, __pyx_t_9); __pyx_t_9 = 0; __pyx_t_6 = __Pyx_PyObject_Call(__pyx_t_7, __pyx_t_5, NULL); if (unlikely(!__pyx_t_6)) 
__PYX_ERR(0, 184, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; } __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; if (__Pyx_SetNameInClass(__pyx_t_1, __pyx_n_s_xattrs, __pyx_t_6) < 0) __PYX_ERR(0, 184, __pyx_L1_error) __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; /* "borg/item.pyx":186 * xattrs = PropDict._make_property('xattrs', StableDict) * * deleted = PropDict._make_property('deleted', bool) # <<<<<<<<<<<<<< * nlink = PropDict._make_property('nlink', int) * */ __Pyx_GetModuleGlobalName(__pyx_t_7, __pyx_n_s_PropDict); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 186, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_7, __pyx_n_s_make_property); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 186, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; __pyx_t_7 = NULL; __pyx_t_8 = 0; if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_5))) { __pyx_t_7 = PyMethod_GET_SELF(__pyx_t_5); if (likely(__pyx_t_7)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_5); __Pyx_INCREF(__pyx_t_7); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_5, function); __pyx_t_8 = 1; } } #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_5)) { PyObject *__pyx_temp[3] = {__pyx_t_7, __pyx_n_u_deleted, ((PyObject*)&PyBool_Type)}; __pyx_t_6 = __Pyx_PyFunction_FastCall(__pyx_t_5, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 186, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_GOTREF(__pyx_t_6); } else #endif #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_5)) { PyObject *__pyx_temp[3] = {__pyx_t_7, __pyx_n_u_deleted, ((PyObject*)&PyBool_Type)}; __pyx_t_6 = __Pyx_PyCFunction_FastCall(__pyx_t_5, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 186, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_GOTREF(__pyx_t_6); } else #endif { __pyx_t_9 = PyTuple_New(2+__pyx_t_8); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 186, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); if (__pyx_t_7) { __Pyx_GIVEREF(__pyx_t_7); PyTuple_SET_ITEM(__pyx_t_9, 0, __pyx_t_7); __pyx_t_7 = NULL; } __Pyx_INCREF(__pyx_n_u_deleted); __Pyx_GIVEREF(__pyx_n_u_deleted); PyTuple_SET_ITEM(__pyx_t_9, 0+__pyx_t_8, __pyx_n_u_deleted); __Pyx_INCREF(((PyObject*)&PyBool_Type)); __Pyx_GIVEREF(((PyObject*)&PyBool_Type)); PyTuple_SET_ITEM(__pyx_t_9, 1+__pyx_t_8, ((PyObject*)&PyBool_Type)); __pyx_t_6 = __Pyx_PyObject_Call(__pyx_t_5, __pyx_t_9, NULL); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 186, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; } __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (__Pyx_SetNameInClass(__pyx_t_1, __pyx_n_s_deleted, __pyx_t_6) < 0) __PYX_ERR(0, 186, __pyx_L1_error) __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; /* "borg/item.pyx":187 * * deleted = PropDict._make_property('deleted', bool) * nlink = PropDict._make_property('nlink', int) # <<<<<<<<<<<<<< * * part = PropDict._make_property('part', int) */ __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_n_s_PropDict); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 187, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_t_5, __pyx_n_s_make_property); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 187, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_5 = NULL; __pyx_t_8 = 0; if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_9))) { __pyx_t_5 = PyMethod_GET_SELF(__pyx_t_9); if (likely(__pyx_t_5)) { PyObject* function = 
PyMethod_GET_FUNCTION(__pyx_t_9); __Pyx_INCREF(__pyx_t_5); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_9, function); __pyx_t_8 = 1; } } #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_9)) { PyObject *__pyx_temp[3] = {__pyx_t_5, __pyx_n_u_nlink, ((PyObject *)(&PyInt_Type))}; __pyx_t_6 = __Pyx_PyFunction_FastCall(__pyx_t_9, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 187, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_GOTREF(__pyx_t_6); } else #endif #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_9)) { PyObject *__pyx_temp[3] = {__pyx_t_5, __pyx_n_u_nlink, ((PyObject *)(&PyInt_Type))}; __pyx_t_6 = __Pyx_PyCFunction_FastCall(__pyx_t_9, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 187, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_GOTREF(__pyx_t_6); } else #endif { __pyx_t_7 = PyTuple_New(2+__pyx_t_8); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 187, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); if (__pyx_t_5) { __Pyx_GIVEREF(__pyx_t_5); PyTuple_SET_ITEM(__pyx_t_7, 0, __pyx_t_5); __pyx_t_5 = NULL; } __Pyx_INCREF(__pyx_n_u_nlink); __Pyx_GIVEREF(__pyx_n_u_nlink); PyTuple_SET_ITEM(__pyx_t_7, 0+__pyx_t_8, __pyx_n_u_nlink); __Pyx_INCREF(((PyObject *)(&PyInt_Type))); __Pyx_GIVEREF(((PyObject *)(&PyInt_Type))); PyTuple_SET_ITEM(__pyx_t_7, 1+__pyx_t_8, ((PyObject *)(&PyInt_Type))); __pyx_t_6 = __Pyx_PyObject_Call(__pyx_t_9, __pyx_t_7, NULL); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 187, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; } __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; if (__Pyx_SetNameInClass(__pyx_t_1, __pyx_n_s_nlink, __pyx_t_6) < 0) __PYX_ERR(0, 187, __pyx_L1_error) __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; /* "borg/item.pyx":189 * nlink = PropDict._make_property('nlink', int) * * part = PropDict._make_property('part', int) # <<<<<<<<<<<<<< * * def get_size(self, hardlink_masters=None, memorize=False, compressed=False, from_chunks=False): */ __Pyx_GetModuleGlobalName(__pyx_t_9, __pyx_n_s_PropDict); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 189, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_9, __pyx_n_s_make_property); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 189, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __pyx_t_9 = NULL; __pyx_t_8 = 0; if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_7))) { __pyx_t_9 = PyMethod_GET_SELF(__pyx_t_7); if (likely(__pyx_t_9)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_7); __Pyx_INCREF(__pyx_t_9); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_7, function); __pyx_t_8 = 1; } } #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_7)) { PyObject *__pyx_temp[3] = {__pyx_t_9, __pyx_n_u_part, ((PyObject *)(&PyInt_Type))}; __pyx_t_6 = __Pyx_PyFunction_FastCall(__pyx_t_7, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 189, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_9); __pyx_t_9 = 0; __Pyx_GOTREF(__pyx_t_6); } else #endif #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_7)) { PyObject *__pyx_temp[3] = {__pyx_t_9, __pyx_n_u_part, ((PyObject *)(&PyInt_Type))}; __pyx_t_6 = __Pyx_PyCFunction_FastCall(__pyx_t_7, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 189, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_9); __pyx_t_9 = 0; __Pyx_GOTREF(__pyx_t_6); } else #endif { __pyx_t_5 = PyTuple_New(2+__pyx_t_8); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 189, __pyx_L1_error) 
__Pyx_GOTREF(__pyx_t_5); if (__pyx_t_9) { __Pyx_GIVEREF(__pyx_t_9); PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_9); __pyx_t_9 = NULL; } __Pyx_INCREF(__pyx_n_u_part); __Pyx_GIVEREF(__pyx_n_u_part); PyTuple_SET_ITEM(__pyx_t_5, 0+__pyx_t_8, __pyx_n_u_part); __Pyx_INCREF(((PyObject *)(&PyInt_Type))); __Pyx_GIVEREF(((PyObject *)(&PyInt_Type))); PyTuple_SET_ITEM(__pyx_t_5, 1+__pyx_t_8, ((PyObject *)(&PyInt_Type))); __pyx_t_6 = __Pyx_PyObject_Call(__pyx_t_7, __pyx_t_5, NULL); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 189, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; } __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; if (__Pyx_SetNameInClass(__pyx_t_1, __pyx_n_s_part, __pyx_t_6) < 0) __PYX_ERR(0, 189, __pyx_L1_error) __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; /* "borg/item.pyx":191 * part = PropDict._make_property('part', int) * * def get_size(self, hardlink_masters=None, memorize=False, compressed=False, from_chunks=False): # <<<<<<<<<<<<<< * """ * Determine the (uncompressed or compressed) size of this item. */ __pyx_t_6 = __Pyx_CyFunction_New(&__pyx_mdef_4borg_4item_4Item_1get_size, 0, __pyx_n_s_Item_get_size, NULL, __pyx_n_s_borg_item, __pyx_d, ((PyObject *)__pyx_codeobj__40)); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 191, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_CyFunction_SetDefaultsTuple(__pyx_t_6, __pyx_tuple__41); if (__Pyx_SetNameInClass(__pyx_t_1, __pyx_n_s_get_size, __pyx_t_6) < 0) __PYX_ERR(0, 191, __pyx_L1_error) __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; /* "borg/item.pyx":130 * ChunkListEntry = namedtuple('ChunkListEntry', 'id size csize') * * class Item(PropDict): # <<<<<<<<<<<<<< * """ * Item abstraction that deals with validation and the low-level details internally: */ __pyx_t_6 = __Pyx_Py3ClassCreate(__pyx_t_3, __pyx_n_s_Item, __pyx_t_2, __pyx_t_1, NULL, 0, 0); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 130, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); if (PyDict_SetItem(__pyx_d, __pyx_n_s_Item, __pyx_t_6) < 0) __PYX_ERR(0, 130, __pyx_L1_error) __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "borg/item.pyx":240 * * * class EncryptedKey(PropDict): # <<<<<<<<<<<<<< * """ * EncryptedKey abstraction that deals with validation and the low-level details internally: */ __Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_n_s_PropDict); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 240, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 240, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = __Pyx_CalculateMetaclass(NULL, __pyx_t_3); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 240, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_1 = __Pyx_Py3MetaclassPrepare(__pyx_t_2, __pyx_t_3, __pyx_n_s_EncryptedKey, __pyx_n_s_EncryptedKey, (PyObject *) NULL, __pyx_n_s_borg_item, __pyx_kp_s_EncryptedKey_abstraction_that_d); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 240, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); /* "borg/item.pyx":252 * """ * * VALID_KEYS = {'version', 'algorithm', 'iterations', 'salt', 'hash', 'data'} # str-typed keys # <<<<<<<<<<<<<< * * __slots__ = ("_dict", ) # avoid setting attributes not supported by properties */ __pyx_t_6 = PySet_New(0); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 252, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); if (PySet_Add(__pyx_t_6, __pyx_n_u_version) < 0) __PYX_ERR(0, 252, __pyx_L1_error) if (PySet_Add(__pyx_t_6, 
__pyx_n_u_algorithm) < 0) __PYX_ERR(0, 252, __pyx_L1_error) if (PySet_Add(__pyx_t_6, __pyx_n_u_iterations) < 0) __PYX_ERR(0, 252, __pyx_L1_error) if (PySet_Add(__pyx_t_6, __pyx_n_u_salt) < 0) __PYX_ERR(0, 252, __pyx_L1_error) if (PySet_Add(__pyx_t_6, __pyx_n_u_hash) < 0) __PYX_ERR(0, 252, __pyx_L1_error) if (PySet_Add(__pyx_t_6, __pyx_n_u_data) < 0) __PYX_ERR(0, 252, __pyx_L1_error) if (__Pyx_SetNameInClass(__pyx_t_1, __pyx_n_s_VALID_KEYS, __pyx_t_6) < 0) __PYX_ERR(0, 252, __pyx_L1_error) __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; /* "borg/item.pyx":254 * VALID_KEYS = {'version', 'algorithm', 'iterations', 'salt', 'hash', 'data'} # str-typed keys * * __slots__ = ("_dict", ) # avoid setting attributes not supported by properties # <<<<<<<<<<<<<< * * version = PropDict._make_property('version', int) */ if (__Pyx_SetNameInClass(__pyx_t_1, __pyx_n_s_slots, __pyx_tuple__13) < 0) __PYX_ERR(0, 254, __pyx_L1_error) /* "borg/item.pyx":256 * __slots__ = ("_dict", ) # avoid setting attributes not supported by properties * * version = PropDict._make_property('version', int) # <<<<<<<<<<<<<< * algorithm = PropDict._make_property('algorithm', str, encode=str.encode, decode=bytes.decode) * iterations = PropDict._make_property('iterations', int) */ __Pyx_GetModuleGlobalName(__pyx_t_7, __pyx_n_s_PropDict); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 256, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_7, __pyx_n_s_make_property); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 256, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; __pyx_t_7 = NULL; __pyx_t_8 = 0; if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_5))) { __pyx_t_7 = PyMethod_GET_SELF(__pyx_t_5); if (likely(__pyx_t_7)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_5); __Pyx_INCREF(__pyx_t_7); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_5, function); __pyx_t_8 = 1; } } #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_5)) { PyObject *__pyx_temp[3] = {__pyx_t_7, __pyx_n_u_version, ((PyObject *)(&PyInt_Type))}; __pyx_t_6 = __Pyx_PyFunction_FastCall(__pyx_t_5, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 256, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_GOTREF(__pyx_t_6); } else #endif #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_5)) { PyObject *__pyx_temp[3] = {__pyx_t_7, __pyx_n_u_version, ((PyObject *)(&PyInt_Type))}; __pyx_t_6 = __Pyx_PyCFunction_FastCall(__pyx_t_5, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 256, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_GOTREF(__pyx_t_6); } else #endif { __pyx_t_9 = PyTuple_New(2+__pyx_t_8); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 256, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); if (__pyx_t_7) { __Pyx_GIVEREF(__pyx_t_7); PyTuple_SET_ITEM(__pyx_t_9, 0, __pyx_t_7); __pyx_t_7 = NULL; } __Pyx_INCREF(__pyx_n_u_version); __Pyx_GIVEREF(__pyx_n_u_version); PyTuple_SET_ITEM(__pyx_t_9, 0+__pyx_t_8, __pyx_n_u_version); __Pyx_INCREF(((PyObject *)(&PyInt_Type))); __Pyx_GIVEREF(((PyObject *)(&PyInt_Type))); PyTuple_SET_ITEM(__pyx_t_9, 1+__pyx_t_8, ((PyObject *)(&PyInt_Type))); __pyx_t_6 = __Pyx_PyObject_Call(__pyx_t_5, __pyx_t_9, NULL); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 256, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; } __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (__Pyx_SetNameInClass(__pyx_t_1, __pyx_n_s_version, __pyx_t_6) < 0) __PYX_ERR(0, 256, __pyx_L1_error) __Pyx_DECREF(__pyx_t_6); 
__pyx_t_6 = 0; /* "borg/item.pyx":257 * * version = PropDict._make_property('version', int) * algorithm = PropDict._make_property('algorithm', str, encode=str.encode, decode=bytes.decode) # <<<<<<<<<<<<<< * iterations = PropDict._make_property('iterations', int) * salt = PropDict._make_property('salt', bytes) */ __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_n_s_PropDict); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 257, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_n_s_make_property); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 257, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __pyx_t_6 = PyTuple_New(2); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 257, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_INCREF(__pyx_n_u_algorithm); __Pyx_GIVEREF(__pyx_n_u_algorithm); PyTuple_SET_ITEM(__pyx_t_6, 0, __pyx_n_u_algorithm); __Pyx_INCREF(((PyObject *)(&PyUnicode_Type))); __Pyx_GIVEREF(((PyObject *)(&PyUnicode_Type))); PyTuple_SET_ITEM(__pyx_t_6, 1, ((PyObject *)(&PyUnicode_Type))); __pyx_t_9 = __Pyx_PyDict_NewPresized(2); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 257, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __pyx_t_7 = __Pyx_PyObject_GetAttrStr(((PyObject *)(&PyUnicode_Type)), __pyx_n_s_encode); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 257, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); if (PyDict_SetItem(__pyx_t_9, __pyx_n_s_encode, __pyx_t_7) < 0) __PYX_ERR(0, 257, __pyx_L1_error) __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; __pyx_t_7 = __Pyx_PyObject_GetAttrStr(((PyObject *)(&PyBytes_Type)), __pyx_n_s_decode); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 257, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); if (PyDict_SetItem(__pyx_t_9, __pyx_n_s_decode, __pyx_t_7) < 0) __PYX_ERR(0, 257, __pyx_L1_error) __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; __pyx_t_7 = __Pyx_PyObject_Call(__pyx_t_5, __pyx_t_6, __pyx_t_9); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 257, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; if (__Pyx_SetNameInClass(__pyx_t_1, __pyx_n_s_algorithm, __pyx_t_7) < 0) __PYX_ERR(0, 257, __pyx_L1_error) __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; /* "borg/item.pyx":258 * version = PropDict._make_property('version', int) * algorithm = PropDict._make_property('algorithm', str, encode=str.encode, decode=bytes.decode) * iterations = PropDict._make_property('iterations', int) # <<<<<<<<<<<<<< * salt = PropDict._make_property('salt', bytes) * hash = PropDict._make_property('hash', bytes) */ __Pyx_GetModuleGlobalName(__pyx_t_9, __pyx_n_s_PropDict); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 258, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_t_9, __pyx_n_s_make_property); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 258, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __pyx_t_9 = NULL; __pyx_t_8 = 0; if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_6))) { __pyx_t_9 = PyMethod_GET_SELF(__pyx_t_6); if (likely(__pyx_t_9)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_6); __Pyx_INCREF(__pyx_t_9); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_6, function); __pyx_t_8 = 1; } } #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_6)) { PyObject *__pyx_temp[3] = {__pyx_t_9, __pyx_n_u_iterations, ((PyObject *)(&PyInt_Type))}; __pyx_t_7 = __Pyx_PyFunction_FastCall(__pyx_t_6, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 258, __pyx_L1_error) 
__Pyx_XDECREF(__pyx_t_9); __pyx_t_9 = 0; __Pyx_GOTREF(__pyx_t_7); } else #endif #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_6)) { PyObject *__pyx_temp[3] = {__pyx_t_9, __pyx_n_u_iterations, ((PyObject *)(&PyInt_Type))}; __pyx_t_7 = __Pyx_PyCFunction_FastCall(__pyx_t_6, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 258, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_9); __pyx_t_9 = 0; __Pyx_GOTREF(__pyx_t_7); } else #endif { __pyx_t_5 = PyTuple_New(2+__pyx_t_8); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 258, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); if (__pyx_t_9) { __Pyx_GIVEREF(__pyx_t_9); PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_9); __pyx_t_9 = NULL; } __Pyx_INCREF(__pyx_n_u_iterations); __Pyx_GIVEREF(__pyx_n_u_iterations); PyTuple_SET_ITEM(__pyx_t_5, 0+__pyx_t_8, __pyx_n_u_iterations); __Pyx_INCREF(((PyObject *)(&PyInt_Type))); __Pyx_GIVEREF(((PyObject *)(&PyInt_Type))); PyTuple_SET_ITEM(__pyx_t_5, 1+__pyx_t_8, ((PyObject *)(&PyInt_Type))); __pyx_t_7 = __Pyx_PyObject_Call(__pyx_t_6, __pyx_t_5, NULL); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 258, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; } __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; if (__Pyx_SetNameInClass(__pyx_t_1, __pyx_n_s_iterations, __pyx_t_7) < 0) __PYX_ERR(0, 258, __pyx_L1_error) __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; /* "borg/item.pyx":259 * algorithm = PropDict._make_property('algorithm', str, encode=str.encode, decode=bytes.decode) * iterations = PropDict._make_property('iterations', int) * salt = PropDict._make_property('salt', bytes) # <<<<<<<<<<<<<< * hash = PropDict._make_property('hash', bytes) * data = PropDict._make_property('data', bytes) */ __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_n_s_PropDict); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 259, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_n_s_make_property); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 259, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __pyx_t_6 = NULL; __pyx_t_8 = 0; if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_5))) { __pyx_t_6 = PyMethod_GET_SELF(__pyx_t_5); if (likely(__pyx_t_6)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_5); __Pyx_INCREF(__pyx_t_6); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_5, function); __pyx_t_8 = 1; } } #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_5)) { PyObject *__pyx_temp[3] = {__pyx_t_6, __pyx_n_u_salt, ((PyObject *)(&PyBytes_Type))}; __pyx_t_7 = __Pyx_PyFunction_FastCall(__pyx_t_5, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 259, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_GOTREF(__pyx_t_7); } else #endif #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_5)) { PyObject *__pyx_temp[3] = {__pyx_t_6, __pyx_n_u_salt, ((PyObject *)(&PyBytes_Type))}; __pyx_t_7 = __Pyx_PyCFunction_FastCall(__pyx_t_5, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 259, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_GOTREF(__pyx_t_7); } else #endif { __pyx_t_9 = PyTuple_New(2+__pyx_t_8); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 259, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); if (__pyx_t_6) { __Pyx_GIVEREF(__pyx_t_6); PyTuple_SET_ITEM(__pyx_t_9, 0, __pyx_t_6); __pyx_t_6 = NULL; } __Pyx_INCREF(__pyx_n_u_salt); __Pyx_GIVEREF(__pyx_n_u_salt); PyTuple_SET_ITEM(__pyx_t_9, 0+__pyx_t_8, __pyx_n_u_salt); __Pyx_INCREF(((PyObject *)(&PyBytes_Type))); 
__Pyx_GIVEREF(((PyObject *)(&PyBytes_Type))); PyTuple_SET_ITEM(__pyx_t_9, 1+__pyx_t_8, ((PyObject *)(&PyBytes_Type))); __pyx_t_7 = __Pyx_PyObject_Call(__pyx_t_5, __pyx_t_9, NULL); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 259, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; } __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (__Pyx_SetNameInClass(__pyx_t_1, __pyx_n_s_salt, __pyx_t_7) < 0) __PYX_ERR(0, 259, __pyx_L1_error) __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; /* "borg/item.pyx":260 * iterations = PropDict._make_property('iterations', int) * salt = PropDict._make_property('salt', bytes) * hash = PropDict._make_property('hash', bytes) # <<<<<<<<<<<<<< * data = PropDict._make_property('data', bytes) * */ __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_n_s_PropDict); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 260, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_t_5, __pyx_n_s_make_property); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 260, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_5 = NULL; __pyx_t_8 = 0; if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_9))) { __pyx_t_5 = PyMethod_GET_SELF(__pyx_t_9); if (likely(__pyx_t_5)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_9); __Pyx_INCREF(__pyx_t_5); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_9, function); __pyx_t_8 = 1; } } #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_9)) { PyObject *__pyx_temp[3] = {__pyx_t_5, __pyx_n_u_hash, ((PyObject *)(&PyBytes_Type))}; __pyx_t_7 = __Pyx_PyFunction_FastCall(__pyx_t_9, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 260, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_GOTREF(__pyx_t_7); } else #endif #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_9)) { PyObject *__pyx_temp[3] = {__pyx_t_5, __pyx_n_u_hash, ((PyObject *)(&PyBytes_Type))}; __pyx_t_7 = __Pyx_PyCFunction_FastCall(__pyx_t_9, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 260, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_GOTREF(__pyx_t_7); } else #endif { __pyx_t_6 = PyTuple_New(2+__pyx_t_8); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 260, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); if (__pyx_t_5) { __Pyx_GIVEREF(__pyx_t_5); PyTuple_SET_ITEM(__pyx_t_6, 0, __pyx_t_5); __pyx_t_5 = NULL; } __Pyx_INCREF(__pyx_n_u_hash); __Pyx_GIVEREF(__pyx_n_u_hash); PyTuple_SET_ITEM(__pyx_t_6, 0+__pyx_t_8, __pyx_n_u_hash); __Pyx_INCREF(((PyObject *)(&PyBytes_Type))); __Pyx_GIVEREF(((PyObject *)(&PyBytes_Type))); PyTuple_SET_ITEM(__pyx_t_6, 1+__pyx_t_8, ((PyObject *)(&PyBytes_Type))); __pyx_t_7 = __Pyx_PyObject_Call(__pyx_t_9, __pyx_t_6, NULL); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 260, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; } __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; if (__Pyx_SetNameInClass(__pyx_t_1, __pyx_n_s_hash, __pyx_t_7) < 0) __PYX_ERR(0, 260, __pyx_L1_error) __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; /* "borg/item.pyx":261 * salt = PropDict._make_property('salt', bytes) * hash = PropDict._make_property('hash', bytes) * data = PropDict._make_property('data', bytes) # <<<<<<<<<<<<<< * * */ __Pyx_GetModuleGlobalName(__pyx_t_9, __pyx_n_s_PropDict); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 261, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_t_9, __pyx_n_s_make_property); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 261, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); 
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __pyx_t_9 = NULL; __pyx_t_8 = 0; if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_6))) { __pyx_t_9 = PyMethod_GET_SELF(__pyx_t_6); if (likely(__pyx_t_9)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_6); __Pyx_INCREF(__pyx_t_9); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_6, function); __pyx_t_8 = 1; } } #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_6)) { PyObject *__pyx_temp[3] = {__pyx_t_9, __pyx_n_u_data, ((PyObject *)(&PyBytes_Type))}; __pyx_t_7 = __Pyx_PyFunction_FastCall(__pyx_t_6, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 261, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_9); __pyx_t_9 = 0; __Pyx_GOTREF(__pyx_t_7); } else #endif #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_6)) { PyObject *__pyx_temp[3] = {__pyx_t_9, __pyx_n_u_data, ((PyObject *)(&PyBytes_Type))}; __pyx_t_7 = __Pyx_PyCFunction_FastCall(__pyx_t_6, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 261, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_9); __pyx_t_9 = 0; __Pyx_GOTREF(__pyx_t_7); } else #endif { __pyx_t_5 = PyTuple_New(2+__pyx_t_8); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 261, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); if (__pyx_t_9) { __Pyx_GIVEREF(__pyx_t_9); PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_9); __pyx_t_9 = NULL; } __Pyx_INCREF(__pyx_n_u_data); __Pyx_GIVEREF(__pyx_n_u_data); PyTuple_SET_ITEM(__pyx_t_5, 0+__pyx_t_8, __pyx_n_u_data); __Pyx_INCREF(((PyObject *)(&PyBytes_Type))); __Pyx_GIVEREF(((PyObject *)(&PyBytes_Type))); PyTuple_SET_ITEM(__pyx_t_5, 1+__pyx_t_8, ((PyObject *)(&PyBytes_Type))); __pyx_t_7 = __Pyx_PyObject_Call(__pyx_t_6, __pyx_t_5, NULL); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 261, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; } __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; if (__Pyx_SetNameInClass(__pyx_t_1, __pyx_n_s_data, __pyx_t_7) < 0) __PYX_ERR(0, 261, __pyx_L1_error) __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; /* "borg/item.pyx":240 * * * class EncryptedKey(PropDict): # <<<<<<<<<<<<<< * """ * EncryptedKey abstraction that deals with validation and the low-level details internally: */ __pyx_t_7 = __Pyx_Py3ClassCreate(__pyx_t_2, __pyx_n_s_EncryptedKey, __pyx_t_3, __pyx_t_1, NULL, 0, 0); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 240, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); if (PyDict_SetItem(__pyx_d, __pyx_n_s_EncryptedKey, __pyx_t_7) < 0) __PYX_ERR(0, 240, __pyx_L1_error) __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; /* "borg/item.pyx":264 * * * class Key(PropDict): # <<<<<<<<<<<<<< * """ * Key abstraction that deals with validation and the low-level details internally: */ __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_n_s_PropDict); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 264, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 264, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = __Pyx_CalculateMetaclass(NULL, __pyx_t_2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 264, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_1 = __Pyx_Py3MetaclassPrepare(__pyx_t_3, __pyx_t_2, __pyx_n_s_Key, __pyx_n_s_Key, (PyObject *) NULL, __pyx_n_s_borg_item, __pyx_kp_s_Key_abstraction_that_deals_with); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 264, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); /* 
"borg/item.pyx":276 * """ * * VALID_KEYS = {'version', 'repository_id', 'enc_key', 'enc_hmac_key', 'id_key', 'chunk_seed', 'tam_required'} # str-typed keys # <<<<<<<<<<<<<< * * __slots__ = ("_dict", ) # avoid setting attributes not supported by properties */ __pyx_t_7 = PySet_New(0); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 276, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); if (PySet_Add(__pyx_t_7, __pyx_n_u_version) < 0) __PYX_ERR(0, 276, __pyx_L1_error) if (PySet_Add(__pyx_t_7, __pyx_n_u_repository_id) < 0) __PYX_ERR(0, 276, __pyx_L1_error) if (PySet_Add(__pyx_t_7, __pyx_n_u_enc_key) < 0) __PYX_ERR(0, 276, __pyx_L1_error) if (PySet_Add(__pyx_t_7, __pyx_n_u_enc_hmac_key) < 0) __PYX_ERR(0, 276, __pyx_L1_error) if (PySet_Add(__pyx_t_7, __pyx_n_u_id_key) < 0) __PYX_ERR(0, 276, __pyx_L1_error) if (PySet_Add(__pyx_t_7, __pyx_n_u_chunk_seed) < 0) __PYX_ERR(0, 276, __pyx_L1_error) if (PySet_Add(__pyx_t_7, __pyx_n_u_tam_required) < 0) __PYX_ERR(0, 276, __pyx_L1_error) if (__Pyx_SetNameInClass(__pyx_t_1, __pyx_n_s_VALID_KEYS, __pyx_t_7) < 0) __PYX_ERR(0, 276, __pyx_L1_error) __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; /* "borg/item.pyx":278 * VALID_KEYS = {'version', 'repository_id', 'enc_key', 'enc_hmac_key', 'id_key', 'chunk_seed', 'tam_required'} # str-typed keys * * __slots__ = ("_dict", ) # avoid setting attributes not supported by properties # <<<<<<<<<<<<<< * * version = PropDict._make_property('version', int) */ if (__Pyx_SetNameInClass(__pyx_t_1, __pyx_n_s_slots, __pyx_tuple__13) < 0) __PYX_ERR(0, 278, __pyx_L1_error) /* "borg/item.pyx":280 * __slots__ = ("_dict", ) # avoid setting attributes not supported by properties * * version = PropDict._make_property('version', int) # <<<<<<<<<<<<<< * repository_id = PropDict._make_property('repository_id', bytes) * enc_key = PropDict._make_property('enc_key', bytes) */ __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_n_s_PropDict); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 280, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_n_s_make_property); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 280, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __pyx_t_6 = NULL; __pyx_t_8 = 0; if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_5))) { __pyx_t_6 = PyMethod_GET_SELF(__pyx_t_5); if (likely(__pyx_t_6)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_5); __Pyx_INCREF(__pyx_t_6); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_5, function); __pyx_t_8 = 1; } } #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_5)) { PyObject *__pyx_temp[3] = {__pyx_t_6, __pyx_n_u_version, ((PyObject *)(&PyInt_Type))}; __pyx_t_7 = __Pyx_PyFunction_FastCall(__pyx_t_5, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 280, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_GOTREF(__pyx_t_7); } else #endif #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_5)) { PyObject *__pyx_temp[3] = {__pyx_t_6, __pyx_n_u_version, ((PyObject *)(&PyInt_Type))}; __pyx_t_7 = __Pyx_PyCFunction_FastCall(__pyx_t_5, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 280, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_GOTREF(__pyx_t_7); } else #endif { __pyx_t_9 = PyTuple_New(2+__pyx_t_8); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 280, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); if (__pyx_t_6) { __Pyx_GIVEREF(__pyx_t_6); PyTuple_SET_ITEM(__pyx_t_9, 0, __pyx_t_6); __pyx_t_6 = NULL; } __Pyx_INCREF(__pyx_n_u_version); 
__Pyx_GIVEREF(__pyx_n_u_version); PyTuple_SET_ITEM(__pyx_t_9, 0+__pyx_t_8, __pyx_n_u_version); __Pyx_INCREF(((PyObject *)(&PyInt_Type))); __Pyx_GIVEREF(((PyObject *)(&PyInt_Type))); PyTuple_SET_ITEM(__pyx_t_9, 1+__pyx_t_8, ((PyObject *)(&PyInt_Type))); __pyx_t_7 = __Pyx_PyObject_Call(__pyx_t_5, __pyx_t_9, NULL); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 280, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; } __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (__Pyx_SetNameInClass(__pyx_t_1, __pyx_n_s_version, __pyx_t_7) < 0) __PYX_ERR(0, 280, __pyx_L1_error) __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; /* "borg/item.pyx":281 * * version = PropDict._make_property('version', int) * repository_id = PropDict._make_property('repository_id', bytes) # <<<<<<<<<<<<<< * enc_key = PropDict._make_property('enc_key', bytes) * enc_hmac_key = PropDict._make_property('enc_hmac_key', bytes) */ __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_n_s_PropDict); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 281, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_t_5, __pyx_n_s_make_property); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 281, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_5 = NULL; __pyx_t_8 = 0; if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_9))) { __pyx_t_5 = PyMethod_GET_SELF(__pyx_t_9); if (likely(__pyx_t_5)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_9); __Pyx_INCREF(__pyx_t_5); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_9, function); __pyx_t_8 = 1; } } #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_9)) { PyObject *__pyx_temp[3] = {__pyx_t_5, __pyx_n_u_repository_id, ((PyObject *)(&PyBytes_Type))}; __pyx_t_7 = __Pyx_PyFunction_FastCall(__pyx_t_9, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 281, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_GOTREF(__pyx_t_7); } else #endif #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_9)) { PyObject *__pyx_temp[3] = {__pyx_t_5, __pyx_n_u_repository_id, ((PyObject *)(&PyBytes_Type))}; __pyx_t_7 = __Pyx_PyCFunction_FastCall(__pyx_t_9, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 281, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_GOTREF(__pyx_t_7); } else #endif { __pyx_t_6 = PyTuple_New(2+__pyx_t_8); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 281, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); if (__pyx_t_5) { __Pyx_GIVEREF(__pyx_t_5); PyTuple_SET_ITEM(__pyx_t_6, 0, __pyx_t_5); __pyx_t_5 = NULL; } __Pyx_INCREF(__pyx_n_u_repository_id); __Pyx_GIVEREF(__pyx_n_u_repository_id); PyTuple_SET_ITEM(__pyx_t_6, 0+__pyx_t_8, __pyx_n_u_repository_id); __Pyx_INCREF(((PyObject *)(&PyBytes_Type))); __Pyx_GIVEREF(((PyObject *)(&PyBytes_Type))); PyTuple_SET_ITEM(__pyx_t_6, 1+__pyx_t_8, ((PyObject *)(&PyBytes_Type))); __pyx_t_7 = __Pyx_PyObject_Call(__pyx_t_9, __pyx_t_6, NULL); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 281, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; } __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; if (__Pyx_SetNameInClass(__pyx_t_1, __pyx_n_s_repository_id, __pyx_t_7) < 0) __PYX_ERR(0, 281, __pyx_L1_error) __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; /* "borg/item.pyx":282 * version = PropDict._make_property('version', int) * repository_id = PropDict._make_property('repository_id', bytes) * enc_key = PropDict._make_property('enc_key', bytes) # <<<<<<<<<<<<<< * enc_hmac_key = PropDict._make_property('enc_hmac_key', bytes) * 
id_key = PropDict._make_property('id_key', bytes) */ __Pyx_GetModuleGlobalName(__pyx_t_9, __pyx_n_s_PropDict); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 282, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_t_9, __pyx_n_s_make_property); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 282, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __pyx_t_9 = NULL; __pyx_t_8 = 0; if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_6))) { __pyx_t_9 = PyMethod_GET_SELF(__pyx_t_6); if (likely(__pyx_t_9)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_6); __Pyx_INCREF(__pyx_t_9); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_6, function); __pyx_t_8 = 1; } } #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_6)) { PyObject *__pyx_temp[3] = {__pyx_t_9, __pyx_n_u_enc_key, ((PyObject *)(&PyBytes_Type))}; __pyx_t_7 = __Pyx_PyFunction_FastCall(__pyx_t_6, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 282, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_9); __pyx_t_9 = 0; __Pyx_GOTREF(__pyx_t_7); } else #endif #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_6)) { PyObject *__pyx_temp[3] = {__pyx_t_9, __pyx_n_u_enc_key, ((PyObject *)(&PyBytes_Type))}; __pyx_t_7 = __Pyx_PyCFunction_FastCall(__pyx_t_6, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 282, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_9); __pyx_t_9 = 0; __Pyx_GOTREF(__pyx_t_7); } else #endif { __pyx_t_5 = PyTuple_New(2+__pyx_t_8); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 282, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); if (__pyx_t_9) { __Pyx_GIVEREF(__pyx_t_9); PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_9); __pyx_t_9 = NULL; } __Pyx_INCREF(__pyx_n_u_enc_key); __Pyx_GIVEREF(__pyx_n_u_enc_key); PyTuple_SET_ITEM(__pyx_t_5, 0+__pyx_t_8, __pyx_n_u_enc_key); __Pyx_INCREF(((PyObject *)(&PyBytes_Type))); __Pyx_GIVEREF(((PyObject *)(&PyBytes_Type))); PyTuple_SET_ITEM(__pyx_t_5, 1+__pyx_t_8, ((PyObject *)(&PyBytes_Type))); __pyx_t_7 = __Pyx_PyObject_Call(__pyx_t_6, __pyx_t_5, NULL); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 282, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; } __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; if (__Pyx_SetNameInClass(__pyx_t_1, __pyx_n_s_enc_key, __pyx_t_7) < 0) __PYX_ERR(0, 282, __pyx_L1_error) __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; /* "borg/item.pyx":283 * repository_id = PropDict._make_property('repository_id', bytes) * enc_key = PropDict._make_property('enc_key', bytes) * enc_hmac_key = PropDict._make_property('enc_hmac_key', bytes) # <<<<<<<<<<<<<< * id_key = PropDict._make_property('id_key', bytes) * chunk_seed = PropDict._make_property('chunk_seed', int) */ __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_n_s_PropDict); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 283, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_n_s_make_property); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 283, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __pyx_t_6 = NULL; __pyx_t_8 = 0; if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_5))) { __pyx_t_6 = PyMethod_GET_SELF(__pyx_t_5); if (likely(__pyx_t_6)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_5); __Pyx_INCREF(__pyx_t_6); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_5, function); __pyx_t_8 = 1; } } #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_5)) { PyObject *__pyx_temp[3] = {__pyx_t_6, __pyx_n_u_enc_hmac_key, ((PyObject *)(&PyBytes_Type))}; __pyx_t_7 
= __Pyx_PyFunction_FastCall(__pyx_t_5, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 283, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_GOTREF(__pyx_t_7); } else #endif #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_5)) { PyObject *__pyx_temp[3] = {__pyx_t_6, __pyx_n_u_enc_hmac_key, ((PyObject *)(&PyBytes_Type))}; __pyx_t_7 = __Pyx_PyCFunction_FastCall(__pyx_t_5, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 283, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_GOTREF(__pyx_t_7); } else #endif { __pyx_t_9 = PyTuple_New(2+__pyx_t_8); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 283, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); if (__pyx_t_6) { __Pyx_GIVEREF(__pyx_t_6); PyTuple_SET_ITEM(__pyx_t_9, 0, __pyx_t_6); __pyx_t_6 = NULL; } __Pyx_INCREF(__pyx_n_u_enc_hmac_key); __Pyx_GIVEREF(__pyx_n_u_enc_hmac_key); PyTuple_SET_ITEM(__pyx_t_9, 0+__pyx_t_8, __pyx_n_u_enc_hmac_key); __Pyx_INCREF(((PyObject *)(&PyBytes_Type))); __Pyx_GIVEREF(((PyObject *)(&PyBytes_Type))); PyTuple_SET_ITEM(__pyx_t_9, 1+__pyx_t_8, ((PyObject *)(&PyBytes_Type))); __pyx_t_7 = __Pyx_PyObject_Call(__pyx_t_5, __pyx_t_9, NULL); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 283, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; } __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (__Pyx_SetNameInClass(__pyx_t_1, __pyx_n_s_enc_hmac_key, __pyx_t_7) < 0) __PYX_ERR(0, 283, __pyx_L1_error) __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; /* "borg/item.pyx":284 * enc_key = PropDict._make_property('enc_key', bytes) * enc_hmac_key = PropDict._make_property('enc_hmac_key', bytes) * id_key = PropDict._make_property('id_key', bytes) # <<<<<<<<<<<<<< * chunk_seed = PropDict._make_property('chunk_seed', int) * tam_required = PropDict._make_property('tam_required', bool) */ __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_n_s_PropDict); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 284, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_t_5, __pyx_n_s_make_property); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 284, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_5 = NULL; __pyx_t_8 = 0; if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_9))) { __pyx_t_5 = PyMethod_GET_SELF(__pyx_t_9); if (likely(__pyx_t_5)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_9); __Pyx_INCREF(__pyx_t_5); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_9, function); __pyx_t_8 = 1; } } #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_9)) { PyObject *__pyx_temp[3] = {__pyx_t_5, __pyx_n_u_id_key, ((PyObject *)(&PyBytes_Type))}; __pyx_t_7 = __Pyx_PyFunction_FastCall(__pyx_t_9, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 284, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_GOTREF(__pyx_t_7); } else #endif #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_9)) { PyObject *__pyx_temp[3] = {__pyx_t_5, __pyx_n_u_id_key, ((PyObject *)(&PyBytes_Type))}; __pyx_t_7 = __Pyx_PyCFunction_FastCall(__pyx_t_9, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 284, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_GOTREF(__pyx_t_7); } else #endif { __pyx_t_6 = PyTuple_New(2+__pyx_t_8); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 284, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); if (__pyx_t_5) { __Pyx_GIVEREF(__pyx_t_5); PyTuple_SET_ITEM(__pyx_t_6, 0, __pyx_t_5); __pyx_t_5 = NULL; } __Pyx_INCREF(__pyx_n_u_id_key); 
__Pyx_GIVEREF(__pyx_n_u_id_key); PyTuple_SET_ITEM(__pyx_t_6, 0+__pyx_t_8, __pyx_n_u_id_key); __Pyx_INCREF(((PyObject *)(&PyBytes_Type))); __Pyx_GIVEREF(((PyObject *)(&PyBytes_Type))); PyTuple_SET_ITEM(__pyx_t_6, 1+__pyx_t_8, ((PyObject *)(&PyBytes_Type))); __pyx_t_7 = __Pyx_PyObject_Call(__pyx_t_9, __pyx_t_6, NULL); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 284, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; } __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; if (__Pyx_SetNameInClass(__pyx_t_1, __pyx_n_s_id_key, __pyx_t_7) < 0) __PYX_ERR(0, 284, __pyx_L1_error) __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; /* "borg/item.pyx":285 * enc_hmac_key = PropDict._make_property('enc_hmac_key', bytes) * id_key = PropDict._make_property('id_key', bytes) * chunk_seed = PropDict._make_property('chunk_seed', int) # <<<<<<<<<<<<<< * tam_required = PropDict._make_property('tam_required', bool) * */ __Pyx_GetModuleGlobalName(__pyx_t_9, __pyx_n_s_PropDict); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 285, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_t_9, __pyx_n_s_make_property); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 285, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __pyx_t_9 = NULL; __pyx_t_8 = 0; if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_6))) { __pyx_t_9 = PyMethod_GET_SELF(__pyx_t_6); if (likely(__pyx_t_9)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_6); __Pyx_INCREF(__pyx_t_9); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_6, function); __pyx_t_8 = 1; } } #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_6)) { PyObject *__pyx_temp[3] = {__pyx_t_9, __pyx_n_u_chunk_seed, ((PyObject *)(&PyInt_Type))}; __pyx_t_7 = __Pyx_PyFunction_FastCall(__pyx_t_6, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 285, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_9); __pyx_t_9 = 0; __Pyx_GOTREF(__pyx_t_7); } else #endif #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_6)) { PyObject *__pyx_temp[3] = {__pyx_t_9, __pyx_n_u_chunk_seed, ((PyObject *)(&PyInt_Type))}; __pyx_t_7 = __Pyx_PyCFunction_FastCall(__pyx_t_6, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 285, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_9); __pyx_t_9 = 0; __Pyx_GOTREF(__pyx_t_7); } else #endif { __pyx_t_5 = PyTuple_New(2+__pyx_t_8); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 285, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); if (__pyx_t_9) { __Pyx_GIVEREF(__pyx_t_9); PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_9); __pyx_t_9 = NULL; } __Pyx_INCREF(__pyx_n_u_chunk_seed); __Pyx_GIVEREF(__pyx_n_u_chunk_seed); PyTuple_SET_ITEM(__pyx_t_5, 0+__pyx_t_8, __pyx_n_u_chunk_seed); __Pyx_INCREF(((PyObject *)(&PyInt_Type))); __Pyx_GIVEREF(((PyObject *)(&PyInt_Type))); PyTuple_SET_ITEM(__pyx_t_5, 1+__pyx_t_8, ((PyObject *)(&PyInt_Type))); __pyx_t_7 = __Pyx_PyObject_Call(__pyx_t_6, __pyx_t_5, NULL); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 285, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; } __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; if (__Pyx_SetNameInClass(__pyx_t_1, __pyx_n_s_chunk_seed, __pyx_t_7) < 0) __PYX_ERR(0, 285, __pyx_L1_error) __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; /* "borg/item.pyx":286 * id_key = PropDict._make_property('id_key', bytes) * chunk_seed = PropDict._make_property('chunk_seed', int) * tam_required = PropDict._make_property('tam_required', bool) # <<<<<<<<<<<<<< * * */ __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_n_s_PropDict); if (unlikely(!__pyx_t_6)) 
__PYX_ERR(0, 286, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_n_s_make_property); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 286, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __pyx_t_6 = NULL; __pyx_t_8 = 0; if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_5))) { __pyx_t_6 = PyMethod_GET_SELF(__pyx_t_5); if (likely(__pyx_t_6)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_5); __Pyx_INCREF(__pyx_t_6); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_5, function); __pyx_t_8 = 1; } } #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_5)) { PyObject *__pyx_temp[3] = {__pyx_t_6, __pyx_n_u_tam_required, ((PyObject*)&PyBool_Type)}; __pyx_t_7 = __Pyx_PyFunction_FastCall(__pyx_t_5, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 286, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_GOTREF(__pyx_t_7); } else #endif #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_5)) { PyObject *__pyx_temp[3] = {__pyx_t_6, __pyx_n_u_tam_required, ((PyObject*)&PyBool_Type)}; __pyx_t_7 = __Pyx_PyCFunction_FastCall(__pyx_t_5, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 286, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_GOTREF(__pyx_t_7); } else #endif { __pyx_t_9 = PyTuple_New(2+__pyx_t_8); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 286, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); if (__pyx_t_6) { __Pyx_GIVEREF(__pyx_t_6); PyTuple_SET_ITEM(__pyx_t_9, 0, __pyx_t_6); __pyx_t_6 = NULL; } __Pyx_INCREF(__pyx_n_u_tam_required); __Pyx_GIVEREF(__pyx_n_u_tam_required); PyTuple_SET_ITEM(__pyx_t_9, 0+__pyx_t_8, __pyx_n_u_tam_required); __Pyx_INCREF(((PyObject*)&PyBool_Type)); __Pyx_GIVEREF(((PyObject*)&PyBool_Type)); PyTuple_SET_ITEM(__pyx_t_9, 1+__pyx_t_8, ((PyObject*)&PyBool_Type)); __pyx_t_7 = __Pyx_PyObject_Call(__pyx_t_5, __pyx_t_9, NULL); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 286, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; } __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (__Pyx_SetNameInClass(__pyx_t_1, __pyx_n_s_tam_required, __pyx_t_7) < 0) __PYX_ERR(0, 286, __pyx_L1_error) __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; /* "borg/item.pyx":264 * * * class Key(PropDict): # <<<<<<<<<<<<<< * """ * Key abstraction that deals with validation and the low-level details internally: */ __pyx_t_7 = __Pyx_Py3ClassCreate(__pyx_t_3, __pyx_n_s_Key, __pyx_t_2, __pyx_t_1, NULL, 0, 0); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 264, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); if (PyDict_SetItem(__pyx_d, __pyx_n_s_Key, __pyx_t_7) < 0) __PYX_ERR(0, 264, __pyx_L1_error) __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "borg/item.pyx":289 * * * class ArchiveItem(PropDict): # <<<<<<<<<<<<<< * """ * ArchiveItem abstraction that deals with validation and the low-level details internally: */ __Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_n_s_PropDict); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 289, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 289, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = __Pyx_CalculateMetaclass(NULL, __pyx_t_3); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 289, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_1 = __Pyx_Py3MetaclassPrepare(__pyx_t_2, 
__pyx_t_3, __pyx_n_s_ArchiveItem, __pyx_n_s_ArchiveItem, (PyObject *) NULL, __pyx_n_s_borg_item, __pyx_kp_s_ArchiveItem_abstraction_that_de); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 289, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); /* "borg/item.pyx":301 * """ * * VALID_KEYS = ARCHIVE_KEYS # str-typed keys # <<<<<<<<<<<<<< * * __slots__ = ("_dict", ) # avoid setting attributes not supported by properties */ __Pyx_GetModuleGlobalName(__pyx_t_7, __pyx_n_s_ARCHIVE_KEYS); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 301, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); if (__Pyx_SetNameInClass(__pyx_t_1, __pyx_n_s_VALID_KEYS, __pyx_t_7) < 0) __PYX_ERR(0, 301, __pyx_L1_error) __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; /* "borg/item.pyx":303 * VALID_KEYS = ARCHIVE_KEYS # str-typed keys * * __slots__ = ("_dict", ) # avoid setting attributes not supported by properties # <<<<<<<<<<<<<< * * version = PropDict._make_property('version', int) */ if (__Pyx_SetNameInClass(__pyx_t_1, __pyx_n_s_slots, __pyx_tuple__13) < 0) __PYX_ERR(0, 303, __pyx_L1_error) /* "borg/item.pyx":305 * __slots__ = ("_dict", ) # avoid setting attributes not supported by properties * * version = PropDict._make_property('version', int) # <<<<<<<<<<<<<< * name = PropDict._make_property('name', str, 'surrogate-escaped str', encode=safe_encode, decode=safe_decode) * items = PropDict._make_property('items', list) */ __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_n_s_PropDict); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 305, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_t_5, __pyx_n_s_make_property); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 305, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_5 = NULL; __pyx_t_8 = 0; if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_9))) { __pyx_t_5 = PyMethod_GET_SELF(__pyx_t_9); if (likely(__pyx_t_5)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_9); __Pyx_INCREF(__pyx_t_5); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_9, function); __pyx_t_8 = 1; } } #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_9)) { PyObject *__pyx_temp[3] = {__pyx_t_5, __pyx_n_u_version, ((PyObject *)(&PyInt_Type))}; __pyx_t_7 = __Pyx_PyFunction_FastCall(__pyx_t_9, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 305, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_GOTREF(__pyx_t_7); } else #endif #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_9)) { PyObject *__pyx_temp[3] = {__pyx_t_5, __pyx_n_u_version, ((PyObject *)(&PyInt_Type))}; __pyx_t_7 = __Pyx_PyCFunction_FastCall(__pyx_t_9, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 305, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_GOTREF(__pyx_t_7); } else #endif { __pyx_t_6 = PyTuple_New(2+__pyx_t_8); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 305, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); if (__pyx_t_5) { __Pyx_GIVEREF(__pyx_t_5); PyTuple_SET_ITEM(__pyx_t_6, 0, __pyx_t_5); __pyx_t_5 = NULL; } __Pyx_INCREF(__pyx_n_u_version); __Pyx_GIVEREF(__pyx_n_u_version); PyTuple_SET_ITEM(__pyx_t_6, 0+__pyx_t_8, __pyx_n_u_version); __Pyx_INCREF(((PyObject *)(&PyInt_Type))); __Pyx_GIVEREF(((PyObject *)(&PyInt_Type))); PyTuple_SET_ITEM(__pyx_t_6, 1+__pyx_t_8, ((PyObject *)(&PyInt_Type))); __pyx_t_7 = __Pyx_PyObject_Call(__pyx_t_9, __pyx_t_6, NULL); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 305, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; } __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; if 
(__Pyx_SetNameInClass(__pyx_t_1, __pyx_n_s_version, __pyx_t_7) < 0) __PYX_ERR(0, 305, __pyx_L1_error) __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; /* "borg/item.pyx":306 * * version = PropDict._make_property('version', int) * name = PropDict._make_property('name', str, 'surrogate-escaped str', encode=safe_encode, decode=safe_decode) # <<<<<<<<<<<<<< * items = PropDict._make_property('items', list) * cmdline = PropDict._make_property('cmdline', list) # list of s-e-str */ __Pyx_GetModuleGlobalName(__pyx_t_7, __pyx_n_s_PropDict); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 306, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_t_7, __pyx_n_s_make_property); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 306, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; __pyx_t_7 = PyTuple_New(3); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 306, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_INCREF(__pyx_n_u_name_2); __Pyx_GIVEREF(__pyx_n_u_name_2); PyTuple_SET_ITEM(__pyx_t_7, 0, __pyx_n_u_name_2); __Pyx_INCREF(((PyObject *)(&PyUnicode_Type))); __Pyx_GIVEREF(((PyObject *)(&PyUnicode_Type))); PyTuple_SET_ITEM(__pyx_t_7, 1, ((PyObject *)(&PyUnicode_Type))); __Pyx_INCREF(__pyx_kp_u_surrogate_escaped_str); __Pyx_GIVEREF(__pyx_kp_u_surrogate_escaped_str); PyTuple_SET_ITEM(__pyx_t_7, 2, __pyx_kp_u_surrogate_escaped_str); __pyx_t_6 = __Pyx_PyDict_NewPresized(2); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 306, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_n_s_safe_encode); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 306, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); if (PyDict_SetItem(__pyx_t_6, __pyx_n_s_encode, __pyx_t_5) < 0) __PYX_ERR(0, 306, __pyx_L1_error) __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_n_s_safe_decode); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 306, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); if (PyDict_SetItem(__pyx_t_6, __pyx_n_s_decode, __pyx_t_5) < 0) __PYX_ERR(0, 306, __pyx_L1_error) __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_5 = __Pyx_PyObject_Call(__pyx_t_9, __pyx_t_7, __pyx_t_6); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 306, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; if (__Pyx_SetNameInClass(__pyx_t_1, __pyx_n_s_name_2, __pyx_t_5) < 0) __PYX_ERR(0, 306, __pyx_L1_error) __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; /* "borg/item.pyx":307 * version = PropDict._make_property('version', int) * name = PropDict._make_property('name', str, 'surrogate-escaped str', encode=safe_encode, decode=safe_decode) * items = PropDict._make_property('items', list) # <<<<<<<<<<<<<< * cmdline = PropDict._make_property('cmdline', list) # list of s-e-str * hostname = PropDict._make_property('hostname', str, 'surrogate-escaped str', encode=safe_encode, decode=safe_decode) */ __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_n_s_PropDict); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 307, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_n_s_make_property); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 307, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __pyx_t_6 = NULL; __pyx_t_8 = 0; if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_7))) { __pyx_t_6 = PyMethod_GET_SELF(__pyx_t_7); if (likely(__pyx_t_6)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_7); __Pyx_INCREF(__pyx_t_6); __Pyx_INCREF(function); 
__Pyx_DECREF_SET(__pyx_t_7, function); __pyx_t_8 = 1; } } #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_7)) { PyObject *__pyx_temp[3] = {__pyx_t_6, __pyx_n_u_items, ((PyObject *)(&PyList_Type))}; __pyx_t_5 = __Pyx_PyFunction_FastCall(__pyx_t_7, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 307, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_GOTREF(__pyx_t_5); } else #endif #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_7)) { PyObject *__pyx_temp[3] = {__pyx_t_6, __pyx_n_u_items, ((PyObject *)(&PyList_Type))}; __pyx_t_5 = __Pyx_PyCFunction_FastCall(__pyx_t_7, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 307, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_GOTREF(__pyx_t_5); } else #endif { __pyx_t_9 = PyTuple_New(2+__pyx_t_8); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 307, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); if (__pyx_t_6) { __Pyx_GIVEREF(__pyx_t_6); PyTuple_SET_ITEM(__pyx_t_9, 0, __pyx_t_6); __pyx_t_6 = NULL; } __Pyx_INCREF(__pyx_n_u_items); __Pyx_GIVEREF(__pyx_n_u_items); PyTuple_SET_ITEM(__pyx_t_9, 0+__pyx_t_8, __pyx_n_u_items); __Pyx_INCREF(((PyObject *)(&PyList_Type))); __Pyx_GIVEREF(((PyObject *)(&PyList_Type))); PyTuple_SET_ITEM(__pyx_t_9, 1+__pyx_t_8, ((PyObject *)(&PyList_Type))); __pyx_t_5 = __Pyx_PyObject_Call(__pyx_t_7, __pyx_t_9, NULL); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 307, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; } __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; if (__Pyx_SetNameInClass(__pyx_t_1, __pyx_n_s_items, __pyx_t_5) < 0) __PYX_ERR(0, 307, __pyx_L1_error) __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; /* "borg/item.pyx":308 * name = PropDict._make_property('name', str, 'surrogate-escaped str', encode=safe_encode, decode=safe_decode) * items = PropDict._make_property('items', list) * cmdline = PropDict._make_property('cmdline', list) # list of s-e-str # <<<<<<<<<<<<<< * hostname = PropDict._make_property('hostname', str, 'surrogate-escaped str', encode=safe_encode, decode=safe_decode) * username = PropDict._make_property('username', str, 'surrogate-escaped str', encode=safe_encode, decode=safe_decode) */ __Pyx_GetModuleGlobalName(__pyx_t_7, __pyx_n_s_PropDict); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 308, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_t_7, __pyx_n_s_make_property); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 308, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; __pyx_t_7 = NULL; __pyx_t_8 = 0; if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_9))) { __pyx_t_7 = PyMethod_GET_SELF(__pyx_t_9); if (likely(__pyx_t_7)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_9); __Pyx_INCREF(__pyx_t_7); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_9, function); __pyx_t_8 = 1; } } #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_9)) { PyObject *__pyx_temp[3] = {__pyx_t_7, __pyx_n_u_cmdline, ((PyObject *)(&PyList_Type))}; __pyx_t_5 = __Pyx_PyFunction_FastCall(__pyx_t_9, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 308, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_GOTREF(__pyx_t_5); } else #endif #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_9)) { PyObject *__pyx_temp[3] = {__pyx_t_7, __pyx_n_u_cmdline, ((PyObject *)(&PyList_Type))}; __pyx_t_5 = __Pyx_PyCFunction_FastCall(__pyx_t_9, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 308, 
__pyx_L1_error) __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_GOTREF(__pyx_t_5); } else #endif { __pyx_t_6 = PyTuple_New(2+__pyx_t_8); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 308, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); if (__pyx_t_7) { __Pyx_GIVEREF(__pyx_t_7); PyTuple_SET_ITEM(__pyx_t_6, 0, __pyx_t_7); __pyx_t_7 = NULL; } __Pyx_INCREF(__pyx_n_u_cmdline); __Pyx_GIVEREF(__pyx_n_u_cmdline); PyTuple_SET_ITEM(__pyx_t_6, 0+__pyx_t_8, __pyx_n_u_cmdline); __Pyx_INCREF(((PyObject *)(&PyList_Type))); __Pyx_GIVEREF(((PyObject *)(&PyList_Type))); PyTuple_SET_ITEM(__pyx_t_6, 1+__pyx_t_8, ((PyObject *)(&PyList_Type))); __pyx_t_5 = __Pyx_PyObject_Call(__pyx_t_9, __pyx_t_6, NULL); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 308, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; } __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; if (__Pyx_SetNameInClass(__pyx_t_1, __pyx_n_s_cmdline, __pyx_t_5) < 0) __PYX_ERR(0, 308, __pyx_L1_error) __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; /* "borg/item.pyx":309 * items = PropDict._make_property('items', list) * cmdline = PropDict._make_property('cmdline', list) # list of s-e-str * hostname = PropDict._make_property('hostname', str, 'surrogate-escaped str', encode=safe_encode, decode=safe_decode) # <<<<<<<<<<<<<< * username = PropDict._make_property('username', str, 'surrogate-escaped str', encode=safe_encode, decode=safe_decode) * time = PropDict._make_property('time', str, 'surrogate-escaped str', encode=safe_encode, decode=safe_decode) */ __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_n_s_PropDict); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 309, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_t_5, __pyx_n_s_make_property); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 309, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_5 = PyTuple_New(3); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 309, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_INCREF(__pyx_n_u_hostname); __Pyx_GIVEREF(__pyx_n_u_hostname); PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_n_u_hostname); __Pyx_INCREF(((PyObject *)(&PyUnicode_Type))); __Pyx_GIVEREF(((PyObject *)(&PyUnicode_Type))); PyTuple_SET_ITEM(__pyx_t_5, 1, ((PyObject *)(&PyUnicode_Type))); __Pyx_INCREF(__pyx_kp_u_surrogate_escaped_str); __Pyx_GIVEREF(__pyx_kp_u_surrogate_escaped_str); PyTuple_SET_ITEM(__pyx_t_5, 2, __pyx_kp_u_surrogate_escaped_str); __pyx_t_6 = __Pyx_PyDict_NewPresized(2); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 309, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_GetModuleGlobalName(__pyx_t_7, __pyx_n_s_safe_encode); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 309, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); if (PyDict_SetItem(__pyx_t_6, __pyx_n_s_encode, __pyx_t_7) < 0) __PYX_ERR(0, 309, __pyx_L1_error) __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_GetModuleGlobalName(__pyx_t_7, __pyx_n_s_safe_decode); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 309, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); if (PyDict_SetItem(__pyx_t_6, __pyx_n_s_decode, __pyx_t_7) < 0) __PYX_ERR(0, 309, __pyx_L1_error) __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; __pyx_t_7 = __Pyx_PyObject_Call(__pyx_t_9, __pyx_t_5, __pyx_t_6); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 309, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; if (__Pyx_SetNameInClass(__pyx_t_1, __pyx_n_s_hostname, __pyx_t_7) < 0) __PYX_ERR(0, 309, __pyx_L1_error) __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; /* "borg/item.pyx":310 * cmdline = 
PropDict._make_property('cmdline', list) # list of s-e-str * hostname = PropDict._make_property('hostname', str, 'surrogate-escaped str', encode=safe_encode, decode=safe_decode) * username = PropDict._make_property('username', str, 'surrogate-escaped str', encode=safe_encode, decode=safe_decode) # <<<<<<<<<<<<<< * time = PropDict._make_property('time', str, 'surrogate-escaped str', encode=safe_encode, decode=safe_decode) * time_end = PropDict._make_property('time_end', str, 'surrogate-escaped str', encode=safe_encode, decode=safe_decode) */ __Pyx_GetModuleGlobalName(__pyx_t_7, __pyx_n_s_PropDict); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 310, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_t_7, __pyx_n_s_make_property); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 310, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; __pyx_t_7 = PyTuple_New(3); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 310, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_INCREF(__pyx_n_u_username); __Pyx_GIVEREF(__pyx_n_u_username); PyTuple_SET_ITEM(__pyx_t_7, 0, __pyx_n_u_username); __Pyx_INCREF(((PyObject *)(&PyUnicode_Type))); __Pyx_GIVEREF(((PyObject *)(&PyUnicode_Type))); PyTuple_SET_ITEM(__pyx_t_7, 1, ((PyObject *)(&PyUnicode_Type))); __Pyx_INCREF(__pyx_kp_u_surrogate_escaped_str); __Pyx_GIVEREF(__pyx_kp_u_surrogate_escaped_str); PyTuple_SET_ITEM(__pyx_t_7, 2, __pyx_kp_u_surrogate_escaped_str); __pyx_t_5 = __Pyx_PyDict_NewPresized(2); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 310, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_GetModuleGlobalName(__pyx_t_9, __pyx_n_s_safe_encode); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 310, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); if (PyDict_SetItem(__pyx_t_5, __pyx_n_s_encode, __pyx_t_9) < 0) __PYX_ERR(0, 310, __pyx_L1_error) __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __Pyx_GetModuleGlobalName(__pyx_t_9, __pyx_n_s_safe_decode); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 310, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); if (PyDict_SetItem(__pyx_t_5, __pyx_n_s_decode, __pyx_t_9) < 0) __PYX_ERR(0, 310, __pyx_L1_error) __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __pyx_t_9 = __Pyx_PyObject_Call(__pyx_t_6, __pyx_t_7, __pyx_t_5); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 310, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (__Pyx_SetNameInClass(__pyx_t_1, __pyx_n_s_username, __pyx_t_9) < 0) __PYX_ERR(0, 310, __pyx_L1_error) __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; /* "borg/item.pyx":311 * hostname = PropDict._make_property('hostname', str, 'surrogate-escaped str', encode=safe_encode, decode=safe_decode) * username = PropDict._make_property('username', str, 'surrogate-escaped str', encode=safe_encode, decode=safe_decode) * time = PropDict._make_property('time', str, 'surrogate-escaped str', encode=safe_encode, decode=safe_decode) # <<<<<<<<<<<<<< * time_end = PropDict._make_property('time_end', str, 'surrogate-escaped str', encode=safe_encode, decode=safe_decode) * comment = PropDict._make_property('comment', str, 'surrogate-escaped str', encode=safe_encode, decode=safe_decode) */ __Pyx_GetModuleGlobalName(__pyx_t_9, __pyx_n_s_PropDict); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 311, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_9, __pyx_n_s_make_property); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 311, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __pyx_t_9 = 
PyTuple_New(3); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 311, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_INCREF(__pyx_n_u_time); __Pyx_GIVEREF(__pyx_n_u_time); PyTuple_SET_ITEM(__pyx_t_9, 0, __pyx_n_u_time); __Pyx_INCREF(((PyObject *)(&PyUnicode_Type))); __Pyx_GIVEREF(((PyObject *)(&PyUnicode_Type))); PyTuple_SET_ITEM(__pyx_t_9, 1, ((PyObject *)(&PyUnicode_Type))); __Pyx_INCREF(__pyx_kp_u_surrogate_escaped_str); __Pyx_GIVEREF(__pyx_kp_u_surrogate_escaped_str); PyTuple_SET_ITEM(__pyx_t_9, 2, __pyx_kp_u_surrogate_escaped_str); __pyx_t_7 = __Pyx_PyDict_NewPresized(2); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 311, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_n_s_safe_encode); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 311, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); if (PyDict_SetItem(__pyx_t_7, __pyx_n_s_encode, __pyx_t_6) < 0) __PYX_ERR(0, 311, __pyx_L1_error) __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_n_s_safe_decode); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 311, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); if (PyDict_SetItem(__pyx_t_7, __pyx_n_s_decode, __pyx_t_6) < 0) __PYX_ERR(0, 311, __pyx_L1_error) __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __pyx_t_6 = __Pyx_PyObject_Call(__pyx_t_5, __pyx_t_9, __pyx_t_7); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 311, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; if (__Pyx_SetNameInClass(__pyx_t_1, __pyx_n_s_time, __pyx_t_6) < 0) __PYX_ERR(0, 311, __pyx_L1_error) __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; /* "borg/item.pyx":312 * username = PropDict._make_property('username', str, 'surrogate-escaped str', encode=safe_encode, decode=safe_decode) * time = PropDict._make_property('time', str, 'surrogate-escaped str', encode=safe_encode, decode=safe_decode) * time_end = PropDict._make_property('time_end', str, 'surrogate-escaped str', encode=safe_encode, decode=safe_decode) # <<<<<<<<<<<<<< * comment = PropDict._make_property('comment', str, 'surrogate-escaped str', encode=safe_encode, decode=safe_decode) * chunker_params = PropDict._make_property('chunker_params', tuple) */ __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_n_s_PropDict); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 312, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_n_s_make_property); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 312, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __pyx_t_6 = PyTuple_New(3); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 312, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_INCREF(__pyx_n_u_time_end); __Pyx_GIVEREF(__pyx_n_u_time_end); PyTuple_SET_ITEM(__pyx_t_6, 0, __pyx_n_u_time_end); __Pyx_INCREF(((PyObject *)(&PyUnicode_Type))); __Pyx_GIVEREF(((PyObject *)(&PyUnicode_Type))); PyTuple_SET_ITEM(__pyx_t_6, 1, ((PyObject *)(&PyUnicode_Type))); __Pyx_INCREF(__pyx_kp_u_surrogate_escaped_str); __Pyx_GIVEREF(__pyx_kp_u_surrogate_escaped_str); PyTuple_SET_ITEM(__pyx_t_6, 2, __pyx_kp_u_surrogate_escaped_str); __pyx_t_9 = __Pyx_PyDict_NewPresized(2); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 312, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_n_s_safe_encode); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 312, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); if (PyDict_SetItem(__pyx_t_9, __pyx_n_s_encode, __pyx_t_5) < 0) __PYX_ERR(0, 312, __pyx_L1_error) __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; 
__Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_n_s_safe_decode); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 312, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); if (PyDict_SetItem(__pyx_t_9, __pyx_n_s_decode, __pyx_t_5) < 0) __PYX_ERR(0, 312, __pyx_L1_error) __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_5 = __Pyx_PyObject_Call(__pyx_t_7, __pyx_t_6, __pyx_t_9); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 312, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; if (__Pyx_SetNameInClass(__pyx_t_1, __pyx_n_s_time_end, __pyx_t_5) < 0) __PYX_ERR(0, 312, __pyx_L1_error) __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; /* "borg/item.pyx":313 * time = PropDict._make_property('time', str, 'surrogate-escaped str', encode=safe_encode, decode=safe_decode) * time_end = PropDict._make_property('time_end', str, 'surrogate-escaped str', encode=safe_encode, decode=safe_decode) * comment = PropDict._make_property('comment', str, 'surrogate-escaped str', encode=safe_encode, decode=safe_decode) # <<<<<<<<<<<<<< * chunker_params = PropDict._make_property('chunker_params', tuple) * recreate_cmdline = PropDict._make_property('recreate_cmdline', list) # list of s-e-str */ __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_n_s_PropDict); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 313, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_t_5, __pyx_n_s_make_property); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 313, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_5 = PyTuple_New(3); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 313, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_INCREF(__pyx_n_u_comment); __Pyx_GIVEREF(__pyx_n_u_comment); PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_n_u_comment); __Pyx_INCREF(((PyObject *)(&PyUnicode_Type))); __Pyx_GIVEREF(((PyObject *)(&PyUnicode_Type))); PyTuple_SET_ITEM(__pyx_t_5, 1, ((PyObject *)(&PyUnicode_Type))); __Pyx_INCREF(__pyx_kp_u_surrogate_escaped_str); __Pyx_GIVEREF(__pyx_kp_u_surrogate_escaped_str); PyTuple_SET_ITEM(__pyx_t_5, 2, __pyx_kp_u_surrogate_escaped_str); __pyx_t_6 = __Pyx_PyDict_NewPresized(2); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 313, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_GetModuleGlobalName(__pyx_t_7, __pyx_n_s_safe_encode); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 313, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); if (PyDict_SetItem(__pyx_t_6, __pyx_n_s_encode, __pyx_t_7) < 0) __PYX_ERR(0, 313, __pyx_L1_error) __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_GetModuleGlobalName(__pyx_t_7, __pyx_n_s_safe_decode); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 313, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); if (PyDict_SetItem(__pyx_t_6, __pyx_n_s_decode, __pyx_t_7) < 0) __PYX_ERR(0, 313, __pyx_L1_error) __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; __pyx_t_7 = __Pyx_PyObject_Call(__pyx_t_9, __pyx_t_5, __pyx_t_6); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 313, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; if (__Pyx_SetNameInClass(__pyx_t_1, __pyx_n_s_comment, __pyx_t_7) < 0) __PYX_ERR(0, 313, __pyx_L1_error) __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; /* "borg/item.pyx":314 * time_end = PropDict._make_property('time_end', str, 'surrogate-escaped str', encode=safe_encode, decode=safe_decode) * comment = PropDict._make_property('comment', str, 'surrogate-escaped str', encode=safe_encode, decode=safe_decode) * chunker_params = 
PropDict._make_property('chunker_params', tuple) # <<<<<<<<<<<<<< * recreate_cmdline = PropDict._make_property('recreate_cmdline', list) # list of s-e-str * # recreate_source_id, recreate_args, recreate_partial_chunks were used in 1.1.0b1 .. b2 */ __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_n_s_PropDict); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 314, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_n_s_make_property); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 314, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __pyx_t_6 = NULL; __pyx_t_8 = 0; if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_5))) { __pyx_t_6 = PyMethod_GET_SELF(__pyx_t_5); if (likely(__pyx_t_6)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_5); __Pyx_INCREF(__pyx_t_6); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_5, function); __pyx_t_8 = 1; } } #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_5)) { PyObject *__pyx_temp[3] = {__pyx_t_6, __pyx_n_u_chunker_params, ((PyObject *)(&PyTuple_Type))}; __pyx_t_7 = __Pyx_PyFunction_FastCall(__pyx_t_5, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 314, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_GOTREF(__pyx_t_7); } else #endif #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_5)) { PyObject *__pyx_temp[3] = {__pyx_t_6, __pyx_n_u_chunker_params, ((PyObject *)(&PyTuple_Type))}; __pyx_t_7 = __Pyx_PyCFunction_FastCall(__pyx_t_5, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 314, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_GOTREF(__pyx_t_7); } else #endif { __pyx_t_9 = PyTuple_New(2+__pyx_t_8); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 314, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); if (__pyx_t_6) { __Pyx_GIVEREF(__pyx_t_6); PyTuple_SET_ITEM(__pyx_t_9, 0, __pyx_t_6); __pyx_t_6 = NULL; } __Pyx_INCREF(__pyx_n_u_chunker_params); __Pyx_GIVEREF(__pyx_n_u_chunker_params); PyTuple_SET_ITEM(__pyx_t_9, 0+__pyx_t_8, __pyx_n_u_chunker_params); __Pyx_INCREF(((PyObject *)(&PyTuple_Type))); __Pyx_GIVEREF(((PyObject *)(&PyTuple_Type))); PyTuple_SET_ITEM(__pyx_t_9, 1+__pyx_t_8, ((PyObject *)(&PyTuple_Type))); __pyx_t_7 = __Pyx_PyObject_Call(__pyx_t_5, __pyx_t_9, NULL); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 314, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; } __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (__Pyx_SetNameInClass(__pyx_t_1, __pyx_n_s_chunker_params, __pyx_t_7) < 0) __PYX_ERR(0, 314, __pyx_L1_error) __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; /* "borg/item.pyx":315 * comment = PropDict._make_property('comment', str, 'surrogate-escaped str', encode=safe_encode, decode=safe_decode) * chunker_params = PropDict._make_property('chunker_params', tuple) * recreate_cmdline = PropDict._make_property('recreate_cmdline', list) # list of s-e-str # <<<<<<<<<<<<<< * # recreate_source_id, recreate_args, recreate_partial_chunks were used in 1.1.0b1 .. 
b2 * recreate_source_id = PropDict._make_property('recreate_source_id', bytes) */ __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_n_s_PropDict); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 315, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_t_5, __pyx_n_s_make_property); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 315, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_5 = NULL; __pyx_t_8 = 0; if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_9))) { __pyx_t_5 = PyMethod_GET_SELF(__pyx_t_9); if (likely(__pyx_t_5)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_9); __Pyx_INCREF(__pyx_t_5); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_9, function); __pyx_t_8 = 1; } } #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_9)) { PyObject *__pyx_temp[3] = {__pyx_t_5, __pyx_n_u_recreate_cmdline, ((PyObject *)(&PyList_Type))}; __pyx_t_7 = __Pyx_PyFunction_FastCall(__pyx_t_9, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 315, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_GOTREF(__pyx_t_7); } else #endif #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_9)) { PyObject *__pyx_temp[3] = {__pyx_t_5, __pyx_n_u_recreate_cmdline, ((PyObject *)(&PyList_Type))}; __pyx_t_7 = __Pyx_PyCFunction_FastCall(__pyx_t_9, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 315, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_GOTREF(__pyx_t_7); } else #endif { __pyx_t_6 = PyTuple_New(2+__pyx_t_8); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 315, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); if (__pyx_t_5) { __Pyx_GIVEREF(__pyx_t_5); PyTuple_SET_ITEM(__pyx_t_6, 0, __pyx_t_5); __pyx_t_5 = NULL; } __Pyx_INCREF(__pyx_n_u_recreate_cmdline); __Pyx_GIVEREF(__pyx_n_u_recreate_cmdline); PyTuple_SET_ITEM(__pyx_t_6, 0+__pyx_t_8, __pyx_n_u_recreate_cmdline); __Pyx_INCREF(((PyObject *)(&PyList_Type))); __Pyx_GIVEREF(((PyObject *)(&PyList_Type))); PyTuple_SET_ITEM(__pyx_t_6, 1+__pyx_t_8, ((PyObject *)(&PyList_Type))); __pyx_t_7 = __Pyx_PyObject_Call(__pyx_t_9, __pyx_t_6, NULL); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 315, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; } __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; if (__Pyx_SetNameInClass(__pyx_t_1, __pyx_n_s_recreate_cmdline, __pyx_t_7) < 0) __PYX_ERR(0, 315, __pyx_L1_error) __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; /* "borg/item.pyx":317 * recreate_cmdline = PropDict._make_property('recreate_cmdline', list) # list of s-e-str * # recreate_source_id, recreate_args, recreate_partial_chunks were used in 1.1.0b1 .. 
b2 * recreate_source_id = PropDict._make_property('recreate_source_id', bytes) # <<<<<<<<<<<<<< * recreate_args = PropDict._make_property('recreate_args', list) # list of s-e-str * recreate_partial_chunks = PropDict._make_property('recreate_partial_chunks', list) # list of tuples */ __Pyx_GetModuleGlobalName(__pyx_t_9, __pyx_n_s_PropDict); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 317, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_t_9, __pyx_n_s_make_property); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 317, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __pyx_t_9 = NULL; __pyx_t_8 = 0; if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_6))) { __pyx_t_9 = PyMethod_GET_SELF(__pyx_t_6); if (likely(__pyx_t_9)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_6); __Pyx_INCREF(__pyx_t_9); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_6, function); __pyx_t_8 = 1; } } #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_6)) { PyObject *__pyx_temp[3] = {__pyx_t_9, __pyx_n_u_recreate_source_id, ((PyObject *)(&PyBytes_Type))}; __pyx_t_7 = __Pyx_PyFunction_FastCall(__pyx_t_6, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 317, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_9); __pyx_t_9 = 0; __Pyx_GOTREF(__pyx_t_7); } else #endif #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_6)) { PyObject *__pyx_temp[3] = {__pyx_t_9, __pyx_n_u_recreate_source_id, ((PyObject *)(&PyBytes_Type))}; __pyx_t_7 = __Pyx_PyCFunction_FastCall(__pyx_t_6, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 317, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_9); __pyx_t_9 = 0; __Pyx_GOTREF(__pyx_t_7); } else #endif { __pyx_t_5 = PyTuple_New(2+__pyx_t_8); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 317, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); if (__pyx_t_9) { __Pyx_GIVEREF(__pyx_t_9); PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_9); __pyx_t_9 = NULL; } __Pyx_INCREF(__pyx_n_u_recreate_source_id); __Pyx_GIVEREF(__pyx_n_u_recreate_source_id); PyTuple_SET_ITEM(__pyx_t_5, 0+__pyx_t_8, __pyx_n_u_recreate_source_id); __Pyx_INCREF(((PyObject *)(&PyBytes_Type))); __Pyx_GIVEREF(((PyObject *)(&PyBytes_Type))); PyTuple_SET_ITEM(__pyx_t_5, 1+__pyx_t_8, ((PyObject *)(&PyBytes_Type))); __pyx_t_7 = __Pyx_PyObject_Call(__pyx_t_6, __pyx_t_5, NULL); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 317, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; } __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; if (__Pyx_SetNameInClass(__pyx_t_1, __pyx_n_s_recreate_source_id, __pyx_t_7) < 0) __PYX_ERR(0, 317, __pyx_L1_error) __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; /* "borg/item.pyx":318 * # recreate_source_id, recreate_args, recreate_partial_chunks were used in 1.1.0b1 .. 
b2 * recreate_source_id = PropDict._make_property('recreate_source_id', bytes) * recreate_args = PropDict._make_property('recreate_args', list) # list of s-e-str # <<<<<<<<<<<<<< * recreate_partial_chunks = PropDict._make_property('recreate_partial_chunks', list) # list of tuples * */ __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_n_s_PropDict); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 318, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_n_s_make_property); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 318, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __pyx_t_6 = NULL; __pyx_t_8 = 0; if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_5))) { __pyx_t_6 = PyMethod_GET_SELF(__pyx_t_5); if (likely(__pyx_t_6)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_5); __Pyx_INCREF(__pyx_t_6); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_5, function); __pyx_t_8 = 1; } } #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_5)) { PyObject *__pyx_temp[3] = {__pyx_t_6, __pyx_n_u_recreate_args, ((PyObject *)(&PyList_Type))}; __pyx_t_7 = __Pyx_PyFunction_FastCall(__pyx_t_5, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 318, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_GOTREF(__pyx_t_7); } else #endif #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_5)) { PyObject *__pyx_temp[3] = {__pyx_t_6, __pyx_n_u_recreate_args, ((PyObject *)(&PyList_Type))}; __pyx_t_7 = __Pyx_PyCFunction_FastCall(__pyx_t_5, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 318, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_GOTREF(__pyx_t_7); } else #endif { __pyx_t_9 = PyTuple_New(2+__pyx_t_8); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 318, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); if (__pyx_t_6) { __Pyx_GIVEREF(__pyx_t_6); PyTuple_SET_ITEM(__pyx_t_9, 0, __pyx_t_6); __pyx_t_6 = NULL; } __Pyx_INCREF(__pyx_n_u_recreate_args); __Pyx_GIVEREF(__pyx_n_u_recreate_args); PyTuple_SET_ITEM(__pyx_t_9, 0+__pyx_t_8, __pyx_n_u_recreate_args); __Pyx_INCREF(((PyObject *)(&PyList_Type))); __Pyx_GIVEREF(((PyObject *)(&PyList_Type))); PyTuple_SET_ITEM(__pyx_t_9, 1+__pyx_t_8, ((PyObject *)(&PyList_Type))); __pyx_t_7 = __Pyx_PyObject_Call(__pyx_t_5, __pyx_t_9, NULL); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 318, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; } __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (__Pyx_SetNameInClass(__pyx_t_1, __pyx_n_s_recreate_args, __pyx_t_7) < 0) __PYX_ERR(0, 318, __pyx_L1_error) __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; /* "borg/item.pyx":319 * recreate_source_id = PropDict._make_property('recreate_source_id', bytes) * recreate_args = PropDict._make_property('recreate_args', list) # list of s-e-str * recreate_partial_chunks = PropDict._make_property('recreate_partial_chunks', list) # list of tuples # <<<<<<<<<<<<<< * * */ __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_n_s_PropDict); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 319, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_t_5, __pyx_n_s_make_property); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 319, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_5 = NULL; __pyx_t_8 = 0; if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_9))) { __pyx_t_5 = PyMethod_GET_SELF(__pyx_t_9); if (likely(__pyx_t_5)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_9); __Pyx_INCREF(__pyx_t_5); 
__Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_9, function); __pyx_t_8 = 1; } } #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_9)) { PyObject *__pyx_temp[3] = {__pyx_t_5, __pyx_n_u_recreate_partial_chunks, ((PyObject *)(&PyList_Type))}; __pyx_t_7 = __Pyx_PyFunction_FastCall(__pyx_t_9, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 319, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_GOTREF(__pyx_t_7); } else #endif #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_9)) { PyObject *__pyx_temp[3] = {__pyx_t_5, __pyx_n_u_recreate_partial_chunks, ((PyObject *)(&PyList_Type))}; __pyx_t_7 = __Pyx_PyCFunction_FastCall(__pyx_t_9, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 319, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_GOTREF(__pyx_t_7); } else #endif { __pyx_t_6 = PyTuple_New(2+__pyx_t_8); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 319, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); if (__pyx_t_5) { __Pyx_GIVEREF(__pyx_t_5); PyTuple_SET_ITEM(__pyx_t_6, 0, __pyx_t_5); __pyx_t_5 = NULL; } __Pyx_INCREF(__pyx_n_u_recreate_partial_chunks); __Pyx_GIVEREF(__pyx_n_u_recreate_partial_chunks); PyTuple_SET_ITEM(__pyx_t_6, 0+__pyx_t_8, __pyx_n_u_recreate_partial_chunks); __Pyx_INCREF(((PyObject *)(&PyList_Type))); __Pyx_GIVEREF(((PyObject *)(&PyList_Type))); PyTuple_SET_ITEM(__pyx_t_6, 1+__pyx_t_8, ((PyObject *)(&PyList_Type))); __pyx_t_7 = __Pyx_PyObject_Call(__pyx_t_9, __pyx_t_6, NULL); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 319, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; } __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; if (__Pyx_SetNameInClass(__pyx_t_1, __pyx_n_s_recreate_partial_chunks, __pyx_t_7) < 0) __PYX_ERR(0, 319, __pyx_L1_error) __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; /* "borg/item.pyx":289 * * * class ArchiveItem(PropDict): # <<<<<<<<<<<<<< * """ * ArchiveItem abstraction that deals with validation and the low-level details internally: */ __pyx_t_7 = __Pyx_Py3ClassCreate(__pyx_t_2, __pyx_n_s_ArchiveItem, __pyx_t_3, __pyx_t_1, NULL, 0, 0); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 289, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); if (PyDict_SetItem(__pyx_d, __pyx_n_s_ArchiveItem, __pyx_t_7) < 0) __PYX_ERR(0, 289, __pyx_L1_error) __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; /* "borg/item.pyx":322 * * * class ManifestItem(PropDict): # <<<<<<<<<<<<<< * """ * ManifestItem abstraction that deals with validation and the low-level details internally: */ __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_n_s_PropDict); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 322, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 322, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = __Pyx_CalculateMetaclass(NULL, __pyx_t_2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 322, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_1 = __Pyx_Py3MetaclassPrepare(__pyx_t_3, __pyx_t_2, __pyx_n_s_ManifestItem, __pyx_n_s_ManifestItem, (PyObject *) NULL, __pyx_n_s_borg_item, __pyx_kp_s_ManifestItem_abstraction_that_d); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 322, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); /* "borg/item.pyx":334 * """ * * VALID_KEYS = {'version', 'archives', 'timestamp', 'config', 'item_keys', } # str-typed keys # <<<<<<<<<<<<<< * * __slots__ = 
("_dict", ) # avoid setting attributes not supported by properties */ __pyx_t_7 = PySet_New(0); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 334, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); if (PySet_Add(__pyx_t_7, __pyx_n_u_version) < 0) __PYX_ERR(0, 334, __pyx_L1_error) if (PySet_Add(__pyx_t_7, __pyx_n_u_archives) < 0) __PYX_ERR(0, 334, __pyx_L1_error) if (PySet_Add(__pyx_t_7, __pyx_n_u_timestamp) < 0) __PYX_ERR(0, 334, __pyx_L1_error) if (PySet_Add(__pyx_t_7, __pyx_n_u_config) < 0) __PYX_ERR(0, 334, __pyx_L1_error) if (PySet_Add(__pyx_t_7, __pyx_n_u_item_keys) < 0) __PYX_ERR(0, 334, __pyx_L1_error) if (__Pyx_SetNameInClass(__pyx_t_1, __pyx_n_s_VALID_KEYS, __pyx_t_7) < 0) __PYX_ERR(0, 334, __pyx_L1_error) __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; /* "borg/item.pyx":336 * VALID_KEYS = {'version', 'archives', 'timestamp', 'config', 'item_keys', } # str-typed keys * * __slots__ = ("_dict", ) # avoid setting attributes not supported by properties # <<<<<<<<<<<<<< * * version = PropDict._make_property('version', int) */ if (__Pyx_SetNameInClass(__pyx_t_1, __pyx_n_s_slots, __pyx_tuple__13) < 0) __PYX_ERR(0, 336, __pyx_L1_error) /* "borg/item.pyx":338 * __slots__ = ("_dict", ) # avoid setting attributes not supported by properties * * version = PropDict._make_property('version', int) # <<<<<<<<<<<<<< * archives = PropDict._make_property('archives', dict) # name -> dict * timestamp = PropDict._make_property('timestamp', str, 'surrogate-escaped str', encode=safe_encode, decode=safe_decode) */ __Pyx_GetModuleGlobalName(__pyx_t_9, __pyx_n_s_PropDict); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 338, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_t_9, __pyx_n_s_make_property); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 338, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __pyx_t_9 = NULL; __pyx_t_8 = 0; if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_6))) { __pyx_t_9 = PyMethod_GET_SELF(__pyx_t_6); if (likely(__pyx_t_9)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_6); __Pyx_INCREF(__pyx_t_9); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_6, function); __pyx_t_8 = 1; } } #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_6)) { PyObject *__pyx_temp[3] = {__pyx_t_9, __pyx_n_u_version, ((PyObject *)(&PyInt_Type))}; __pyx_t_7 = __Pyx_PyFunction_FastCall(__pyx_t_6, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 338, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_9); __pyx_t_9 = 0; __Pyx_GOTREF(__pyx_t_7); } else #endif #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_6)) { PyObject *__pyx_temp[3] = {__pyx_t_9, __pyx_n_u_version, ((PyObject *)(&PyInt_Type))}; __pyx_t_7 = __Pyx_PyCFunction_FastCall(__pyx_t_6, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 338, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_9); __pyx_t_9 = 0; __Pyx_GOTREF(__pyx_t_7); } else #endif { __pyx_t_5 = PyTuple_New(2+__pyx_t_8); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 338, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); if (__pyx_t_9) { __Pyx_GIVEREF(__pyx_t_9); PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_9); __pyx_t_9 = NULL; } __Pyx_INCREF(__pyx_n_u_version); __Pyx_GIVEREF(__pyx_n_u_version); PyTuple_SET_ITEM(__pyx_t_5, 0+__pyx_t_8, __pyx_n_u_version); __Pyx_INCREF(((PyObject *)(&PyInt_Type))); __Pyx_GIVEREF(((PyObject *)(&PyInt_Type))); PyTuple_SET_ITEM(__pyx_t_5, 1+__pyx_t_8, ((PyObject *)(&PyInt_Type))); __pyx_t_7 = __Pyx_PyObject_Call(__pyx_t_6, __pyx_t_5, NULL); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 338, 
__pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; } __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; if (__Pyx_SetNameInClass(__pyx_t_1, __pyx_n_s_version, __pyx_t_7) < 0) __PYX_ERR(0, 338, __pyx_L1_error) __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; /* "borg/item.pyx":339 * * version = PropDict._make_property('version', int) * archives = PropDict._make_property('archives', dict) # name -> dict # <<<<<<<<<<<<<< * timestamp = PropDict._make_property('timestamp', str, 'surrogate-escaped str', encode=safe_encode, decode=safe_decode) * config = PropDict._make_property('config', dict) */ __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_n_s_PropDict); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 339, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_n_s_make_property); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 339, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __pyx_t_6 = NULL; __pyx_t_8 = 0; if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_5))) { __pyx_t_6 = PyMethod_GET_SELF(__pyx_t_5); if (likely(__pyx_t_6)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_5); __Pyx_INCREF(__pyx_t_6); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_5, function); __pyx_t_8 = 1; } } #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_5)) { PyObject *__pyx_temp[3] = {__pyx_t_6, __pyx_n_u_archives, ((PyObject *)(&PyDict_Type))}; __pyx_t_7 = __Pyx_PyFunction_FastCall(__pyx_t_5, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 339, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_GOTREF(__pyx_t_7); } else #endif #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_5)) { PyObject *__pyx_temp[3] = {__pyx_t_6, __pyx_n_u_archives, ((PyObject *)(&PyDict_Type))}; __pyx_t_7 = __Pyx_PyCFunction_FastCall(__pyx_t_5, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 339, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_GOTREF(__pyx_t_7); } else #endif { __pyx_t_9 = PyTuple_New(2+__pyx_t_8); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 339, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); if (__pyx_t_6) { __Pyx_GIVEREF(__pyx_t_6); PyTuple_SET_ITEM(__pyx_t_9, 0, __pyx_t_6); __pyx_t_6 = NULL; } __Pyx_INCREF(__pyx_n_u_archives); __Pyx_GIVEREF(__pyx_n_u_archives); PyTuple_SET_ITEM(__pyx_t_9, 0+__pyx_t_8, __pyx_n_u_archives); __Pyx_INCREF(((PyObject *)(&PyDict_Type))); __Pyx_GIVEREF(((PyObject *)(&PyDict_Type))); PyTuple_SET_ITEM(__pyx_t_9, 1+__pyx_t_8, ((PyObject *)(&PyDict_Type))); __pyx_t_7 = __Pyx_PyObject_Call(__pyx_t_5, __pyx_t_9, NULL); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 339, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; } __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (__Pyx_SetNameInClass(__pyx_t_1, __pyx_n_s_archives, __pyx_t_7) < 0) __PYX_ERR(0, 339, __pyx_L1_error) __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; /* "borg/item.pyx":340 * version = PropDict._make_property('version', int) * archives = PropDict._make_property('archives', dict) # name -> dict * timestamp = PropDict._make_property('timestamp', str, 'surrogate-escaped str', encode=safe_encode, decode=safe_decode) # <<<<<<<<<<<<<< * config = PropDict._make_property('config', dict) * item_keys = PropDict._make_property('item_keys', tuple) */ __Pyx_GetModuleGlobalName(__pyx_t_7, __pyx_n_s_PropDict); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 340, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_7, __pyx_n_s_make_property); if 
(unlikely(!__pyx_t_5)) __PYX_ERR(0, 340, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; __pyx_t_7 = PyTuple_New(3); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 340, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_INCREF(__pyx_n_u_timestamp); __Pyx_GIVEREF(__pyx_n_u_timestamp); PyTuple_SET_ITEM(__pyx_t_7, 0, __pyx_n_u_timestamp); __Pyx_INCREF(((PyObject *)(&PyUnicode_Type))); __Pyx_GIVEREF(((PyObject *)(&PyUnicode_Type))); PyTuple_SET_ITEM(__pyx_t_7, 1, ((PyObject *)(&PyUnicode_Type))); __Pyx_INCREF(__pyx_kp_u_surrogate_escaped_str); __Pyx_GIVEREF(__pyx_kp_u_surrogate_escaped_str); PyTuple_SET_ITEM(__pyx_t_7, 2, __pyx_kp_u_surrogate_escaped_str); __pyx_t_9 = __Pyx_PyDict_NewPresized(2); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 340, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_n_s_safe_encode); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 340, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); if (PyDict_SetItem(__pyx_t_9, __pyx_n_s_encode, __pyx_t_6) < 0) __PYX_ERR(0, 340, __pyx_L1_error) __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_n_s_safe_decode); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 340, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); if (PyDict_SetItem(__pyx_t_9, __pyx_n_s_decode, __pyx_t_6) < 0) __PYX_ERR(0, 340, __pyx_L1_error) __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __pyx_t_6 = __Pyx_PyObject_Call(__pyx_t_5, __pyx_t_7, __pyx_t_9); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 340, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; if (__Pyx_SetNameInClass(__pyx_t_1, __pyx_n_s_timestamp, __pyx_t_6) < 0) __PYX_ERR(0, 340, __pyx_L1_error) __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; /* "borg/item.pyx":341 * archives = PropDict._make_property('archives', dict) # name -> dict * timestamp = PropDict._make_property('timestamp', str, 'surrogate-escaped str', encode=safe_encode, decode=safe_decode) * config = PropDict._make_property('config', dict) # <<<<<<<<<<<<<< * item_keys = PropDict._make_property('item_keys', tuple) */ __Pyx_GetModuleGlobalName(__pyx_t_9, __pyx_n_s_PropDict); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 341, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_9, __pyx_n_s_make_property); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 341, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __pyx_t_9 = NULL; __pyx_t_8 = 0; if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_7))) { __pyx_t_9 = PyMethod_GET_SELF(__pyx_t_7); if (likely(__pyx_t_9)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_7); __Pyx_INCREF(__pyx_t_9); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_7, function); __pyx_t_8 = 1; } } #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_7)) { PyObject *__pyx_temp[3] = {__pyx_t_9, __pyx_n_u_config, ((PyObject *)(&PyDict_Type))}; __pyx_t_6 = __Pyx_PyFunction_FastCall(__pyx_t_7, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 341, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_9); __pyx_t_9 = 0; __Pyx_GOTREF(__pyx_t_6); } else #endif #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_7)) { PyObject *__pyx_temp[3] = {__pyx_t_9, __pyx_n_u_config, ((PyObject *)(&PyDict_Type))}; __pyx_t_6 = __Pyx_PyCFunction_FastCall(__pyx_t_7, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 341, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_9); __pyx_t_9 = 0; __Pyx_GOTREF(__pyx_t_6); } 
else #endif { __pyx_t_5 = PyTuple_New(2+__pyx_t_8); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 341, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); if (__pyx_t_9) { __Pyx_GIVEREF(__pyx_t_9); PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_9); __pyx_t_9 = NULL; } __Pyx_INCREF(__pyx_n_u_config); __Pyx_GIVEREF(__pyx_n_u_config); PyTuple_SET_ITEM(__pyx_t_5, 0+__pyx_t_8, __pyx_n_u_config); __Pyx_INCREF(((PyObject *)(&PyDict_Type))); __Pyx_GIVEREF(((PyObject *)(&PyDict_Type))); PyTuple_SET_ITEM(__pyx_t_5, 1+__pyx_t_8, ((PyObject *)(&PyDict_Type))); __pyx_t_6 = __Pyx_PyObject_Call(__pyx_t_7, __pyx_t_5, NULL); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 341, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; } __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; if (__Pyx_SetNameInClass(__pyx_t_1, __pyx_n_s_config, __pyx_t_6) < 0) __PYX_ERR(0, 341, __pyx_L1_error) __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; /* "borg/item.pyx":342 * timestamp = PropDict._make_property('timestamp', str, 'surrogate-escaped str', encode=safe_encode, decode=safe_decode) * config = PropDict._make_property('config', dict) * item_keys = PropDict._make_property('item_keys', tuple) # <<<<<<<<<<<<<< */ __Pyx_GetModuleGlobalName(__pyx_t_7, __pyx_n_s_PropDict); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 342, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_7, __pyx_n_s_make_property); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 342, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; __pyx_t_7 = NULL; __pyx_t_8 = 0; if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_5))) { __pyx_t_7 = PyMethod_GET_SELF(__pyx_t_5); if (likely(__pyx_t_7)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_5); __Pyx_INCREF(__pyx_t_7); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_5, function); __pyx_t_8 = 1; } } #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_5)) { PyObject *__pyx_temp[3] = {__pyx_t_7, __pyx_n_u_item_keys, ((PyObject *)(&PyTuple_Type))}; __pyx_t_6 = __Pyx_PyFunction_FastCall(__pyx_t_5, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 342, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_GOTREF(__pyx_t_6); } else #endif #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_5)) { PyObject *__pyx_temp[3] = {__pyx_t_7, __pyx_n_u_item_keys, ((PyObject *)(&PyTuple_Type))}; __pyx_t_6 = __Pyx_PyCFunction_FastCall(__pyx_t_5, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 342, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_GOTREF(__pyx_t_6); } else #endif { __pyx_t_9 = PyTuple_New(2+__pyx_t_8); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 342, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); if (__pyx_t_7) { __Pyx_GIVEREF(__pyx_t_7); PyTuple_SET_ITEM(__pyx_t_9, 0, __pyx_t_7); __pyx_t_7 = NULL; } __Pyx_INCREF(__pyx_n_u_item_keys); __Pyx_GIVEREF(__pyx_n_u_item_keys); PyTuple_SET_ITEM(__pyx_t_9, 0+__pyx_t_8, __pyx_n_u_item_keys); __Pyx_INCREF(((PyObject *)(&PyTuple_Type))); __Pyx_GIVEREF(((PyObject *)(&PyTuple_Type))); PyTuple_SET_ITEM(__pyx_t_9, 1+__pyx_t_8, ((PyObject *)(&PyTuple_Type))); __pyx_t_6 = __Pyx_PyObject_Call(__pyx_t_5, __pyx_t_9, NULL); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 342, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; } __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (__Pyx_SetNameInClass(__pyx_t_1, __pyx_n_s_item_keys, __pyx_t_6) < 0) __PYX_ERR(0, 342, __pyx_L1_error) __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; /* "borg/item.pyx":322 * * * class 
ManifestItem(PropDict): # <<<<<<<<<<<<<< * """ * ManifestItem abstraction that deals with validation and the low-level details internally: */ __pyx_t_6 = __Pyx_Py3ClassCreate(__pyx_t_3, __pyx_n_s_ManifestItem, __pyx_t_2, __pyx_t_1, NULL, 0, 0); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 322, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); if (PyDict_SetItem(__pyx_d, __pyx_n_s_ManifestItem, __pyx_t_6) < 0) __PYX_ERR(0, 322, __pyx_L1_error) __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "borg/item.pyx":1 * # cython: language_level=3 # <<<<<<<<<<<<<< * * import stat */ __pyx_t_2 = __Pyx_PyDict_NewPresized(0); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 1, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); if (PyDict_SetItem(__pyx_d, __pyx_n_s_test, __pyx_t_2) < 0) __PYX_ERR(0, 1, __pyx_L1_error) __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /*--- Wrapped vars code ---*/ goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_XDECREF(__pyx_t_6); __Pyx_XDECREF(__pyx_t_7); __Pyx_XDECREF(__pyx_t_9); if (__pyx_m) { if (__pyx_d) { __Pyx_AddTraceback("init borg.item", __pyx_clineno, __pyx_lineno, __pyx_filename); } Py_CLEAR(__pyx_m); } else if (!PyErr_Occurred()) { PyErr_SetString(PyExc_ImportError, "init borg.item"); } __pyx_L0:; __Pyx_RefNannyFinishContext(); #if CYTHON_PEP489_MULTI_PHASE_INIT return (__pyx_m != NULL) ? 0 : -1; #elif PY_MAJOR_VERSION >= 3 return __pyx_m; #else return; #endif } /* --- Runtime support code --- */ /* Refnanny */ #if CYTHON_REFNANNY static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname) { PyObject *m = NULL, *p = NULL; void *r = NULL; m = PyImport_ImportModule(modname); if (!m) goto end; p = PyObject_GetAttrString(m, "RefNannyAPI"); if (!p) goto end; r = PyLong_AsVoidPtr(p); end: Py_XDECREF(p); Py_XDECREF(m); return (__Pyx_RefNannyAPIStruct *)r; } #endif /* PyObjectGetAttrStr */ #if CYTHON_USE_TYPE_SLOTS static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStr(PyObject* obj, PyObject* attr_name) { PyTypeObject* tp = Py_TYPE(obj); if (likely(tp->tp_getattro)) return tp->tp_getattro(obj, attr_name); #if PY_MAJOR_VERSION < 3 if (likely(tp->tp_getattr)) return tp->tp_getattr(obj, PyString_AS_STRING(attr_name)); #endif return PyObject_GetAttr(obj, attr_name); } #endif /* GetBuiltinName */ static PyObject *__Pyx_GetBuiltinName(PyObject *name) { PyObject* result = __Pyx_PyObject_GetAttrStr(__pyx_b, name); if (unlikely(!result)) { PyErr_Format(PyExc_NameError, #if PY_MAJOR_VERSION >= 3 "name '%U' is not defined", name); #else "name '%.200s' is not defined", PyString_AS_STRING(name)); #endif } return result; } /* RaiseDoubleKeywords */ static void __Pyx_RaiseDoubleKeywordsError( const char* func_name, PyObject* kw_name) { PyErr_Format(PyExc_TypeError, #if PY_MAJOR_VERSION >= 3 "%s() got multiple values for keyword argument '%U'", func_name, kw_name); #else "%s() got multiple values for keyword argument '%s'", func_name, PyString_AsString(kw_name)); #endif } /* ParseKeywords */ static int __Pyx_ParseOptionalKeywords( PyObject *kwds, PyObject **argnames[], PyObject *kwds2, PyObject *values[], Py_ssize_t num_pos_args, const char* function_name) { PyObject *key = 0, *value = 0; Py_ssize_t pos = 0; PyObject*** name; PyObject*** first_kw_arg = argnames + num_pos_args; while (PyDict_Next(kwds, &pos, &key, &value)) { name = first_kw_arg; while (*name && (**name != key)) 
name++; if (*name) { values[name-argnames] = value; continue; } name = first_kw_arg; #if PY_MAJOR_VERSION < 3 if (likely(PyString_Check(key))) { while (*name) { if ((CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**name) == PyString_GET_SIZE(key)) && _PyString_Eq(**name, key)) { values[name-argnames] = value; break; } name++; } if (*name) continue; else { PyObject*** argname = argnames; while (argname != first_kw_arg) { if ((**argname == key) || ( (CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**argname) == PyString_GET_SIZE(key)) && _PyString_Eq(**argname, key))) { goto arg_passed_twice; } argname++; } } } else #endif if (likely(PyUnicode_Check(key))) { while (*name) { int cmp = (**name == key) ? 0 : #if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3 (__Pyx_PyUnicode_GET_LENGTH(**name) != __Pyx_PyUnicode_GET_LENGTH(key)) ? 1 : #endif PyUnicode_Compare(**name, key); if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad; if (cmp == 0) { values[name-argnames] = value; break; } name++; } if (*name) continue; else { PyObject*** argname = argnames; while (argname != first_kw_arg) { int cmp = (**argname == key) ? 0 : #if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3 (__Pyx_PyUnicode_GET_LENGTH(**argname) != __Pyx_PyUnicode_GET_LENGTH(key)) ? 1 : #endif PyUnicode_Compare(**argname, key); if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad; if (cmp == 0) goto arg_passed_twice; argname++; } } } else goto invalid_keyword_type; if (kwds2) { if (unlikely(PyDict_SetItem(kwds2, key, value))) goto bad; } else { goto invalid_keyword; } } return 0; arg_passed_twice: __Pyx_RaiseDoubleKeywordsError(function_name, key); goto bad; invalid_keyword_type: PyErr_Format(PyExc_TypeError, "%.200s() keywords must be strings", function_name); goto bad; invalid_keyword: PyErr_Format(PyExc_TypeError, #if PY_MAJOR_VERSION < 3 "%.200s() got an unexpected keyword argument '%.200s'", function_name, PyString_AsString(key)); #else "%s() got an unexpected keyword argument '%U'", function_name, key); #endif bad: return -1; } /* RaiseArgTupleInvalid */ static void __Pyx_RaiseArgtupleInvalid( const char* func_name, int exact, Py_ssize_t num_min, Py_ssize_t num_max, Py_ssize_t num_found) { Py_ssize_t num_expected; const char *more_or_less; if (num_found < num_min) { num_expected = num_min; more_or_less = "at least"; } else { num_expected = num_max; more_or_less = "at most"; } if (exact) { more_or_less = "exactly"; } PyErr_Format(PyExc_TypeError, "%.200s() takes %.8s %" CYTHON_FORMAT_SSIZE_T "d positional argument%.1s (%" CYTHON_FORMAT_SSIZE_T "d given)", func_name, more_or_less, num_expected, (num_expected == 1) ? 
"" : "s", num_found); } /* PyObjectSetAttrStr */ #if CYTHON_USE_TYPE_SLOTS static CYTHON_INLINE int __Pyx_PyObject_SetAttrStr(PyObject* obj, PyObject* attr_name, PyObject* value) { PyTypeObject* tp = Py_TYPE(obj); if (likely(tp->tp_setattro)) return tp->tp_setattro(obj, attr_name, value); #if PY_MAJOR_VERSION < 3 if (likely(tp->tp_setattr)) return tp->tp_setattr(obj, PyString_AS_STRING(attr_name), value); #endif return PyObject_SetAttr(obj, attr_name, value); } #endif /* PyCFunctionFastCall */ #if CYTHON_FAST_PYCCALL static CYTHON_INLINE PyObject * __Pyx_PyCFunction_FastCall(PyObject *func_obj, PyObject **args, Py_ssize_t nargs) { PyCFunctionObject *func = (PyCFunctionObject*)func_obj; PyCFunction meth = PyCFunction_GET_FUNCTION(func); PyObject *self = PyCFunction_GET_SELF(func); int flags = PyCFunction_GET_FLAGS(func); assert(PyCFunction_Check(func)); assert(METH_FASTCALL == (flags & ~(METH_CLASS | METH_STATIC | METH_COEXIST | METH_KEYWORDS | METH_STACKLESS))); assert(nargs >= 0); assert(nargs == 0 || args != NULL); /* _PyCFunction_FastCallDict() must not be called with an exception set, because it may clear it (directly or indirectly) and so the caller loses its exception */ assert(!PyErr_Occurred()); if ((PY_VERSION_HEX < 0x030700A0) || unlikely(flags & METH_KEYWORDS)) { return (*((__Pyx_PyCFunctionFastWithKeywords)(void*)meth)) (self, args, nargs, NULL); } else { return (*((__Pyx_PyCFunctionFast)(void*)meth)) (self, args, nargs); } } #endif /* PyFunctionFastCall */ #if CYTHON_FAST_PYCALL static PyObject* __Pyx_PyFunction_FastCallNoKw(PyCodeObject *co, PyObject **args, Py_ssize_t na, PyObject *globals) { PyFrameObject *f; PyThreadState *tstate = __Pyx_PyThreadState_Current; PyObject **fastlocals; Py_ssize_t i; PyObject *result; assert(globals != NULL); /* XXX Perhaps we should create a specialized PyFrame_New() that doesn't take locals, but does take builtins without sanity checking them. */ assert(tstate != NULL); f = PyFrame_New(tstate, co, globals, NULL); if (f == NULL) { return NULL; } fastlocals = __Pyx_PyFrame_GetLocalsplus(f); for (i = 0; i < na; i++) { Py_INCREF(*args); fastlocals[i] = *args++; } result = PyEval_EvalFrameEx(f,0); ++tstate->recursion_depth; Py_DECREF(f); --tstate->recursion_depth; return result; } #if 1 || PY_VERSION_HEX < 0x030600B1 static PyObject *__Pyx_PyFunction_FastCallDict(PyObject *func, PyObject **args, Py_ssize_t nargs, PyObject *kwargs) { PyCodeObject *co = (PyCodeObject *)PyFunction_GET_CODE(func); PyObject *globals = PyFunction_GET_GLOBALS(func); PyObject *argdefs = PyFunction_GET_DEFAULTS(func); PyObject *closure; #if PY_MAJOR_VERSION >= 3 PyObject *kwdefs; #endif PyObject *kwtuple, **k; PyObject **d; Py_ssize_t nd; Py_ssize_t nk; PyObject *result; assert(kwargs == NULL || PyDict_Check(kwargs)); nk = kwargs ? 
PyDict_Size(kwargs) : 0; if (Py_EnterRecursiveCall((char*)" while calling a Python object")) { return NULL; } if ( #if PY_MAJOR_VERSION >= 3 co->co_kwonlyargcount == 0 && #endif likely(kwargs == NULL || nk == 0) && co->co_flags == (CO_OPTIMIZED | CO_NEWLOCALS | CO_NOFREE)) { if (argdefs == NULL && co->co_argcount == nargs) { result = __Pyx_PyFunction_FastCallNoKw(co, args, nargs, globals); goto done; } else if (nargs == 0 && argdefs != NULL && co->co_argcount == Py_SIZE(argdefs)) { /* function called with no arguments, but all parameters have a default value: use default values as arguments .*/ args = &PyTuple_GET_ITEM(argdefs, 0); result =__Pyx_PyFunction_FastCallNoKw(co, args, Py_SIZE(argdefs), globals); goto done; } } if (kwargs != NULL) { Py_ssize_t pos, i; kwtuple = PyTuple_New(2 * nk); if (kwtuple == NULL) { result = NULL; goto done; } k = &PyTuple_GET_ITEM(kwtuple, 0); pos = i = 0; while (PyDict_Next(kwargs, &pos, &k[i], &k[i+1])) { Py_INCREF(k[i]); Py_INCREF(k[i+1]); i += 2; } nk = i / 2; } else { kwtuple = NULL; k = NULL; } closure = PyFunction_GET_CLOSURE(func); #if PY_MAJOR_VERSION >= 3 kwdefs = PyFunction_GET_KW_DEFAULTS(func); #endif if (argdefs != NULL) { d = &PyTuple_GET_ITEM(argdefs, 0); nd = Py_SIZE(argdefs); } else { d = NULL; nd = 0; } #if PY_MAJOR_VERSION >= 3 result = PyEval_EvalCodeEx((PyObject*)co, globals, (PyObject *)NULL, args, (int)nargs, k, (int)nk, d, (int)nd, kwdefs, closure); #else result = PyEval_EvalCodeEx(co, globals, (PyObject *)NULL, args, (int)nargs, k, (int)nk, d, (int)nd, closure); #endif Py_XDECREF(kwtuple); done: Py_LeaveRecursiveCall(); return result; } #endif #endif /* PyObjectCall */ #if CYTHON_COMPILING_IN_CPYTHON static CYTHON_INLINE PyObject* __Pyx_PyObject_Call(PyObject *func, PyObject *arg, PyObject *kw) { PyObject *result; ternaryfunc call = func->ob_type->tp_call; if (unlikely(!call)) return PyObject_Call(func, arg, kw); if (unlikely(Py_EnterRecursiveCall((char*)" while calling a Python object"))) return NULL; result = (*call)(func, arg, kw); Py_LeaveRecursiveCall(); if (unlikely(!result) && unlikely(!PyErr_Occurred())) { PyErr_SetString( PyExc_SystemError, "NULL result without error in PyObject_Call"); } return result; } #endif /* PyObjectCall2Args */ static CYTHON_UNUSED PyObject* __Pyx_PyObject_Call2Args(PyObject* function, PyObject* arg1, PyObject* arg2) { PyObject *args, *result = NULL; #if CYTHON_FAST_PYCALL if (PyFunction_Check(function)) { PyObject *args[2] = {arg1, arg2}; return __Pyx_PyFunction_FastCall(function, args, 2); } #endif #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(function)) { PyObject *args[2] = {arg1, arg2}; return __Pyx_PyCFunction_FastCall(function, args, 2); } #endif args = PyTuple_New(2); if (unlikely(!args)) goto done; Py_INCREF(arg1); PyTuple_SET_ITEM(args, 0, arg1); Py_INCREF(arg2); PyTuple_SET_ITEM(args, 1, arg2); Py_INCREF(function); result = __Pyx_PyObject_Call(function, args, NULL); Py_DECREF(args); Py_DECREF(function); done: return result; } /* PyObjectCallMethO */ #if CYTHON_COMPILING_IN_CPYTHON static CYTHON_INLINE PyObject* __Pyx_PyObject_CallMethO(PyObject *func, PyObject *arg) { PyObject *self, *result; PyCFunction cfunc; cfunc = PyCFunction_GET_FUNCTION(func); self = PyCFunction_GET_SELF(func); if (unlikely(Py_EnterRecursiveCall((char*)" while calling a Python object"))) return NULL; result = cfunc(self, arg); Py_LeaveRecursiveCall(); if (unlikely(!result) && unlikely(!PyErr_Occurred())) { PyErr_SetString( PyExc_SystemError, "NULL result without error in PyObject_Call"); } return result; 
} #endif /* PyObjectCallOneArg */ #if CYTHON_COMPILING_IN_CPYTHON static PyObject* __Pyx__PyObject_CallOneArg(PyObject *func, PyObject *arg) { PyObject *result; PyObject *args = PyTuple_New(1); if (unlikely(!args)) return NULL; Py_INCREF(arg); PyTuple_SET_ITEM(args, 0, arg); result = __Pyx_PyObject_Call(func, args, NULL); Py_DECREF(args); return result; } static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg) { #if CYTHON_FAST_PYCALL if (PyFunction_Check(func)) { return __Pyx_PyFunction_FastCall(func, &arg, 1); } #endif if (likely(PyCFunction_Check(func))) { if (likely(PyCFunction_GET_FLAGS(func) & METH_O)) { return __Pyx_PyObject_CallMethO(func, arg); #if CYTHON_FAST_PYCCALL } else if (PyCFunction_GET_FLAGS(func) & METH_FASTCALL) { return __Pyx_PyCFunction_FastCall(func, &arg, 1); #endif } } return __Pyx__PyObject_CallOneArg(func, arg); } #else static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg) { PyObject *result; PyObject *args = PyTuple_Pack(1, arg); if (unlikely(!args)) return NULL; result = __Pyx_PyObject_Call(func, args, NULL); Py_DECREF(args); return result; } #endif /* PyErrFetchRestore */ #if CYTHON_FAST_THREAD_STATE static CYTHON_INLINE void __Pyx_ErrRestoreInState(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb) { PyObject *tmp_type, *tmp_value, *tmp_tb; tmp_type = tstate->curexc_type; tmp_value = tstate->curexc_value; tmp_tb = tstate->curexc_traceback; tstate->curexc_type = type; tstate->curexc_value = value; tstate->curexc_traceback = tb; Py_XDECREF(tmp_type); Py_XDECREF(tmp_value); Py_XDECREF(tmp_tb); } static CYTHON_INLINE void __Pyx_ErrFetchInState(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) { *type = tstate->curexc_type; *value = tstate->curexc_value; *tb = tstate->curexc_traceback; tstate->curexc_type = 0; tstate->curexc_value = 0; tstate->curexc_traceback = 0; } #endif /* RaiseException */ #if PY_MAJOR_VERSION < 3 static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, CYTHON_UNUSED PyObject *cause) { __Pyx_PyThreadState_declare Py_XINCREF(type); if (!value || value == Py_None) value = NULL; else Py_INCREF(value); if (!tb || tb == Py_None) tb = NULL; else { Py_INCREF(tb); if (!PyTraceBack_Check(tb)) { PyErr_SetString(PyExc_TypeError, "raise: arg 3 must be a traceback or None"); goto raise_error; } } if (PyType_Check(type)) { #if CYTHON_COMPILING_IN_PYPY if (!value) { Py_INCREF(Py_None); value = Py_None; } #endif PyErr_NormalizeException(&type, &value, &tb); } else { if (value) { PyErr_SetString(PyExc_TypeError, "instance exception may not have a separate value"); goto raise_error; } value = type; type = (PyObject*) Py_TYPE(type); Py_INCREF(type); if (!PyType_IsSubtype((PyTypeObject *)type, (PyTypeObject *)PyExc_BaseException)) { PyErr_SetString(PyExc_TypeError, "raise: exception class must be a subclass of BaseException"); goto raise_error; } } __Pyx_PyThreadState_assign __Pyx_ErrRestore(type, value, tb); return; raise_error: Py_XDECREF(value); Py_XDECREF(type); Py_XDECREF(tb); return; } #else static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause) { PyObject* owned_instance = NULL; if (tb == Py_None) { tb = 0; } else if (tb && !PyTraceBack_Check(tb)) { PyErr_SetString(PyExc_TypeError, "raise: arg 3 must be a traceback or None"); goto bad; } if (value == Py_None) value = 0; if (PyExceptionInstance_Check(type)) { if (value) { PyErr_SetString(PyExc_TypeError, "instance exception may not have a separate 
value"); goto bad; } value = type; type = (PyObject*) Py_TYPE(value); } else if (PyExceptionClass_Check(type)) { PyObject *instance_class = NULL; if (value && PyExceptionInstance_Check(value)) { instance_class = (PyObject*) Py_TYPE(value); if (instance_class != type) { int is_subclass = PyObject_IsSubclass(instance_class, type); if (!is_subclass) { instance_class = NULL; } else if (unlikely(is_subclass == -1)) { goto bad; } else { type = instance_class; } } } if (!instance_class) { PyObject *args; if (!value) args = PyTuple_New(0); else if (PyTuple_Check(value)) { Py_INCREF(value); args = value; } else args = PyTuple_Pack(1, value); if (!args) goto bad; owned_instance = PyObject_Call(type, args, NULL); Py_DECREF(args); if (!owned_instance) goto bad; value = owned_instance; if (!PyExceptionInstance_Check(value)) { PyErr_Format(PyExc_TypeError, "calling %R should have returned an instance of " "BaseException, not %R", type, Py_TYPE(value)); goto bad; } } } else { PyErr_SetString(PyExc_TypeError, "raise: exception class must be a subclass of BaseException"); goto bad; } if (cause) { PyObject *fixed_cause; if (cause == Py_None) { fixed_cause = NULL; } else if (PyExceptionClass_Check(cause)) { fixed_cause = PyObject_CallObject(cause, NULL); if (fixed_cause == NULL) goto bad; } else if (PyExceptionInstance_Check(cause)) { fixed_cause = cause; Py_INCREF(fixed_cause); } else { PyErr_SetString(PyExc_TypeError, "exception causes must derive from " "BaseException"); goto bad; } PyException_SetCause(value, fixed_cause); } PyErr_SetObject(type, value); if (tb) { #if CYTHON_COMPILING_IN_PYPY PyObject *tmp_type, *tmp_value, *tmp_tb; PyErr_Fetch(&tmp_type, &tmp_value, &tmp_tb); Py_INCREF(tb); PyErr_Restore(tmp_type, tmp_value, tb); Py_XDECREF(tmp_tb); #else PyThreadState *tstate = __Pyx_PyThreadState_Current; PyObject* tmp_tb = tstate->curexc_traceback; if (tb != tmp_tb) { Py_INCREF(tb); tstate->curexc_traceback = tb; Py_XDECREF(tmp_tb); } #endif } bad: Py_XDECREF(owned_instance); return; } #endif /* IterFinish */ static CYTHON_INLINE int __Pyx_IterFinish(void) { #if CYTHON_FAST_THREAD_STATE PyThreadState *tstate = __Pyx_PyThreadState_Current; PyObject* exc_type = tstate->curexc_type; if (unlikely(exc_type)) { if (likely(__Pyx_PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) { PyObject *exc_value, *exc_tb; exc_value = tstate->curexc_value; exc_tb = tstate->curexc_traceback; tstate->curexc_type = 0; tstate->curexc_value = 0; tstate->curexc_traceback = 0; Py_DECREF(exc_type); Py_XDECREF(exc_value); Py_XDECREF(exc_tb); return 0; } else { return -1; } } return 0; #else if (unlikely(PyErr_Occurred())) { if (likely(PyErr_ExceptionMatches(PyExc_StopIteration))) { PyErr_Clear(); return 0; } else { return -1; } } return 0; #endif } /* PyObjectCallNoArg */ #if CYTHON_COMPILING_IN_CPYTHON static CYTHON_INLINE PyObject* __Pyx_PyObject_CallNoArg(PyObject *func) { #if CYTHON_FAST_PYCALL if (PyFunction_Check(func)) { return __Pyx_PyFunction_FastCall(func, NULL, 0); } #endif #ifdef __Pyx_CyFunction_USED if (likely(PyCFunction_Check(func) || __Pyx_CyFunction_Check(func))) #else if (likely(PyCFunction_Check(func))) #endif { if (likely(PyCFunction_GET_FLAGS(func) & METH_NOARGS)) { return __Pyx_PyObject_CallMethO(func, NULL); } } return __Pyx_PyObject_Call(func, __pyx_empty_tuple, NULL); } #endif /* PyObjectGetMethod */ static int __Pyx_PyObject_GetMethod(PyObject *obj, PyObject *name, PyObject **method) { PyObject *attr; #if CYTHON_UNPACK_METHODS && CYTHON_COMPILING_IN_CPYTHON && CYTHON_USE_PYTYPE_LOOKUP 
PyTypeObject *tp = Py_TYPE(obj); PyObject *descr; descrgetfunc f = NULL; PyObject **dictptr, *dict; int meth_found = 0; assert (*method == NULL); if (unlikely(tp->tp_getattro != PyObject_GenericGetAttr)) { attr = __Pyx_PyObject_GetAttrStr(obj, name); goto try_unpack; } if (unlikely(tp->tp_dict == NULL) && unlikely(PyType_Ready(tp) < 0)) { return 0; } descr = _PyType_Lookup(tp, name); if (likely(descr != NULL)) { Py_INCREF(descr); #if PY_MAJOR_VERSION >= 3 #ifdef __Pyx_CyFunction_USED if (likely(PyFunction_Check(descr) || (Py_TYPE(descr) == &PyMethodDescr_Type) || __Pyx_CyFunction_Check(descr))) #else if (likely(PyFunction_Check(descr) || (Py_TYPE(descr) == &PyMethodDescr_Type))) #endif #else #ifdef __Pyx_CyFunction_USED if (likely(PyFunction_Check(descr) || __Pyx_CyFunction_Check(descr))) #else if (likely(PyFunction_Check(descr))) #endif #endif { meth_found = 1; } else { f = Py_TYPE(descr)->tp_descr_get; if (f != NULL && PyDescr_IsData(descr)) { attr = f(descr, obj, (PyObject *)Py_TYPE(obj)); Py_DECREF(descr); goto try_unpack; } } } dictptr = _PyObject_GetDictPtr(obj); if (dictptr != NULL && (dict = *dictptr) != NULL) { Py_INCREF(dict); attr = __Pyx_PyDict_GetItemStr(dict, name); if (attr != NULL) { Py_INCREF(attr); Py_DECREF(dict); Py_XDECREF(descr); goto try_unpack; } Py_DECREF(dict); } if (meth_found) { *method = descr; return 1; } if (f != NULL) { attr = f(descr, obj, (PyObject *)Py_TYPE(obj)); Py_DECREF(descr); goto try_unpack; } if (descr != NULL) { *method = descr; return 0; } PyErr_Format(PyExc_AttributeError, #if PY_MAJOR_VERSION >= 3 "'%.50s' object has no attribute '%U'", tp->tp_name, name); #else "'%.50s' object has no attribute '%.400s'", tp->tp_name, PyString_AS_STRING(name)); #endif return 0; #else attr = __Pyx_PyObject_GetAttrStr(obj, name); goto try_unpack; #endif try_unpack: #if CYTHON_UNPACK_METHODS if (likely(attr) && PyMethod_Check(attr) && likely(PyMethod_GET_SELF(attr) == obj)) { PyObject *function = PyMethod_GET_FUNCTION(attr); Py_INCREF(function); Py_DECREF(attr); *method = function; return 1; } #endif *method = attr; return 0; } /* PyObjectCallMethod0 */ static PyObject* __Pyx_PyObject_CallMethod0(PyObject* obj, PyObject* method_name) { PyObject *method = NULL, *result = NULL; int is_method = __Pyx_PyObject_GetMethod(obj, method_name, &method); if (likely(is_method)) { result = __Pyx_PyObject_CallOneArg(method, obj); Py_DECREF(method); return result; } if (unlikely(!method)) goto bad; result = __Pyx_PyObject_CallNoArg(method); Py_DECREF(method); bad: return result; } /* RaiseNeedMoreValuesToUnpack */ static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index) { PyErr_Format(PyExc_ValueError, "need more than %" CYTHON_FORMAT_SSIZE_T "d value%.1s to unpack", index, (index == 1) ? 
"" : "s"); } /* RaiseTooManyValuesToUnpack */ static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected) { PyErr_Format(PyExc_ValueError, "too many values to unpack (expected %" CYTHON_FORMAT_SSIZE_T "d)", expected); } /* UnpackItemEndCheck */ static int __Pyx_IternextUnpackEndCheck(PyObject *retval, Py_ssize_t expected) { if (unlikely(retval)) { Py_DECREF(retval); __Pyx_RaiseTooManyValuesError(expected); return -1; } else { return __Pyx_IterFinish(); } return 0; } /* RaiseNoneIterError */ static CYTHON_INLINE void __Pyx_RaiseNoneNotIterableError(void) { PyErr_SetString(PyExc_TypeError, "'NoneType' object is not iterable"); } /* UnpackTupleError */ static void __Pyx_UnpackTupleError(PyObject *t, Py_ssize_t index) { if (t == Py_None) { __Pyx_RaiseNoneNotIterableError(); } else if (PyTuple_GET_SIZE(t) < index) { __Pyx_RaiseNeedMoreValuesError(PyTuple_GET_SIZE(t)); } else { __Pyx_RaiseTooManyValuesError(index); } } /* UnpackTuple2 */ static CYTHON_INLINE int __Pyx_unpack_tuple2_exact( PyObject* tuple, PyObject** pvalue1, PyObject** pvalue2, int decref_tuple) { PyObject *value1 = NULL, *value2 = NULL; #if CYTHON_COMPILING_IN_PYPY value1 = PySequence_ITEM(tuple, 0); if (unlikely(!value1)) goto bad; value2 = PySequence_ITEM(tuple, 1); if (unlikely(!value2)) goto bad; #else value1 = PyTuple_GET_ITEM(tuple, 0); Py_INCREF(value1); value2 = PyTuple_GET_ITEM(tuple, 1); Py_INCREF(value2); #endif if (decref_tuple) { Py_DECREF(tuple); } *pvalue1 = value1; *pvalue2 = value2; return 0; #if CYTHON_COMPILING_IN_PYPY bad: Py_XDECREF(value1); Py_XDECREF(value2); if (decref_tuple) { Py_XDECREF(tuple); } return -1; #endif } static int __Pyx_unpack_tuple2_generic(PyObject* tuple, PyObject** pvalue1, PyObject** pvalue2, int has_known_size, int decref_tuple) { Py_ssize_t index; PyObject *value1 = NULL, *value2 = NULL, *iter = NULL; iternextfunc iternext; iter = PyObject_GetIter(tuple); if (unlikely(!iter)) goto bad; if (decref_tuple) { Py_DECREF(tuple); tuple = NULL; } iternext = Py_TYPE(iter)->tp_iternext; value1 = iternext(iter); if (unlikely(!value1)) { index = 0; goto unpacking_failed; } value2 = iternext(iter); if (unlikely(!value2)) { index = 1; goto unpacking_failed; } if (!has_known_size && unlikely(__Pyx_IternextUnpackEndCheck(iternext(iter), 2))) goto bad; Py_DECREF(iter); *pvalue1 = value1; *pvalue2 = value2; return 0; unpacking_failed: if (!has_known_size && __Pyx_IterFinish() == 0) __Pyx_RaiseNeedMoreValuesError(index); bad: Py_XDECREF(iter); Py_XDECREF(value1); Py_XDECREF(value2); if (decref_tuple) { Py_XDECREF(tuple); } return -1; } /* dict_iter */ static CYTHON_INLINE PyObject* __Pyx_dict_iterator(PyObject* iterable, int is_dict, PyObject* method_name, Py_ssize_t* p_orig_length, int* p_source_is_dict) { is_dict = is_dict || likely(PyDict_CheckExact(iterable)); *p_source_is_dict = is_dict; if (is_dict) { #if !CYTHON_COMPILING_IN_PYPY *p_orig_length = PyDict_Size(iterable); Py_INCREF(iterable); return iterable; #elif PY_MAJOR_VERSION >= 3 static PyObject *py_items = NULL, *py_keys = NULL, *py_values = NULL; PyObject **pp = NULL; if (method_name) { const char *name = PyUnicode_AsUTF8(method_name); if (strcmp(name, "iteritems") == 0) pp = &py_items; else if (strcmp(name, "iterkeys") == 0) pp = &py_keys; else if (strcmp(name, "itervalues") == 0) pp = &py_values; if (pp) { if (!*pp) { *pp = PyUnicode_FromString(name + 4); if (!*pp) return NULL; } method_name = *pp; } } #endif } *p_orig_length = 0; if (method_name) { PyObject* iter; iterable = __Pyx_PyObject_CallMethod0(iterable, 
method_name); if (!iterable) return NULL; #if !CYTHON_COMPILING_IN_PYPY if (PyTuple_CheckExact(iterable) || PyList_CheckExact(iterable)) return iterable; #endif iter = PyObject_GetIter(iterable); Py_DECREF(iterable); return iter; } return PyObject_GetIter(iterable); } static CYTHON_INLINE int __Pyx_dict_iter_next( PyObject* iter_obj, CYTHON_NCP_UNUSED Py_ssize_t orig_length, CYTHON_NCP_UNUSED Py_ssize_t* ppos, PyObject** pkey, PyObject** pvalue, PyObject** pitem, int source_is_dict) { PyObject* next_item; #if !CYTHON_COMPILING_IN_PYPY if (source_is_dict) { PyObject *key, *value; if (unlikely(orig_length != PyDict_Size(iter_obj))) { PyErr_SetString(PyExc_RuntimeError, "dictionary changed size during iteration"); return -1; } if (unlikely(!PyDict_Next(iter_obj, ppos, &key, &value))) { return 0; } if (pitem) { PyObject* tuple = PyTuple_New(2); if (unlikely(!tuple)) { return -1; } Py_INCREF(key); Py_INCREF(value); PyTuple_SET_ITEM(tuple, 0, key); PyTuple_SET_ITEM(tuple, 1, value); *pitem = tuple; } else { if (pkey) { Py_INCREF(key); *pkey = key; } if (pvalue) { Py_INCREF(value); *pvalue = value; } } return 1; } else if (PyTuple_CheckExact(iter_obj)) { Py_ssize_t pos = *ppos; if (unlikely(pos >= PyTuple_GET_SIZE(iter_obj))) return 0; *ppos = pos + 1; next_item = PyTuple_GET_ITEM(iter_obj, pos); Py_INCREF(next_item); } else if (PyList_CheckExact(iter_obj)) { Py_ssize_t pos = *ppos; if (unlikely(pos >= PyList_GET_SIZE(iter_obj))) return 0; *ppos = pos + 1; next_item = PyList_GET_ITEM(iter_obj, pos); Py_INCREF(next_item); } else #endif { next_item = PyIter_Next(iter_obj); if (unlikely(!next_item)) { return __Pyx_IterFinish(); } } if (pitem) { *pitem = next_item; } else if (pkey && pvalue) { if (__Pyx_unpack_tuple2(next_item, pkey, pvalue, source_is_dict, source_is_dict, 1)) return -1; } else if (pkey) { *pkey = next_item; } else { *pvalue = next_item; } return 1; } /* PyObjectFormatAndDecref */ static CYTHON_INLINE PyObject* __Pyx_PyObject_FormatSimpleAndDecref(PyObject* s, PyObject* f) { if (unlikely(!s)) return NULL; if (likely(PyUnicode_CheckExact(s))) return s; #if PY_MAJOR_VERSION < 3 if (likely(PyString_CheckExact(s))) { PyObject *result = PyUnicode_FromEncodedObject(s, NULL, "strict"); Py_DECREF(s); return result; } #endif return __Pyx_PyObject_FormatAndDecref(s, f); } static CYTHON_INLINE PyObject* __Pyx_PyObject_FormatAndDecref(PyObject* s, PyObject* f) { PyObject *result = PyObject_Format(s, f); Py_DECREF(s); return result; } /* JoinPyUnicode */ static PyObject* __Pyx_PyUnicode_Join(PyObject* value_tuple, Py_ssize_t value_count, Py_ssize_t result_ulength, CYTHON_UNUSED Py_UCS4 max_char) { #if CYTHON_USE_UNICODE_INTERNALS && CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS PyObject *result_uval; int result_ukind; Py_ssize_t i, char_pos; void *result_udata; #if CYTHON_PEP393_ENABLED result_uval = PyUnicode_New(result_ulength, max_char); if (unlikely(!result_uval)) return NULL; result_ukind = (max_char <= 255) ? PyUnicode_1BYTE_KIND : (max_char <= 65535) ? 
PyUnicode_2BYTE_KIND : PyUnicode_4BYTE_KIND; result_udata = PyUnicode_DATA(result_uval); #else result_uval = PyUnicode_FromUnicode(NULL, result_ulength); if (unlikely(!result_uval)) return NULL; result_ukind = sizeof(Py_UNICODE); result_udata = PyUnicode_AS_UNICODE(result_uval); #endif char_pos = 0; for (i=0; i < value_count; i++) { int ukind; Py_ssize_t ulength; void *udata; PyObject *uval = PyTuple_GET_ITEM(value_tuple, i); if (unlikely(__Pyx_PyUnicode_READY(uval))) goto bad; ulength = __Pyx_PyUnicode_GET_LENGTH(uval); if (unlikely(!ulength)) continue; if (unlikely(char_pos + ulength < 0)) goto overflow; ukind = __Pyx_PyUnicode_KIND(uval); udata = __Pyx_PyUnicode_DATA(uval); if (!CYTHON_PEP393_ENABLED || ukind == result_ukind) { memcpy((char *)result_udata + char_pos * result_ukind, udata, (size_t) (ulength * result_ukind)); } else { #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030300F0 || defined(_PyUnicode_FastCopyCharacters) _PyUnicode_FastCopyCharacters(result_uval, char_pos, uval, 0, ulength); #else Py_ssize_t j; for (j=0; j < ulength; j++) { Py_UCS4 uchar = __Pyx_PyUnicode_READ(ukind, udata, j); __Pyx_PyUnicode_WRITE(result_ukind, result_udata, char_pos+j, uchar); } #endif } char_pos += ulength; } return result_uval; overflow: PyErr_SetString(PyExc_OverflowError, "join() result is too long for a Python string"); bad: Py_DECREF(result_uval); return NULL; #else result_ulength++; value_count++; return PyUnicode_Join(__pyx_empty_unicode, value_tuple); #endif } /* PyDictVersioning */ #if CYTHON_USE_DICT_VERSIONS && CYTHON_USE_TYPE_SLOTS static CYTHON_INLINE PY_UINT64_T __Pyx_get_tp_dict_version(PyObject *obj) { PyObject *dict = Py_TYPE(obj)->tp_dict; return likely(dict) ? __PYX_GET_DICT_VERSION(dict) : 0; } static CYTHON_INLINE PY_UINT64_T __Pyx_get_object_dict_version(PyObject *obj) { PyObject **dictptr = NULL; Py_ssize_t offset = Py_TYPE(obj)->tp_dictoffset; if (offset) { #if CYTHON_COMPILING_IN_CPYTHON dictptr = (likely(offset > 0)) ? (PyObject **) ((char *)obj + offset) : _PyObject_GetDictPtr(obj); #else dictptr = _PyObject_GetDictPtr(obj); #endif } return (dictptr && *dictptr) ? 
__PYX_GET_DICT_VERSION(*dictptr) : 0; } static CYTHON_INLINE int __Pyx_object_dict_version_matches(PyObject* obj, PY_UINT64_T tp_dict_version, PY_UINT64_T obj_dict_version) { PyObject *dict = Py_TYPE(obj)->tp_dict; if (unlikely(!dict) || unlikely(tp_dict_version != __PYX_GET_DICT_VERSION(dict))) return 0; return obj_dict_version == __Pyx_get_object_dict_version(obj); } #endif /* GetModuleGlobalName */ #if CYTHON_USE_DICT_VERSIONS static PyObject *__Pyx__GetModuleGlobalName(PyObject *name, PY_UINT64_T *dict_version, PyObject **dict_cached_value) #else static CYTHON_INLINE PyObject *__Pyx__GetModuleGlobalName(PyObject *name) #endif { PyObject *result; #if !CYTHON_AVOID_BORROWED_REFS #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030500A1 result = _PyDict_GetItem_KnownHash(__pyx_d, name, ((PyASCIIObject *) name)->hash); __PYX_UPDATE_DICT_CACHE(__pyx_d, result, *dict_cached_value, *dict_version) if (likely(result)) { return __Pyx_NewRef(result); } else if (unlikely(PyErr_Occurred())) { return NULL; } #else result = PyDict_GetItem(__pyx_d, name); __PYX_UPDATE_DICT_CACHE(__pyx_d, result, *dict_cached_value, *dict_version) if (likely(result)) { return __Pyx_NewRef(result); } #endif #else result = PyObject_GetItem(__pyx_d, name); __PYX_UPDATE_DICT_CACHE(__pyx_d, result, *dict_cached_value, *dict_version) if (likely(result)) { return __Pyx_NewRef(result); } PyErr_Clear(); #endif return __Pyx_GetBuiltinName(name); } /* PyErrExceptionMatches */ #if CYTHON_FAST_THREAD_STATE static int __Pyx_PyErr_ExceptionMatchesTuple(PyObject *exc_type, PyObject *tuple) { Py_ssize_t i, n; n = PyTuple_GET_SIZE(tuple); #if PY_MAJOR_VERSION >= 3 for (i=0; icurexc_type; if (exc_type == err) return 1; if (unlikely(!exc_type)) return 0; if (unlikely(PyTuple_Check(err))) return __Pyx_PyErr_ExceptionMatchesTuple(exc_type, err); return __Pyx_PyErr_GivenExceptionMatches(exc_type, err); } #endif /* GetAttr */ static CYTHON_INLINE PyObject *__Pyx_GetAttr(PyObject *o, PyObject *n) { #if CYTHON_USE_TYPE_SLOTS #if PY_MAJOR_VERSION >= 3 if (likely(PyUnicode_Check(n))) #else if (likely(PyString_Check(n))) #endif return __Pyx_PyObject_GetAttrStr(o, n); #endif return PyObject_GetAttr(o, n); } /* GetAttr3 */ static PyObject *__Pyx_GetAttr3Default(PyObject *d) { __Pyx_PyThreadState_declare __Pyx_PyThreadState_assign if (unlikely(!__Pyx_PyErr_ExceptionMatches(PyExc_AttributeError))) return NULL; __Pyx_PyErr_Clear(); Py_INCREF(d); return d; } static CYTHON_INLINE PyObject *__Pyx_GetAttr3(PyObject *o, PyObject *n, PyObject *d) { PyObject *r = __Pyx_GetAttr(o, n); return (likely(r)) ? 
r : __Pyx_GetAttr3Default(d); } /* None */ static CYTHON_INLINE void __Pyx_RaiseClosureNameError(const char *varname) { PyErr_Format(PyExc_NameError, "free variable '%s' referenced before assignment in enclosing scope", varname); } /* GetItemInt */ static PyObject *__Pyx_GetItemInt_Generic(PyObject *o, PyObject* j) { PyObject *r; if (!j) return NULL; r = PyObject_GetItem(o, j); Py_DECREF(j); return r; } static CYTHON_INLINE PyObject *__Pyx_GetItemInt_List_Fast(PyObject *o, Py_ssize_t i, CYTHON_NCP_UNUSED int wraparound, CYTHON_NCP_UNUSED int boundscheck) { #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS Py_ssize_t wrapped_i = i; if (wraparound & unlikely(i < 0)) { wrapped_i += PyList_GET_SIZE(o); } if ((!boundscheck) || likely(__Pyx_is_valid_index(wrapped_i, PyList_GET_SIZE(o)))) { PyObject *r = PyList_GET_ITEM(o, wrapped_i); Py_INCREF(r); return r; } return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i)); #else return PySequence_GetItem(o, i); #endif } static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Tuple_Fast(PyObject *o, Py_ssize_t i, CYTHON_NCP_UNUSED int wraparound, CYTHON_NCP_UNUSED int boundscheck) { #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS Py_ssize_t wrapped_i = i; if (wraparound & unlikely(i < 0)) { wrapped_i += PyTuple_GET_SIZE(o); } if ((!boundscheck) || likely(__Pyx_is_valid_index(wrapped_i, PyTuple_GET_SIZE(o)))) { PyObject *r = PyTuple_GET_ITEM(o, wrapped_i); Py_INCREF(r); return r; } return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i)); #else return PySequence_GetItem(o, i); #endif } static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Fast(PyObject *o, Py_ssize_t i, int is_list, CYTHON_NCP_UNUSED int wraparound, CYTHON_NCP_UNUSED int boundscheck) { #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS && CYTHON_USE_TYPE_SLOTS if (is_list || PyList_CheckExact(o)) { Py_ssize_t n = ((!wraparound) | likely(i >= 0)) ? i : i + PyList_GET_SIZE(o); if ((!boundscheck) || (likely(__Pyx_is_valid_index(n, PyList_GET_SIZE(o))))) { PyObject *r = PyList_GET_ITEM(o, n); Py_INCREF(r); return r; } } else if (PyTuple_CheckExact(o)) { Py_ssize_t n = ((!wraparound) | likely(i >= 0)) ? 
i : i + PyTuple_GET_SIZE(o); if ((!boundscheck) || likely(__Pyx_is_valid_index(n, PyTuple_GET_SIZE(o)))) { PyObject *r = PyTuple_GET_ITEM(o, n); Py_INCREF(r); return r; } } else { PySequenceMethods *m = Py_TYPE(o)->tp_as_sequence; if (likely(m && m->sq_item)) { if (wraparound && unlikely(i < 0) && likely(m->sq_length)) { Py_ssize_t l = m->sq_length(o); if (likely(l >= 0)) { i += l; } else { if (!PyErr_ExceptionMatches(PyExc_OverflowError)) return NULL; PyErr_Clear(); } } return m->sq_item(o, i); } } #else if (is_list || PySequence_Check(o)) { return PySequence_GetItem(o, i); } #endif return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i)); } /* ObjectGetItem */ #if CYTHON_USE_TYPE_SLOTS static PyObject *__Pyx_PyObject_GetIndex(PyObject *obj, PyObject* index) { PyObject *runerr; Py_ssize_t key_value; PySequenceMethods *m = Py_TYPE(obj)->tp_as_sequence; if (unlikely(!(m && m->sq_item))) { PyErr_Format(PyExc_TypeError, "'%.200s' object is not subscriptable", Py_TYPE(obj)->tp_name); return NULL; } key_value = __Pyx_PyIndex_AsSsize_t(index); if (likely(key_value != -1 || !(runerr = PyErr_Occurred()))) { return __Pyx_GetItemInt_Fast(obj, key_value, 0, 1, 1); } if (PyErr_GivenExceptionMatches(runerr, PyExc_OverflowError)) { PyErr_Clear(); PyErr_Format(PyExc_IndexError, "cannot fit '%.200s' into an index-sized integer", Py_TYPE(index)->tp_name); } return NULL; } static PyObject *__Pyx_PyObject_GetItem(PyObject *obj, PyObject* key) { PyMappingMethods *m = Py_TYPE(obj)->tp_as_mapping; if (likely(m && m->mp_subscript)) { return m->mp_subscript(obj, key); } return __Pyx_PyObject_GetIndex(obj, key); } #endif /* GetTopmostException */ #if CYTHON_USE_EXC_INFO_STACK static _PyErr_StackItem * __Pyx_PyErr_GetTopmostException(PyThreadState *tstate) { _PyErr_StackItem *exc_info = tstate->exc_info; while ((exc_info->exc_type == NULL || exc_info->exc_type == Py_None) && exc_info->previous_item != NULL) { exc_info = exc_info->previous_item; } return exc_info; } #endif /* SaveResetException */ #if CYTHON_FAST_THREAD_STATE static CYTHON_INLINE void __Pyx__ExceptionSave(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) { #if CYTHON_USE_EXC_INFO_STACK _PyErr_StackItem *exc_info = __Pyx_PyErr_GetTopmostException(tstate); *type = exc_info->exc_type; *value = exc_info->exc_value; *tb = exc_info->exc_traceback; #else *type = tstate->exc_type; *value = tstate->exc_value; *tb = tstate->exc_traceback; #endif Py_XINCREF(*type); Py_XINCREF(*value); Py_XINCREF(*tb); } static CYTHON_INLINE void __Pyx__ExceptionReset(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb) { PyObject *tmp_type, *tmp_value, *tmp_tb; #if CYTHON_USE_EXC_INFO_STACK _PyErr_StackItem *exc_info = tstate->exc_info; tmp_type = exc_info->exc_type; tmp_value = exc_info->exc_value; tmp_tb = exc_info->exc_traceback; exc_info->exc_type = type; exc_info->exc_value = value; exc_info->exc_traceback = tb; #else tmp_type = tstate->exc_type; tmp_value = tstate->exc_value; tmp_tb = tstate->exc_traceback; tstate->exc_type = type; tstate->exc_value = value; tstate->exc_traceback = tb; #endif Py_XDECREF(tmp_type); Py_XDECREF(tmp_value); Py_XDECREF(tmp_tb); } #endif /* GetException */ #if CYTHON_FAST_THREAD_STATE static int __Pyx__GetException(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) #else static int __Pyx_GetException(PyObject **type, PyObject **value, PyObject **tb) #endif { PyObject *local_type, *local_value, *local_tb; #if CYTHON_FAST_THREAD_STATE PyObject *tmp_type, *tmp_value, *tmp_tb; 
local_type = tstate->curexc_type; local_value = tstate->curexc_value; local_tb = tstate->curexc_traceback; tstate->curexc_type = 0; tstate->curexc_value = 0; tstate->curexc_traceback = 0; #else PyErr_Fetch(&local_type, &local_value, &local_tb); #endif PyErr_NormalizeException(&local_type, &local_value, &local_tb); #if CYTHON_FAST_THREAD_STATE if (unlikely(tstate->curexc_type)) #else if (unlikely(PyErr_Occurred())) #endif goto bad; #if PY_MAJOR_VERSION >= 3 if (local_tb) { if (unlikely(PyException_SetTraceback(local_value, local_tb) < 0)) goto bad; } #endif Py_XINCREF(local_tb); Py_XINCREF(local_type); Py_XINCREF(local_value); *type = local_type; *value = local_value; *tb = local_tb; #if CYTHON_FAST_THREAD_STATE #if CYTHON_USE_EXC_INFO_STACK { _PyErr_StackItem *exc_info = tstate->exc_info; tmp_type = exc_info->exc_type; tmp_value = exc_info->exc_value; tmp_tb = exc_info->exc_traceback; exc_info->exc_type = local_type; exc_info->exc_value = local_value; exc_info->exc_traceback = local_tb; } #else tmp_type = tstate->exc_type; tmp_value = tstate->exc_value; tmp_tb = tstate->exc_traceback; tstate->exc_type = local_type; tstate->exc_value = local_value; tstate->exc_traceback = local_tb; #endif Py_XDECREF(tmp_type); Py_XDECREF(tmp_value); Py_XDECREF(tmp_tb); #else PyErr_SetExcInfo(local_type, local_value, local_tb); #endif return 0; bad: *type = 0; *value = 0; *tb = 0; Py_XDECREF(local_type); Py_XDECREF(local_value); Py_XDECREF(local_tb); return -1; } /* FetchCommonType */ static PyTypeObject* __Pyx_FetchCommonType(PyTypeObject* type) { PyObject* fake_module; PyTypeObject* cached_type = NULL; fake_module = PyImport_AddModule((char*) "_cython_" CYTHON_ABI); if (!fake_module) return NULL; Py_INCREF(fake_module); cached_type = (PyTypeObject*) PyObject_GetAttrString(fake_module, type->tp_name); if (cached_type) { if (!PyType_Check((PyObject*)cached_type)) { PyErr_Format(PyExc_TypeError, "Shared Cython type %.200s is not a type object", type->tp_name); goto bad; } if (cached_type->tp_basicsize != type->tp_basicsize) { PyErr_Format(PyExc_TypeError, "Shared Cython type %.200s has the wrong size, try recompiling", type->tp_name); goto bad; } } else { if (!PyErr_ExceptionMatches(PyExc_AttributeError)) goto bad; PyErr_Clear(); if (PyType_Ready(type) < 0) goto bad; if (PyObject_SetAttrString(fake_module, type->tp_name, (PyObject*) type) < 0) goto bad; Py_INCREF(type); cached_type = type; } done: Py_DECREF(fake_module); return cached_type; bad: Py_XDECREF(cached_type); cached_type = NULL; goto done; } /* CythonFunctionShared */ #include static PyObject * __Pyx_CyFunction_get_doc(__pyx_CyFunctionObject *op, CYTHON_UNUSED void *closure) { if (unlikely(op->func_doc == NULL)) { if (op->func.m_ml->ml_doc) { #if PY_MAJOR_VERSION >= 3 op->func_doc = PyUnicode_FromString(op->func.m_ml->ml_doc); #else op->func_doc = PyString_FromString(op->func.m_ml->ml_doc); #endif if (unlikely(op->func_doc == NULL)) return NULL; } else { Py_INCREF(Py_None); return Py_None; } } Py_INCREF(op->func_doc); return op->func_doc; } static int __Pyx_CyFunction_set_doc(__pyx_CyFunctionObject *op, PyObject *value, CYTHON_UNUSED void *context) { PyObject *tmp = op->func_doc; if (value == NULL) { value = Py_None; } Py_INCREF(value); op->func_doc = value; Py_XDECREF(tmp); return 0; } static PyObject * __Pyx_CyFunction_get_name(__pyx_CyFunctionObject *op, CYTHON_UNUSED void *context) { if (unlikely(op->func_name == NULL)) { #if PY_MAJOR_VERSION >= 3 op->func_name = PyUnicode_InternFromString(op->func.m_ml->ml_name); #else op->func_name = 
PyString_InternFromString(op->func.m_ml->ml_name); #endif if (unlikely(op->func_name == NULL)) return NULL; } Py_INCREF(op->func_name); return op->func_name; } static int __Pyx_CyFunction_set_name(__pyx_CyFunctionObject *op, PyObject *value, CYTHON_UNUSED void *context) { PyObject *tmp; #if PY_MAJOR_VERSION >= 3 if (unlikely(value == NULL || !PyUnicode_Check(value))) #else if (unlikely(value == NULL || !PyString_Check(value))) #endif { PyErr_SetString(PyExc_TypeError, "__name__ must be set to a string object"); return -1; } tmp = op->func_name; Py_INCREF(value); op->func_name = value; Py_XDECREF(tmp); return 0; } static PyObject * __Pyx_CyFunction_get_qualname(__pyx_CyFunctionObject *op, CYTHON_UNUSED void *context) { Py_INCREF(op->func_qualname); return op->func_qualname; } static int __Pyx_CyFunction_set_qualname(__pyx_CyFunctionObject *op, PyObject *value, CYTHON_UNUSED void *context) { PyObject *tmp; #if PY_MAJOR_VERSION >= 3 if (unlikely(value == NULL || !PyUnicode_Check(value))) #else if (unlikely(value == NULL || !PyString_Check(value))) #endif { PyErr_SetString(PyExc_TypeError, "__qualname__ must be set to a string object"); return -1; } tmp = op->func_qualname; Py_INCREF(value); op->func_qualname = value; Py_XDECREF(tmp); return 0; } static PyObject * __Pyx_CyFunction_get_self(__pyx_CyFunctionObject *m, CYTHON_UNUSED void *closure) { PyObject *self; self = m->func_closure; if (self == NULL) self = Py_None; Py_INCREF(self); return self; } static PyObject * __Pyx_CyFunction_get_dict(__pyx_CyFunctionObject *op, CYTHON_UNUSED void *context) { if (unlikely(op->func_dict == NULL)) { op->func_dict = PyDict_New(); if (unlikely(op->func_dict == NULL)) return NULL; } Py_INCREF(op->func_dict); return op->func_dict; } static int __Pyx_CyFunction_set_dict(__pyx_CyFunctionObject *op, PyObject *value, CYTHON_UNUSED void *context) { PyObject *tmp; if (unlikely(value == NULL)) { PyErr_SetString(PyExc_TypeError, "function's dictionary may not be deleted"); return -1; } if (unlikely(!PyDict_Check(value))) { PyErr_SetString(PyExc_TypeError, "setting function's dictionary to a non-dict"); return -1; } tmp = op->func_dict; Py_INCREF(value); op->func_dict = value; Py_XDECREF(tmp); return 0; } static PyObject * __Pyx_CyFunction_get_globals(__pyx_CyFunctionObject *op, CYTHON_UNUSED void *context) { Py_INCREF(op->func_globals); return op->func_globals; } static PyObject * __Pyx_CyFunction_get_closure(CYTHON_UNUSED __pyx_CyFunctionObject *op, CYTHON_UNUSED void *context) { Py_INCREF(Py_None); return Py_None; } static PyObject * __Pyx_CyFunction_get_code(__pyx_CyFunctionObject *op, CYTHON_UNUSED void *context) { PyObject* result = (op->func_code) ? 
op->func_code : Py_None; Py_INCREF(result); return result; } static int __Pyx_CyFunction_init_defaults(__pyx_CyFunctionObject *op) { int result = 0; PyObject *res = op->defaults_getter((PyObject *) op); if (unlikely(!res)) return -1; #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS op->defaults_tuple = PyTuple_GET_ITEM(res, 0); Py_INCREF(op->defaults_tuple); op->defaults_kwdict = PyTuple_GET_ITEM(res, 1); Py_INCREF(op->defaults_kwdict); #else op->defaults_tuple = PySequence_ITEM(res, 0); if (unlikely(!op->defaults_tuple)) result = -1; else { op->defaults_kwdict = PySequence_ITEM(res, 1); if (unlikely(!op->defaults_kwdict)) result = -1; } #endif Py_DECREF(res); return result; } static int __Pyx_CyFunction_set_defaults(__pyx_CyFunctionObject *op, PyObject* value, CYTHON_UNUSED void *context) { PyObject* tmp; if (!value) { value = Py_None; } else if (value != Py_None && !PyTuple_Check(value)) { PyErr_SetString(PyExc_TypeError, "__defaults__ must be set to a tuple object"); return -1; } Py_INCREF(value); tmp = op->defaults_tuple; op->defaults_tuple = value; Py_XDECREF(tmp); return 0; } static PyObject * __Pyx_CyFunction_get_defaults(__pyx_CyFunctionObject *op, CYTHON_UNUSED void *context) { PyObject* result = op->defaults_tuple; if (unlikely(!result)) { if (op->defaults_getter) { if (__Pyx_CyFunction_init_defaults(op) < 0) return NULL; result = op->defaults_tuple; } else { result = Py_None; } } Py_INCREF(result); return result; } static int __Pyx_CyFunction_set_kwdefaults(__pyx_CyFunctionObject *op, PyObject* value, CYTHON_UNUSED void *context) { PyObject* tmp; if (!value) { value = Py_None; } else if (value != Py_None && !PyDict_Check(value)) { PyErr_SetString(PyExc_TypeError, "__kwdefaults__ must be set to a dict object"); return -1; } Py_INCREF(value); tmp = op->defaults_kwdict; op->defaults_kwdict = value; Py_XDECREF(tmp); return 0; } static PyObject * __Pyx_CyFunction_get_kwdefaults(__pyx_CyFunctionObject *op, CYTHON_UNUSED void *context) { PyObject* result = op->defaults_kwdict; if (unlikely(!result)) { if (op->defaults_getter) { if (__Pyx_CyFunction_init_defaults(op) < 0) return NULL; result = op->defaults_kwdict; } else { result = Py_None; } } Py_INCREF(result); return result; } static int __Pyx_CyFunction_set_annotations(__pyx_CyFunctionObject *op, PyObject* value, CYTHON_UNUSED void *context) { PyObject* tmp; if (!value || value == Py_None) { value = NULL; } else if (!PyDict_Check(value)) { PyErr_SetString(PyExc_TypeError, "__annotations__ must be set to a dict object"); return -1; } Py_XINCREF(value); tmp = op->func_annotations; op->func_annotations = value; Py_XDECREF(tmp); return 0; } static PyObject * __Pyx_CyFunction_get_annotations(__pyx_CyFunctionObject *op, CYTHON_UNUSED void *context) { PyObject* result = op->func_annotations; if (unlikely(!result)) { result = PyDict_New(); if (unlikely(!result)) return NULL; op->func_annotations = result; } Py_INCREF(result); return result; } static PyGetSetDef __pyx_CyFunction_getsets[] = { {(char *) "func_doc", (getter)__Pyx_CyFunction_get_doc, (setter)__Pyx_CyFunction_set_doc, 0, 0}, {(char *) "__doc__", (getter)__Pyx_CyFunction_get_doc, (setter)__Pyx_CyFunction_set_doc, 0, 0}, {(char *) "func_name", (getter)__Pyx_CyFunction_get_name, (setter)__Pyx_CyFunction_set_name, 0, 0}, {(char *) "__name__", (getter)__Pyx_CyFunction_get_name, (setter)__Pyx_CyFunction_set_name, 0, 0}, {(char *) "__qualname__", (getter)__Pyx_CyFunction_get_qualname, (setter)__Pyx_CyFunction_set_qualname, 0, 0}, {(char *) "__self__", 
(getter)__Pyx_CyFunction_get_self, 0, 0, 0}, {(char *) "func_dict", (getter)__Pyx_CyFunction_get_dict, (setter)__Pyx_CyFunction_set_dict, 0, 0}, {(char *) "__dict__", (getter)__Pyx_CyFunction_get_dict, (setter)__Pyx_CyFunction_set_dict, 0, 0}, {(char *) "func_globals", (getter)__Pyx_CyFunction_get_globals, 0, 0, 0}, {(char *) "__globals__", (getter)__Pyx_CyFunction_get_globals, 0, 0, 0}, {(char *) "func_closure", (getter)__Pyx_CyFunction_get_closure, 0, 0, 0}, {(char *) "__closure__", (getter)__Pyx_CyFunction_get_closure, 0, 0, 0}, {(char *) "func_code", (getter)__Pyx_CyFunction_get_code, 0, 0, 0}, {(char *) "__code__", (getter)__Pyx_CyFunction_get_code, 0, 0, 0}, {(char *) "func_defaults", (getter)__Pyx_CyFunction_get_defaults, (setter)__Pyx_CyFunction_set_defaults, 0, 0}, {(char *) "__defaults__", (getter)__Pyx_CyFunction_get_defaults, (setter)__Pyx_CyFunction_set_defaults, 0, 0}, {(char *) "__kwdefaults__", (getter)__Pyx_CyFunction_get_kwdefaults, (setter)__Pyx_CyFunction_set_kwdefaults, 0, 0}, {(char *) "__annotations__", (getter)__Pyx_CyFunction_get_annotations, (setter)__Pyx_CyFunction_set_annotations, 0, 0}, {0, 0, 0, 0, 0} }; static PyMemberDef __pyx_CyFunction_members[] = { {(char *) "__module__", T_OBJECT, offsetof(PyCFunctionObject, m_module), PY_WRITE_RESTRICTED, 0}, {0, 0, 0, 0, 0} }; static PyObject * __Pyx_CyFunction_reduce(__pyx_CyFunctionObject *m, CYTHON_UNUSED PyObject *args) { #if PY_MAJOR_VERSION >= 3 return PyUnicode_FromString(m->func.m_ml->ml_name); #else return PyString_FromString(m->func.m_ml->ml_name); #endif } static PyMethodDef __pyx_CyFunction_methods[] = { {"__reduce__", (PyCFunction)__Pyx_CyFunction_reduce, METH_VARARGS, 0}, {0, 0, 0, 0} }; #if PY_VERSION_HEX < 0x030500A0 #define __Pyx_CyFunction_weakreflist(cyfunc) ((cyfunc)->func_weakreflist) #else #define __Pyx_CyFunction_weakreflist(cyfunc) ((cyfunc)->func.m_weakreflist) #endif static PyObject *__Pyx_CyFunction_Init(__pyx_CyFunctionObject *op, PyMethodDef *ml, int flags, PyObject* qualname, PyObject *closure, PyObject *module, PyObject* globals, PyObject* code) { if (unlikely(op == NULL)) return NULL; op->flags = flags; __Pyx_CyFunction_weakreflist(op) = NULL; op->func.m_ml = ml; op->func.m_self = (PyObject *) op; Py_XINCREF(closure); op->func_closure = closure; Py_XINCREF(module); op->func.m_module = module; op->func_dict = NULL; op->func_name = NULL; Py_INCREF(qualname); op->func_qualname = qualname; op->func_doc = NULL; op->func_classobj = NULL; op->func_globals = globals; Py_INCREF(op->func_globals); Py_XINCREF(code); op->func_code = code; op->defaults_pyobjects = 0; op->defaults_size = 0; op->defaults = NULL; op->defaults_tuple = NULL; op->defaults_kwdict = NULL; op->defaults_getter = NULL; op->func_annotations = NULL; return (PyObject *) op; } static int __Pyx_CyFunction_clear(__pyx_CyFunctionObject *m) { Py_CLEAR(m->func_closure); Py_CLEAR(m->func.m_module); Py_CLEAR(m->func_dict); Py_CLEAR(m->func_name); Py_CLEAR(m->func_qualname); Py_CLEAR(m->func_doc); Py_CLEAR(m->func_globals); Py_CLEAR(m->func_code); Py_CLEAR(m->func_classobj); Py_CLEAR(m->defaults_tuple); Py_CLEAR(m->defaults_kwdict); Py_CLEAR(m->func_annotations); if (m->defaults) { PyObject **pydefaults = __Pyx_CyFunction_Defaults(PyObject *, m); int i; for (i = 0; i < m->defaults_pyobjects; i++) Py_XDECREF(pydefaults[i]); PyObject_Free(m->defaults); m->defaults = NULL; } return 0; } static void __Pyx__CyFunction_dealloc(__pyx_CyFunctionObject *m) { if (__Pyx_CyFunction_weakreflist(m) != NULL) PyObject_ClearWeakRefs((PyObject *) m); 
__Pyx_CyFunction_clear(m); PyObject_GC_Del(m); } static void __Pyx_CyFunction_dealloc(__pyx_CyFunctionObject *m) { PyObject_GC_UnTrack(m); __Pyx__CyFunction_dealloc(m); } static int __Pyx_CyFunction_traverse(__pyx_CyFunctionObject *m, visitproc visit, void *arg) { Py_VISIT(m->func_closure); Py_VISIT(m->func.m_module); Py_VISIT(m->func_dict); Py_VISIT(m->func_name); Py_VISIT(m->func_qualname); Py_VISIT(m->func_doc); Py_VISIT(m->func_globals); Py_VISIT(m->func_code); Py_VISIT(m->func_classobj); Py_VISIT(m->defaults_tuple); Py_VISIT(m->defaults_kwdict); if (m->defaults) { PyObject **pydefaults = __Pyx_CyFunction_Defaults(PyObject *, m); int i; for (i = 0; i < m->defaults_pyobjects; i++) Py_VISIT(pydefaults[i]); } return 0; } static PyObject *__Pyx_CyFunction_descr_get(PyObject *func, PyObject *obj, PyObject *type) { #if PY_MAJOR_VERSION < 3 __pyx_CyFunctionObject *m = (__pyx_CyFunctionObject *) func; if (m->flags & __Pyx_CYFUNCTION_STATICMETHOD) { Py_INCREF(func); return func; } if (m->flags & __Pyx_CYFUNCTION_CLASSMETHOD) { if (type == NULL) type = (PyObject *)(Py_TYPE(obj)); return __Pyx_PyMethod_New(func, type, (PyObject *)(Py_TYPE(type))); } if (obj == Py_None) obj = NULL; #endif return __Pyx_PyMethod_New(func, obj, type); } static PyObject* __Pyx_CyFunction_repr(__pyx_CyFunctionObject *op) { #if PY_MAJOR_VERSION >= 3 return PyUnicode_FromFormat("", op->func_qualname, (void *)op); #else return PyString_FromFormat("", PyString_AsString(op->func_qualname), (void *)op); #endif } static PyObject * __Pyx_CyFunction_CallMethod(PyObject *func, PyObject *self, PyObject *arg, PyObject *kw) { PyCFunctionObject* f = (PyCFunctionObject*)func; PyCFunction meth = f->m_ml->ml_meth; Py_ssize_t size; switch (f->m_ml->ml_flags & (METH_VARARGS | METH_KEYWORDS | METH_NOARGS | METH_O)) { case METH_VARARGS: if (likely(kw == NULL || PyDict_Size(kw) == 0)) return (*meth)(self, arg); break; case METH_VARARGS | METH_KEYWORDS: return (*(PyCFunctionWithKeywords)(void*)meth)(self, arg, kw); case METH_NOARGS: if (likely(kw == NULL || PyDict_Size(kw) == 0)) { size = PyTuple_GET_SIZE(arg); if (likely(size == 0)) return (*meth)(self, NULL); PyErr_Format(PyExc_TypeError, "%.200s() takes no arguments (%" CYTHON_FORMAT_SSIZE_T "d given)", f->m_ml->ml_name, size); return NULL; } break; case METH_O: if (likely(kw == NULL || PyDict_Size(kw) == 0)) { size = PyTuple_GET_SIZE(arg); if (likely(size == 1)) { PyObject *result, *arg0; #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS arg0 = PyTuple_GET_ITEM(arg, 0); #else arg0 = PySequence_ITEM(arg, 0); if (unlikely(!arg0)) return NULL; #endif result = (*meth)(self, arg0); #if !(CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS) Py_DECREF(arg0); #endif return result; } PyErr_Format(PyExc_TypeError, "%.200s() takes exactly one argument (%" CYTHON_FORMAT_SSIZE_T "d given)", f->m_ml->ml_name, size); return NULL; } break; default: PyErr_SetString(PyExc_SystemError, "Bad call flags in " "__Pyx_CyFunction_Call. 
METH_OLDARGS is no " "longer supported!"); return NULL; } PyErr_Format(PyExc_TypeError, "%.200s() takes no keyword arguments", f->m_ml->ml_name); return NULL; } static CYTHON_INLINE PyObject *__Pyx_CyFunction_Call(PyObject *func, PyObject *arg, PyObject *kw) { return __Pyx_CyFunction_CallMethod(func, ((PyCFunctionObject*)func)->m_self, arg, kw); } static PyObject *__Pyx_CyFunction_CallAsMethod(PyObject *func, PyObject *args, PyObject *kw) { PyObject *result; __pyx_CyFunctionObject *cyfunc = (__pyx_CyFunctionObject *) func; if ((cyfunc->flags & __Pyx_CYFUNCTION_CCLASS) && !(cyfunc->flags & __Pyx_CYFUNCTION_STATICMETHOD)) { Py_ssize_t argc; PyObject *new_args; PyObject *self; argc = PyTuple_GET_SIZE(args); new_args = PyTuple_GetSlice(args, 1, argc); if (unlikely(!new_args)) return NULL; self = PyTuple_GetItem(args, 0); if (unlikely(!self)) { Py_DECREF(new_args); return NULL; } result = __Pyx_CyFunction_CallMethod(func, self, new_args, kw); Py_DECREF(new_args); } else { result = __Pyx_CyFunction_Call(func, args, kw); } return result; } static PyTypeObject __pyx_CyFunctionType_type = { PyVarObject_HEAD_INIT(0, 0) "cython_function_or_method", sizeof(__pyx_CyFunctionObject), 0, (destructor) __Pyx_CyFunction_dealloc, 0, 0, 0, #if PY_MAJOR_VERSION < 3 0, #else 0, #endif (reprfunc) __Pyx_CyFunction_repr, 0, 0, 0, 0, __Pyx_CyFunction_CallAsMethod, 0, 0, 0, 0, Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_GC, 0, (traverseproc) __Pyx_CyFunction_traverse, (inquiry) __Pyx_CyFunction_clear, 0, #if PY_VERSION_HEX < 0x030500A0 offsetof(__pyx_CyFunctionObject, func_weakreflist), #else offsetof(PyCFunctionObject, m_weakreflist), #endif 0, 0, __pyx_CyFunction_methods, __pyx_CyFunction_members, __pyx_CyFunction_getsets, 0, 0, __Pyx_CyFunction_descr_get, 0, offsetof(__pyx_CyFunctionObject, func_dict), 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, #if PY_VERSION_HEX >= 0x030400a1 0, #endif #if PY_VERSION_HEX >= 0x030800b1 0, #endif #if PY_VERSION_HEX >= 0x030800b4 && PY_VERSION_HEX < 0x03090000 0, #endif }; static int __pyx_CyFunction_init(void) { __pyx_CyFunctionType = __Pyx_FetchCommonType(&__pyx_CyFunctionType_type); if (unlikely(__pyx_CyFunctionType == NULL)) { return -1; } return 0; } static CYTHON_INLINE void *__Pyx_CyFunction_InitDefaults(PyObject *func, size_t size, int pyobjects) { __pyx_CyFunctionObject *m = (__pyx_CyFunctionObject *) func; m->defaults = PyObject_Malloc(size); if (unlikely(!m->defaults)) return PyErr_NoMemory(); memset(m->defaults, 0, size); m->defaults_pyobjects = pyobjects; m->defaults_size = size; return m->defaults; } static CYTHON_INLINE void __Pyx_CyFunction_SetDefaultsTuple(PyObject *func, PyObject *tuple) { __pyx_CyFunctionObject *m = (__pyx_CyFunctionObject *) func; m->defaults_tuple = tuple; Py_INCREF(tuple); } static CYTHON_INLINE void __Pyx_CyFunction_SetDefaultsKwDict(PyObject *func, PyObject *dict) { __pyx_CyFunctionObject *m = (__pyx_CyFunctionObject *) func; m->defaults_kwdict = dict; Py_INCREF(dict); } static CYTHON_INLINE void __Pyx_CyFunction_SetAnnotationsDict(PyObject *func, PyObject *dict) { __pyx_CyFunctionObject *m = (__pyx_CyFunctionObject *) func; m->func_annotations = dict; Py_INCREF(dict); } /* CythonFunction */ static PyObject *__Pyx_CyFunction_New(PyMethodDef *ml, int flags, PyObject* qualname, PyObject *closure, PyObject *module, PyObject* globals, PyObject* code) { PyObject *op = __Pyx_CyFunction_Init( PyObject_GC_New(__pyx_CyFunctionObject, __pyx_CyFunctionType), ml, flags, qualname, closure, module, globals, code ); if (likely(op)) { PyObject_GC_Track(op); } return op; } 
/* PyObject_GenericGetAttrNoDict */ #if CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP && PY_VERSION_HEX < 0x03070000 static PyObject *__Pyx_RaiseGenericGetAttributeError(PyTypeObject *tp, PyObject *attr_name) { PyErr_Format(PyExc_AttributeError, #if PY_MAJOR_VERSION >= 3 "'%.50s' object has no attribute '%U'", tp->tp_name, attr_name); #else "'%.50s' object has no attribute '%.400s'", tp->tp_name, PyString_AS_STRING(attr_name)); #endif return NULL; } static CYTHON_INLINE PyObject* __Pyx_PyObject_GenericGetAttrNoDict(PyObject* obj, PyObject* attr_name) { PyObject *descr; PyTypeObject *tp = Py_TYPE(obj); if (unlikely(!PyString_Check(attr_name))) { return PyObject_GenericGetAttr(obj, attr_name); } assert(!tp->tp_dictoffset); descr = _PyType_Lookup(tp, attr_name); if (unlikely(!descr)) { return __Pyx_RaiseGenericGetAttributeError(tp, attr_name); } Py_INCREF(descr); #if PY_MAJOR_VERSION < 3 if (likely(PyType_HasFeature(Py_TYPE(descr), Py_TPFLAGS_HAVE_CLASS))) #endif { descrgetfunc f = Py_TYPE(descr)->tp_descr_get; if (unlikely(f)) { PyObject *res = f(descr, obj, (PyObject *)tp); Py_DECREF(descr); return res; } } return descr; } #endif /* Import */ static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, int level) { PyObject *empty_list = 0; PyObject *module = 0; PyObject *global_dict = 0; PyObject *empty_dict = 0; PyObject *list; #if PY_MAJOR_VERSION < 3 PyObject *py_import; py_import = __Pyx_PyObject_GetAttrStr(__pyx_b, __pyx_n_s_import); if (!py_import) goto bad; #endif if (from_list) list = from_list; else { empty_list = PyList_New(0); if (!empty_list) goto bad; list = empty_list; } global_dict = PyModule_GetDict(__pyx_m); if (!global_dict) goto bad; empty_dict = PyDict_New(); if (!empty_dict) goto bad; { #if PY_MAJOR_VERSION >= 3 if (level == -1) { if ((1) && (strchr(__Pyx_MODULE_NAME, '.'))) { module = PyImport_ImportModuleLevelObject( name, global_dict, empty_dict, list, 1); if (!module) { if (!PyErr_ExceptionMatches(PyExc_ImportError)) goto bad; PyErr_Clear(); } } level = 0; } #endif if (!module) { #if PY_MAJOR_VERSION < 3 PyObject *py_level = PyInt_FromLong(level); if (!py_level) goto bad; module = PyObject_CallFunctionObjArgs(py_import, name, global_dict, empty_dict, list, py_level, (PyObject *)NULL); Py_DECREF(py_level); #else module = PyImport_ImportModuleLevelObject( name, global_dict, empty_dict, list, level); #endif } } bad: #if PY_MAJOR_VERSION < 3 Py_XDECREF(py_import); #endif Py_XDECREF(empty_list); Py_XDECREF(empty_dict); return module; } /* ImportFrom */ static PyObject* __Pyx_ImportFrom(PyObject* module, PyObject* name) { PyObject* value = __Pyx_PyObject_GetAttrStr(module, name); if (unlikely(!value) && PyErr_ExceptionMatches(PyExc_AttributeError)) { PyErr_Format(PyExc_ImportError, #if PY_MAJOR_VERSION < 3 "cannot import name %.230s", PyString_AS_STRING(name)); #else "cannot import name %S", name); #endif } return value; } /* CalculateMetaclass */ static PyObject *__Pyx_CalculateMetaclass(PyTypeObject *metaclass, PyObject *bases) { Py_ssize_t i, nbases = PyTuple_GET_SIZE(bases); for (i=0; i < nbases; i++) { PyTypeObject *tmptype; PyObject *tmp = PyTuple_GET_ITEM(bases, i); tmptype = Py_TYPE(tmp); #if PY_MAJOR_VERSION < 3 if (tmptype == &PyClass_Type) continue; #endif if (!metaclass) { metaclass = tmptype; continue; } if (PyType_IsSubtype(metaclass, tmptype)) continue; if (PyType_IsSubtype(tmptype, metaclass)) { metaclass = tmptype; continue; } PyErr_SetString(PyExc_TypeError, "metaclass conflict: " "the metaclass of a derived class " "must be a (non-strict) 
subclass " "of the metaclasses of all its bases"); return NULL; } if (!metaclass) { #if PY_MAJOR_VERSION < 3 metaclass = &PyClass_Type; #else metaclass = &PyType_Type; #endif } Py_INCREF((PyObject*) metaclass); return (PyObject*) metaclass; } /* Py3ClassCreate */ static PyObject *__Pyx_Py3MetaclassPrepare(PyObject *metaclass, PyObject *bases, PyObject *name, PyObject *qualname, PyObject *mkw, PyObject *modname, PyObject *doc) { PyObject *ns; if (metaclass) { PyObject *prep = __Pyx_PyObject_GetAttrStr(metaclass, __pyx_n_s_prepare); if (prep) { PyObject *pargs = PyTuple_Pack(2, name, bases); if (unlikely(!pargs)) { Py_DECREF(prep); return NULL; } ns = PyObject_Call(prep, pargs, mkw); Py_DECREF(prep); Py_DECREF(pargs); } else { if (unlikely(!PyErr_ExceptionMatches(PyExc_AttributeError))) return NULL; PyErr_Clear(); ns = PyDict_New(); } } else { ns = PyDict_New(); } if (unlikely(!ns)) return NULL; if (unlikely(PyObject_SetItem(ns, __pyx_n_s_module, modname) < 0)) goto bad; if (unlikely(PyObject_SetItem(ns, __pyx_n_s_qualname, qualname) < 0)) goto bad; if (unlikely(doc && PyObject_SetItem(ns, __pyx_n_s_doc_2, doc) < 0)) goto bad; return ns; bad: Py_DECREF(ns); return NULL; } static PyObject *__Pyx_Py3ClassCreate(PyObject *metaclass, PyObject *name, PyObject *bases, PyObject *dict, PyObject *mkw, int calculate_metaclass, int allow_py2_metaclass) { PyObject *result, *margs; PyObject *owned_metaclass = NULL; if (allow_py2_metaclass) { owned_metaclass = PyObject_GetItem(dict, __pyx_n_s_metaclass); if (owned_metaclass) { metaclass = owned_metaclass; } else if (likely(PyErr_ExceptionMatches(PyExc_KeyError))) { PyErr_Clear(); } else { return NULL; } } if (calculate_metaclass && (!metaclass || PyType_Check(metaclass))) { metaclass = __Pyx_CalculateMetaclass((PyTypeObject*) metaclass, bases); Py_XDECREF(owned_metaclass); if (unlikely(!metaclass)) return NULL; owned_metaclass = metaclass; } margs = PyTuple_Pack(3, name, bases, dict); if (unlikely(!margs)) { result = NULL; } else { result = PyObject_Call(metaclass, margs, mkw); Py_DECREF(margs); } Py_XDECREF(owned_metaclass); return result; } /* CLineInTraceback */ #ifndef CYTHON_CLINE_IN_TRACEBACK static int __Pyx_CLineForTraceback(CYTHON_NCP_UNUSED PyThreadState *tstate, int c_line) { PyObject *use_cline; PyObject *ptype, *pvalue, *ptraceback; #if CYTHON_COMPILING_IN_CPYTHON PyObject **cython_runtime_dict; #endif if (unlikely(!__pyx_cython_runtime)) { return c_line; } __Pyx_ErrFetchInState(tstate, &ptype, &pvalue, &ptraceback); #if CYTHON_COMPILING_IN_CPYTHON cython_runtime_dict = _PyObject_GetDictPtr(__pyx_cython_runtime); if (likely(cython_runtime_dict)) { __PYX_PY_DICT_LOOKUP_IF_MODIFIED( use_cline, *cython_runtime_dict, __Pyx_PyDict_GetItemStr(*cython_runtime_dict, __pyx_n_s_cline_in_traceback)) } else #endif { PyObject *use_cline_obj = __Pyx_PyObject_GetAttrStr(__pyx_cython_runtime, __pyx_n_s_cline_in_traceback); if (use_cline_obj) { use_cline = PyObject_Not(use_cline_obj) ? 
Py_False : Py_True; Py_DECREF(use_cline_obj); } else { PyErr_Clear(); use_cline = NULL; } } if (!use_cline) { c_line = 0; PyObject_SetAttr(__pyx_cython_runtime, __pyx_n_s_cline_in_traceback, Py_False); } else if (use_cline == Py_False || (use_cline != Py_True && PyObject_Not(use_cline) != 0)) { c_line = 0; } __Pyx_ErrRestoreInState(tstate, ptype, pvalue, ptraceback); return c_line; } #endif /* CodeObjectCache */ static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line) { int start = 0, mid = 0, end = count - 1; if (end >= 0 && code_line > entries[end].code_line) { return count; } while (start < end) { mid = start + (end - start) / 2; if (code_line < entries[mid].code_line) { end = mid; } else if (code_line > entries[mid].code_line) { start = mid + 1; } else { return mid; } } if (code_line <= entries[mid].code_line) { return mid; } else { return mid + 1; } } static PyCodeObject *__pyx_find_code_object(int code_line) { PyCodeObject* code_object; int pos; if (unlikely(!code_line) || unlikely(!__pyx_code_cache.entries)) { return NULL; } pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line); if (unlikely(pos >= __pyx_code_cache.count) || unlikely(__pyx_code_cache.entries[pos].code_line != code_line)) { return NULL; } code_object = __pyx_code_cache.entries[pos].code_object; Py_INCREF(code_object); return code_object; } static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object) { int pos, i; __Pyx_CodeObjectCacheEntry* entries = __pyx_code_cache.entries; if (unlikely(!code_line)) { return; } if (unlikely(!entries)) { entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Malloc(64*sizeof(__Pyx_CodeObjectCacheEntry)); if (likely(entries)) { __pyx_code_cache.entries = entries; __pyx_code_cache.max_count = 64; __pyx_code_cache.count = 1; entries[0].code_line = code_line; entries[0].code_object = code_object; Py_INCREF(code_object); } return; } pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line); if ((pos < __pyx_code_cache.count) && unlikely(__pyx_code_cache.entries[pos].code_line == code_line)) { PyCodeObject* tmp = entries[pos].code_object; entries[pos].code_object = code_object; Py_DECREF(tmp); return; } if (__pyx_code_cache.count == __pyx_code_cache.max_count) { int new_max = __pyx_code_cache.max_count + 64; entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Realloc( __pyx_code_cache.entries, ((size_t)new_max) * sizeof(__Pyx_CodeObjectCacheEntry)); if (unlikely(!entries)) { return; } __pyx_code_cache.entries = entries; __pyx_code_cache.max_count = new_max; } for (i=__pyx_code_cache.count; i>pos; i--) { entries[i] = entries[i-1]; } entries[pos].code_line = code_line; entries[pos].code_object = code_object; __pyx_code_cache.count++; Py_INCREF(code_object); } /* AddTraceback */ #include "compile.h" #include "frameobject.h" #include "traceback.h" static PyCodeObject* __Pyx_CreateCodeObjectForTraceback( const char *funcname, int c_line, int py_line, const char *filename) { PyCodeObject *py_code = 0; PyObject *py_srcfile = 0; PyObject *py_funcname = 0; #if PY_MAJOR_VERSION < 3 py_srcfile = PyString_FromString(filename); #else py_srcfile = PyUnicode_FromString(filename); #endif if (!py_srcfile) goto bad; if (c_line) { #if PY_MAJOR_VERSION < 3 py_funcname = PyString_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line); #else py_funcname = PyUnicode_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line); #endif } else { #if PY_MAJOR_VERSION < 3 py_funcname = 
PyString_FromString(funcname); #else py_funcname = PyUnicode_FromString(funcname); #endif } if (!py_funcname) goto bad; py_code = __Pyx_PyCode_New( 0, 0, 0, 0, 0, __pyx_empty_bytes, /*PyObject *code,*/ __pyx_empty_tuple, /*PyObject *consts,*/ __pyx_empty_tuple, /*PyObject *names,*/ __pyx_empty_tuple, /*PyObject *varnames,*/ __pyx_empty_tuple, /*PyObject *freevars,*/ __pyx_empty_tuple, /*PyObject *cellvars,*/ py_srcfile, /*PyObject *filename,*/ py_funcname, /*PyObject *name,*/ py_line, __pyx_empty_bytes /*PyObject *lnotab*/ ); Py_DECREF(py_srcfile); Py_DECREF(py_funcname); return py_code; bad: Py_XDECREF(py_srcfile); Py_XDECREF(py_funcname); return NULL; } static void __Pyx_AddTraceback(const char *funcname, int c_line, int py_line, const char *filename) { PyCodeObject *py_code = 0; PyFrameObject *py_frame = 0; PyThreadState *tstate = __Pyx_PyThreadState_Current; if (c_line) { c_line = __Pyx_CLineForTraceback(tstate, c_line); } py_code = __pyx_find_code_object(c_line ? -c_line : py_line); if (!py_code) { py_code = __Pyx_CreateCodeObjectForTraceback( funcname, c_line, py_line, filename); if (!py_code) goto bad; __pyx_insert_code_object(c_line ? -c_line : py_line, py_code); } py_frame = PyFrame_New( tstate, /*PyThreadState *tstate,*/ py_code, /*PyCodeObject *code,*/ __pyx_d, /*PyObject *globals,*/ 0 /*PyObject *locals*/ ); if (!py_frame) goto bad; __Pyx_PyFrame_SetLineNumber(py_frame, py_line); PyTraceBack_Here(py_frame); bad: Py_XDECREF(py_code); Py_XDECREF(py_frame); } /* CIntToPy */ static CYTHON_INLINE PyObject* __Pyx_PyInt_From_long(long value) { const long neg_one = (long) ((long) 0 - (long) 1), const_zero = (long) 0; const int is_unsigned = neg_one > const_zero; if (is_unsigned) { if (sizeof(long) < sizeof(long)) { return PyInt_FromLong((long) value); } else if (sizeof(long) <= sizeof(unsigned long)) { return PyLong_FromUnsignedLong((unsigned long) value); #ifdef HAVE_LONG_LONG } else if (sizeof(long) <= sizeof(unsigned PY_LONG_LONG)) { return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value); #endif } } else { if (sizeof(long) <= sizeof(long)) { return PyInt_FromLong((long) value); #ifdef HAVE_LONG_LONG } else if (sizeof(long) <= sizeof(PY_LONG_LONG)) { return PyLong_FromLongLong((PY_LONG_LONG) value); #endif } } { int one = 1; int little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&value; return _PyLong_FromByteArray(bytes, sizeof(long), little, !is_unsigned); } } /* CIntFromPyVerify */ #define __PYX_VERIFY_RETURN_INT(target_type, func_type, func_value)\ __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, 0) #define __PYX_VERIFY_RETURN_INT_EXC(target_type, func_type, func_value)\ __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, 1) #define __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, exc)\ {\ func_type value = func_value;\ if (sizeof(target_type) < sizeof(func_type)) {\ if (unlikely(value != (func_type) (target_type) value)) {\ func_type zero = 0;\ if (exc && unlikely(value == (func_type)-1 && PyErr_Occurred()))\ return (target_type) -1;\ if (is_unsigned && unlikely(value < zero))\ goto raise_neg_overflow;\ else\ goto raise_overflow;\ }\ }\ return (target_type) value;\ } /* CIntFromPy */ static CYTHON_INLINE long __Pyx_PyInt_As_long(PyObject *x) { const long neg_one = (long) ((long) 0 - (long) 1), const_zero = (long) 0; const int is_unsigned = neg_one > const_zero; #if PY_MAJOR_VERSION < 3 if (likely(PyInt_Check(x))) { if (sizeof(long) < sizeof(long)) { __PYX_VERIFY_RETURN_INT(long, long, PyInt_AS_LONG(x)) } else 
{ long val = PyInt_AS_LONG(x); if (is_unsigned && unlikely(val < 0)) { goto raise_neg_overflow; } return (long) val; } } else #endif if (likely(PyLong_Check(x))) { if (is_unsigned) { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) { case 0: return (long) 0; case 1: __PYX_VERIFY_RETURN_INT(long, digit, digits[0]) case 2: if (8 * sizeof(long) > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) >= 2 * PyLong_SHIFT) { return (long) (((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0])); } } break; case 3: if (8 * sizeof(long) > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) >= 3 * PyLong_SHIFT) { return (long) (((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])); } } break; case 4: if (8 * sizeof(long) > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) >= 4 * PyLong_SHIFT) { return (long) (((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])); } } break; } #endif #if CYTHON_COMPILING_IN_CPYTHON if (unlikely(Py_SIZE(x) < 0)) { goto raise_neg_overflow; } #else { int result = PyObject_RichCompareBool(x, Py_False, Py_LT); if (unlikely(result < 0)) return (long) -1; if (unlikely(result == 1)) goto raise_neg_overflow; } #endif if (sizeof(long) <= sizeof(unsigned long)) { __PYX_VERIFY_RETURN_INT_EXC(long, unsigned long, PyLong_AsUnsignedLong(x)) #ifdef HAVE_LONG_LONG } else if (sizeof(long) <= sizeof(unsigned PY_LONG_LONG)) { __PYX_VERIFY_RETURN_INT_EXC(long, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x)) #endif } } else { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) { case 0: return (long) 0; case -1: __PYX_VERIFY_RETURN_INT(long, sdigit, (sdigit) (-(sdigit)digits[0])) case 1: __PYX_VERIFY_RETURN_INT(long, digit, +digits[0]) case -2: if (8 * sizeof(long) - 1 > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { return (long) (((long)-1)*(((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); } } break; case 2: if (8 * sizeof(long) > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { return (long) ((((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); } } break; case -3: if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned 
long)digits[0]))) } else if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { return (long) (((long)-1)*(((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); } } break; case 3: if (8 * sizeof(long) > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { return (long) ((((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); } } break; case -4: if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) { return (long) (((long)-1)*(((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); } } break; case 4: if (8 * sizeof(long) > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) { return (long) ((((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); } } break; } #endif if (sizeof(long) <= sizeof(long)) { __PYX_VERIFY_RETURN_INT_EXC(long, long, PyLong_AsLong(x)) #ifdef HAVE_LONG_LONG } else if (sizeof(long) <= sizeof(PY_LONG_LONG)) { __PYX_VERIFY_RETURN_INT_EXC(long, PY_LONG_LONG, PyLong_AsLongLong(x)) #endif } } { #if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray) PyErr_SetString(PyExc_RuntimeError, "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers"); #else long val; PyObject *v = __Pyx_PyNumber_IntOrLong(x); #if PY_MAJOR_VERSION < 3 if (likely(v) && !PyLong_Check(v)) { PyObject *tmp = v; v = PyNumber_Long(tmp); Py_DECREF(tmp); } #endif if (likely(v)) { int one = 1; int is_little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&val; int ret = _PyLong_AsByteArray((PyLongObject *)v, bytes, sizeof(val), is_little, !is_unsigned); Py_DECREF(v); if (likely(!ret)) return val; } #endif return (long) -1; } } else { long val; PyObject *tmp = __Pyx_PyNumber_IntOrLong(x); if (!tmp) return (long) -1; val = __Pyx_PyInt_As_long(tmp); Py_DECREF(tmp); return val; } raise_overflow: PyErr_SetString(PyExc_OverflowError, "value too large to convert to long"); return (long) -1; raise_neg_overflow: PyErr_SetString(PyExc_OverflowError, "can't convert negative value to long"); return (long) -1; } /* CIntFromPy */ static CYTHON_INLINE int __Pyx_PyInt_As_int(PyObject *x) { const int neg_one = (int) ((int) 0 - (int) 1), const_zero = (int) 0; const int is_unsigned = neg_one > const_zero; #if PY_MAJOR_VERSION < 3 if (likely(PyInt_Check(x))) { if (sizeof(int) < sizeof(long)) { __PYX_VERIFY_RETURN_INT(int, long, PyInt_AS_LONG(x)) } else { long val = PyInt_AS_LONG(x); if (is_unsigned && unlikely(val < 0)) { goto raise_neg_overflow; } return (int) val; } } else #endif if (likely(PyLong_Check(x))) { if 
(is_unsigned) { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) { case 0: return (int) 0; case 1: __PYX_VERIFY_RETURN_INT(int, digit, digits[0]) case 2: if (8 * sizeof(int) > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) >= 2 * PyLong_SHIFT) { return (int) (((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0])); } } break; case 3: if (8 * sizeof(int) > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) >= 3 * PyLong_SHIFT) { return (int) (((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])); } } break; case 4: if (8 * sizeof(int) > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) >= 4 * PyLong_SHIFT) { return (int) (((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])); } } break; } #endif #if CYTHON_COMPILING_IN_CPYTHON if (unlikely(Py_SIZE(x) < 0)) { goto raise_neg_overflow; } #else { int result = PyObject_RichCompareBool(x, Py_False, Py_LT); if (unlikely(result < 0)) return (int) -1; if (unlikely(result == 1)) goto raise_neg_overflow; } #endif if (sizeof(int) <= sizeof(unsigned long)) { __PYX_VERIFY_RETURN_INT_EXC(int, unsigned long, PyLong_AsUnsignedLong(x)) #ifdef HAVE_LONG_LONG } else if (sizeof(int) <= sizeof(unsigned PY_LONG_LONG)) { __PYX_VERIFY_RETURN_INT_EXC(int, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x)) #endif } } else { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) { case 0: return (int) 0; case -1: __PYX_VERIFY_RETURN_INT(int, sdigit, (sdigit) (-(sdigit)digits[0])) case 1: __PYX_VERIFY_RETURN_INT(int, digit, +digits[0]) case -2: if (8 * sizeof(int) - 1 > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) { return (int) (((int)-1)*(((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); } } break; case 2: if (8 * sizeof(int) > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) { return (int) ((((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); } } break; case -3: if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) { return (int) (((int)-1)*(((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); } } break; case 3: if (8 
* sizeof(int) > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) { return (int) ((((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); } } break; case -4: if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) - 1 > 4 * PyLong_SHIFT) { return (int) (((int)-1)*(((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); } } break; case 4: if (8 * sizeof(int) > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) - 1 > 4 * PyLong_SHIFT) { return (int) ((((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); } } break; } #endif if (sizeof(int) <= sizeof(long)) { __PYX_VERIFY_RETURN_INT_EXC(int, long, PyLong_AsLong(x)) #ifdef HAVE_LONG_LONG } else if (sizeof(int) <= sizeof(PY_LONG_LONG)) { __PYX_VERIFY_RETURN_INT_EXC(int, PY_LONG_LONG, PyLong_AsLongLong(x)) #endif } } { #if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray) PyErr_SetString(PyExc_RuntimeError, "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers"); #else int val; PyObject *v = __Pyx_PyNumber_IntOrLong(x); #if PY_MAJOR_VERSION < 3 if (likely(v) && !PyLong_Check(v)) { PyObject *tmp = v; v = PyNumber_Long(tmp); Py_DECREF(tmp); } #endif if (likely(v)) { int one = 1; int is_little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&val; int ret = _PyLong_AsByteArray((PyLongObject *)v, bytes, sizeof(val), is_little, !is_unsigned); Py_DECREF(v); if (likely(!ret)) return val; } #endif return (int) -1; } } else { int val; PyObject *tmp = __Pyx_PyNumber_IntOrLong(x); if (!tmp) return (int) -1; val = __Pyx_PyInt_As_int(tmp); Py_DECREF(tmp); return val; } raise_overflow: PyErr_SetString(PyExc_OverflowError, "value too large to convert to int"); return (int) -1; raise_neg_overflow: PyErr_SetString(PyExc_OverflowError, "can't convert negative value to int"); return (int) -1; } /* FastTypeChecks */ #if CYTHON_COMPILING_IN_CPYTHON static int __Pyx_InBases(PyTypeObject *a, PyTypeObject *b) { while (a) { a = a->tp_base; if (a == b) return 1; } return b == &PyBaseObject_Type; } static CYTHON_INLINE int __Pyx_IsSubtype(PyTypeObject *a, PyTypeObject *b) { PyObject *mro; if (a == b) return 1; mro = a->tp_mro; if (likely(mro)) { Py_ssize_t i, n; n = PyTuple_GET_SIZE(mro); for (i = 0; i < n; i++) { if (PyTuple_GET_ITEM(mro, i) == (PyObject *)b) return 1; } return 0; } return __Pyx_InBases(a, b); } #if PY_MAJOR_VERSION == 2 static int __Pyx_inner_PyErr_GivenExceptionMatches2(PyObject *err, PyObject* exc_type1, PyObject* exc_type2) { PyObject *exception, *value, *tb; int res; __Pyx_PyThreadState_declare __Pyx_PyThreadState_assign 
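/* Descriptive note: this Py2-only helper saves any pending exception with __Pyx_ErrFetch before
   the PyObject_IsSubclass() checks below, because those checks can raise and would otherwise
   clobber the error indicator; __Pyx_ErrRestore puts the original exception back afterwards. */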
__Pyx_ErrFetch(&exception, &value, &tb); res = exc_type1 ? PyObject_IsSubclass(err, exc_type1) : 0; if (unlikely(res == -1)) { PyErr_WriteUnraisable(err); res = 0; } if (!res) { res = PyObject_IsSubclass(err, exc_type2); if (unlikely(res == -1)) { PyErr_WriteUnraisable(err); res = 0; } } __Pyx_ErrRestore(exception, value, tb); return res; } #else static CYTHON_INLINE int __Pyx_inner_PyErr_GivenExceptionMatches2(PyObject *err, PyObject* exc_type1, PyObject *exc_type2) { int res = exc_type1 ? __Pyx_IsSubtype((PyTypeObject*)err, (PyTypeObject*)exc_type1) : 0; if (!res) { res = __Pyx_IsSubtype((PyTypeObject*)err, (PyTypeObject*)exc_type2); } return res; } #endif static int __Pyx_PyErr_GivenExceptionMatchesTuple(PyObject *exc_type, PyObject *tuple) { Py_ssize_t i, n; assert(PyExceptionClass_Check(exc_type)); n = PyTuple_GET_SIZE(tuple); #if PY_MAJOR_VERSION >= 3 for (i=0; iexc_info; tmp_type = exc_info->exc_type; tmp_value = exc_info->exc_value; tmp_tb = exc_info->exc_traceback; exc_info->exc_type = *type; exc_info->exc_value = *value; exc_info->exc_traceback = *tb; #else tmp_type = tstate->exc_type; tmp_value = tstate->exc_value; tmp_tb = tstate->exc_traceback; tstate->exc_type = *type; tstate->exc_value = *value; tstate->exc_traceback = *tb; #endif *type = tmp_type; *value = tmp_value; *tb = tmp_tb; } #else static CYTHON_INLINE void __Pyx_ExceptionSwap(PyObject **type, PyObject **value, PyObject **tb) { PyObject *tmp_type, *tmp_value, *tmp_tb; PyErr_GetExcInfo(&tmp_type, &tmp_value, &tmp_tb); PyErr_SetExcInfo(*type, *value, *tb); *type = tmp_type; *value = tmp_value; *tb = tmp_tb; } #endif /* PyObjectCallMethod1 */ static PyObject* __Pyx__PyObject_CallMethod1(PyObject* method, PyObject* arg) { PyObject *result = __Pyx_PyObject_CallOneArg(method, arg); Py_DECREF(method); return result; } static PyObject* __Pyx_PyObject_CallMethod1(PyObject* obj, PyObject* method_name, PyObject* arg) { PyObject *method = NULL, *result; int is_method = __Pyx_PyObject_GetMethod(obj, method_name, &method); if (likely(is_method)) { result = __Pyx_PyObject_Call2Args(method, obj, arg); Py_DECREF(method); return result; } if (unlikely(!method)) return NULL; return __Pyx__PyObject_CallMethod1(method, arg); } /* CoroutineBase */ #include #include #define __Pyx_Coroutine_Undelegate(gen) Py_CLEAR((gen)->yieldfrom) static int __Pyx_PyGen__FetchStopIterationValue(CYTHON_UNUSED PyThreadState *__pyx_tstate, PyObject **pvalue) { PyObject *et, *ev, *tb; PyObject *value = NULL; __Pyx_ErrFetch(&et, &ev, &tb); if (!et) { Py_XDECREF(tb); Py_XDECREF(ev); Py_INCREF(Py_None); *pvalue = Py_None; return 0; } if (likely(et == PyExc_StopIteration)) { if (!ev) { Py_INCREF(Py_None); value = Py_None; } #if PY_VERSION_HEX >= 0x030300A0 else if (Py_TYPE(ev) == (PyTypeObject*)PyExc_StopIteration) { value = ((PyStopIterationObject *)ev)->value; Py_INCREF(value); Py_DECREF(ev); } #endif else if (unlikely(PyTuple_Check(ev))) { if (PyTuple_GET_SIZE(ev) >= 1) { #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS value = PyTuple_GET_ITEM(ev, 0); Py_INCREF(value); #else value = PySequence_ITEM(ev, 0); #endif } else { Py_INCREF(Py_None); value = Py_None; } Py_DECREF(ev); } else if (!__Pyx_TypeCheck(ev, (PyTypeObject*)PyExc_StopIteration)) { value = ev; } if (likely(value)) { Py_XDECREF(tb); Py_DECREF(et); *pvalue = value; return 0; } } else if (!__Pyx_PyErr_GivenExceptionMatches(et, PyExc_StopIteration)) { __Pyx_ErrRestore(et, ev, tb); return -1; } PyErr_NormalizeException(&et, &ev, &tb); if (unlikely(!PyObject_TypeCheck(ev, 
(PyTypeObject*)PyExc_StopIteration))) { __Pyx_ErrRestore(et, ev, tb); return -1; } Py_XDECREF(tb); Py_DECREF(et); #if PY_VERSION_HEX >= 0x030300A0 value = ((PyStopIterationObject *)ev)->value; Py_INCREF(value); Py_DECREF(ev); #else { PyObject* args = __Pyx_PyObject_GetAttrStr(ev, __pyx_n_s_args); Py_DECREF(ev); if (likely(args)) { value = PySequence_GetItem(args, 0); Py_DECREF(args); } if (unlikely(!value)) { __Pyx_ErrRestore(NULL, NULL, NULL); Py_INCREF(Py_None); value = Py_None; } } #endif *pvalue = value; return 0; } static CYTHON_INLINE void __Pyx_Coroutine_ExceptionClear(__Pyx_ExcInfoStruct *exc_state) { PyObject *t, *v, *tb; t = exc_state->exc_type; v = exc_state->exc_value; tb = exc_state->exc_traceback; exc_state->exc_type = NULL; exc_state->exc_value = NULL; exc_state->exc_traceback = NULL; Py_XDECREF(t); Py_XDECREF(v); Py_XDECREF(tb); } #define __Pyx_Coroutine_AlreadyRunningError(gen) (__Pyx__Coroutine_AlreadyRunningError(gen), (PyObject*)NULL) static void __Pyx__Coroutine_AlreadyRunningError(CYTHON_UNUSED __pyx_CoroutineObject *gen) { const char *msg; if ((0)) { #ifdef __Pyx_Coroutine_USED } else if (__Pyx_Coroutine_Check((PyObject*)gen)) { msg = "coroutine already executing"; #endif #ifdef __Pyx_AsyncGen_USED } else if (__Pyx_AsyncGen_CheckExact((PyObject*)gen)) { msg = "async generator already executing"; #endif } else { msg = "generator already executing"; } PyErr_SetString(PyExc_ValueError, msg); } #define __Pyx_Coroutine_NotStartedError(gen) (__Pyx__Coroutine_NotStartedError(gen), (PyObject*)NULL) static void __Pyx__Coroutine_NotStartedError(CYTHON_UNUSED PyObject *gen) { const char *msg; if ((0)) { #ifdef __Pyx_Coroutine_USED } else if (__Pyx_Coroutine_Check(gen)) { msg = "can't send non-None value to a just-started coroutine"; #endif #ifdef __Pyx_AsyncGen_USED } else if (__Pyx_AsyncGen_CheckExact(gen)) { msg = "can't send non-None value to a just-started async generator"; #endif } else { msg = "can't send non-None value to a just-started generator"; } PyErr_SetString(PyExc_TypeError, msg); } #define __Pyx_Coroutine_AlreadyTerminatedError(gen, value, closing) (__Pyx__Coroutine_AlreadyTerminatedError(gen, value, closing), (PyObject*)NULL) static void __Pyx__Coroutine_AlreadyTerminatedError(CYTHON_UNUSED PyObject *gen, PyObject *value, CYTHON_UNUSED int closing) { #ifdef __Pyx_Coroutine_USED if (!closing && __Pyx_Coroutine_Check(gen)) { PyErr_SetString(PyExc_RuntimeError, "cannot reuse already awaited coroutine"); } else #endif if (value) { #ifdef __Pyx_AsyncGen_USED if (__Pyx_AsyncGen_CheckExact(gen)) PyErr_SetNone(__Pyx_PyExc_StopAsyncIteration); else #endif PyErr_SetNone(PyExc_StopIteration); } } static PyObject *__Pyx_Coroutine_SendEx(__pyx_CoroutineObject *self, PyObject *value, int closing) { __Pyx_PyThreadState_declare PyThreadState *tstate; __Pyx_ExcInfoStruct *exc_state; PyObject *retval; assert(!self->is_running); if (unlikely(self->resume_label == 0)) { if (unlikely(value && value != Py_None)) { return __Pyx_Coroutine_NotStartedError((PyObject*)self); } } if (unlikely(self->resume_label == -1)) { return __Pyx_Coroutine_AlreadyTerminatedError((PyObject*)self, value, closing); } #if CYTHON_FAST_THREAD_STATE __Pyx_PyThreadState_assign tstate = __pyx_tstate; #else tstate = __Pyx_PyThreadState_Current; #endif exc_state = &self->gi_exc_state; if (exc_state->exc_type) { #if CYTHON_COMPILING_IN_PYPY || CYTHON_COMPILING_IN_PYSTON #else if (exc_state->exc_traceback) { PyTracebackObject *tb = (PyTracebackObject *) exc_state->exc_traceback; PyFrameObject *f = tb->tb_frame; 
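/* Descriptive note: the saved traceback's frame gets chained to the currently executing frame,
   so while the coroutine body runs its exception context points back at the caller;
   __Pyx_Coroutine_ResetFrameBackpointer() clears f_back again so the frame is not kept alive
   across suspensions. */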
Py_XINCREF(tstate->frame); assert(f->f_back == NULL); f->f_back = tstate->frame; } #endif } #if CYTHON_USE_EXC_INFO_STACK exc_state->previous_item = tstate->exc_info; tstate->exc_info = exc_state; #else if (exc_state->exc_type) { __Pyx_ExceptionSwap(&exc_state->exc_type, &exc_state->exc_value, &exc_state->exc_traceback); } else { __Pyx_Coroutine_ExceptionClear(exc_state); __Pyx_ExceptionSave(&exc_state->exc_type, &exc_state->exc_value, &exc_state->exc_traceback); } #endif self->is_running = 1; retval = self->body((PyObject *) self, tstate, value); self->is_running = 0; #if CYTHON_USE_EXC_INFO_STACK exc_state = &self->gi_exc_state; tstate->exc_info = exc_state->previous_item; exc_state->previous_item = NULL; __Pyx_Coroutine_ResetFrameBackpointer(exc_state); #endif return retval; } static CYTHON_INLINE void __Pyx_Coroutine_ResetFrameBackpointer(__Pyx_ExcInfoStruct *exc_state) { PyObject *exc_tb = exc_state->exc_traceback; if (likely(exc_tb)) { #if CYTHON_COMPILING_IN_PYPY || CYTHON_COMPILING_IN_PYSTON #else PyTracebackObject *tb = (PyTracebackObject *) exc_tb; PyFrameObject *f = tb->tb_frame; Py_CLEAR(f->f_back); #endif } } static CYTHON_INLINE PyObject *__Pyx_Coroutine_MethodReturn(CYTHON_UNUSED PyObject* gen, PyObject *retval) { if (unlikely(!retval)) { __Pyx_PyThreadState_declare __Pyx_PyThreadState_assign if (!__Pyx_PyErr_Occurred()) { PyObject *exc = PyExc_StopIteration; #ifdef __Pyx_AsyncGen_USED if (__Pyx_AsyncGen_CheckExact(gen)) exc = __Pyx_PyExc_StopAsyncIteration; #endif __Pyx_PyErr_SetNone(exc); } } return retval; } static CYTHON_INLINE PyObject *__Pyx_Coroutine_FinishDelegation(__pyx_CoroutineObject *gen) { PyObject *ret; PyObject *val = NULL; __Pyx_Coroutine_Undelegate(gen); __Pyx_PyGen__FetchStopIterationValue(__Pyx_PyThreadState_Current, &val); ret = __Pyx_Coroutine_SendEx(gen, val, 0); Py_XDECREF(val); return ret; } static PyObject *__Pyx_Coroutine_Send(PyObject *self, PyObject *value) { PyObject *retval; __pyx_CoroutineObject *gen = (__pyx_CoroutineObject*) self; PyObject *yf = gen->yieldfrom; if (unlikely(gen->is_running)) return __Pyx_Coroutine_AlreadyRunningError(gen); if (yf) { PyObject *ret; gen->is_running = 1; #ifdef __Pyx_Generator_USED if (__Pyx_Generator_CheckExact(yf)) { ret = __Pyx_Coroutine_Send(yf, value); } else #endif #ifdef __Pyx_Coroutine_USED if (__Pyx_Coroutine_Check(yf)) { ret = __Pyx_Coroutine_Send(yf, value); } else #endif #ifdef __Pyx_AsyncGen_USED if (__pyx_PyAsyncGenASend_CheckExact(yf)) { ret = __Pyx_async_gen_asend_send(yf, value); } else #endif #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x03030000 && (defined(__linux__) || PY_VERSION_HEX >= 0x030600B3) if (PyGen_CheckExact(yf)) { ret = _PyGen_Send((PyGenObject*)yf, value == Py_None ? NULL : value); } else #endif #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x03050000 && defined(PyCoro_CheckExact) && (defined(__linux__) || PY_VERSION_HEX >= 0x030600B3) if (PyCoro_CheckExact(yf)) { ret = _PyGen_Send((PyGenObject*)yf, value == Py_None ? 
NULL : value); } else #endif { if (value == Py_None) ret = Py_TYPE(yf)->tp_iternext(yf); else ret = __Pyx_PyObject_CallMethod1(yf, __pyx_n_s_send, value); } gen->is_running = 0; if (likely(ret)) { return ret; } retval = __Pyx_Coroutine_FinishDelegation(gen); } else { retval = __Pyx_Coroutine_SendEx(gen, value, 0); } return __Pyx_Coroutine_MethodReturn(self, retval); } static int __Pyx_Coroutine_CloseIter(__pyx_CoroutineObject *gen, PyObject *yf) { PyObject *retval = NULL; int err = 0; #ifdef __Pyx_Generator_USED if (__Pyx_Generator_CheckExact(yf)) { retval = __Pyx_Coroutine_Close(yf); if (!retval) return -1; } else #endif #ifdef __Pyx_Coroutine_USED if (__Pyx_Coroutine_Check(yf)) { retval = __Pyx_Coroutine_Close(yf); if (!retval) return -1; } else if (__Pyx_CoroutineAwait_CheckExact(yf)) { retval = __Pyx_CoroutineAwait_Close((__pyx_CoroutineAwaitObject*)yf, NULL); if (!retval) return -1; } else #endif #ifdef __Pyx_AsyncGen_USED if (__pyx_PyAsyncGenASend_CheckExact(yf)) { retval = __Pyx_async_gen_asend_close(yf, NULL); } else if (__pyx_PyAsyncGenAThrow_CheckExact(yf)) { retval = __Pyx_async_gen_athrow_close(yf, NULL); } else #endif { PyObject *meth; gen->is_running = 1; meth = __Pyx_PyObject_GetAttrStr(yf, __pyx_n_s_close); if (unlikely(!meth)) { if (!PyErr_ExceptionMatches(PyExc_AttributeError)) { PyErr_WriteUnraisable(yf); } PyErr_Clear(); } else { retval = PyObject_CallFunction(meth, NULL); Py_DECREF(meth); if (!retval) err = -1; } gen->is_running = 0; } Py_XDECREF(retval); return err; } static PyObject *__Pyx_Generator_Next(PyObject *self) { __pyx_CoroutineObject *gen = (__pyx_CoroutineObject*) self; PyObject *yf = gen->yieldfrom; if (unlikely(gen->is_running)) return __Pyx_Coroutine_AlreadyRunningError(gen); if (yf) { PyObject *ret; gen->is_running = 1; #ifdef __Pyx_Generator_USED if (__Pyx_Generator_CheckExact(yf)) { ret = __Pyx_Generator_Next(yf); } else #endif #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x03030000 && (defined(__linux__) || PY_VERSION_HEX >= 0x030600B3) if (PyGen_CheckExact(yf)) { ret = _PyGen_Send((PyGenObject*)yf, NULL); } else #endif #ifdef __Pyx_Coroutine_USED if (__Pyx_Coroutine_Check(yf)) { ret = __Pyx_Coroutine_Send(yf, Py_None); } else #endif ret = Py_TYPE(yf)->tp_iternext(yf); gen->is_running = 0; if (likely(ret)) { return ret; } return __Pyx_Coroutine_FinishDelegation(gen); } return __Pyx_Coroutine_SendEx(gen, Py_None, 0); } static PyObject *__Pyx_Coroutine_Close_Method(PyObject *self, CYTHON_UNUSED PyObject *arg) { return __Pyx_Coroutine_Close(self); } static PyObject *__Pyx_Coroutine_Close(PyObject *self) { __pyx_CoroutineObject *gen = (__pyx_CoroutineObject *) self; PyObject *retval, *raised_exception; PyObject *yf = gen->yieldfrom; int err = 0; if (unlikely(gen->is_running)) return __Pyx_Coroutine_AlreadyRunningError(gen); if (yf) { Py_INCREF(yf); err = __Pyx_Coroutine_CloseIter(gen, yf); __Pyx_Coroutine_Undelegate(gen); Py_DECREF(yf); } if (err == 0) PyErr_SetNone(PyExc_GeneratorExit); retval = __Pyx_Coroutine_SendEx(gen, NULL, 1); if (unlikely(retval)) { const char *msg; Py_DECREF(retval); if ((0)) { #ifdef __Pyx_Coroutine_USED } else if (__Pyx_Coroutine_Check(self)) { msg = "coroutine ignored GeneratorExit"; #endif #ifdef __Pyx_AsyncGen_USED } else if (__Pyx_AsyncGen_CheckExact(self)) { #if PY_VERSION_HEX < 0x03060000 msg = "async generator ignored GeneratorExit - might require Python 3.6+ finalisation (PEP 525)"; #else msg = "async generator ignored GeneratorExit"; #endif #endif } else { msg = "generator ignored GeneratorExit"; } 
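/* Descriptive note: as in CPython's generator close(), if the object kept yielding instead of
   letting GeneratorExit propagate, that is reported as a RuntimeError. */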
PyErr_SetString(PyExc_RuntimeError, msg); return NULL; } raised_exception = PyErr_Occurred(); if (likely(!raised_exception || __Pyx_PyErr_GivenExceptionMatches2(raised_exception, PyExc_GeneratorExit, PyExc_StopIteration))) { if (raised_exception) PyErr_Clear(); Py_INCREF(Py_None); return Py_None; } return NULL; } static PyObject *__Pyx__Coroutine_Throw(PyObject *self, PyObject *typ, PyObject *val, PyObject *tb, PyObject *args, int close_on_genexit) { __pyx_CoroutineObject *gen = (__pyx_CoroutineObject *) self; PyObject *yf = gen->yieldfrom; if (unlikely(gen->is_running)) return __Pyx_Coroutine_AlreadyRunningError(gen); if (yf) { PyObject *ret; Py_INCREF(yf); if (__Pyx_PyErr_GivenExceptionMatches(typ, PyExc_GeneratorExit) && close_on_genexit) { int err = __Pyx_Coroutine_CloseIter(gen, yf); Py_DECREF(yf); __Pyx_Coroutine_Undelegate(gen); if (err < 0) return __Pyx_Coroutine_MethodReturn(self, __Pyx_Coroutine_SendEx(gen, NULL, 0)); goto throw_here; } gen->is_running = 1; if (0 #ifdef __Pyx_Generator_USED || __Pyx_Generator_CheckExact(yf) #endif #ifdef __Pyx_Coroutine_USED || __Pyx_Coroutine_Check(yf) #endif ) { ret = __Pyx__Coroutine_Throw(yf, typ, val, tb, args, close_on_genexit); #ifdef __Pyx_Coroutine_USED } else if (__Pyx_CoroutineAwait_CheckExact(yf)) { ret = __Pyx__Coroutine_Throw(((__pyx_CoroutineAwaitObject*)yf)->coroutine, typ, val, tb, args, close_on_genexit); #endif } else { PyObject *meth = __Pyx_PyObject_GetAttrStr(yf, __pyx_n_s_throw); if (unlikely(!meth)) { Py_DECREF(yf); if (!PyErr_ExceptionMatches(PyExc_AttributeError)) { gen->is_running = 0; return NULL; } PyErr_Clear(); __Pyx_Coroutine_Undelegate(gen); gen->is_running = 0; goto throw_here; } if (likely(args)) { ret = PyObject_CallObject(meth, args); } else { ret = PyObject_CallFunctionObjArgs(meth, typ, val, tb, NULL); } Py_DECREF(meth); } gen->is_running = 0; Py_DECREF(yf); if (!ret) { ret = __Pyx_Coroutine_FinishDelegation(gen); } return __Pyx_Coroutine_MethodReturn(self, ret); } throw_here: __Pyx_Raise(typ, val, tb, NULL); return __Pyx_Coroutine_MethodReturn(self, __Pyx_Coroutine_SendEx(gen, NULL, 0)); } static PyObject *__Pyx_Coroutine_Throw(PyObject *self, PyObject *args) { PyObject *typ; PyObject *val = NULL; PyObject *tb = NULL; if (!PyArg_UnpackTuple(args, (char *)"throw", 1, 3, &typ, &val, &tb)) return NULL; return __Pyx__Coroutine_Throw(self, typ, val, tb, args, 1); } static CYTHON_INLINE int __Pyx_Coroutine_traverse_excstate(__Pyx_ExcInfoStruct *exc_state, visitproc visit, void *arg) { Py_VISIT(exc_state->exc_type); Py_VISIT(exc_state->exc_value); Py_VISIT(exc_state->exc_traceback); return 0; } static int __Pyx_Coroutine_traverse(__pyx_CoroutineObject *gen, visitproc visit, void *arg) { Py_VISIT(gen->closure); Py_VISIT(gen->classobj); Py_VISIT(gen->yieldfrom); return __Pyx_Coroutine_traverse_excstate(&gen->gi_exc_state, visit, arg); } static int __Pyx_Coroutine_clear(PyObject *self) { __pyx_CoroutineObject *gen = (__pyx_CoroutineObject *) self; Py_CLEAR(gen->closure); Py_CLEAR(gen->classobj); Py_CLEAR(gen->yieldfrom); __Pyx_Coroutine_ExceptionClear(&gen->gi_exc_state); #ifdef __Pyx_AsyncGen_USED if (__Pyx_AsyncGen_CheckExact(self)) { Py_CLEAR(((__pyx_PyAsyncGenObject*)gen)->ag_finalizer); } #endif Py_CLEAR(gen->gi_code); Py_CLEAR(gen->gi_name); Py_CLEAR(gen->gi_qualname); Py_CLEAR(gen->gi_modulename); return 0; } static void __Pyx_Coroutine_dealloc(PyObject *self) { __pyx_CoroutineObject *gen = (__pyx_CoroutineObject *) self; PyObject_GC_UnTrack(gen); if (gen->gi_weakreflist != NULL) PyObject_ClearWeakRefs(self); 
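/* Descriptive note: a generator/coroutine that has not finished (resume_label >= 0, i.e. never
   started or currently suspended) may still need finalization - re-track it, run
   tp_finalize / tp_del, and return early if that resurrected the object. */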
if (gen->resume_label >= 0) { PyObject_GC_Track(self); #if PY_VERSION_HEX >= 0x030400a1 && CYTHON_USE_TP_FINALIZE if (PyObject_CallFinalizerFromDealloc(self)) #else Py_TYPE(gen)->tp_del(self); if (self->ob_refcnt > 0) #endif { return; } PyObject_GC_UnTrack(self); } #ifdef __Pyx_AsyncGen_USED if (__Pyx_AsyncGen_CheckExact(self)) { /* We have to handle this case for asynchronous generators right here, because this code has to be between UNTRACK and GC_Del. */ Py_CLEAR(((__pyx_PyAsyncGenObject*)self)->ag_finalizer); } #endif __Pyx_Coroutine_clear(self); PyObject_GC_Del(gen); } static void __Pyx_Coroutine_del(PyObject *self) { PyObject *error_type, *error_value, *error_traceback; __pyx_CoroutineObject *gen = (__pyx_CoroutineObject *) self; __Pyx_PyThreadState_declare if (gen->resume_label < 0) { return; } #if !CYTHON_USE_TP_FINALIZE assert(self->ob_refcnt == 0); self->ob_refcnt = 1; #endif __Pyx_PyThreadState_assign __Pyx_ErrFetch(&error_type, &error_value, &error_traceback); #ifdef __Pyx_AsyncGen_USED if (__Pyx_AsyncGen_CheckExact(self)) { __pyx_PyAsyncGenObject *agen = (__pyx_PyAsyncGenObject*)self; PyObject *finalizer = agen->ag_finalizer; if (finalizer && !agen->ag_closed) { PyObject *res = __Pyx_PyObject_CallOneArg(finalizer, self); if (unlikely(!res)) { PyErr_WriteUnraisable(self); } else { Py_DECREF(res); } __Pyx_ErrRestore(error_type, error_value, error_traceback); return; } } #endif if (unlikely(gen->resume_label == 0 && !error_value)) { #ifdef __Pyx_Coroutine_USED #ifdef __Pyx_Generator_USED if (!__Pyx_Generator_CheckExact(self)) #endif { PyObject_GC_UnTrack(self); #if PY_MAJOR_VERSION >= 3 || defined(PyErr_WarnFormat) if (unlikely(PyErr_WarnFormat(PyExc_RuntimeWarning, 1, "coroutine '%.50S' was never awaited", gen->gi_qualname) < 0)) PyErr_WriteUnraisable(self); #else {PyObject *msg; char *cmsg; #if CYTHON_COMPILING_IN_PYPY msg = NULL; cmsg = (char*) "coroutine was never awaited"; #else char *cname; PyObject *qualname; qualname = gen->gi_qualname; cname = PyString_AS_STRING(qualname); msg = PyString_FromFormat("coroutine '%.50s' was never awaited", cname); if (unlikely(!msg)) { PyErr_Clear(); cmsg = (char*) "coroutine was never awaited"; } else { cmsg = PyString_AS_STRING(msg); } #endif if (unlikely(PyErr_WarnEx(PyExc_RuntimeWarning, cmsg, 1) < 0)) PyErr_WriteUnraisable(self); Py_XDECREF(msg);} #endif PyObject_GC_Track(self); } #endif } else { PyObject *res = __Pyx_Coroutine_Close(self); if (unlikely(!res)) { if (PyErr_Occurred()) PyErr_WriteUnraisable(self); } else { Py_DECREF(res); } } __Pyx_ErrRestore(error_type, error_value, error_traceback); #if !CYTHON_USE_TP_FINALIZE assert(self->ob_refcnt > 0); if (--self->ob_refcnt == 0) { return; } { Py_ssize_t refcnt = self->ob_refcnt; _Py_NewReference(self); self->ob_refcnt = refcnt; } #if CYTHON_COMPILING_IN_CPYTHON assert(PyType_IS_GC(self->ob_type) && _Py_AS_GC(self)->gc.gc_refs != _PyGC_REFS_UNTRACKED); _Py_DEC_REFTOTAL; #endif #ifdef COUNT_ALLOCS --Py_TYPE(self)->tp_frees; --Py_TYPE(self)->tp_allocs; #endif #endif } static PyObject * __Pyx_Coroutine_get_name(__pyx_CoroutineObject *self, CYTHON_UNUSED void *context) { PyObject *name = self->gi_name; if (unlikely(!name)) name = Py_None; Py_INCREF(name); return name; } static int __Pyx_Coroutine_set_name(__pyx_CoroutineObject *self, PyObject *value, CYTHON_UNUSED void *context) { PyObject *tmp; #if PY_MAJOR_VERSION >= 3 if (unlikely(value == NULL || !PyUnicode_Check(value))) #else if (unlikely(value == NULL || !PyString_Check(value))) #endif { PyErr_SetString(PyExc_TypeError, "__name__ 
must be set to a string object"); return -1; } tmp = self->gi_name; Py_INCREF(value); self->gi_name = value; Py_XDECREF(tmp); return 0; } static PyObject * __Pyx_Coroutine_get_qualname(__pyx_CoroutineObject *self, CYTHON_UNUSED void *context) { PyObject *name = self->gi_qualname; if (unlikely(!name)) name = Py_None; Py_INCREF(name); return name; } static int __Pyx_Coroutine_set_qualname(__pyx_CoroutineObject *self, PyObject *value, CYTHON_UNUSED void *context) { PyObject *tmp; #if PY_MAJOR_VERSION >= 3 if (unlikely(value == NULL || !PyUnicode_Check(value))) #else if (unlikely(value == NULL || !PyString_Check(value))) #endif { PyErr_SetString(PyExc_TypeError, "__qualname__ must be set to a string object"); return -1; } tmp = self->gi_qualname; Py_INCREF(value); self->gi_qualname = value; Py_XDECREF(tmp); return 0; } static __pyx_CoroutineObject *__Pyx__Coroutine_New( PyTypeObject* type, __pyx_coroutine_body_t body, PyObject *code, PyObject *closure, PyObject *name, PyObject *qualname, PyObject *module_name) { __pyx_CoroutineObject *gen = PyObject_GC_New(__pyx_CoroutineObject, type); if (unlikely(!gen)) return NULL; return __Pyx__Coroutine_NewInit(gen, body, code, closure, name, qualname, module_name); } static __pyx_CoroutineObject *__Pyx__Coroutine_NewInit( __pyx_CoroutineObject *gen, __pyx_coroutine_body_t body, PyObject *code, PyObject *closure, PyObject *name, PyObject *qualname, PyObject *module_name) { gen->body = body; gen->closure = closure; Py_XINCREF(closure); gen->is_running = 0; gen->resume_label = 0; gen->classobj = NULL; gen->yieldfrom = NULL; gen->gi_exc_state.exc_type = NULL; gen->gi_exc_state.exc_value = NULL; gen->gi_exc_state.exc_traceback = NULL; #if CYTHON_USE_EXC_INFO_STACK gen->gi_exc_state.previous_item = NULL; #endif gen->gi_weakreflist = NULL; Py_XINCREF(qualname); gen->gi_qualname = qualname; Py_XINCREF(name); gen->gi_name = name; Py_XINCREF(module_name); gen->gi_modulename = module_name; Py_XINCREF(code); gen->gi_code = code; PyObject_GC_Track(gen); return gen; } /* PatchModuleWithCoroutine */ static PyObject* __Pyx_Coroutine_patch_module(PyObject* module, const char* py_code) { #if defined(__Pyx_Generator_USED) || defined(__Pyx_Coroutine_USED) int result; PyObject *globals, *result_obj; globals = PyDict_New(); if (unlikely(!globals)) goto ignore; result = PyDict_SetItemString(globals, "_cython_coroutine_type", #ifdef __Pyx_Coroutine_USED (PyObject*)__pyx_CoroutineType); #else Py_None); #endif if (unlikely(result < 0)) goto ignore; result = PyDict_SetItemString(globals, "_cython_generator_type", #ifdef __Pyx_Generator_USED (PyObject*)__pyx_GeneratorType); #else Py_None); #endif if (unlikely(result < 0)) goto ignore; if (unlikely(PyDict_SetItemString(globals, "_module", module) < 0)) goto ignore; if (unlikely(PyDict_SetItemString(globals, "__builtins__", __pyx_b) < 0)) goto ignore; result_obj = PyRun_String(py_code, Py_file_input, globals, globals); if (unlikely(!result_obj)) goto ignore; Py_DECREF(result_obj); Py_DECREF(globals); return module; ignore: Py_XDECREF(globals); PyErr_WriteUnraisable(module); if (unlikely(PyErr_WarnEx(PyExc_RuntimeWarning, "Cython module failed to patch module with custom type", 1) < 0)) { Py_DECREF(module); module = NULL; } #else py_code++; #endif return module; } /* PatchGeneratorABC */ #ifndef CYTHON_REGISTER_ABCS #define CYTHON_REGISTER_ABCS 1 #endif #if defined(__Pyx_Generator_USED) || defined(__Pyx_Coroutine_USED) static PyObject* __Pyx_patch_abc_module(PyObject *module); static PyObject* __Pyx_patch_abc_module(PyObject *module) 
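/* Descriptive note: runs the small embedded Python snippet below, which registers Cython's
   generator/coroutine types with the given module's Generator/Coroutine ABCs so that
   isinstance() checks against collections.abc keep working for Cython-created objects. */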
{ module = __Pyx_Coroutine_patch_module( module, "" "if _cython_generator_type is not None:\n" " try: Generator = _module.Generator\n" " except AttributeError: pass\n" " else: Generator.register(_cython_generator_type)\n" "if _cython_coroutine_type is not None:\n" " try: Coroutine = _module.Coroutine\n" " except AttributeError: pass\n" " else: Coroutine.register(_cython_coroutine_type)\n" ); return module; } #endif static int __Pyx_patch_abc(void) { #if defined(__Pyx_Generator_USED) || defined(__Pyx_Coroutine_USED) static int abc_patched = 0; if (CYTHON_REGISTER_ABCS && !abc_patched) { PyObject *module; module = PyImport_ImportModule((PY_MAJOR_VERSION >= 3) ? "collections.abc" : "collections"); if (!module) { PyErr_WriteUnraisable(NULL); if (unlikely(PyErr_WarnEx(PyExc_RuntimeWarning, ((PY_MAJOR_VERSION >= 3) ? "Cython module failed to register with collections.abc module" : "Cython module failed to register with collections module"), 1) < 0)) { return -1; } } else { module = __Pyx_patch_abc_module(module); abc_patched = 1; if (unlikely(!module)) return -1; Py_DECREF(module); } module = PyImport_ImportModule("backports_abc"); if (module) { module = __Pyx_patch_abc_module(module); Py_XDECREF(module); } if (!module) { PyErr_Clear(); } } #else if ((0)) __Pyx_Coroutine_patch_module(NULL, NULL); #endif return 0; } /* Generator */ static PyMethodDef __pyx_Generator_methods[] = { {"send", (PyCFunction) __Pyx_Coroutine_Send, METH_O, (char*) PyDoc_STR("send(arg) -> send 'arg' into generator,\nreturn next yielded value or raise StopIteration.")}, {"throw", (PyCFunction) __Pyx_Coroutine_Throw, METH_VARARGS, (char*) PyDoc_STR("throw(typ[,val[,tb]]) -> raise exception in generator,\nreturn next yielded value or raise StopIteration.")}, {"close", (PyCFunction) __Pyx_Coroutine_Close_Method, METH_NOARGS, (char*) PyDoc_STR("close() -> raise GeneratorExit inside generator.")}, {0, 0, 0, 0} }; static PyMemberDef __pyx_Generator_memberlist[] = { {(char *) "gi_running", T_BOOL, offsetof(__pyx_CoroutineObject, is_running), READONLY, NULL}, {(char*) "gi_yieldfrom", T_OBJECT, offsetof(__pyx_CoroutineObject, yieldfrom), READONLY, (char*) PyDoc_STR("object being iterated by 'yield from', or None")}, {(char*) "gi_code", T_OBJECT, offsetof(__pyx_CoroutineObject, gi_code), READONLY, NULL}, {0, 0, 0, 0, 0} }; static PyGetSetDef __pyx_Generator_getsets[] = { {(char *) "__name__", (getter)__Pyx_Coroutine_get_name, (setter)__Pyx_Coroutine_set_name, (char*) PyDoc_STR("name of the generator"), 0}, {(char *) "__qualname__", (getter)__Pyx_Coroutine_get_qualname, (setter)__Pyx_Coroutine_set_qualname, (char*) PyDoc_STR("qualified name of the generator"), 0}, {0, 0, 0, 0, 0} }; static PyTypeObject __pyx_GeneratorType_type = { PyVarObject_HEAD_INIT(0, 0) "generator", sizeof(__pyx_CoroutineObject), 0, (destructor) __Pyx_Coroutine_dealloc, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_GC | Py_TPFLAGS_HAVE_FINALIZE, 0, (traverseproc) __Pyx_Coroutine_traverse, 0, 0, offsetof(__pyx_CoroutineObject, gi_weakreflist), 0, (iternextfunc) __Pyx_Generator_Next, __pyx_Generator_methods, __pyx_Generator_memberlist, __pyx_Generator_getsets, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, #if CYTHON_USE_TP_FINALIZE 0, #else __Pyx_Coroutine_del, #endif 0, #if CYTHON_USE_TP_FINALIZE __Pyx_Coroutine_del, #elif PY_VERSION_HEX >= 0x030400a1 0, #endif #if PY_VERSION_HEX >= 0x030800b1 0, #endif #if PY_VERSION_HEX >= 0x030800b4 && PY_VERSION_HEX < 0x03090000 0, #endif }; static int __pyx_Generator_init(void) { 
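/* Descriptive note: __Pyx_FetchCommonType() caches the type object in a process-wide helper
   module, so all Cython-compiled extension modules in the process share one "generator" type. */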
__pyx_GeneratorType_type.tp_getattro = __Pyx_PyObject_GenericGetAttrNoDict; __pyx_GeneratorType_type.tp_iter = PyObject_SelfIter; __pyx_GeneratorType = __Pyx_FetchCommonType(&__pyx_GeneratorType_type); if (unlikely(!__pyx_GeneratorType)) { return -1; } return 0; } /* CheckBinaryVersion */ static int __Pyx_check_binary_version(void) { char ctversion[4], rtversion[4]; PyOS_snprintf(ctversion, 4, "%d.%d", PY_MAJOR_VERSION, PY_MINOR_VERSION); PyOS_snprintf(rtversion, 4, "%s", Py_GetVersion()); if (ctversion[0] != rtversion[0] || ctversion[2] != rtversion[2]) { char message[200]; PyOS_snprintf(message, sizeof(message), "compiletime version %s of module '%.100s' " "does not match runtime version %s", ctversion, __Pyx_MODULE_NAME, rtversion); return PyErr_WarnEx(NULL, message, 1); } return 0; } /* InitStrings */ static int __Pyx_InitStrings(__Pyx_StringTabEntry *t) { while (t->p) { #if PY_MAJOR_VERSION < 3 if (t->is_unicode) { *t->p = PyUnicode_DecodeUTF8(t->s, t->n - 1, NULL); } else if (t->intern) { *t->p = PyString_InternFromString(t->s); } else { *t->p = PyString_FromStringAndSize(t->s, t->n - 1); } #else if (t->is_unicode | t->is_str) { if (t->intern) { *t->p = PyUnicode_InternFromString(t->s); } else if (t->encoding) { *t->p = PyUnicode_Decode(t->s, t->n - 1, t->encoding, NULL); } else { *t->p = PyUnicode_FromStringAndSize(t->s, t->n - 1); } } else { *t->p = PyBytes_FromStringAndSize(t->s, t->n - 1); } #endif if (!*t->p) return -1; if (PyObject_Hash(*t->p) == -1) return -1; ++t; } return 0; } static CYTHON_INLINE PyObject* __Pyx_PyUnicode_FromString(const char* c_str) { return __Pyx_PyUnicode_FromStringAndSize(c_str, (Py_ssize_t)strlen(c_str)); } static CYTHON_INLINE const char* __Pyx_PyObject_AsString(PyObject* o) { Py_ssize_t ignore; return __Pyx_PyObject_AsStringAndSize(o, &ignore); } #if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT #if !CYTHON_PEP393_ENABLED static const char* __Pyx_PyUnicode_AsStringAndSize(PyObject* o, Py_ssize_t *length) { char* defenc_c; PyObject* defenc = _PyUnicode_AsDefaultEncodedString(o, NULL); if (!defenc) return NULL; defenc_c = PyBytes_AS_STRING(defenc); #if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII { char* end = defenc_c + PyBytes_GET_SIZE(defenc); char* c; for (c = defenc_c; c < end; c++) { if ((unsigned char) (*c) >= 128) { PyUnicode_AsASCIIString(o); return NULL; } } } #endif *length = PyBytes_GET_SIZE(defenc); return defenc_c; } #else static CYTHON_INLINE const char* __Pyx_PyUnicode_AsStringAndSize(PyObject* o, Py_ssize_t *length) { if (unlikely(__Pyx_PyUnicode_READY(o) == -1)) return NULL; #if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII if (likely(PyUnicode_IS_ASCII(o))) { *length = PyUnicode_GET_LENGTH(o); return PyUnicode_AsUTF8(o); } else { PyUnicode_AsASCIIString(o); return NULL; } #else return PyUnicode_AsUTF8AndSize(o, length); #endif } #endif #endif static CYTHON_INLINE const char* __Pyx_PyObject_AsStringAndSize(PyObject* o, Py_ssize_t *length) { #if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT if ( #if PY_MAJOR_VERSION < 3 && __PYX_DEFAULT_STRING_ENCODING_IS_ASCII __Pyx_sys_getdefaultencoding_not_ascii && #endif PyUnicode_Check(o)) { return __Pyx_PyUnicode_AsStringAndSize(o, length); } else #endif #if (!CYTHON_COMPILING_IN_PYPY) || (defined(PyByteArray_AS_STRING) && defined(PyByteArray_GET_SIZE)) if (PyByteArray_Check(o)) { *length = PyByteArray_GET_SIZE(o); return PyByteArray_AS_STRING(o); } else #endif { char* result; int r = PyBytes_AsStringAndSize(o, &result, length); if 
(unlikely(r < 0)) { return NULL; } else { return result; } } } static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject* x) { int is_true = x == Py_True; if (is_true | (x == Py_False) | (x == Py_None)) return is_true; else return PyObject_IsTrue(x); } static CYTHON_INLINE int __Pyx_PyObject_IsTrueAndDecref(PyObject* x) { int retval; if (unlikely(!x)) return -1; retval = __Pyx_PyObject_IsTrue(x); Py_DECREF(x); return retval; } static PyObject* __Pyx_PyNumber_IntOrLongWrongResultType(PyObject* result, const char* type_name) { #if PY_MAJOR_VERSION >= 3 if (PyLong_Check(result)) { if (PyErr_WarnFormat(PyExc_DeprecationWarning, 1, "__int__ returned non-int (type %.200s). " "The ability to return an instance of a strict subclass of int " "is deprecated, and may be removed in a future version of Python.", Py_TYPE(result)->tp_name)) { Py_DECREF(result); return NULL; } return result; } #endif PyErr_Format(PyExc_TypeError, "__%.4s__ returned non-%.4s (type %.200s)", type_name, type_name, Py_TYPE(result)->tp_name); Py_DECREF(result); return NULL; } static CYTHON_INLINE PyObject* __Pyx_PyNumber_IntOrLong(PyObject* x) { #if CYTHON_USE_TYPE_SLOTS PyNumberMethods *m; #endif const char *name = NULL; PyObject *res = NULL; #if PY_MAJOR_VERSION < 3 if (likely(PyInt_Check(x) || PyLong_Check(x))) #else if (likely(PyLong_Check(x))) #endif return __Pyx_NewRef(x); #if CYTHON_USE_TYPE_SLOTS m = Py_TYPE(x)->tp_as_number; #if PY_MAJOR_VERSION < 3 if (m && m->nb_int) { name = "int"; res = m->nb_int(x); } else if (m && m->nb_long) { name = "long"; res = m->nb_long(x); } #else if (likely(m && m->nb_int)) { name = "int"; res = m->nb_int(x); } #endif #else if (!PyBytes_CheckExact(x) && !PyUnicode_CheckExact(x)) { res = PyNumber_Int(x); } #endif if (likely(res)) { #if PY_MAJOR_VERSION < 3 if (unlikely(!PyInt_Check(res) && !PyLong_Check(res))) { #else if (unlikely(!PyLong_CheckExact(res))) { #endif return __Pyx_PyNumber_IntOrLongWrongResultType(res, name); } } else if (!PyErr_Occurred()) { PyErr_SetString(PyExc_TypeError, "an integer is required"); } return res; } static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject* b) { Py_ssize_t ival; PyObject *x; #if PY_MAJOR_VERSION < 3 if (likely(PyInt_CheckExact(b))) { if (sizeof(Py_ssize_t) >= sizeof(long)) return PyInt_AS_LONG(b); else return PyInt_AsSsize_t(b); } #endif if (likely(PyLong_CheckExact(b))) { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)b)->ob_digit; const Py_ssize_t size = Py_SIZE(b); if (likely(__Pyx_sst_abs(size) <= 1)) { ival = likely(size) ? 
digits[0] : 0; if (size == -1) ival = -ival; return ival; } else { switch (size) { case 2: if (8 * sizeof(Py_ssize_t) > 2 * PyLong_SHIFT) { return (Py_ssize_t) (((((size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); } break; case -2: if (8 * sizeof(Py_ssize_t) > 2 * PyLong_SHIFT) { return -(Py_ssize_t) (((((size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); } break; case 3: if (8 * sizeof(Py_ssize_t) > 3 * PyLong_SHIFT) { return (Py_ssize_t) (((((((size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); } break; case -3: if (8 * sizeof(Py_ssize_t) > 3 * PyLong_SHIFT) { return -(Py_ssize_t) (((((((size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); } break; case 4: if (8 * sizeof(Py_ssize_t) > 4 * PyLong_SHIFT) { return (Py_ssize_t) (((((((((size_t)digits[3]) << PyLong_SHIFT) | (size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); } break; case -4: if (8 * sizeof(Py_ssize_t) > 4 * PyLong_SHIFT) { return -(Py_ssize_t) (((((((((size_t)digits[3]) << PyLong_SHIFT) | (size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); } break; } } #endif return PyLong_AsSsize_t(b); } x = PyNumber_Index(b); if (!x) return -1; ival = PyInt_AsSsize_t(x); Py_DECREF(x); return ival; } static CYTHON_INLINE PyObject * __Pyx_PyBool_FromLong(long b) { return b ? __Pyx_NewRef(Py_True) : __Pyx_NewRef(Py_False); } static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t ival) { return PyInt_FromSize_t(ival); } #endif /* Py_PYTHON_H */ borgbackup-1.1.15/src/borg/logger.py0000644000175000017500000002171513771325506017264 0ustar useruser00000000000000"""logging facilities The way to use this is as follows: * each module declares its own logger, using: from .logger import create_logger logger = create_logger() * then each module uses logger.info/warning/debug/etc according to the level it believes is appropriate: logger.debug('debugging info for developers or power users') logger.info('normal, informational output') logger.warning('warn about a non-fatal error or sth else') logger.error('a fatal error') ... and so on. see the `logging documentation `_ for more information * console interaction happens on stderr, that includes interactive reporting functions like `help`, `info` and `list` * ...except ``input()`` is special, because we can't control the stream it is using, unfortunately. we assume that it won't clutter stdout, because interaction would be broken then anyways * what is output on INFO level is additionally controlled by commandline flags """ import inspect import json import logging import logging.config import logging.handlers # needed for handlers defined there being configurable in logging.conf file import os import warnings configured = False # use something like this to ignore warnings: # warnings.filterwarnings('ignore', r'... 
regex for warning message to ignore ...') def _log_warning(message, category, filename, lineno, file=None, line=None): # for warnings, we just want to use the logging system, not stderr or other files msg = "{0}:{1}: {2}: {3}".format(filename, lineno, category.__name__, message) logger = create_logger(__name__) # Note: the warning will look like coming from here, # but msg contains info about where it really comes from logger.warning(msg) def setup_logging(stream=None, conf_fname=None, env_var='BORG_LOGGING_CONF', level='info', is_serve=False, json=False): """setup logging module according to the arguments provided if conf_fname is given (or the config file name can be determined via the env_var, if given): load this logging configuration. otherwise, set up a stream handler logger on stderr (by default, if no stream is provided). if is_serve == True, we configure a special log format as expected by the borg client log message interceptor. """ global configured err_msg = None if env_var: conf_fname = os.environ.get(env_var, conf_fname) if conf_fname: try: conf_fname = os.path.abspath(conf_fname) # we open the conf file here to be able to give a reasonable # error message in case of failure (if we give the filename to # fileConfig(), it silently ignores unreadable files and gives # unhelpful error msgs like "No section: 'formatters'"): with open(conf_fname) as f: logging.config.fileConfig(f) configured = True logger = logging.getLogger(__name__) borg_logger = logging.getLogger('borg') borg_logger.json = json logger.debug('using logging configuration read from "{0}"'.format(conf_fname)) warnings.showwarning = _log_warning return None except Exception as err: # XXX be more precise err_msg = str(err) # if we did not / not successfully load a logging configuration, fallback to this: logger = logging.getLogger('') handler = logging.StreamHandler(stream) if is_serve and not json: fmt = '$LOG %(levelname)s %(name)s Remote: %(message)s' else: fmt = '%(message)s' formatter = JsonFormatter(fmt) if json else logging.Formatter(fmt) handler.setFormatter(formatter) borg_logger = logging.getLogger('borg') borg_logger.formatter = formatter borg_logger.json = json if configured and logger.handlers: # The RepositoryServer can call setup_logging a second time to adjust the output # mode from text-ish is_serve to json is_serve. # Thus, remove the previously installed handler, if any. logger.handlers[0].close() logger.handlers.clear() logger.addHandler(handler) logger.setLevel(level.upper()) configured = True logger = logging.getLogger(__name__) if err_msg: logger.warning('setup_logging for "{0}" failed with "{1}".'.format(conf_fname, err_msg)) logger.debug('using builtin fallback logging configuration') warnings.showwarning = _log_warning return handler def find_parent_module(): """find the name of a the first module calling this module if we cannot find it, we return the current module's name (__name__) instead. 
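    For example, a module that does ``logger = create_logger()`` at import time,
    say ``borg.archiver``, ends up with a logger named ``borg.archiver``.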
""" try: frame = inspect.currentframe().f_back module = inspect.getmodule(frame) while module is None or module.__name__ == __name__: frame = frame.f_back module = inspect.getmodule(frame) return module.__name__ except AttributeError: # somehow we failed to find our module # return the logger module name by default return __name__ def create_logger(name=None): """lazily create a Logger object with the proper path, which is returned by find_parent_module() by default, or is provided via the commandline this is really a shortcut for: logger = logging.getLogger(__name__) we use it to avoid errors and provide a more standard API. We must create the logger lazily, because this is usually called from module level (and thus executed at import time - BEFORE setup_logging() was called). By doing it lazily we can do the setup first, we just have to be careful not to call any logger methods before the setup_logging() call. If you try, you'll get an exception. """ class LazyLogger: def __init__(self, name=None): self.__name = name or find_parent_module() self.__real_logger = None @property def __logger(self): if self.__real_logger is None: if not configured: raise Exception("tried to call a logger before setup_logging() was called") self.__real_logger = logging.getLogger(self.__name) if self.__name.startswith('borg.debug.') and self.__real_logger.level == logging.NOTSET: self.__real_logger.setLevel('WARNING') return self.__real_logger def getChild(self, suffix): return LazyLogger(self.__name + '.' + suffix) def setLevel(self, *args, **kw): return self.__logger.setLevel(*args, **kw) def log(self, *args, **kw): if 'msgid' in kw: kw.setdefault('extra', {})['msgid'] = kw.pop('msgid') return self.__logger.log(*args, **kw) def exception(self, *args, **kw): if 'msgid' in kw: kw.setdefault('extra', {})['msgid'] = kw.pop('msgid') return self.__logger.exception(*args, **kw) def debug(self, *args, **kw): if 'msgid' in kw: kw.setdefault('extra', {})['msgid'] = kw.pop('msgid') return self.__logger.debug(*args, **kw) def info(self, *args, **kw): if 'msgid' in kw: kw.setdefault('extra', {})['msgid'] = kw.pop('msgid') return self.__logger.info(*args, **kw) def warning(self, *args, **kw): if 'msgid' in kw: kw.setdefault('extra', {})['msgid'] = kw.pop('msgid') return self.__logger.warning(*args, **kw) def error(self, *args, **kw): if 'msgid' in kw: kw.setdefault('extra', {})['msgid'] = kw.pop('msgid') return self.__logger.error(*args, **kw) def critical(self, *args, **kw): if 'msgid' in kw: kw.setdefault('extra', {})['msgid'] = kw.pop('msgid') return self.__logger.critical(*args, **kw) return LazyLogger(name) class JsonFormatter(logging.Formatter): RECORD_ATTRIBUTES = ( 'levelname', 'name', 'message', # msgid is an attribute we made up in Borg to expose a non-changing handle for log messages 'msgid', ) # Other attributes that are not very useful but do exist: # processName, process, relativeCreated, stack_info, thread, threadName # msg == message # *args* are the unformatted arguments passed to the logger function, not useful now, # become useful if sanitized properly (must be JSON serializable) in the code + # fixed message IDs are assigned. 
# exc_info, exc_text are generally uninteresting because the message will have that def format(self, record): super().format(record) data = { 'type': 'log_message', 'time': record.created, 'message': '', 'levelname': 'CRITICAL', } for attr in self.RECORD_ATTRIBUTES: value = getattr(record, attr, None) if value: data[attr] = value return json.dumps(data) borgbackup-1.1.15/src/borg/__main__.py0000644000175000017500000000004613771325506017517 0ustar useruser00000000000000from borg.archiver import main main() borgbackup-1.1.15/src/borg/__init__.py0000644000175000017500000000151313771325506017536 0ustar useruser00000000000000from distutils.version import LooseVersion # IMPORTANT keep imports from borg here to a minimum because our testsuite depends on # beeing able to import borg.constants and then monkey patching borg.constants.PBKDF2_ITERATIONS from ._version import version as __version__ __version_tuple__ = tuple(LooseVersion(__version__).version[:3]) # assert that all semver components are integers # this is mainly to show errors when people repackage poorly # and setuptools_scm determines a 0.1.dev... version assert all(isinstance(v, int) for v in __version_tuple__), \ """\ broken borgbackup version metadata: %r version metadata is obtained dynamically on installation via setuptools_scm, please ensure your git repo has the correct tags or you provide the version using SETUPTOOLS_SCM_PRETEND_VERSION in your build script. """ % __version__ borgbackup-1.1.15/src/borg/chunker.pyx0000644000175000017500000000440113771325506017625 0ustar useruser00000000000000# cython: language_level=3 API_VERSION = '1.1_01' from libc.stdlib cimport free cdef extern from "_chunker.c": ctypedef int uint32_t ctypedef struct _Chunker "Chunker": pass _Chunker *chunker_init(int window_size, int chunk_mask, int min_size, int max_size, uint32_t seed) void chunker_set_fd(_Chunker *chunker, object f, int fd) void chunker_free(_Chunker *chunker) object chunker_process(_Chunker *chunker) uint32_t *buzhash_init_table(uint32_t seed) uint32_t c_buzhash "buzhash"(unsigned char *data, size_t len, uint32_t *h) uint32_t c_buzhash_update "buzhash_update"(uint32_t sum, unsigned char remove, unsigned char add, size_t len, uint32_t *h) cdef class Chunker: cdef _Chunker *chunker def __cinit__(self, int seed, int chunk_min_exp, int chunk_max_exp, int hash_mask_bits, int hash_window_size): min_size = 1 << chunk_min_exp max_size = 1 << chunk_max_exp # see chunker_process, first while loop condition, first term must be able to get True: assert hash_window_size + min_size + 1 <= max_size, "too small max_size" hash_mask = (1 << hash_mask_bits) - 1 self.chunker = chunker_init(hash_window_size, hash_mask, min_size, max_size, seed & 0xffffffff) def chunkify(self, fd, fh=-1): """ Cut a file into chunks. :param fd: Python file object :param fh: OS-level file handle (if available), defaults to -1 which means not to use OS-level fd. 
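        Illustrative usage sketch (the numeric parameters are only example values close to
        borg's defaults, and ``handle`` stands in for whatever consumes the chunks)::

            chunker = Chunker(seed=0, chunk_min_exp=19, chunk_max_exp=23,
                              hash_mask_bits=21, hash_window_size=4095)
            with open(path, 'rb') as f:
                for chunk in chunker.chunkify(f):
                    handle(chunk)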
""" chunker_set_fd(self.chunker, fd, fh) return self def __dealloc__(self): if self.chunker: chunker_free(self.chunker) def __iter__(self): return self def __next__(self): return chunker_process(self.chunker) def buzhash(data, unsigned long seed): cdef uint32_t *table cdef uint32_t sum table = buzhash_init_table(seed & 0xffffffff) sum = c_buzhash( data, len(data), table) free(table) return sum def buzhash_update(uint32_t sum, unsigned char remove, unsigned char add, size_t len, unsigned long seed): cdef uint32_t *table table = buzhash_init_table(seed & 0xffffffff) sum = c_buzhash_update(sum, remove, add, len, table) free(table) return sum borgbackup-1.1.15/src/borg/testsuite/0000755000175000017500000000000013771325773017464 5ustar useruser00000000000000borgbackup-1.1.15/src/borg/testsuite/item.py0000644000175000017500000001062313771325506020770 0ustar useruser00000000000000import pytest from ..cache import ChunkListEntry from ..item import Item from ..helpers import StableDict def test_item_empty(): item = Item() assert item.as_dict() == {} assert 'path' not in item with pytest.raises(ValueError): 'invalid-key' in item with pytest.raises(TypeError): b'path' in item with pytest.raises(TypeError): 42 in item assert item.get('mode') is None assert item.get('mode', 0o666) == 0o666 with pytest.raises(ValueError): item.get('invalid-key') with pytest.raises(TypeError): item.get(b'mode') with pytest.raises(TypeError): item.get(42) with pytest.raises(AttributeError): item.path with pytest.raises(AttributeError): del item.path def test_item_from_dict(): # does not matter whether we get str or bytes keys item = Item({b'path': '/a/b/c', b'mode': 0o666}) assert item.path == '/a/b/c' assert item.mode == 0o666 assert 'path' in item # does not matter whether we get str or bytes keys item = Item({'path': '/a/b/c', 'mode': 0o666}) assert item.path == '/a/b/c' assert item.mode == 0o666 assert 'mode' in item # invalid - no dict with pytest.raises(TypeError): Item(42) # invalid - no bytes/str key with pytest.raises(TypeError): Item({42: 23}) # invalid - unknown key with pytest.raises(ValueError): Item({'foobar': 'baz'}) def test_item_from_kw(): item = Item(path='/a/b/c', mode=0o666) assert item.path == '/a/b/c' assert item.mode == 0o666 def test_item_int_property(): item = Item() item.mode = 0o666 assert item.mode == 0o666 assert item.as_dict() == {'mode': 0o666} del item.mode assert item.as_dict() == {} with pytest.raises(TypeError): item.mode = "invalid" def test_item_bigint_property(): item = Item() small, big = 42, 2 ** 65 item.atime = small assert item.atime == small assert item.as_dict() == {'atime': small} item.atime = big assert item.atime == big assert item.as_dict() == {'atime': b'\0' * 8 + b'\x02'} def test_item_user_group_none(): item = Item() item.user = None assert item.user is None item.group = None assert item.group is None def test_item_se_str_property(): # start simple item = Item() item.path = '/a/b/c' assert item.path == '/a/b/c' assert item.as_dict() == {'path': b'/a/b/c'} del item.path assert item.as_dict() == {} with pytest.raises(TypeError): item.path = 42 # non-utf-8 path, needing surrogate-escaping for latin-1 u-umlaut item = Item(internal_dict={'path': b'/a/\xfc/c'}) assert item.path == '/a/\udcfc/c' # getting a surrogate-escaped representation assert item.as_dict() == {'path': b'/a/\xfc/c'} del item.path assert 'path' not in item item.path = '/a/\udcfc/c' # setting using a surrogate-escaped representation assert item.as_dict() == {'path': b'/a/\xfc/c'} def test_item_list_property(): 
item = Item() item.chunks = [] assert item.chunks == [] item.chunks.append(0) assert item.chunks == [0] item.chunks.append(1) assert item.chunks == [0, 1] assert item.as_dict() == {'chunks': [0, 1]} def test_item_dict_property(): item = Item() item.xattrs = StableDict() assert item.xattrs == StableDict() item.xattrs['foo'] = 'bar' assert item.xattrs['foo'] == 'bar' item.xattrs['bar'] = 'baz' assert item.xattrs == StableDict({'foo': 'bar', 'bar': 'baz'}) assert item.as_dict() == {'xattrs': {'foo': 'bar', 'bar': 'baz'}} def test_unknown_property(): # we do not want the user to be able to set unknown attributes - # they won't get into the .as_dict() result dictionary. # also they might be just typos of known attributes. item = Item() with pytest.raises(AttributeError): item.unknown_attribute = None def test_item_file_size(): item = Item(mode=0o100666, chunks=[ ChunkListEntry(csize=1, size=1000, id=None), ChunkListEntry(csize=1, size=2000, id=None), ]) assert item.get_size() == 3000 with pytest.raises(AssertionError): item.get_size(compressed=True, memorize=True) assert item.get_size(compressed=True) == 2 item.get_size(memorize=True) assert item.size == 3000 def test_item_file_size_no_chunks(): item = Item(mode=0o100666) assert item.get_size() == 0 borgbackup-1.1.15/src/borg/testsuite/logger.py0000644000175000017500000000304713771325506021313 0ustar useruser00000000000000import logging from io import StringIO import pytest from ..logger import find_parent_module, create_logger, setup_logging logger = create_logger() @pytest.fixture() def io_logger(): io = StringIO() handler = setup_logging(stream=io, env_var=None) handler.setFormatter(logging.Formatter('%(name)s: %(message)s')) logger.setLevel(logging.DEBUG) return io def test_setup_logging(io_logger): logger.info('hello world') assert io_logger.getvalue() == "borg.testsuite.logger: hello world\n" def test_multiple_loggers(io_logger): logger = logging.getLogger(__name__) logger.info('hello world 1') assert io_logger.getvalue() == "borg.testsuite.logger: hello world 1\n" logger = logging.getLogger('borg.testsuite.logger') logger.info('hello world 2') assert io_logger.getvalue() == "borg.testsuite.logger: hello world 1\nborg.testsuite.logger: hello world 2\n" io_logger.truncate(0) io_logger.seek(0) logger = logging.getLogger('borg.testsuite.logger') logger.info('hello world 2') assert io_logger.getvalue() == "borg.testsuite.logger: hello world 2\n" def test_parent_module(): assert find_parent_module() == __name__ def test_lazy_logger(): # just calling all the methods of the proxy logger.setLevel(logging.DEBUG) logger.debug("debug") logger.info("info") logger.warning("warning") logger.error("error") logger.critical("critical") logger.log(logging.INFO, "info") try: raise Exception except Exception: logger.exception("exception") borgbackup-1.1.15/src/borg/testsuite/__init__.py0000644000175000017500000003055013771325506021572 0ustar useruser00000000000000from contextlib import contextmanager import filecmp import functools import os import posix import stat import sys import sysconfig import tempfile import time import uuid import unittest from ..xattr import get_all from ..platform import get_flags from ..helpers import umount from ..helpers import EXIT_SUCCESS, EXIT_WARNING, EXIT_ERROR from .. import platform # Note: this is used by borg.selftest, do not use or import py.test functionality here. try: import llfuse # Does this version of llfuse support ns precision? 
have_fuse_mtime_ns = hasattr(llfuse.EntryAttributes, 'st_mtime_ns') except ImportError: have_fuse_mtime_ns = False try: from pytest import raises except: # noqa raises = None has_lchflags = hasattr(os, 'lchflags') or sys.platform.startswith('linux') try: with tempfile.NamedTemporaryFile() as file: platform.set_flags(file.name, stat.UF_NODUMP) except OSError: has_lchflags = False try: import llfuse has_llfuse = True or llfuse # avoids "unused import" except ImportError: has_llfuse = False # The mtime get/set precision varies on different OS and Python versions if 'HAVE_FUTIMENS' in getattr(posix, '_have_functions', []): st_mtime_ns_round = 0 elif 'HAVE_UTIMES' in sysconfig.get_config_vars(): st_mtime_ns_round = -6 else: st_mtime_ns_round = -9 if sys.platform.startswith('netbsd'): st_mtime_ns_round = -4 # only >1 microsecond resolution here? @contextmanager def unopened_tempfile(): with tempfile.TemporaryDirectory() as tempdir: yield os.path.join(tempdir, "file") @functools.lru_cache() def are_symlinks_supported(): with unopened_tempfile() as filepath: try: os.symlink('somewhere', filepath) if os.stat(filepath, follow_symlinks=False) and os.readlink(filepath) == 'somewhere': return True except OSError: pass return False @functools.lru_cache() def are_hardlinks_supported(): if not hasattr(os, 'link'): # some pythons do not have os.link return False with unopened_tempfile() as file1path, unopened_tempfile() as file2path: open(file1path, 'w').close() try: os.link(file1path, file2path) stat1 = os.stat(file1path) stat2 = os.stat(file2path) if stat1.st_nlink == stat2.st_nlink == 2 and stat1.st_ino == stat2.st_ino: return True except OSError: pass return False @functools.lru_cache() def are_fifos_supported(): with unopened_tempfile() as filepath: try: os.mkfifo(filepath) return True except OSError: return False @functools.lru_cache() def is_utime_fully_supported(): with unopened_tempfile() as filepath: # Some filesystems (such as SSHFS) don't support utime on symlinks if are_symlinks_supported(): os.symlink('something', filepath) else: open(filepath, 'w').close() try: os.utime(filepath, (1000, 2000), follow_symlinks=False) new_stats = os.stat(filepath, follow_symlinks=False) if new_stats.st_atime == 1000 and new_stats.st_mtime == 2000: return True except OSError: pass return False @functools.lru_cache() def is_birthtime_fully_supported(): if not hasattr(os.stat_result, 'st_birthtime'): return False with unopened_tempfile() as filepath: # Some filesystems (such as SSHFS) don't support utime on symlinks if are_symlinks_supported(): os.symlink('something', filepath) else: open(filepath, 'w').close() try: birthtime, mtime, atime = 946598400, 946684800, 946771200 os.utime(filepath, (atime, birthtime), follow_symlinks=False) os.utime(filepath, (atime, mtime), follow_symlinks=False) new_stats = os.stat(filepath, follow_symlinks=False) if new_stats.st_birthtime == birthtime and new_stats.st_mtime == mtime and new_stats.st_atime == atime: return True except OSError: pass return False def no_selinux(x): # selinux fails our FUSE tests, thus ignore selinux xattrs SELINUX_KEY = 'security.selinux' if isinstance(x, dict): return {k: v for k, v in x.items() if k != SELINUX_KEY} if isinstance(x, list): return [k for k in x if k != SELINUX_KEY] class BaseTestCase(unittest.TestCase): """ """ assert_in = unittest.TestCase.assertIn assert_not_in = unittest.TestCase.assertNotIn assert_equal = unittest.TestCase.assertEqual assert_not_equal = unittest.TestCase.assertNotEqual assert_true = unittest.TestCase.assertTrue if 
raises: assert_raises = staticmethod(raises) else: assert_raises = unittest.TestCase.assertRaises @contextmanager def assert_creates_file(self, path): self.assert_true(not os.path.exists(path), '{} should not exist'.format(path)) yield self.assert_true(os.path.exists(path), '{} should exist'.format(path)) def assert_dirs_equal(self, dir1, dir2, **kwargs): diff = filecmp.dircmp(dir1, dir2) self._assert_dirs_equal_cmp(diff, **kwargs) def _assert_dirs_equal_cmp(self, diff, ignore_bsdflags=False, ignore_xattrs=False, ignore_ns=False): self.assert_equal(diff.left_only, []) self.assert_equal(diff.right_only, []) self.assert_equal(diff.diff_files, []) self.assert_equal(diff.funny_files, []) for filename in diff.common: path1 = os.path.join(diff.left, filename) path2 = os.path.join(diff.right, filename) s1 = os.stat(path1, follow_symlinks=False) s2 = os.stat(path2, follow_symlinks=False) # Assume path2 is on FUSE if st_dev is different fuse = s1.st_dev != s2.st_dev attrs = ['st_uid', 'st_gid', 'st_rdev'] if not fuse or not os.path.isdir(path1): # dir nlink is always 1 on our FUSE filesystem attrs.append('st_nlink') d1 = [filename] + [getattr(s1, a) for a in attrs] d2 = [filename] + [getattr(s2, a) for a in attrs] d1.insert(1, oct(s1.st_mode)) d2.insert(1, oct(s2.st_mode)) if not ignore_bsdflags: d1.append(get_flags(path1, s1)) d2.append(get_flags(path2, s2)) # ignore st_rdev if file is not a block/char device, fixes #203 if not stat.S_ISCHR(s1.st_mode) and not stat.S_ISBLK(s1.st_mode): d1[4] = None if not stat.S_ISCHR(s2.st_mode) and not stat.S_ISBLK(s2.st_mode): d2[4] = None # If utime isn't fully supported, borg can't set mtime. # Therefore, we shouldn't test it in that case. if is_utime_fully_supported(): # Older versions of llfuse do not support ns precision properly if ignore_ns: d1.append(int(s1.st_mtime_ns / 1e9)) d2.append(int(s2.st_mtime_ns / 1e9)) elif fuse and not have_fuse_mtime_ns: d1.append(round(s1.st_mtime_ns, -4)) d2.append(round(s2.st_mtime_ns, -4)) else: d1.append(round(s1.st_mtime_ns, st_mtime_ns_round)) d2.append(round(s2.st_mtime_ns, st_mtime_ns_round)) if not ignore_xattrs: d1.append(no_selinux(get_all(path1, follow_symlinks=False))) d2.append(no_selinux(get_all(path2, follow_symlinks=False))) self.assert_equal(d1, d2) for sub_diff in diff.subdirs.values(): self._assert_dirs_equal_cmp(sub_diff, ignore_bsdflags=ignore_bsdflags, ignore_xattrs=ignore_xattrs, ignore_ns=ignore_ns) @contextmanager def fuse_mount(self, location, mountpoint=None, *options, **kwargs): if mountpoint is None: mountpoint = tempfile.mkdtemp() else: os.mkdir(mountpoint) if 'fork' not in kwargs: # For a successful mount, `fork = True` is required for # the borg mount daemon to work properly or the tests # will just freeze. Therefore, if argument `fork` is not # specified, the default value is `True`, regardless of # `FORK_DEFAULT`. However, leaving the possibilty to run # the command with `fork = False` is still necessary for # testing for mount failures, for example attempting to # mount a read-only repo. kwargs['fork'] = True self.cmd('mount', location, mountpoint, *options, **kwargs) if kwargs.get('exit_code', EXIT_SUCCESS) == EXIT_ERROR: # If argument `exit_code = EXIT_ERROR`, then this call # is testing the behavior of an unsuccessful mount and # we must not continue, as there is no mount to work # with. The test itself has already failed or succeeded # with the call to `self.cmd`, above. 
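            # the bare yield/return pair below still lets the caller's "with" body run, then skips the unmount/cleanup path entirely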
yield return self.wait_for_mountstate(mountpoint, mounted=True) yield umount(mountpoint) self.wait_for_mountstate(mountpoint, mounted=False) os.rmdir(mountpoint) # Give the daemon some time to exit time.sleep(0.2) def wait_for_mountstate(self, mountpoint, *, mounted, timeout=5): """Wait until a path meets specified mount point status""" timeout += time.time() while timeout > time.time(): if os.path.ismount(mountpoint) == mounted: return time.sleep(0.1) message = 'Waiting for %s of %s' % ('mount' if mounted else 'umount', mountpoint) raise TimeoutError(message) @contextmanager def read_only(self, path): """Some paths need to be made read-only for testing If the tests are executed inside a fakeroot environment, the changes from chmod won't affect the real permissions of that folder. This issue is circumvented by temporarily disabling fakeroot with `LD_PRELOAD=`. Using chmod to remove write permissions is not enough if the tests are running with root privileges. Instead, the folder is rendered immutable with chattr or chflags, respectively. """ if sys.platform.startswith('linux'): cmd_immutable = 'chattr +i "%s"' % path cmd_mutable = 'chattr -i "%s"' % path elif sys.platform.startswith(('darwin', 'freebsd', 'netbsd', 'openbsd')): cmd_immutable = 'chflags uchg "%s"' % path cmd_mutable = 'chflags nouchg "%s"' % path elif sys.platform.startswith('sunos'): # openindiana cmd_immutable = 'chmod S+vimmutable "%s"' % path cmd_mutable = 'chmod S-vimmutable "%s"' % path else: message = 'Testing read-only repos is not supported on platform %s' % sys.platform self.skipTest(message) try: os.system('LD_PRELOAD= chmod -R ugo-w "%s"' % path) os.system(cmd_immutable) yield finally: # Restore permissions to ensure clean-up doesn't fail os.system(cmd_mutable) os.system('LD_PRELOAD= chmod -R ugo+w "%s"' % path) class changedir: def __init__(self, dir): self.dir = dir def __enter__(self): self.old = os.getcwd() os.chdir(self.dir) def __exit__(self, *args, **kw): os.chdir(self.old) class environment_variable: def __init__(self, **values): self.values = values self.old_values = {} def __enter__(self): for k, v in self.values.items(): self.old_values[k] = os.environ.get(k) if v is None: os.environ.pop(k, None) else: os.environ[k] = v def __exit__(self, *args, **kw): for k, v in self.old_values.items(): if v is None: os.environ.pop(k, None) else: os.environ[k] = v class FakeInputs: """Simulate multiple user inputs, can be used as input() replacement""" def __init__(self, inputs): self.inputs = inputs def __call__(self, prompt=None): if prompt is not None: print(prompt, end='') try: return self.inputs.pop(0) except IndexError: raise EOFError from None borgbackup-1.1.15/src/borg/testsuite/platform.py0000644000175000017500000002077413771325506021666 0ustar useruser00000000000000import functools import os import random import shutil import sys import tempfile import pwd import unittest from ..platform import acl_get, acl_set, swidth from ..platform import get_process_id, process_alive from . 
import BaseTestCase, unopened_tempfile from .locking import free_pid ACCESS_ACL = """ user::rw- user:root:rw-:0 user:9999:r--:9999 group::r-- group:root:r--:0 group:9999:r--:9999 mask::rw- other::r-- """.strip().encode('ascii') DEFAULT_ACL = """ user::rw- user:root:r--:0 user:8888:r--:8888 group::r-- group:root:r--:0 group:8888:r--:8888 mask::rw- other::r-- """.strip().encode('ascii') _acls_working = None def fakeroot_detected(): return 'FAKEROOTKEY' in os.environ def user_exists(username): try: pwd.getpwnam(username) return True except (KeyError, ValueError): return False @functools.lru_cache() def are_acls_working(): with unopened_tempfile() as filepath: open(filepath, 'w').close() try: access = b'user::rw-\ngroup::r--\nmask::rw-\nother::---\nuser:root:rw-:9999\ngroup:root:rw-:9999\n' acl = {'acl_access': access} acl_set(filepath, acl) read_acl = {} acl_get(filepath, read_acl, os.stat(filepath)) read_acl_access = read_acl.get('acl_access', None) if read_acl_access and b'user::rw-' in read_acl_access: return True except PermissionError: pass return False @unittest.skipUnless(sys.platform.startswith('linux'), 'linux only test') @unittest.skipIf(fakeroot_detected(), 'not compatible with fakeroot') class PlatformLinuxTestCase(BaseTestCase): def setUp(self): self.tmpdir = tempfile.mkdtemp() def tearDown(self): shutil.rmtree(self.tmpdir) def get_acl(self, path, numeric_owner=False): item = {} acl_get(path, item, os.stat(path), numeric_owner=numeric_owner) return item def set_acl(self, path, access=None, default=None, numeric_owner=False): item = {'acl_access': access, 'acl_default': default} acl_set(path, item, numeric_owner=numeric_owner) @unittest.skipIf(not are_acls_working(), 'ACLs do not work') def test_access_acl(self): file = tempfile.NamedTemporaryFile() self.assert_equal(self.get_acl(file.name), {}) self.set_acl(file.name, access=b'user::rw-\ngroup::r--\nmask::rw-\nother::---\nuser:root:rw-:9999\ngroup:root:rw-:9999\n', numeric_owner=False) self.assert_in(b'user:root:rw-:0', self.get_acl(file.name)['acl_access']) self.assert_in(b'group:root:rw-:0', self.get_acl(file.name)['acl_access']) self.assert_in(b'user:0:rw-:0', self.get_acl(file.name, numeric_owner=True)['acl_access']) file2 = tempfile.NamedTemporaryFile() self.set_acl(file2.name, access=b'user::rw-\ngroup::r--\nmask::rw-\nother::---\nuser:root:rw-:9999\ngroup:root:rw-:9999\n', numeric_owner=True) self.assert_in(b'user:9999:rw-:9999', self.get_acl(file2.name)['acl_access']) self.assert_in(b'group:9999:rw-:9999', self.get_acl(file2.name)['acl_access']) @unittest.skipIf(not are_acls_working(), 'ACLs do not work') def test_default_acl(self): self.assert_equal(self.get_acl(self.tmpdir), {}) self.set_acl(self.tmpdir, access=ACCESS_ACL, default=DEFAULT_ACL) self.assert_equal(self.get_acl(self.tmpdir)['acl_access'], ACCESS_ACL) self.assert_equal(self.get_acl(self.tmpdir)['acl_default'], DEFAULT_ACL) @unittest.skipIf(not user_exists('übel'), 'requires übel user') @unittest.skipIf(not are_acls_working(), 'ACLs do not work') def test_non_ascii_acl(self): # Testing non-ascii ACL processing to see whether our code is robust. # I have no idea whether non-ascii ACLs are allowed by the standard, # but in practice they seem to be out there and must not make our code explode. file = tempfile.NamedTemporaryFile() self.assert_equal(self.get_acl(file.name), {}) nothing_special = 'user::rw-\ngroup::r--\nmask::rw-\nother::---\n'.encode('ascii') # TODO: can this be tested without having an existing system user übel with uid 666 gid 666? 
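        # textual ACL entries as used here have the form kind:qualifier:permissions:numeric_id, e.g. user:übel:rw-:666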
user_entry = 'user:übel:rw-:666'.encode('utf-8') user_entry_numeric = 'user:666:rw-:666'.encode('ascii') group_entry = 'group:übel:rw-:666'.encode('utf-8') group_entry_numeric = 'group:666:rw-:666'.encode('ascii') acl = b'\n'.join([nothing_special, user_entry, group_entry]) self.set_acl(file.name, access=acl, numeric_owner=False) acl_access = self.get_acl(file.name, numeric_owner=False)['acl_access'] self.assert_in(user_entry, acl_access) self.assert_in(group_entry, acl_access) acl_access_numeric = self.get_acl(file.name, numeric_owner=True)['acl_access'] self.assert_in(user_entry_numeric, acl_access_numeric) self.assert_in(group_entry_numeric, acl_access_numeric) file2 = tempfile.NamedTemporaryFile() self.set_acl(file2.name, access=acl, numeric_owner=True) acl_access = self.get_acl(file2.name, numeric_owner=False)['acl_access'] self.assert_in(user_entry, acl_access) self.assert_in(group_entry, acl_access) acl_access_numeric = self.get_acl(file.name, numeric_owner=True)['acl_access'] self.assert_in(user_entry_numeric, acl_access_numeric) self.assert_in(group_entry_numeric, acl_access_numeric) def test_utils(self): from ..platform.linux import acl_use_local_uid_gid self.assert_equal(acl_use_local_uid_gid(b'user:nonexistent1234:rw-:1234'), b'user:1234:rw-') self.assert_equal(acl_use_local_uid_gid(b'group:nonexistent1234:rw-:1234'), b'group:1234:rw-') self.assert_equal(acl_use_local_uid_gid(b'user:root:rw-:0'), b'user:0:rw-') self.assert_equal(acl_use_local_uid_gid(b'group:root:rw-:0'), b'group:0:rw-') @unittest.skipUnless(sys.platform.startswith('darwin'), 'OS X only test') @unittest.skipIf(fakeroot_detected(), 'not compatible with fakeroot') class PlatformDarwinTestCase(BaseTestCase): def setUp(self): self.tmpdir = tempfile.mkdtemp() def tearDown(self): shutil.rmtree(self.tmpdir) def get_acl(self, path, numeric_owner=False): item = {} acl_get(path, item, os.stat(path), numeric_owner=numeric_owner) return item def set_acl(self, path, acl, numeric_owner=False): item = {'acl_extended': acl} acl_set(path, item, numeric_owner=numeric_owner) @unittest.skipIf(not are_acls_working(), 'ACLs do not work') def test_access_acl(self): file = tempfile.NamedTemporaryFile() file2 = tempfile.NamedTemporaryFile() self.assert_equal(self.get_acl(file.name), {}) self.set_acl(file.name, b'!#acl 1\ngroup:ABCDEFAB-CDEF-ABCD-EFAB-CDEF00000000:staff:0:allow:read\nuser:FFFFEEEE-DDDD-CCCC-BBBB-AAAA00000000:root:0:allow:read\n', numeric_owner=False) self.assert_in(b'group:ABCDEFAB-CDEF-ABCD-EFAB-CDEF00000014:staff:20:allow:read', self.get_acl(file.name)['acl_extended']) self.assert_in(b'user:FFFFEEEE-DDDD-CCCC-BBBB-AAAA00000000:root:0:allow:read', self.get_acl(file.name)['acl_extended']) self.set_acl(file2.name, b'!#acl 1\ngroup:ABCDEFAB-CDEF-ABCD-EFAB-CDEF00000000:staff:0:allow:read\nuser:FFFFEEEE-DDDD-CCCC-BBBB-AAAA00000000:root:0:allow:read\n', numeric_owner=True) self.assert_in(b'group:ABCDEFAB-CDEF-ABCD-EFAB-CDEF00000000:wheel:0:allow:read', self.get_acl(file2.name)['acl_extended']) self.assert_in(b'group:ABCDEFAB-CDEF-ABCD-EFAB-CDEF00000000::0:allow:read', self.get_acl(file2.name, numeric_owner=True)['acl_extended']) @unittest.skipUnless(sys.platform.startswith(('linux', 'freebsd', 'darwin')), 'POSIX only tests') class PlatformPosixTestCase(BaseTestCase): def test_swidth_ascii(self): self.assert_equal(swidth("borg"), 4) def test_swidth_cjk(self): self.assert_equal(swidth("バックアップ"), 6 * 2) def test_swidth_mixed(self): self.assert_equal(swidth("borgバックアップ"), 4 + 6 * 2) def test_process_alive(free_pid): id = 
get_process_id() assert process_alive(*id) host, pid, tid = id assert process_alive(host + 'abc', pid, tid) assert process_alive(host, pid, tid + 1) assert not process_alive(host, free_pid, tid) def test_process_id(): hostname, pid, tid = get_process_id() assert isinstance(hostname, str) assert isinstance(pid, int) assert isinstance(tid, int) assert len(hostname) > 0 assert pid > 0 assert get_process_id() == (hostname, pid, tid) borgbackup-1.1.15/src/borg/testsuite/shellpattern.py0000644000175000017500000000614113771325506022537 0ustar useruser00000000000000import re import pytest from .. import shellpattern def check(path, pattern): compiled = re.compile(shellpattern.translate(pattern)) return bool(compiled.match(path)) @pytest.mark.parametrize("path, patterns", [ # Literal string ("foo/bar", ["foo/bar"]), ("foo\\bar", ["foo\\bar"]), # Non-ASCII ("foo/c/\u0152/e/bar", ["foo/*/\u0152/*/bar", "*/*/\u0152/*/*", "**/\u0152/*/*"]), ("\u00e4\u00f6\u00dc", ["???", "*", "\u00e4\u00f6\u00dc", "[\u00e4][\u00f6][\u00dc]"]), # Question mark ("foo", ["fo?"]), ("foo", ["f?o"]), ("foo", ["f??"]), ("foo", ["?oo"]), ("foo", ["?o?"]), ("foo", ["??o"]), ("foo", ["???"]), # Single asterisk ("", ["*"]), ("foo", ["*", "**", "***"]), ("foo", ["foo*"]), ("foobar", ["foo*"]), ("foobar", ["foo*bar"]), ("foobarbaz", ["foo*baz"]), ("bar", ["*bar"]), ("foobar", ["*bar"]), ("foo/bar", ["foo/*bar"]), ("foo/bar", ["foo/*ar"]), ("foo/bar", ["foo/*r"]), ("foo/bar", ["foo/*"]), ("foo/bar", ["foo*/bar"]), ("foo/bar", ["fo*/bar"]), ("foo/bar", ["f*/bar"]), ("foo/bar", ["*/bar"]), # Double asterisk (matches 0..n directory layers) ("foo/bar", ["foo/**/bar"]), ("foo/1/bar", ["foo/**/bar"]), ("foo/1/22/333/bar", ["foo/**/bar"]), ("foo/", ["foo/**/"]), ("foo/1/", ["foo/**/"]), ("foo/1/22/333/", ["foo/**/"]), ("bar", ["**/bar"]), ("1/bar", ["**/bar"]), ("1/22/333/bar", ["**/bar"]), ("foo/bar/baz", ["foo/**/*"]), # Set ("foo1", ["foo[12]"]), ("foo2", ["foo[12]"]), ("foo2/bar", ["foo[12]/*"]), ("f??f", ["f??f", "f[?][?]f"]), ("foo]", ["foo[]]"]), # Inverted set ("foo3", ["foo[!12]"]), ("foo^", ["foo[^!]"]), ("foo!", ["foo[^!]"]), ]) def test_match(path, patterns): for p in patterns: assert check(path, p) @pytest.mark.parametrize("path, patterns", [ ("", ["?", "[]"]), ("foo", ["foo?"]), ("foo", ["?foo"]), ("foo", ["f?oo"]), # do not match path separator ("foo/ar", ["foo?ar"]), # do not match/cross over os.path.sep ("foo/bar", ["*"]), ("foo/bar", ["foo*bar"]), ("foo/bar", ["foo*ar"]), ("foo/bar", ["fo*bar"]), ("foo/bar", ["fo*ar"]), # Double asterisk ("foobar", ["foo/**/bar"]), # Two asterisks without slash do not match directory separator ("foo/bar", ["**"]), # Double asterisk not matching filename ("foo/bar", ["**/"]), # Set ("foo3", ["foo[12]"]), # Inverted set ("foo1", ["foo[!12]"]), ("foo2", ["foo[!12]"]), ]) def test_mismatch(path, patterns): for p in patterns: assert not check(path, p) def test_match_end(): regex = shellpattern.translate("*-home") # default is match_end == string end assert re.match(regex, '2017-07-03-home') assert not re.match(regex, '2017-07-03-home.checkpoint') match_end = r'(%s)?\Z' % r'\.checkpoint(\.\d+)?' 
# with/without checkpoint ending regex = shellpattern.translate("*-home", match_end=match_end) assert re.match(regex, '2017-07-03-home') assert re.match(regex, '2017-07-03-home.checkpoint') borgbackup-1.1.15/src/borg/testsuite/locking.py0000644000175000017500000002271413771325506021464 0ustar useruser00000000000000import random import time import pytest from ..helpers import daemonize from ..platform import get_process_id, process_alive from ..locking import TimeoutTimer, ExclusiveLock, Lock, LockRoster, \ ADD, REMOVE, SHARED, EXCLUSIVE, LockTimeout, NotLocked, NotMyLock ID1 = "foo", 1, 1 ID2 = "bar", 2, 2 @pytest.fixture() def free_pid(): """Return a free PID not used by any process (naturally this is racy)""" host, pid, tid = get_process_id() while True: # PIDs are often restricted to a small range. On Linux the range >32k is by default not used. pid = random.randint(33000, 65000) if not process_alive(host, pid, tid): return pid class TestTimeoutTimer: def test_timeout(self): timeout = 0.5 t = TimeoutTimer(timeout).start() assert not t.timed_out() time.sleep(timeout * 1.5) assert t.timed_out() def test_notimeout_sleep(self): timeout, sleep = None, 0.5 t = TimeoutTimer(timeout, sleep).start() assert not t.timed_out_or_sleep() assert time.time() >= t.start_time + 1 * sleep assert not t.timed_out_or_sleep() assert time.time() >= t.start_time + 2 * sleep @pytest.fixture() def lockpath(tmpdir): return str(tmpdir.join('lock')) class TestExclusiveLock: def test_checks(self, lockpath): with ExclusiveLock(lockpath, timeout=1) as lock: assert lock.is_locked() and lock.by_me() def test_acquire_break_reacquire(self, lockpath): lock = ExclusiveLock(lockpath, id=ID1).acquire() lock.break_lock() with ExclusiveLock(lockpath, id=ID2): pass def test_timeout(self, lockpath): with ExclusiveLock(lockpath, id=ID1): with pytest.raises(LockTimeout): ExclusiveLock(lockpath, id=ID2, timeout=0.1).acquire() def test_kill_stale(self, lockpath, free_pid): host, pid, tid = our_id = get_process_id() dead_id = host, free_pid, tid cant_know_if_dead_id = 'foo.bar.example.net', 1, 2 dead_lock = ExclusiveLock(lockpath, id=dead_id).acquire() with ExclusiveLock(lockpath, id=our_id, kill_stale_locks=True): with pytest.raises(NotMyLock): dead_lock.release() with pytest.raises(NotLocked): dead_lock.release() with ExclusiveLock(lockpath, id=cant_know_if_dead_id): with pytest.raises(LockTimeout): ExclusiveLock(lockpath, id=our_id, kill_stale_locks=True, timeout=0.1).acquire() def test_migrate_lock(self, lockpath): old_id, new_id = ID1, ID2 assert old_id[1] != new_id[1] # different PIDs (like when doing daemonize()) lock = ExclusiveLock(lockpath, id=old_id).acquire() assert lock.id == old_id # lock is for old id / PID old_unique_name = lock.unique_name assert lock.by_me() # we have the lock lock.migrate_lock(old_id, new_id) # fix the lock assert lock.id == new_id # lock corresponds to the new id / PID new_unique_name = lock.unique_name assert lock.by_me() # we still have the lock assert old_unique_name != new_unique_name # locking filename is different now class TestLock: def test_shared(self, lockpath): lock1 = Lock(lockpath, exclusive=False, id=ID1).acquire() lock2 = Lock(lockpath, exclusive=False, id=ID2).acquire() assert len(lock1._roster.get(SHARED)) == 2 assert len(lock1._roster.get(EXCLUSIVE)) == 0 assert not lock1._roster.empty(SHARED, EXCLUSIVE) assert lock1._roster.empty(EXCLUSIVE) lock1.release() lock2.release() def test_exclusive(self, lockpath): with Lock(lockpath, exclusive=True, id=ID1) as lock: assert 
len(lock._roster.get(SHARED)) == 0 assert len(lock._roster.get(EXCLUSIVE)) == 1 assert not lock._roster.empty(SHARED, EXCLUSIVE) def test_upgrade(self, lockpath): with Lock(lockpath, exclusive=False) as lock: lock.upgrade() lock.upgrade() # NOP assert len(lock._roster.get(SHARED)) == 0 assert len(lock._roster.get(EXCLUSIVE)) == 1 assert not lock._roster.empty(SHARED, EXCLUSIVE) def test_downgrade(self, lockpath): with Lock(lockpath, exclusive=True) as lock: lock.downgrade() lock.downgrade() # NOP assert len(lock._roster.get(SHARED)) == 1 assert len(lock._roster.get(EXCLUSIVE)) == 0 def test_got_exclusive_lock(self, lockpath): lock = Lock(lockpath, exclusive=True, id=ID1) assert not lock.got_exclusive_lock() lock.acquire() assert lock.got_exclusive_lock() lock.release() assert not lock.got_exclusive_lock() def test_break(self, lockpath): lock = Lock(lockpath, exclusive=True, id=ID1).acquire() lock.break_lock() assert len(lock._roster.get(SHARED)) == 0 assert len(lock._roster.get(EXCLUSIVE)) == 0 with Lock(lockpath, exclusive=True, id=ID2): pass def test_timeout(self, lockpath): with Lock(lockpath, exclusive=False, id=ID1): with pytest.raises(LockTimeout): Lock(lockpath, exclusive=True, id=ID2, timeout=0.1).acquire() with Lock(lockpath, exclusive=True, id=ID1): with pytest.raises(LockTimeout): Lock(lockpath, exclusive=False, id=ID2, timeout=0.1).acquire() with Lock(lockpath, exclusive=True, id=ID1): with pytest.raises(LockTimeout): Lock(lockpath, exclusive=True, id=ID2, timeout=0.1).acquire() def test_kill_stale(self, lockpath, free_pid): host, pid, tid = our_id = get_process_id() dead_id = host, free_pid, tid cant_know_if_dead_id = 'foo.bar.example.net', 1, 2 dead_lock = Lock(lockpath, id=dead_id, exclusive=True).acquire() roster = dead_lock._roster with Lock(lockpath, id=our_id, kill_stale_locks=True): assert roster.get(EXCLUSIVE) == set() assert roster.get(SHARED) == {our_id} assert roster.get(EXCLUSIVE) == set() assert roster.get(SHARED) == set() with pytest.raises(KeyError): dead_lock.release() with Lock(lockpath, id=cant_know_if_dead_id, exclusive=True): with pytest.raises(LockTimeout): Lock(lockpath, id=our_id, kill_stale_locks=True, timeout=0.1).acquire() def test_migrate_lock(self, lockpath): old_id, new_id = ID1, ID2 assert old_id[1] != new_id[1] # different PIDs (like when doing daemonize()) lock = Lock(lockpath, id=old_id, exclusive=True).acquire() assert lock.id == old_id lock.migrate_lock(old_id, new_id) # fix the lock assert lock.id == new_id lock.release() lock = Lock(lockpath, id=old_id, exclusive=False).acquire() assert lock.id == old_id lock.migrate_lock(old_id, new_id) # fix the lock assert lock.id == new_id lock.release() @pytest.fixture() def rosterpath(tmpdir): return str(tmpdir.join('roster')) class TestLockRoster: def test_empty(self, rosterpath): roster = LockRoster(rosterpath) empty = roster.load() roster.save(empty) assert empty == {} def test_modify_get(self, rosterpath): roster1 = LockRoster(rosterpath, id=ID1) assert roster1.get(SHARED) == set() roster1.modify(SHARED, ADD) assert roster1.get(SHARED) == {ID1, } roster2 = LockRoster(rosterpath, id=ID2) roster2.modify(SHARED, ADD) assert roster2.get(SHARED) == {ID1, ID2, } roster1 = LockRoster(rosterpath, id=ID1) roster1.modify(SHARED, REMOVE) assert roster1.get(SHARED) == {ID2, } roster2 = LockRoster(rosterpath, id=ID2) roster2.modify(SHARED, REMOVE) assert roster2.get(SHARED) == set() def test_kill_stale(self, rosterpath, free_pid): host, pid, tid = our_id = get_process_id() dead_id = host, free_pid, tid roster1 
= LockRoster(rosterpath, id=dead_id) assert roster1.get(SHARED) == set() roster1.modify(SHARED, ADD) assert roster1.get(SHARED) == {dead_id} cant_know_if_dead_id = 'foo.bar.example.net', 1, 2 roster1 = LockRoster(rosterpath, id=cant_know_if_dead_id) assert roster1.get(SHARED) == {dead_id} roster1.modify(SHARED, ADD) assert roster1.get(SHARED) == {dead_id, cant_know_if_dead_id} killer_roster = LockRoster(rosterpath, kill_stale_locks=True) # Did kill the dead processes lock (which was alive ... I guess?!) assert killer_roster.get(SHARED) == {cant_know_if_dead_id} killer_roster.modify(SHARED, ADD) assert killer_roster.get(SHARED) == {our_id, cant_know_if_dead_id} other_killer_roster = LockRoster(rosterpath, kill_stale_locks=True) # Did not kill us, since we're alive assert other_killer_roster.get(SHARED) == {our_id, cant_know_if_dead_id} def test_migrate_lock(self, rosterpath): old_id, new_id = ID1, ID2 assert old_id[1] != new_id[1] # different PIDs (like when doing daemonize()) roster = LockRoster(rosterpath, id=old_id) assert roster.id == old_id roster.modify(SHARED, ADD) assert roster.get(SHARED) == {old_id} roster.migrate_lock(SHARED, old_id, new_id) # fix the lock assert roster.id == new_id assert roster.get(SHARED) == {new_id} borgbackup-1.1.15/src/borg/testsuite/file_integrity.py0000644000175000017500000001423013771325506023045 0ustar useruser00000000000000 import pytest from ..crypto.file_integrity import IntegrityCheckedFile, DetachedIntegrityCheckedFile, FileIntegrityError class TestReadIntegrityFile: def test_no_integrity(self, tmpdir): protected_file = tmpdir.join('file') protected_file.write('1234') assert DetachedIntegrityCheckedFile.read_integrity_file(str(protected_file)) is None def test_truncated_integrity(self, tmpdir): protected_file = tmpdir.join('file') protected_file.write('1234') tmpdir.join('file.integrity').write('') with pytest.raises(FileIntegrityError): DetachedIntegrityCheckedFile.read_integrity_file(str(protected_file)) def test_unknown_algorithm(self, tmpdir): protected_file = tmpdir.join('file') protected_file.write('1234') tmpdir.join('file.integrity').write('{"algorithm": "HMAC_SERIOUSHASH", "digests": "1234"}') assert DetachedIntegrityCheckedFile.read_integrity_file(str(protected_file)) is None @pytest.mark.parametrize('json', ( '{"ALGORITHM": "HMAC_SERIOUSHASH", "digests": "1234"}', '[]', '1234.5', '"A string"', 'Invalid JSON', )) def test_malformed(self, tmpdir, json): protected_file = tmpdir.join('file') protected_file.write('1234') tmpdir.join('file.integrity').write(json) with pytest.raises(FileIntegrityError): DetachedIntegrityCheckedFile.read_integrity_file(str(protected_file)) class TestDetachedIntegrityCheckedFile: @pytest.fixture def integrity_protected_file(self, tmpdir): path = str(tmpdir.join('file')) with DetachedIntegrityCheckedFile(path, write=True) as fd: fd.write(b'foo and bar') return path def test_simple(self, tmpdir, integrity_protected_file): assert tmpdir.join('file').check(file=True) assert tmpdir.join('file.integrity').check(file=True) with DetachedIntegrityCheckedFile(integrity_protected_file, write=False) as fd: assert fd.read() == b'foo and bar' def test_corrupted_file(self, integrity_protected_file): with open(integrity_protected_file, 'ab') as fd: fd.write(b' extra data') with pytest.raises(FileIntegrityError): with DetachedIntegrityCheckedFile(integrity_protected_file, write=False) as fd: assert fd.read() == b'foo and bar extra data' def test_corrupted_file_partial_read(self, integrity_protected_file): with 
open(integrity_protected_file, 'ab') as fd: fd.write(b' extra data') with pytest.raises(FileIntegrityError): with DetachedIntegrityCheckedFile(integrity_protected_file, write=False) as fd: data = b'foo and bar' assert fd.read(len(data)) == data @pytest.mark.parametrize('new_name', ( 'different_file', 'different_file.different_ext', )) def test_renamed_file(self, tmpdir, integrity_protected_file, new_name): new_path = tmpdir.join(new_name) tmpdir.join('file').move(new_path) tmpdir.join('file.integrity').move(new_path + '.integrity') with pytest.raises(FileIntegrityError): with DetachedIntegrityCheckedFile(str(new_path), write=False) as fd: assert fd.read() == b'foo and bar' def test_moved_file(self, tmpdir, integrity_protected_file): new_dir = tmpdir.mkdir('another_directory') tmpdir.join('file').move(new_dir.join('file')) tmpdir.join('file.integrity').move(new_dir.join('file.integrity')) new_path = str(new_dir.join('file')) with DetachedIntegrityCheckedFile(new_path, write=False) as fd: assert fd.read() == b'foo and bar' def test_no_integrity(self, tmpdir, integrity_protected_file): tmpdir.join('file.integrity').remove() with DetachedIntegrityCheckedFile(integrity_protected_file, write=False) as fd: assert fd.read() == b'foo and bar' class TestDetachedIntegrityCheckedFileParts: @pytest.fixture def integrity_protected_file(self, tmpdir): path = str(tmpdir.join('file')) with DetachedIntegrityCheckedFile(path, write=True) as fd: fd.write(b'foo and bar') fd.hash_part('foopart') fd.write(b' other data') return path def test_simple(self, integrity_protected_file): with DetachedIntegrityCheckedFile(integrity_protected_file, write=False) as fd: data1 = b'foo and bar' assert fd.read(len(data1)) == data1 fd.hash_part('foopart') assert fd.read() == b' other data' def test_wrong_part_name(self, integrity_protected_file): with pytest.raises(FileIntegrityError): # Because some hash_part failed, the final digest will fail as well - again - even if we catch # the failing hash_part. This is intentional: (1) it makes the code simpler (2) it's a good fail-safe # against overly broad exception handling. with DetachedIntegrityCheckedFile(integrity_protected_file, write=False) as fd: data1 = b'foo and bar' assert fd.read(len(data1)) == data1 with pytest.raises(FileIntegrityError): # This specific bit raises it directly fd.hash_part('barpart') # Still explodes in the end. @pytest.mark.parametrize('partial_read', (False, True)) def test_part_independence(self, integrity_protected_file, partial_read): with open(integrity_protected_file, 'ab') as fd: fd.write(b'some extra stuff that does not belong') with pytest.raises(FileIntegrityError): with DetachedIntegrityCheckedFile(integrity_protected_file, write=False) as fd: data1 = b'foo and bar' try: assert fd.read(len(data1)) == data1 fd.hash_part('foopart') except FileIntegrityError: assert False, 'This part must not raise, since this part is still valid.' if not partial_read: fd.read() # But overall it explodes with the final digest. Neat, eh? 
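# Minimal usage sketch (not a test - the leading underscore keeps pytest from collecting it) of the
# DetachedIntegrityCheckedFile API exercised by the tests above. The path argument, part name and
# payloads are made up for illustration.
def _integrity_roundtrip_sketch(path):
    header, payload = b'header', b'payload'
    with DetachedIntegrityCheckedFile(path, write=True) as fd:
        fd.write(header)
        fd.hash_part('headerpart')   # record a digest covering everything written so far
        fd.write(payload)            # the remainder is covered by the final digest
    with DetachedIntegrityCheckedFile(path, write=False) as fd:
        assert fd.read(len(header)) == header
        fd.hash_part('headerpart')   # must match the digest recorded at write time
        assert fd.read() == payload  # a mismatch raises FileIntegrityError (here or when the context exits)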
borgbackup-1.1.15/src/borg/testsuite/lrucache.py0000644000175000017500000000263313771325506021622 0ustar useruser00000000000000from tempfile import TemporaryFile import pytest from ..lrucache import LRUCache class TestLRUCache: def test_lrucache(self): c = LRUCache(2, dispose=lambda _: None) assert len(c) == 0 assert c.items() == set() for i, x in enumerate('abc'): c[x] = i assert len(c) == 2 assert c.items() == set([('b', 1), ('c', 2)]) assert 'a' not in c assert 'b' in c with pytest.raises(KeyError): c['a'] assert c.get('a') is None assert c.get('a', 'foo') == 'foo' assert c['b'] == 1 assert c.get('b') == 1 assert c['c'] == 2 c['d'] = 3 assert len(c) == 2 assert c['c'] == 2 assert c['d'] == 3 del c['c'] assert len(c) == 1 with pytest.raises(KeyError): c['c'] assert c['d'] == 3 c.clear() assert c.items() == set() def test_dispose(self): c = LRUCache(2, dispose=lambda f: f.close()) f1 = TemporaryFile() f2 = TemporaryFile() f3 = TemporaryFile() c[1] = f1 c[2] = f2 assert not f2.closed c[3] = f3 assert 1 not in c assert f1.closed assert 2 in c assert not f2.closed del c[2] assert 2 not in c assert f2.closed c.clear() assert c.items() == set() assert f3.closed borgbackup-1.1.15/src/borg/testsuite/repository.py0000644000175000017500000012117013771325506022251 0ustar useruser00000000000000import io import logging import os import shutil import sys import tempfile from unittest.mock import patch import pytest from ..hashindex import NSIndex from ..helpers import Location from ..helpers import IntegrityError from ..helpers import msgpack from ..locking import Lock, LockFailed from ..remote import RemoteRepository, InvalidRPCMethod, PathNotAllowed, ConnectionClosedWithHint, handle_remote_line from ..repository import Repository, LoggedIO, MAGIC, MAX_DATA_SIZE, TAG_DELETE from . 
import BaseTestCase from .hashindex import H UNSPECIFIED = object() # for default values where we can't use None class RepositoryTestCaseBase(BaseTestCase): key_size = 32 exclusive = True def open(self, create=False, exclusive=UNSPECIFIED): if exclusive is UNSPECIFIED: exclusive = self.exclusive return Repository(os.path.join(self.tmppath, 'repository'), exclusive=exclusive, create=create) def setUp(self): self.tmppath = tempfile.mkdtemp() self.repository = self.open(create=True) self.repository.__enter__() def tearDown(self): self.repository.close() shutil.rmtree(self.tmppath) def reopen(self, exclusive=UNSPECIFIED): if self.repository: self.repository.close() self.repository = self.open(exclusive=exclusive) def add_keys(self): self.repository.put(H(0), b'foo') self.repository.put(H(1), b'bar') self.repository.put(H(3), b'bar') self.repository.commit() self.repository.put(H(1), b'bar2') self.repository.put(H(2), b'boo') self.repository.delete(H(3)) class RepositoryTestCase(RepositoryTestCaseBase): def test1(self): for x in range(100): self.repository.put(H(x), b'SOMEDATA') key50 = H(50) self.assert_equal(self.repository.get(key50), b'SOMEDATA') self.repository.delete(key50) self.assert_raises(Repository.ObjectNotFound, lambda: self.repository.get(key50)) self.repository.commit() self.repository.close() with self.open() as repository2: self.assert_raises(Repository.ObjectNotFound, lambda: repository2.get(key50)) for x in range(100): if x == 50: continue self.assert_equal(repository2.get(H(x)), b'SOMEDATA') def test2(self): """Test multiple sequential transactions """ self.repository.put(H(0), b'foo') self.repository.put(H(1), b'foo') self.repository.commit() self.repository.delete(H(0)) self.repository.put(H(1), b'bar') self.repository.commit() self.assert_equal(self.repository.get(H(1)), b'bar') def test_consistency(self): """Test cache consistency """ self.repository.put(H(0), b'foo') self.assert_equal(self.repository.get(H(0)), b'foo') self.repository.put(H(0), b'foo2') self.assert_equal(self.repository.get(H(0)), b'foo2') self.repository.put(H(0), b'bar') self.assert_equal(self.repository.get(H(0)), b'bar') self.repository.delete(H(0)) self.assert_raises(Repository.ObjectNotFound, lambda: self.repository.get(H(0))) def test_consistency2(self): """Test cache consistency2 """ self.repository.put(H(0), b'foo') self.assert_equal(self.repository.get(H(0)), b'foo') self.repository.commit() self.repository.put(H(0), b'foo2') self.assert_equal(self.repository.get(H(0)), b'foo2') self.repository.rollback() self.assert_equal(self.repository.get(H(0)), b'foo') def test_overwrite_in_same_transaction(self): """Test cache consistency2 """ self.repository.put(H(0), b'foo') self.repository.put(H(0), b'foo2') self.repository.commit() self.assert_equal(self.repository.get(H(0)), b'foo2') def test_single_kind_transactions(self): # put self.repository.put(H(0), b'foo') self.repository.commit() self.repository.close() # replace self.repository = self.open() with self.repository: self.repository.put(H(0), b'bar') self.repository.commit() # delete self.repository = self.open() with self.repository: self.repository.delete(H(0)) self.repository.commit() def test_list(self): for x in range(100): self.repository.put(H(x), b'SOMEDATA') self.repository.commit() all = self.repository.list() self.assert_equal(len(all), 100) first_half = self.repository.list(limit=50) self.assert_equal(len(first_half), 50) self.assert_equal(first_half, all[:50]) second_half = self.repository.list(marker=first_half[-1]) 
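        # paging via 'marker': the listing continues right after the given key, so both halves together cover all 100 objects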
self.assert_equal(len(second_half), 50) self.assert_equal(second_half, all[50:]) self.assert_equal(len(self.repository.list(limit=50)), 50) def test_scan(self): for x in range(100): self.repository.put(H(x), b'SOMEDATA') self.repository.commit() all = self.repository.scan() assert len(all) == 100 first_half = self.repository.scan(limit=50) assert len(first_half) == 50 assert first_half == all[:50] second_half = self.repository.scan(marker=first_half[-1]) assert len(second_half) == 50 assert second_half == all[50:] assert len(self.repository.scan(limit=50)) == 50 # check result order == on-disk order (which is hash order) for x in range(100): assert all[x] == H(x) def test_max_data_size(self): max_data = b'x' * MAX_DATA_SIZE self.repository.put(H(0), max_data) self.assert_equal(self.repository.get(H(0)), max_data) self.assert_raises(IntegrityError, lambda: self.repository.put(H(1), max_data + b'x')) class LocalRepositoryTestCase(RepositoryTestCaseBase): # test case that doesn't work with remote repositories def _assert_sparse(self): # The superseded 123456... PUT assert self.repository.compact[0] == 41 + 9 # The DELETE issued by the superseding PUT (or issued directly) assert self.repository.compact[2] == 41 self.repository._rebuild_sparse(0) assert self.repository.compact[0] == 41 + 9 def test_sparse1(self): self.repository.put(H(0), b'foo') self.repository.put(H(1), b'123456789') self.repository.commit() self.repository.put(H(1), b'bar') self._assert_sparse() def test_sparse2(self): self.repository.put(H(0), b'foo') self.repository.put(H(1), b'123456789') self.repository.commit() self.repository.delete(H(1)) self._assert_sparse() def test_sparse_delete(self): self.repository.put(H(0), b'1245') self.repository.delete(H(0)) self.repository.io._write_fd.sync() # The on-line tracking works on a per-object basis... assert self.repository.compact[0] == 41 + 41 + 4 self.repository._rebuild_sparse(0) # ...while _rebuild_sparse can mark whole segments as completely sparse (which then includes the segment magic) assert self.repository.compact[0] == 41 + 41 + 4 + len(MAGIC) self.repository.commit() assert 0 not in [segment for segment, _ in self.repository.io.segment_iterator()] def test_uncommitted_garbage(self): # uncommitted garbage should be no problem, it is cleaned up automatically. # we just have to be careful with invalidation of cached FDs in LoggedIO. self.repository.put(H(0), b'foo') self.repository.commit() # write some crap to a uncommitted segment file last_segment = self.repository.io.get_latest_segment() with open(self.repository.io.segment_filename(last_segment + 1), 'wb') as f: f.write(MAGIC + b'crapcrapcrap') self.repository.close() # usually, opening the repo and starting a transaction should trigger a cleanup. self.repository = self.open() with self.repository: self.repository.put(H(0), b'bar') # this may trigger compact_segments() self.repository.commit() # the point here is that nothing blows up with an exception. 
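# Minimal usage sketch (not a test - pytest does not collect underscore-prefixed functions) of the
# local Repository transaction API exercised above; the directory name and payload are made up
# for illustration.
def _repository_roundtrip_sketch(base_path):
    with Repository(os.path.join(base_path, 'sketch-repo'), exclusive=True, create=True) as repository:
        repository.put(H(0), b'payload')            # stage a PUT in the current transaction
        repository.commit()                         # make it durable
        assert repository.get(H(0)) == b'payload'
        repository.delete(H(0))                     # stage a DELETE ...
        repository.commit()                         # ... and commit it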
class RepositoryCommitTestCase(RepositoryTestCaseBase): def test_replay_of_missing_index(self): self.add_keys() for name in os.listdir(self.repository.path): if name.startswith('index.'): os.unlink(os.path.join(self.repository.path, name)) self.reopen() with self.repository: self.assert_equal(len(self.repository), 3) self.assert_equal(self.repository.check(), True) def test_crash_before_compact_segments(self): self.add_keys() self.repository.compact_segments = None try: self.repository.commit() except TypeError: pass self.reopen() with self.repository: self.assert_equal(len(self.repository), 3) self.assert_equal(self.repository.check(), True) def test_crash_before_write_index(self): self.add_keys() self.repository.write_index = None try: self.repository.commit() except TypeError: pass self.reopen() with self.repository: self.assert_equal(len(self.repository), 3) self.assert_equal(self.repository.check(), True) def test_replay_lock_upgrade_old(self): self.add_keys() for name in os.listdir(self.repository.path): if name.startswith('index.'): os.unlink(os.path.join(self.repository.path, name)) with patch.object(Lock, 'upgrade', side_effect=LockFailed) as upgrade: self.reopen(exclusive=None) # simulate old client that always does lock upgrades with self.repository: # the repo is only locked by a shared read lock, but to replay segments, # we need an exclusive write lock - check if the lock gets upgraded. self.assert_raises(LockFailed, lambda: len(self.repository)) upgrade.assert_called_once_with() def test_replay_lock_upgrade(self): self.add_keys() for name in os.listdir(self.repository.path): if name.startswith('index.'): os.unlink(os.path.join(self.repository.path, name)) with patch.object(Lock, 'upgrade', side_effect=LockFailed) as upgrade: self.reopen(exclusive=False) # current client usually does not do lock upgrade, except for replay with self.repository: # the repo is only locked by a shared read lock, but to replay segments, # we need an exclusive write lock - check if the lock gets upgraded. 
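                # LockFailed is expected here because Lock.upgrade() is patched to fail - proving that the replay really tried to upgrade the shared lock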
self.assert_raises(LockFailed, lambda: len(self.repository)) upgrade.assert_called_once_with() def test_crash_before_deleting_compacted_segments(self): self.add_keys() self.repository.io.delete_segment = None try: self.repository.commit() except TypeError: pass self.reopen() with self.repository: self.assert_equal(len(self.repository), 3) self.assert_equal(self.repository.check(), True) self.assert_equal(len(self.repository), 3) def test_ignores_commit_tag_in_data(self): self.repository.put(H(0), LoggedIO.COMMIT) self.reopen() with self.repository: io = self.repository.io assert not io.is_committed_segment(io.get_latest_segment()) def test_moved_deletes_are_tracked(self): self.repository.put(H(1), b'1') self.repository.put(H(2), b'2') self.repository.commit() self.repository.delete(H(1)) self.repository.commit() last_segment = self.repository.io.get_latest_segment() - 1 num_deletes = 0 for tag, key, offset, size in self.repository.io.iter_objects(last_segment): if tag == TAG_DELETE: assert key == H(1) num_deletes += 1 assert num_deletes == 1 assert last_segment in self.repository.compact self.repository.put(H(3), b'3') self.repository.commit() assert last_segment not in self.repository.compact assert not self.repository.io.segment_exists(last_segment) for segment, _ in self.repository.io.segment_iterator(): for tag, key, offset, size in self.repository.io.iter_objects(segment): assert tag != TAG_DELETE def test_shadowed_entries_are_preserved(self): get_latest_segment = self.repository.io.get_latest_segment self.repository.put(H(1), b'1') # This is the segment with our original PUT of interest put_segment = get_latest_segment() self.repository.commit() # We now delete H(1), and force this segment to not be compacted, which can happen # if it's not sparse enough (symbolized by H(2) here). self.repository.delete(H(1)) self.repository.put(H(2), b'1') delete_segment = get_latest_segment() # We pretend these are mostly dense (not sparse) and won't be compacted del self.repository.compact[put_segment] del self.repository.compact[delete_segment] self.repository.commit() # Now we perform an unrelated operation on the segment containing the DELETE, # causing it to be compacted. 
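        # the compaction must carry the DELETE forward, otherwise a later index rebuild/replay would resurrect H(1)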
self.repository.delete(H(2)) self.repository.commit() assert self.repository.io.segment_exists(put_segment) assert not self.repository.io.segment_exists(delete_segment) # Basic case, since the index survived this must be ok assert H(1) not in self.repository # Nuke index, force replay os.unlink(os.path.join(self.repository.path, 'index.%d' % get_latest_segment())) # Must not reappear assert H(1) not in self.repository def test_shadow_index_rollback(self): self.repository.put(H(1), b'1') self.repository.delete(H(1)) assert self.repository.shadow_index[H(1)] == [0] self.repository.commit() # note how an empty list means that nothing is shadowed for sure assert self.repository.shadow_index[H(1)] == [] self.repository.put(H(1), b'1') self.repository.delete(H(1)) # 0 put/delete; 1 commit; 2 compacted; 3 commit; 4 put/delete assert self.repository.shadow_index[H(1)] == [4] self.repository.rollback() self.repository.put(H(2), b'1') # After the rollback segment 4 shouldn't be considered anymore assert self.repository.shadow_index[H(1)] == [] class RepositoryAppendOnlyTestCase(RepositoryTestCaseBase): def open(self, create=False): return Repository(os.path.join(self.tmppath, 'repository'), exclusive=True, create=create, append_only=True) def test_destroy_append_only(self): # Can't destroy append only repo (via the API) with self.assert_raises(ValueError): self.repository.destroy() assert self.repository.append_only def test_append_only(self): def segments_in_repository(): return len(list(self.repository.io.segment_iterator())) self.repository.put(H(0), b'foo') self.repository.commit() self.repository.append_only = False assert segments_in_repository() == 2 self.repository.put(H(0), b'foo') self.repository.commit() # normal: compact squashes the data together, only one segment assert segments_in_repository() == 4 self.repository.append_only = True assert segments_in_repository() == 4 self.repository.put(H(0), b'foo') self.repository.commit() # append only: does not compact, only new segments written assert segments_in_repository() == 6 class RepositoryFreeSpaceTestCase(RepositoryTestCaseBase): def test_additional_free_space(self): self.add_keys() self.repository.config.set('repository', 'additional_free_space', '1000T') self.repository.save_key(b'shortcut to save_config') self.reopen() with self.repository: self.repository.put(H(0), b'foobar') with pytest.raises(Repository.InsufficientFreeSpaceError): self.repository.commit() assert os.path.exists(self.repository.path) def test_create_free_space(self): self.repository.additional_free_space = 1e20 with pytest.raises(Repository.InsufficientFreeSpaceError): self.add_keys() assert not os.path.exists(self.repository.path) class QuotaTestCase(RepositoryTestCaseBase): def test_tracking(self): assert self.repository.storage_quota_use == 0 self.repository.put(H(1), bytes(1234)) assert self.repository.storage_quota_use == 1234 + 41 self.repository.put(H(2), bytes(5678)) assert self.repository.storage_quota_use == 1234 + 5678 + 2 * 41 self.repository.delete(H(1)) assert self.repository.storage_quota_use == 5678 + 41 self.repository.commit() self.reopen() with self.repository: # Open new transaction; hints and thus quota data is not loaded unless needed. 
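            # the put/delete pair of an empty object is quota-neutral (+41/-41 bytes) but forces the persisted hints/quota data to be loaded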
self.repository.put(H(3), b'') self.repository.delete(H(3)) assert self.repository.storage_quota_use == 5678 + 41 def test_exceed_quota(self): assert self.repository.storage_quota_use == 0 self.repository.storage_quota = 50 self.repository.put(H(1), b'') assert self.repository.storage_quota_use == 41 self.repository.commit() with pytest.raises(Repository.StorageQuotaExceeded): self.repository.put(H(2), b'') assert self.repository.storage_quota_use == 82 with pytest.raises(Repository.StorageQuotaExceeded): self.repository.commit() assert self.repository.storage_quota_use == 82 self.reopen() with self.repository: self.repository.storage_quota = 50 # Open new transaction; hints and thus quota data is not loaded unless needed. self.repository.put(H(1), b'') assert self.repository.storage_quota_use == 41 class NonceReservation(RepositoryTestCaseBase): def test_get_free_nonce_asserts(self): self.reopen(exclusive=False) with pytest.raises(AssertionError): with self.repository: self.repository.get_free_nonce() def test_get_free_nonce(self): with self.repository: assert self.repository.get_free_nonce() is None with open(os.path.join(self.repository.path, "nonce"), "w") as fd: fd.write("0000000000000000") assert self.repository.get_free_nonce() == 0 with open(os.path.join(self.repository.path, "nonce"), "w") as fd: fd.write("5000000000000000") assert self.repository.get_free_nonce() == 0x5000000000000000 def test_commit_nonce_reservation_asserts(self): self.reopen(exclusive=False) with pytest.raises(AssertionError): with self.repository: self.repository.commit_nonce_reservation(0x200, 0x100) def test_commit_nonce_reservation(self): with self.repository: with pytest.raises(Exception): self.repository.commit_nonce_reservation(0x200, 15) self.repository.commit_nonce_reservation(0x200, None) with open(os.path.join(self.repository.path, "nonce"), "r") as fd: assert fd.read() == "0000000000000200" with pytest.raises(Exception): self.repository.commit_nonce_reservation(0x200, 15) self.repository.commit_nonce_reservation(0x400, 0x200) with open(os.path.join(self.repository.path, "nonce"), "r") as fd: assert fd.read() == "0000000000000400" class RepositoryAuxiliaryCorruptionTestCase(RepositoryTestCaseBase): def setUp(self): super().setUp() self.repository.put(H(0), b'foo') self.repository.commit() self.repository.close() def do_commit(self): with self.repository: self.repository.put(H(0), b'fox') self.repository.commit() def test_corrupted_hints(self): with open(os.path.join(self.repository.path, 'hints.1'), 'ab') as fd: fd.write(b'123456789') self.do_commit() def test_deleted_hints(self): os.unlink(os.path.join(self.repository.path, 'hints.1')) self.do_commit() def test_deleted_index(self): os.unlink(os.path.join(self.repository.path, 'index.1')) self.do_commit() def test_unreadable_hints(self): hints = os.path.join(self.repository.path, 'hints.1') os.unlink(hints) os.mkdir(hints) with self.assert_raises(OSError): self.do_commit() def test_index(self): with open(os.path.join(self.repository.path, 'index.1'), 'wb') as fd: fd.write(b'123456789') self.do_commit() def test_index_outside_transaction(self): with open(os.path.join(self.repository.path, 'index.1'), 'wb') as fd: fd.write(b'123456789') with self.repository: assert len(self.repository) == 1 def _corrupt_index(self): # HashIndex is able to detect incorrect headers and file lengths, # but on its own it can't tell if the data is correct. 
index_path = os.path.join(self.repository.path, 'index.1') with open(index_path, 'r+b') as fd: index_data = fd.read() # Flip one bit in a key stored in the index corrupted_key = (int.from_bytes(H(0), 'little') ^ 1).to_bytes(32, 'little') corrupted_index_data = index_data.replace(H(0), corrupted_key) assert corrupted_index_data != index_data assert len(corrupted_index_data) == len(index_data) fd.seek(0) fd.write(corrupted_index_data) def test_index_corrupted(self): # HashIndex is able to detect incorrect headers and file lengths, # but on its own it can't tell if the data itself is correct. self._corrupt_index() with self.repository: # Data corruption is detected due to mismatching checksums # and fixed by rebuilding the index. assert len(self.repository) == 1 assert self.repository.get(H(0)) == b'foo' def test_index_corrupted_without_integrity(self): self._corrupt_index() integrity_path = os.path.join(self.repository.path, 'integrity.1') os.unlink(integrity_path) with self.repository: # Since the corrupted key is not noticed, the repository still thinks # it contains one key... assert len(self.repository) == 1 with pytest.raises(Repository.ObjectNotFound): # ... but the real, uncorrupted key is not found in the corrupted index. self.repository.get(H(0)) def test_unreadable_index(self): index = os.path.join(self.repository.path, 'index.1') os.unlink(index) os.mkdir(index) with self.assert_raises(OSError): self.do_commit() def test_unknown_integrity_version(self): # For now an unknown integrity data version is ignored and not an error. integrity_path = os.path.join(self.repository.path, 'integrity.1') with open(integrity_path, 'r+b') as fd: msgpack.pack({ # Borg only understands version 2 b'version': 4.7, }, fd) fd.truncate() with self.repository: # No issues accessing the repository assert len(self.repository) == 1 assert self.repository.get(H(0)) == b'foo' def _subtly_corrupted_hints_setup(self): with self.repository: self.repository.append_only = True assert len(self.repository) == 1 assert self.repository.get(H(0)) == b'foo' self.repository.put(H(1), b'bar') self.repository.put(H(2), b'baz') self.repository.commit() self.repository.put(H(2), b'bazz') self.repository.commit() hints_path = os.path.join(self.repository.path, 'hints.5') with open(hints_path, 'r+b') as fd: hints = msgpack.unpack(fd) fd.seek(0) # Corrupt segment refcount assert hints[b'segments'][2] == 1 hints[b'segments'][2] = 0 msgpack.pack(hints, fd) fd.truncate() def test_subtly_corrupted_hints(self): self._subtly_corrupted_hints_setup() with self.repository: self.repository.append_only = False self.repository.put(H(3), b'1234') # Do a compaction run. Succeeds, since the failed checksum prompted a rebuild of the index+hints. self.repository.commit() assert len(self.repository) == 4 assert self.repository.get(H(0)) == b'foo' assert self.repository.get(H(1)) == b'bar' assert self.repository.get(H(2)) == b'bazz' def test_subtly_corrupted_hints_without_integrity(self): self._subtly_corrupted_hints_setup() integrity_path = os.path.join(self.repository.path, 'integrity.5') os.unlink(integrity_path) with self.repository: self.repository.append_only = False self.repository.put(H(3), b'1234') # Do a compaction run. Fails, since the corrupted refcount was not detected and leads to an assertion failure. 
with pytest.raises(AssertionError) as exc_info: self.repository.commit() assert 'Corrupted segment reference count' in str(exc_info.value) class RepositoryCheckTestCase(RepositoryTestCaseBase): def list_indices(self): return [name for name in os.listdir(os.path.join(self.tmppath, 'repository')) if name.startswith('index.')] def check(self, repair=False, status=True): self.assert_equal(self.repository.check(repair=repair), status) # Make sure no tmp files are left behind self.assert_equal([name for name in os.listdir(os.path.join(self.tmppath, 'repository')) if 'tmp' in name], [], 'Found tmp files') def get_objects(self, *ids): for id_ in ids: self.repository.get(H(id_)) def add_objects(self, segments): for ids in segments: for id_ in ids: self.repository.put(H(id_), b'data') self.repository.commit() def get_head(self): return sorted(int(n) for n in os.listdir(os.path.join(self.tmppath, 'repository', 'data', '0')) if n.isdigit())[-1] def open_index(self): return NSIndex.read(os.path.join(self.tmppath, 'repository', 'index.{}'.format(self.get_head()))) def corrupt_object(self, id_): idx = self.open_index() segment, offset = idx[H(id_)] with open(os.path.join(self.tmppath, 'repository', 'data', '0', str(segment)), 'r+b') as fd: fd.seek(offset) fd.write(b'BOOM') def delete_segment(self, segment): os.unlink(os.path.join(self.tmppath, 'repository', 'data', '0', str(segment))) def delete_index(self): os.unlink(os.path.join(self.tmppath, 'repository', 'index.{}'.format(self.get_head()))) def rename_index(self, new_name): os.rename(os.path.join(self.tmppath, 'repository', 'index.{}'.format(self.get_head())), os.path.join(self.tmppath, 'repository', new_name)) def list_objects(self): return set(int(key) for key in self.repository.list()) def test_repair_corrupted_segment(self): self.add_objects([[1, 2, 3], [4, 5], [6]]) self.assert_equal(set([1, 2, 3, 4, 5, 6]), self.list_objects()) self.check(status=True) self.corrupt_object(5) self.assert_raises(IntegrityError, lambda: self.get_objects(5)) self.repository.rollback() # Make sure a regular check does not repair anything self.check(status=False) self.check(status=False) # Make sure a repair actually repairs the repo self.check(repair=True, status=True) self.get_objects(4) self.check(status=True) self.assert_equal(set([1, 2, 3, 4, 6]), self.list_objects()) def test_repair_missing_segment(self): self.add_objects([[1, 2, 3], [4, 5, 6]]) self.assert_equal(set([1, 2, 3, 4, 5, 6]), self.list_objects()) self.check(status=True) self.delete_segment(2) self.repository.rollback() self.check(repair=True, status=True) self.assert_equal(set([1, 2, 3]), self.list_objects()) def test_repair_missing_commit_segment(self): self.add_objects([[1, 2, 3], [4, 5, 6]]) self.delete_segment(3) self.assert_raises(Repository.ObjectNotFound, lambda: self.get_objects(4)) self.assert_equal(set([1, 2, 3]), self.list_objects()) def test_repair_corrupted_commit_segment(self): self.add_objects([[1, 2, 3], [4, 5, 6]]) with open(os.path.join(self.tmppath, 'repository', 'data', '0', '3'), 'r+b') as fd: fd.seek(-1, os.SEEK_END) fd.write(b'X') self.assert_raises(Repository.ObjectNotFound, lambda: self.get_objects(4)) self.check(status=True) self.get_objects(3) self.assert_equal(set([1, 2, 3]), self.list_objects()) def test_repair_no_commits(self): self.add_objects([[1, 2, 3]]) with open(os.path.join(self.tmppath, 'repository', 'data', '0', '1'), 'r+b') as fd: fd.seek(-1, os.SEEK_END) fd.write(b'X') self.assert_raises(Repository.CheckNeeded, lambda: self.get_objects(4)) 
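        # without repair, check() keeps reporting the problem on every run; the repair below rebuilds the index and the repo is consistent again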
self.check(status=False) self.check(status=False) self.assert_equal(self.list_indices(), ['index.1']) self.check(repair=True, status=True) self.assert_equal(self.list_indices(), ['index.3']) self.check(status=True) self.get_objects(3) self.assert_equal(set([1, 2, 3]), self.list_objects()) def test_repair_missing_index(self): self.add_objects([[1, 2, 3], [4, 5, 6]]) self.delete_index() self.check(status=True) self.get_objects(4) self.assert_equal(set([1, 2, 3, 4, 5, 6]), self.list_objects()) def test_repair_index_too_new(self): self.add_objects([[1, 2, 3], [4, 5, 6]]) self.assert_equal(self.list_indices(), ['index.3']) self.rename_index('index.100') self.check(status=True) self.assert_equal(self.list_indices(), ['index.3']) self.get_objects(4) self.assert_equal(set([1, 2, 3, 4, 5, 6]), self.list_objects()) def test_crash_before_compact(self): self.repository.put(H(0), b'data') self.repository.put(H(0), b'data2') # Simulate a crash before compact with patch.object(Repository, 'compact_segments') as compact: self.repository.commit() compact.assert_called_once_with() self.reopen() with self.repository: self.check(repair=True) self.assert_equal(self.repository.get(H(0)), b'data2') class RemoteRepositoryTestCase(RepositoryTestCase): repository = None # type: RemoteRepository def open(self, create=False): return RemoteRepository(Location('__testsuite__:' + os.path.join(self.tmppath, 'repository')), exclusive=True, create=create) def _get_mock_args(self): class MockArgs: remote_path = 'borg' umask = 0o077 debug_topics = [] rsh = None def __contains__(self, item): # To behave like argparse.Namespace return hasattr(self, item) return MockArgs() def test_invalid_rpc(self): self.assert_raises(InvalidRPCMethod, lambda: self.repository.call('__init__', {})) def test_rpc_exception_transport(self): s1 = 'test string' try: self.repository.call('inject_exception', {'kind': 'DoesNotExist'}) except Repository.DoesNotExist as e: assert len(e.args) == 1 assert e.args[0] == self.repository.location.orig try: self.repository.call('inject_exception', {'kind': 'AlreadyExists'}) except Repository.AlreadyExists as e: assert len(e.args) == 1 assert e.args[0] == self.repository.location.orig try: self.repository.call('inject_exception', {'kind': 'CheckNeeded'}) except Repository.CheckNeeded as e: assert len(e.args) == 1 assert e.args[0] == self.repository.location.orig try: self.repository.call('inject_exception', {'kind': 'IntegrityError'}) except IntegrityError as e: assert len(e.args) == 1 assert e.args[0] == s1 try: self.repository.call('inject_exception', {'kind': 'PathNotAllowed'}) except PathNotAllowed as e: assert len(e.args) == 1 assert e.args[0] == 'foo' try: self.repository.call('inject_exception', {'kind': 'ObjectNotFound'}) except Repository.ObjectNotFound as e: assert len(e.args) == 2 assert e.args[0] == s1 assert e.args[1] == self.repository.location.orig try: self.repository.call('inject_exception', {'kind': 'InvalidRPCMethod'}) except InvalidRPCMethod as e: assert len(e.args) == 1 assert e.args[0] == s1 try: self.repository.call('inject_exception', {'kind': 'divide'}) except RemoteRepository.RPCError as e: assert e.unpacked assert e.get_message() == 'ZeroDivisionError: integer division or modulo by zero\n' assert e.exception_class == 'ZeroDivisionError' assert len(e.exception_full) > 0 def test_ssh_cmd(self): args = self._get_mock_args() self.repository._args = args assert self.repository.ssh_cmd(Location('example.com:foo')) == ['ssh', 'example.com'] assert 
self.repository.ssh_cmd(Location('ssh://example.com/foo')) == ['ssh', 'example.com'] assert self.repository.ssh_cmd(Location('ssh://user@example.com/foo')) == ['ssh', 'user@example.com'] assert self.repository.ssh_cmd(Location('ssh://user@example.com:1234/foo')) == ['ssh', '-p', '1234', 'user@example.com'] os.environ['BORG_RSH'] = 'ssh --foo' assert self.repository.ssh_cmd(Location('example.com:foo')) == ['ssh', '--foo', 'example.com'] def test_borg_cmd(self): assert self.repository.borg_cmd(None, testing=True) == [sys.executable, '-m', 'borg.archiver', 'serve'] args = self._get_mock_args() # XXX without next line we get spurious test fails when using pytest-xdist, root cause unknown: logging.getLogger().setLevel(logging.INFO) # note: test logger is on info log level, so --info gets added automagically assert self.repository.borg_cmd(args, testing=False) == ['borg', 'serve', '--umask=077', '--info'] args.remote_path = 'borg-0.28.2' assert self.repository.borg_cmd(args, testing=False) == ['borg-0.28.2', 'serve', '--umask=077', '--info'] args.debug_topics = ['something_client_side', 'repository_compaction'] assert self.repository.borg_cmd(args, testing=False) == ['borg-0.28.2', 'serve', '--umask=077', '--info', '--debug-topic=borg.debug.repository_compaction'] args = self._get_mock_args() args.storage_quota = 0 assert self.repository.borg_cmd(args, testing=False) == ['borg', 'serve', '--umask=077', '--info'] args.storage_quota = 314159265 assert self.repository.borg_cmd(args, testing=False) == ['borg', 'serve', '--umask=077', '--info', '--storage-quota=314159265'] args.rsh = 'ssh -i foo' self.repository._args = args assert self.repository.ssh_cmd(Location('example.com:foo')) == ['ssh', '-i', 'foo', 'example.com'] class RemoteLegacyFree(RepositoryTestCaseBase): # Keep testing this so we can someday safely remove the legacy tuple format. def open(self, create=False): with patch.object(RemoteRepository, 'dictFormat', True): return RemoteRepository(Location('__testsuite__:' + os.path.join(self.tmppath, 'repository')), exclusive=True, create=create) def test_legacy_free(self): # put self.repository.put(H(0), b'foo') self.repository.commit() self.repository.close() # replace self.repository = self.open() with self.repository: self.repository.put(H(0), b'bar') self.repository.commit() # delete self.repository = self.open() with self.repository: self.repository.delete(H(0)) self.repository.commit() class RemoteRepositoryCheckTestCase(RepositoryCheckTestCase): def open(self, create=False): return RemoteRepository(Location('__testsuite__:' + os.path.join(self.tmppath, 'repository')), exclusive=True, create=create) def test_crash_before_compact(self): # skip this test, we can't mock-patch a Repository class in another process! 
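# (Added note, illustrative: for '__testsuite__:' locations the client spawns the server in-tree,
#  roughly
#
#      cmd = self.repository.borg_cmd(None, testing=True)   # [sys.executable, '-m', 'borg.archiver', 'serve']
#
#  as asserted in test_borg_cmd above, so the Repository object lives in that child process and a
#  mock.patch applied in this process cannot reach it.)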
pass class RemoteLoggerTestCase(BaseTestCase): def setUp(self): self.stream = io.StringIO() self.handler = logging.StreamHandler(self.stream) logging.getLogger().handlers[:] = [self.handler] logging.getLogger('borg.repository').handlers[:] = [] logging.getLogger('borg.repository.foo').handlers[:] = [] # capture stderr sys.stderr.flush() self.old_stderr = sys.stderr self.stderr = sys.stderr = io.StringIO() def tearDown(self): sys.stderr = self.old_stderr def test_stderr_messages(self): handle_remote_line("unstructured stderr message\n") self.assert_equal(self.stream.getvalue(), '') # stderr messages don't get an implicit newline self.assert_equal(self.stderr.getvalue(), 'Remote: unstructured stderr message\n') def test_stderr_progress_messages(self): handle_remote_line("unstructured stderr progress message\r") self.assert_equal(self.stream.getvalue(), '') # stderr messages don't get an implicit newline self.assert_equal(self.stderr.getvalue(), 'Remote: unstructured stderr progress message\r') def test_pre11_format_messages(self): self.handler.setLevel(logging.DEBUG) logging.getLogger().setLevel(logging.DEBUG) handle_remote_line("$LOG INFO Remote: borg < 1.1 format message\n") self.assert_equal(self.stream.getvalue(), 'Remote: borg < 1.1 format message\n') self.assert_equal(self.stderr.getvalue(), '') def test_post11_format_messages(self): self.handler.setLevel(logging.DEBUG) logging.getLogger().setLevel(logging.DEBUG) handle_remote_line("$LOG INFO borg.repository Remote: borg >= 1.1 format message\n") self.assert_equal(self.stream.getvalue(), 'Remote: borg >= 1.1 format message\n') self.assert_equal(self.stderr.getvalue(), '') def test_remote_messages_screened(self): # default borg config for root logger self.handler.setLevel(logging.WARNING) logging.getLogger().setLevel(logging.WARNING) handle_remote_line("$LOG INFO borg.repository Remote: new format info message\n") self.assert_equal(self.stream.getvalue(), '') self.assert_equal(self.stderr.getvalue(), '') def test_info_to_correct_local_child(self): logging.getLogger('borg.repository').setLevel(logging.INFO) logging.getLogger('borg.repository.foo').setLevel(logging.INFO) # default borg config for root logger self.handler.setLevel(logging.WARNING) logging.getLogger().setLevel(logging.WARNING) child_stream = io.StringIO() child_handler = logging.StreamHandler(child_stream) child_handler.setLevel(logging.INFO) logging.getLogger('borg.repository').handlers[:] = [child_handler] foo_stream = io.StringIO() foo_handler = logging.StreamHandler(foo_stream) foo_handler.setLevel(logging.INFO) logging.getLogger('borg.repository.foo').handlers[:] = [foo_handler] handle_remote_line("$LOG INFO borg.repository Remote: new format child message\n") self.assert_equal(foo_stream.getvalue(), '') self.assert_equal(child_stream.getvalue(), 'Remote: new format child message\n') self.assert_equal(self.stream.getvalue(), '') self.assert_equal(self.stderr.getvalue(), '') borgbackup-1.1.15/src/borg/testsuite/chunker.py0000644000175000017500000000621413771325506021472 0ustar useruser00000000000000from io import BytesIO from ..chunker import Chunker, buzhash, buzhash_update from ..constants import * # NOQA from . import BaseTestCase # Note: these tests are part of the self test, do not use or import py.test functionality here. # See borg.selftest for details. 
If you add/remove test methods, update SELFTEST_COUNT class ChunkerTestCase(BaseTestCase): def test_chunkify(self): data = b'0' * int(1.5 * (1 << CHUNK_MAX_EXP)) + b'Y' parts = [bytes(c) for c in Chunker(0, 1, CHUNK_MAX_EXP, 2, 2).chunkify(BytesIO(data))] self.assert_equal(len(parts), 2) self.assert_equal(b''.join(parts), data) self.assert_equal([bytes(c) for c in Chunker(0, 1, CHUNK_MAX_EXP, 2, 2).chunkify(BytesIO(b''))], []) self.assert_equal([bytes(c) for c in Chunker(0, 1, CHUNK_MAX_EXP, 2, 2).chunkify(BytesIO(b'foobarboobaz' * 3))], [b'fooba', b'rboobaz', b'fooba', b'rboobaz', b'fooba', b'rboobaz']) self.assert_equal([bytes(c) for c in Chunker(1, 1, CHUNK_MAX_EXP, 2, 2).chunkify(BytesIO(b'foobarboobaz' * 3))], [b'fo', b'obarb', b'oob', b'azf', b'oobarb', b'oob', b'azf', b'oobarb', b'oobaz']) self.assert_equal([bytes(c) for c in Chunker(2, 1, CHUNK_MAX_EXP, 2, 2).chunkify(BytesIO(b'foobarboobaz' * 3))], [b'foob', b'ar', b'boobazfoob', b'ar', b'boobazfoob', b'ar', b'boobaz']) self.assert_equal([bytes(c) for c in Chunker(0, 2, CHUNK_MAX_EXP, 2, 3).chunkify(BytesIO(b'foobarboobaz' * 3))], [b'foobarboobaz' * 3]) self.assert_equal([bytes(c) for c in Chunker(1, 2, CHUNK_MAX_EXP, 2, 3).chunkify(BytesIO(b'foobarboobaz' * 3))], [b'foobar', b'boobazfo', b'obar', b'boobazfo', b'obar', b'boobaz']) self.assert_equal([bytes(c) for c in Chunker(2, 2, CHUNK_MAX_EXP, 2, 3).chunkify(BytesIO(b'foobarboobaz' * 3))], [b'foob', b'arboobaz', b'foob', b'arboobaz', b'foob', b'arboobaz']) self.assert_equal([bytes(c) for c in Chunker(0, 3, CHUNK_MAX_EXP, 2, 3).chunkify(BytesIO(b'foobarboobaz' * 3))], [b'foobarboobaz' * 3]) self.assert_equal([bytes(c) for c in Chunker(1, 3, CHUNK_MAX_EXP, 2, 3).chunkify(BytesIO(b'foobarboobaz' * 3))], [b'foobarbo', b'obazfoobar', b'boobazfo', b'obarboobaz']) self.assert_equal([bytes(c) for c in Chunker(2, 3, CHUNK_MAX_EXP, 2, 3).chunkify(BytesIO(b'foobarboobaz' * 3))], [b'foobarboobaz', b'foobarboobaz', b'foobarboobaz']) def test_buzhash(self): self.assert_equal(buzhash(b'abcdefghijklmnop', 0), 3795437769) self.assert_equal(buzhash(b'abcdefghijklmnop', 1), 3795400502) self.assert_equal(buzhash(b'abcdefghijklmnop', 1), buzhash_update(buzhash(b'Xabcdefghijklmno', 1), ord('X'), ord('p'), 16, 1)) # Test with more than 31 bytes to make sure our barrel_shift macro works correctly self.assert_equal(buzhash(b'abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyz', 0), 566521248) def test_small_reads(self): class SmallReadFile: input = b'a' * (20 + 1) def read(self, nbytes): self.input = self.input[:-1] return self.input[:1] reconstructed = b''.join(Chunker(0, *CHUNKER_PARAMS).chunkify(SmallReadFile())) assert reconstructed == b'a' * 20 borgbackup-1.1.15/src/borg/testsuite/upgrader.py0000644000175000017500000001537113771325506021650 0ustar useruser00000000000000import os import tarfile import pytest from ..constants import * # NOQA from ..crypto.key import KeyfileKey from ..upgrader import AtticRepositoryUpgrader, AtticKeyfileKey from ..helpers import get_keys_dir from ..repository import Repository from . import are_hardlinks_supported # tar with a repo and repo keyfile from attic ATTIC_TAR = os.path.join(os.path.dirname(__file__), 'attic.tar.gz') def untar(tarfname, path, what): """ extract tar archive to , all stuff starting with . return path to . 
""" def files(members): for tarinfo in members: if tarinfo.name.startswith(what): yield tarinfo with tarfile.open(tarfname, 'r') as tf: tf.extractall(path, members=files(tf)) return os.path.join(path, what) def repo_valid(path): """ utility function to check if borg can open a repository :param path: the path to the repository :returns: if borg can check the repository """ with Repository(str(path), exclusive=True, create=False) as repository: # can't check raises() because check() handles the error return repository.check() def key_valid(path): """ check that the new keyfile is alright :param path: the path to the key file :returns: if the file starts with the borg magic string """ keyfile = os.path.join(get_keys_dir(), os.path.basename(path)) with open(keyfile, 'r') as f: return f.read().startswith(KeyfileKey.FILE_ID) def make_attic_repo(dir): """ create an attic repo with some stuff in it :param dir: path to the repository to be created :returns: path to attic repository """ # there is some stuff in that repo, copied from `RepositoryTestCase.test1` return untar(ATTIC_TAR, str(dir), 'repo') @pytest.fixture() def attic_repo(tmpdir): return make_attic_repo(tmpdir) @pytest.fixture(params=[True, False]) def inplace(request): return request.param def test_convert_segments(attic_repo, inplace): """test segment conversion this will load the given attic repository, list all the segments then convert them one at a time. we need to close the repo before conversion otherwise we have errors from borg :param attic_repo: a populated attic repository (fixture) """ repo_path = attic_repo with pytest.raises(Repository.AtticRepository): repo_valid(repo_path) repository = AtticRepositoryUpgrader(repo_path, create=False) with repository: segments = [filename for i, filename in repository.io.segment_iterator()] repository.convert_segments(segments, dryrun=False, inplace=inplace) repository.convert_cache(dryrun=False) assert repo_valid(repo_path) @pytest.fixture() def attic_key_file(tmpdir, monkeypatch): """ create an attic key file from the given repo, in the keys subdirectory of the given tmpdir :param tmpdir: a temporary directory (a builtin fixture) :returns: path to key file """ keys_dir = untar(ATTIC_TAR, str(tmpdir), 'keys') # we use the repo dir for the created keyfile, because we do # not want to clutter existing keyfiles monkeypatch.setenv('ATTIC_KEYS_DIR', keys_dir) # we use the same directory for the converted files, which # will clutter the previously created one, which we don't care # about anyways. in real runs, the original key will be retained. monkeypatch.setenv('BORG_KEYS_DIR', keys_dir) monkeypatch.setenv('ATTIC_PASSPHRASE', 'test') return os.path.join(keys_dir, 'repo') def test_keys(attic_repo, attic_key_file): """test key conversion test that we can convert the given key to a properly formatted borg key. assumes that the ATTIC_KEYS_DIR and BORG_KEYS_DIR have been properly populated by the attic_key_file fixture. 
:param attic_repo: path to an attic repository (fixture defined above) :param attic_key_file: path to an attic key file (fixture defined above) """ keyfile_path = attic_key_file assert not key_valid(keyfile_path) # not upgraded yet with AtticRepositoryUpgrader(attic_repo, create=False) as repository: keyfile = AtticKeyfileKey.find_key_file(repository) AtticRepositoryUpgrader.convert_keyfiles(keyfile, dryrun=False) assert key_valid(keyfile_path) @pytest.mark.skipif(not are_hardlinks_supported(), reason='hardlinks not supported') def test_convert_all(attic_repo, attic_key_file, inplace): """test all conversion steps this runs everything. mostly redundant test, since everything is done above. yet we expect a NotImplementedError because we do not convert caches yet. :param attic_repo: path to an attic repository (fixture defined above) :param attic_key_file: path to an attic key file (fixture defined above) """ repo_path = attic_repo with pytest.raises(Repository.AtticRepository): repo_valid(repo_path) def stat_segment(path): return os.stat(os.path.join(path, 'data', '0', '0')) def first_inode(path): return stat_segment(path).st_ino orig_inode = first_inode(repo_path) with AtticRepositoryUpgrader(repo_path, create=False) as repository: # replicate command dispatch, partly os.umask(UMASK_DEFAULT) backup = repository.upgrade(dryrun=False, inplace=inplace) # note: uses hardlinks internally if inplace: assert backup is None assert first_inode(repository.path) == orig_inode else: assert backup assert first_inode(repository.path) != first_inode(backup) # i have seen cases where the copied tree has world-readable # permissions, which is wrong if 'BORG_TESTS_IGNORE_MODES' not in os.environ: assert stat_segment(backup).st_mode & UMASK_DEFAULT == 0 assert key_valid(attic_key_file) assert repo_valid(repo_path) @pytest.mark.skipif(not are_hardlinks_supported(), reason='hardlinks not supported') def test_hardlink(tmpdir, inplace): """test that we handle hard links properly that is, if we are in "inplace" mode, hardlinks should *not* change (ie. we write to the file directly, so we do not rewrite the whole file, and we do not re-create the file). if we are *not* in inplace mode, then the inode should change, as we are supposed to leave the original inode alone.""" a = str(tmpdir.join('a')) with open(a, 'wb') as tmp: tmp.write(b'aXXX') b = str(tmpdir.join('b')) os.link(a, b) AtticRepositoryUpgrader.header_replace(b, b'a', b'b', inplace=inplace) if not inplace: assert os.stat(a).st_ino != os.stat(b).st_ino else: assert os.stat(a).st_ino == os.stat(b).st_ino with open(b, 'rb') as tmp: assert tmp.read() == b'bXXX' borgbackup-1.1.15/src/borg/testsuite/nanorst.py0000644000175000017500000000176113771325506021521 0ustar useruser00000000000000 import pytest from ..nanorst import rst_to_text def test_inline(): assert rst_to_text('*foo* and ``bar``.') == 'foo and bar.' def test_inline_spread(): assert rst_to_text('*foo and bar, thusly\nfoobar*.') == 'foo and bar, thusly\nfoobar.' def test_comment_inline(): assert rst_to_text('Foo and Bar\n.. foo\nbar') == 'Foo and Bar\n.. foo\nbar' def test_inline_escape(): assert rst_to_text('Such as "\\*" characters.') == 'Such as "*" characters.' def test_comment(): assert rst_to_text('Foo and Bar\n\n.. foo\nbar') == 'Foo and Bar\n\nbar' def test_directive_note(): assert rst_to_text('.. note::\n Note this and that') == 'Note:\n Note this and that' def test_ref(): references = { 'foo': 'baz' } assert rst_to_text('See :ref:`fo\no`.', references=references) == 'See baz.' 
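# Added illustrative sketch (not from the original suite): same behaviour as test_ref above,
# just with the reference target written without the embedded line break.
def test_ref_plain_name():
    references = {
        'foo': 'baz'
    }
    assert rst_to_text('See :ref:`foo`.', references=references) == 'See baz.'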
def test_undefined_ref(): with pytest.raises(ValueError) as exc_info: rst_to_text('See :ref:`foo`.') assert 'Undefined reference' in str(exc_info.value) borgbackup-1.1.15/src/borg/testsuite/cache.py0000644000175000017500000002176613771325506021107 0ustar useruser00000000000000import io import os.path import pytest from .hashindex import H from .key import TestKey from ..archive import Statistics from ..cache import AdHocCache from ..compress import CompressionSpec from ..crypto.key import RepoKey from ..hashindex import ChunkIndex, CacheSynchronizer from ..helpers import Manifest from ..helpers import msgpack from ..repository import Repository packb = msgpack.packb class TestCacheSynchronizer: @pytest.fixture def index(self): return ChunkIndex() @pytest.fixture def sync(self, index): return CacheSynchronizer(index) def test_no_chunks(self, index, sync): data = packb({ 'foo': 'bar', 'baz': 1234, 'bar': 5678, 'user': 'chunks', 'chunks': [] }) sync.feed(data) assert not len(index) def test_simple(self, index, sync): data = packb({ 'foo': 'bar', 'baz': 1234, 'bar': 5678, 'user': 'chunks', 'chunks': [ (H(1), 1, 2), (H(2), 2, 3), ] }) sync.feed(data) assert len(index) == 2 assert index[H(1)] == (1, 1, 2) assert index[H(2)] == (1, 2, 3) def test_multiple(self, index, sync): data = packb({ 'foo': 'bar', 'baz': 1234, 'bar': 5678, 'user': 'chunks', 'chunks': [ (H(1), 1, 2), (H(2), 2, 3), ] }) data += packb({ 'xattrs': { 'security.foo': 'bar', 'chunks': '123456', }, 'stuff': [ (1, 2, 3), ] }) data += packb({ 'xattrs': { 'security.foo': 'bar', 'chunks': '123456', }, 'chunks': [ (H(1), 1, 2), (H(2), 2, 3), ], 'stuff': [ (1, 2, 3), ] }) data += packb({ 'chunks': [ (H(3), 1, 2), ], }) data += packb({ 'chunks': [ (H(1), 1, 2), ], }) part1 = data[:70] part2 = data[70:120] part3 = data[120:] sync.feed(part1) sync.feed(part2) sync.feed(part3) assert len(index) == 3 assert index[H(1)] == (3, 1, 2) assert index[H(2)] == (2, 2, 3) assert index[H(3)] == (1, 1, 2) @pytest.mark.parametrize('elem,error', ( ({1: 2}, 'Unexpected object: map'), (bytes(213), [ 'Unexpected bytes in chunks structure', # structure 2/3 'Incorrect key length']), # structure 3/3 (1, 'Unexpected object: integer'), (1.0, 'Unexpected object: double'), (True, 'Unexpected object: true'), (False, 'Unexpected object: false'), (None, 'Unexpected object: nil'), )) @pytest.mark.parametrize('structure', ( lambda elem: {'chunks': elem}, lambda elem: {'chunks': [elem]}, lambda elem: {'chunks': [(elem, 1, 2)]}, )) def test_corrupted(self, sync, structure, elem, error): packed = packb(structure(elem)) with pytest.raises(ValueError) as excinfo: sync.feed(packed) if isinstance(error, str): error = [error] possible_errors = ['cache_sync_feed failed: ' + error for error in error] assert str(excinfo.value) in possible_errors @pytest.mark.parametrize('data,error', ( # Incorrect tuple length ({'chunks': [(bytes(32), 2, 3, 4)]}, 'Invalid chunk list entry length'), ({'chunks': [(bytes(32), 2)]}, 'Invalid chunk list entry length'), # Incorrect types ({'chunks': [(1, 2, 3)]}, 'Unexpected object: integer'), ({'chunks': [(1, bytes(32), 2)]}, 'Unexpected object: integer'), ({'chunks': [(bytes(32), 1.0, 2)]}, 'Unexpected object: double'), )) def test_corrupted_ancillary(self, index, sync, data, error): packed = packb(data) with pytest.raises(ValueError) as excinfo: sync.feed(packed) assert str(excinfo.value) == 'cache_sync_feed failed: ' + error def make_index_with_refcount(self, refcount): index_data = io.BytesIO() index_data.write(b'BORG_IDX') # num_entries 
index_data.write((1).to_bytes(4, 'little')) # num_buckets index_data.write((1).to_bytes(4, 'little')) # key_size index_data.write((32).to_bytes(1, 'little')) # value_size index_data.write((3 * 4).to_bytes(1, 'little')) index_data.write(H(0)) index_data.write(refcount.to_bytes(4, 'little')) index_data.write((1234).to_bytes(4, 'little')) index_data.write((5678).to_bytes(4, 'little')) index_data.seek(0) index = ChunkIndex.read(index_data) return index def test_corrupted_refcount(self): index = self.make_index_with_refcount(ChunkIndex.MAX_VALUE + 1) sync = CacheSynchronizer(index) data = packb({ 'chunks': [ (H(0), 1, 2), ] }) with pytest.raises(ValueError) as excinfo: sync.feed(data) assert str(excinfo.value) == 'cache_sync_feed failed: invalid reference count' def test_refcount_max_value(self): index = self.make_index_with_refcount(ChunkIndex.MAX_VALUE) sync = CacheSynchronizer(index) data = packb({ 'chunks': [ (H(0), 1, 2), ] }) sync.feed(data) assert index[H(0)] == (ChunkIndex.MAX_VALUE, 1234, 5678) def test_refcount_one_below_max_value(self): index = self.make_index_with_refcount(ChunkIndex.MAX_VALUE - 1) sync = CacheSynchronizer(index) data = packb({ 'chunks': [ (H(0), 1, 2), ] }) sync.feed(data) # Incremented to maximum assert index[H(0)] == (ChunkIndex.MAX_VALUE, 1234, 5678) sync.feed(data) assert index[H(0)] == (ChunkIndex.MAX_VALUE, 1234, 5678) class TestAdHocCache: @pytest.fixture def repository(self, tmpdir): self.repository_location = os.path.join(str(tmpdir), 'repository') with Repository(self.repository_location, exclusive=True, create=True) as repository: repository.put(H(1), b'1234') repository.put(Manifest.MANIFEST_ID, b'5678') yield repository @pytest.fixture def key(self, repository, monkeypatch): monkeypatch.setenv('BORG_PASSPHRASE', 'test') key = RepoKey.create(repository, TestKey.MockArgs()) key.compressor = CompressionSpec('none').compressor return key @pytest.fixture def manifest(self, repository, key): Manifest(key, repository).write() return Manifest.load(repository, key=key, operations=Manifest.NO_OPERATION_CHECK)[0] @pytest.fixture def cache(self, repository, key, manifest): return AdHocCache(repository, key, manifest) def test_does_not_contain_manifest(self, cache): assert not cache.seen_chunk(Manifest.MANIFEST_ID) def test_does_not_delete_existing_chunks(self, repository, cache): assert cache.seen_chunk(H(1)) == ChunkIndex.MAX_VALUE cache.chunk_decref(H(1), Statistics()) assert repository.get(H(1)) == b'1234' def test_does_not_overwrite(self, cache): with pytest.raises(AssertionError): cache.add_chunk(H(1), b'5678', Statistics(), overwrite=True) def test_seen_chunk_add_chunk_size(self, cache): assert cache.add_chunk(H(1), b'5678', Statistics()) == (H(1), 4, 0) def test_deletes_chunks_during_lifetime(self, cache, repository): """E.g. 
checkpoint archives""" cache.add_chunk(H(5), b'1010', Statistics()) assert cache.seen_chunk(H(5)) == 1 cache.chunk_decref(H(5), Statistics()) assert not cache.seen_chunk(H(5)) with pytest.raises(Repository.ObjectNotFound): repository.get(H(5)) def test_files_cache(self, cache): assert cache.file_known_and_unchanged(bytes(32), None) == (False, None) assert cache.cache_mode == 'd' assert cache.files is None def test_txn(self, cache): assert not cache._txn_active cache.seen_chunk(H(5)) assert cache._txn_active assert cache.chunks cache.rollback() assert not cache._txn_active assert not hasattr(cache, 'chunks') def test_incref_after_add_chunk(self, cache): assert cache.add_chunk(H(3), b'5678', Statistics()) == (H(3), 4, 47) assert cache.chunk_incref(H(3), Statistics()) == (H(3), 4, 47) def test_existing_incref_after_add_chunk(self, cache): """This case occurs with part files, see Archive.chunk_file.""" assert cache.add_chunk(H(1), b'5678', Statistics()) == (H(1), 4, 0) assert cache.chunk_incref(H(1), Statistics()) == (H(1), 4, 0) borgbackup-1.1.15/src/borg/testsuite/crypto.py0000644000175000017500000001700213771325506021350 0ustar useruser00000000000000from binascii import hexlify, unhexlify from ..crypto.low_level import AES, bytes_to_long, bytes_to_int, long_to_bytes, hmac_sha256, blake2b_256 from ..crypto.low_level import increment_iv, bytes16_to_int, int_to_bytes16 from ..crypto.low_level import hkdf_hmac_sha512 from . import BaseTestCase # Note: these tests are part of the self test, do not use or import py.test functionality here. # See borg.selftest for details. If you add/remove test methods, update SELFTEST_COUNT class CryptoTestCase(BaseTestCase): def test_bytes_to_int(self): self.assert_equal(bytes_to_int(b'\0\0\0\1'), 1) def test_bytes_to_long(self): self.assert_equal(bytes_to_long(b'\0\0\0\0\0\0\0\1'), 1) self.assert_equal(long_to_bytes(1), b'\0\0\0\0\0\0\0\1') def test_bytes16_to_int(self): self.assert_equal(bytes16_to_int(b'\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\1'), 1) self.assert_equal(int_to_bytes16(1), b'\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\1') self.assert_equal(bytes16_to_int(b'\0\0\0\0\0\0\0\1\0\0\0\0\0\0\0\0'), 2 ** 64) self.assert_equal(int_to_bytes16(2 ** 64), b'\0\0\0\0\0\0\0\1\0\0\0\0\0\0\0\0') def test_increment_iv(self): iv0 = b'\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0' iv1 = b'\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\1' iv2 = b'\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\2' self.assert_equal(increment_iv(iv0, 0), iv0) self.assert_equal(increment_iv(iv0, 1), iv1) self.assert_equal(increment_iv(iv0, 2), iv2) iva = b'\0\0\0\0\0\0\0\0\xff\xff\xff\xff\xff\xff\xff\xff' ivb = b'\0\0\0\0\0\0\0\1\x00\x00\x00\x00\x00\x00\x00\x00' ivc = b'\0\0\0\0\0\0\0\1\x00\x00\x00\x00\x00\x00\x00\x01' self.assert_equal(increment_iv(iva, 0), iva) self.assert_equal(increment_iv(iva, 1), ivb) self.assert_equal(increment_iv(iva, 2), ivc) self.assert_equal(increment_iv(iv0, 2**64), ivb) def test_aes(self): key = b'X' * 32 data = b'foo' * 10 # encrypt aes = AES(is_encrypt=True, key=key) self.assert_equal(bytes_to_long(aes.iv, 8), 0) cdata = aes.encrypt(data) self.assert_equal(hexlify(cdata), b'c6efb702de12498f34a2c2bbc8149e759996d08bf6dc5c610aefc0c3a466') self.assert_equal(bytes_to_long(aes.iv, 8), 2) # decrypt aes = AES(is_encrypt=False, key=key) self.assert_equal(bytes_to_long(aes.iv, 8), 0) pdata = aes.decrypt(cdata) self.assert_equal(data, pdata) self.assert_equal(bytes_to_long(aes.iv, 8), 2) def test_hmac_sha256(self): # RFC 4231 test vectors key = b'\x0b' * 20 # Also test that this works with memory views data = 
memoryview(unhexlify('4869205468657265')) hmac = unhexlify('b0344c61d8db38535ca8afceaf0bf12b' '881dc200c9833da726e9376c2e32cff7') assert hmac_sha256(key, data) == hmac key = unhexlify('4a656665') data = unhexlify('7768617420646f2079612077616e7420' '666f72206e6f7468696e673f') hmac = unhexlify('5bdcc146bf60754e6a042426089575c7' '5a003f089d2739839dec58b964ec3843') assert hmac_sha256(key, data) == hmac key = b'\xaa' * 20 data = b'\xdd' * 50 hmac = unhexlify('773ea91e36800e46854db8ebd09181a7' '2959098b3ef8c122d9635514ced565fe') assert hmac_sha256(key, data) == hmac key = unhexlify('0102030405060708090a0b0c0d0e0f10' '111213141516171819') data = b'\xcd' * 50 hmac = unhexlify('82558a389a443c0ea4cc819899f2083a' '85f0faa3e578f8077a2e3ff46729665b') assert hmac_sha256(key, data) == hmac def test_blake2b_256(self): # In BLAKE2 the output length actually is part of the hashes personality - it is *not* simple truncation like in # the SHA-2 family. Therefore we need to generate test vectors ourselves (as is true for most applications that # are not precisely vanilla BLAKE2b-512 or BLAKE2s-256). # # Obtained via "b2sum" utility from the official BLAKE2 repository. It calculates the exact hash of a file's # contents, no extras (like length) included. assert blake2b_256(b'', b'abc') == unhexlify('bddd813c634239723171ef3fee98579b94964e3bb1cb3e427262c8c068d52319') assert blake2b_256(b'a', b'bc') == unhexlify('bddd813c634239723171ef3fee98579b94964e3bb1cb3e427262c8c068d52319') assert blake2b_256(b'ab', b'c') == unhexlify('bddd813c634239723171ef3fee98579b94964e3bb1cb3e427262c8c068d52319') assert blake2b_256(b'abc', b'') == unhexlify('bddd813c634239723171ef3fee98579b94964e3bb1cb3e427262c8c068d52319') key = unhexlify('e944973af2256d4d670c12dd75304c319f58f4e40df6fb18ef996cb47e063676') data = memoryview(b'1234567890' * 100) assert blake2b_256(key, data) == unhexlify('97ede832378531dd0f4c668685d166e797da27b47d8cd441e885b60abd5e0cb2') # These test vectors come from https://www.kullo.net/blog/hkdf-sha-512-test-vectors/ # who claims to have verified these against independent Python and C++ implementations. 
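# (Added cross-check sketch, illustrative only - not one of the SELFTEST methods counted above.
#  Assuming hkdf_hmac_sha512() implements RFC 5869 HKDF with SHA-512, the vectors below can be
#  reproduced with just the standard library:
#
#      import hashlib, hmac
#
#      def hkdf_sha512_ref(ikm, salt, info, length):
#          salt = salt if salt is not None else bytes(64)   # RFC 5869 default salt: HashLen zero bytes
#          prk = hmac.new(salt, ikm, hashlib.sha512).digest()
#          okm = t = b''
#          for i in range((length + 63) // 64):             # expand in 64-byte (SHA-512) blocks
#              t = hmac.new(prk, t + info + bytes([i + 1]), hashlib.sha512).digest()
#              okm += t
#          return okm[:length]
#
#  e.g. hkdf_sha512_ref(b'\x0b' * 22, bytes.fromhex('000102030405060708090a0b0c'),
#  bytes.fromhex('f0f1f2f3f4f5f6f7f8f9'), 42) reproduces the first vector below.)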
def test_hkdf_hmac_sha512(self): ikm = b'\x0b' * 22 salt = bytes.fromhex('000102030405060708090a0b0c') info = bytes.fromhex('f0f1f2f3f4f5f6f7f8f9') l = 42 okm = hkdf_hmac_sha512(ikm, salt, info, l) assert okm == bytes.fromhex('832390086cda71fb47625bb5ceb168e4c8e26a1a16ed34d9fc7fe92c1481579338da362cb8d9f925d7cb') def test_hkdf_hmac_sha512_2(self): ikm = bytes.fromhex('000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f2021222324252627' '28292a2b2c2d2e2f303132333435363738393a3b3c3d3e3f404142434445464748494a4b4c4d4e4f') salt = bytes.fromhex('606162636465666768696a6b6c6d6e6f707172737475767778797a7b7c7d7e7f808182838485868' '788898a8b8c8d8e8f909192939495969798999a9b9c9d9e9fa0a1a2a3a4a5a6a7a8a9aaabacadaeaf') info = bytes.fromhex('b0b1b2b3b4b5b6b7b8b9babbbcbdbebfc0c1c2c3c4c5c6c7c8c9cacbcccdcecfd0d1d2d3d4d5d6d7' 'd8d9dadbdcdddedfe0e1e2e3e4e5e6e7e8e9eaebecedeeeff0f1f2f3f4f5f6f7f8f9fafbfcfdfeff') l = 82 okm = hkdf_hmac_sha512(ikm, salt, info, l) assert okm == bytes.fromhex('ce6c97192805b346e6161e821ed165673b84f400a2b514b2fe23d84cd189ddf1b695b48cbd1c838844' '1137b3ce28f16aa64ba33ba466b24df6cfcb021ecff235f6a2056ce3af1de44d572097a8505d9e7a93') def test_hkdf_hmac_sha512_3(self): ikm = bytes.fromhex('0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b') salt = None info = b'' l = 42 okm = hkdf_hmac_sha512(ikm, salt, info, l) assert okm == bytes.fromhex('f5fa02b18298a72a8c23898a8703472c6eb179dc204c03425c970e3b164bf90fff22d04836d0e2343bac') def test_hkdf_hmac_sha512_4(self): ikm = bytes.fromhex('0b0b0b0b0b0b0b0b0b0b0b') salt = bytes.fromhex('000102030405060708090a0b0c') info = bytes.fromhex('f0f1f2f3f4f5f6f7f8f9') l = 42 okm = hkdf_hmac_sha512(ikm, salt, info, l) assert okm == bytes.fromhex('7413e8997e020610fbf6823f2ce14bff01875db1ca55f68cfcf3954dc8aff53559bd5e3028b080f7c068') def test_hkdf_hmac_sha512_5(self): ikm = bytes.fromhex('0c0c0c0c0c0c0c0c0c0c0c0c0c0c0c0c0c0c0c0c0c0c') salt = None info = b'' l = 42 okm = hkdf_hmac_sha512(ikm, salt, info, l) assert okm == bytes.fromhex('1407d46013d98bc6decefcfee55f0f90b0c7f63d68eb1a80eaf07e953cfc0a3a5240a155d6e4daa965bb') borgbackup-1.1.15/src/borg/testsuite/key.py0000644000175000017500000004337013771325506020627 0ustar useruser00000000000000import getpass import os.path import re import tempfile from binascii import hexlify, unhexlify import pytest from ..crypto.key import Passphrase, PasswordRetriesExceeded, bin_to_hex from ..crypto.key import PlaintextKey, PassphraseKey, AuthenticatedKey, RepoKey, KeyfileKey, \ Blake2KeyfileKey, Blake2RepoKey, Blake2AuthenticatedKey from ..crypto.key import ID_HMAC_SHA_256, ID_BLAKE2b_256 from ..crypto.key import TAMRequiredError, TAMInvalid, TAMUnsupportedSuiteError, UnsupportedManifestError from ..crypto.key import identify_key from ..crypto.low_level import bytes_to_long, num_aes_blocks from ..helpers import IntegrityError from ..helpers import Location from ..helpers import StableDict from ..helpers import get_security_dir from ..helpers import msgpack class TestKey: class MockArgs: location = Location(tempfile.mkstemp()[1]) keyfile2_key_file = """ BORG_KEY 0000000000000000000000000000000000000000000000000000000000000000 hqppdGVyYXRpb25zzgABhqCkaGFzaNoAIMyonNI+7Cjv0qHi0AOBM6bLGxACJhfgzVD2oq bIS9SFqWFsZ29yaXRobaZzaGEyNTakc2FsdNoAINNK5qqJc1JWSUjACwFEWGTdM7Nd0a5l 1uBGPEb+9XM9p3ZlcnNpb24BpGRhdGHaANAYDT5yfPpU099oBJwMomsxouKyx/OG4QIXK2 hQCG2L2L/9PUu4WIuKvGrsXoP7syemujNfcZws5jLp2UPva4PkQhQsrF1RYDEMLh2eF9Ol rwtkThq1tnh7KjWMG9Ijt7/aoQtq0zDYP/xaFF8XXSJxiyP5zjH5+spB6RL0oQHvbsliSh 
/cXJq7jrqmrJ1phd6dg4SHAM/i+hubadZoS6m25OQzYAW09wZD/phG8OVa698Z5ed3HTaT SmrtgJL3EoOKgUI9d6BLE4dJdBqntifo""".strip() keyfile2_cdata = unhexlify(re.sub(r'\W', '', """ 0055f161493fcfc16276e8c31493c4641e1eb19a79d0326fad0291e5a9c98e5933 00000000000003e8d21eaf9b86c297a8cd56432e1915bb """)) keyfile2_id = unhexlify('c3fbf14bc001ebcc3cd86e696c13482ed071740927cd7cbe1b01b4bfcee49314') keyfile_blake2_key_file = """ BORG_KEY 0000000000000000000000000000000000000000000000000000000000000000 hqlhbGdvcml0aG2mc2hhMjU2pGRhdGHaAZBu680Do3CmfWzeMCwe48KJi3Vps9mEDy7MKF TastsEhiAd1RQMuxfZpklkLeddMMWk+aPtFiURRFb02JLXV5cKRC1o2ZDdiNa0nao+o6+i gUjjsea9TAu25t3vxh8uQWs5BuKRLBRr0nUgrSd0IYMUgn+iVbLJRzCCssvxsklkwQxN3F Y+MvBnn8kUXSeoSoQ2l0fBHzq94Y7LMOm/owMam5URnE8/UEc6ZXBrbyX4EXxDtUqJcs+D i451thtlGdigDLpvf9nyK66mjiCpPCTCgtlzq0Pe1jcdhnsUYLg+qWzXZ7e2opEZoC6XxS 3DIuBOxG3Odqj9IKB+6/kl94vz98awPWFSpYcLZVWu7sIP38ZkUK+ad5MHTo/LvTuZdFnd iqKzZIDUJl3Zl1WGmP/0xVOmfIlznkCZy4d3SMuujwIcqQ5kDvwDRPpdhBBk+UWQY5vFXk kR1NBNLSTyhAzu3fiUmFl0qZ+UWPRkGAEBy/NuoEibrWwab8BX97cATyvnmOqYkU9PT0C6 l2l9E4bPpGhhc2jaACDnIa8KgKv84/b5sjaMgSZeIVkuKSLJy2NN8zoH8lnd36ppdGVyYX Rpb25zzgABhqCkc2FsdNoAIEJLlLh7q74j3q53856H5GgzA1HH+aW5bA/as544+PGkp3Zl cnNpb24B""".strip() keyfile_blake2_cdata = bytes.fromhex('04fdf9475cf2323c0ba7a99ddc011064f2e7d039f539f2e448' '0e6f5fc6ff9993d604040404040404098c8cee1c6db8c28947') # Verified against b2sum. Entire string passed to BLAKE2, including the padded 64 byte key contained in # keyfile_blake2_key_file above is # 19280471de95185ec27ecb6fc9edbb4f4db26974c315ede1cd505fab4250ce7cd0d081ea66946c # 95f0db934d5f616921efbd869257e8ded2bd9bd93d7f07b1a30000000000000000000000000000 # 000000000000000000000000000000000000000000000000000000000000000000000000000000 # 00000000000000000000007061796c6f6164 # p a y l o a d keyfile_blake2_id = bytes.fromhex('d8bc68e961c79f99be39061589e5179b2113cd9226e07b08ddd4a1fef7ce93fb') @pytest.fixture def keys_dir(self, request, monkeypatch, tmpdir): monkeypatch.setenv('BORG_KEYS_DIR', str(tmpdir)) return tmpdir @pytest.fixture(params=( PlaintextKey, AuthenticatedKey, KeyfileKey, RepoKey, Blake2KeyfileKey, Blake2RepoKey, Blake2AuthenticatedKey, )) def key(self, request, monkeypatch): monkeypatch.setenv('BORG_PASSPHRASE', 'test') return request.param.create(self.MockRepository(), self.MockArgs()) class MockRepository: class _Location: orig = '/some/place' def canonical_path(self): return self.orig _location = _Location() id = bytes(32) id_str = bin_to_hex(id) def get_free_nonce(self): return None def commit_nonce_reservation(self, next_unreserved, start_nonce): pass def save_key(self, data): self.key_data = data def load_key(self): return self.key_data def test_plaintext(self): key = PlaintextKey.create(None, None) chunk = b'foo' assert hexlify(key.id_hash(chunk)) == b'2c26b46b68ffc68ff99b453c1d30413413422d706483bfa0f98a5e886266e7ae' assert chunk == key.decrypt(key.id_hash(chunk), key.encrypt(chunk)) def test_keyfile(self, monkeypatch, keys_dir): monkeypatch.setenv('BORG_PASSPHRASE', 'test') key = KeyfileKey.create(self.MockRepository(), self.MockArgs()) assert bytes_to_long(key.enc_cipher.iv, 8) == 0 manifest = key.encrypt(b'ABC') assert key.extract_nonce(manifest) == 0 manifest2 = key.encrypt(b'ABC') assert manifest != manifest2 assert key.decrypt(None, manifest) == key.decrypt(None, manifest2) assert key.extract_nonce(manifest2) == 1 iv = key.extract_nonce(manifest) key2 = KeyfileKey.detect(self.MockRepository(), manifest) assert bytes_to_long(key2.enc_cipher.iv, 8) >= iv + num_aes_blocks(len(manifest) - 
KeyfileKey.PAYLOAD_OVERHEAD) # Key data sanity check assert len({key2.id_key, key2.enc_key, key2.enc_hmac_key}) == 3 assert key2.chunk_seed != 0 chunk = b'foo' assert chunk == key2.decrypt(key.id_hash(chunk), key.encrypt(chunk)) def test_keyfile_nonce_rollback_protection(self, monkeypatch, keys_dir): monkeypatch.setenv('BORG_PASSPHRASE', 'test') repository = self.MockRepository() with open(os.path.join(get_security_dir(repository.id_str), 'nonce'), "w") as fd: fd.write("0000000000002000") key = KeyfileKey.create(repository, self.MockArgs()) data = key.encrypt(b'ABC') assert key.extract_nonce(data) == 0x2000 assert key.decrypt(None, data) == b'ABC' def test_keyfile_kfenv(self, tmpdir, monkeypatch): keyfile = tmpdir.join('keyfile') monkeypatch.setenv('BORG_KEY_FILE', str(keyfile)) monkeypatch.setenv('BORG_PASSPHRASE', 'testkf') assert not keyfile.exists() key = KeyfileKey.create(self.MockRepository(), self.MockArgs()) assert keyfile.exists() chunk = b'ABC' chunk_id = key.id_hash(chunk) chunk_cdata = key.encrypt(chunk) key = KeyfileKey.detect(self.MockRepository(), chunk_cdata) assert chunk == key.decrypt(chunk_id, chunk_cdata) keyfile.remove() with pytest.raises(FileNotFoundError): KeyfileKey.detect(self.MockRepository(), chunk_cdata) def test_keyfile2(self, monkeypatch, keys_dir): with keys_dir.join('keyfile').open('w') as fd: fd.write(self.keyfile2_key_file) monkeypatch.setenv('BORG_PASSPHRASE', 'passphrase') key = KeyfileKey.detect(self.MockRepository(), self.keyfile2_cdata) assert key.decrypt(self.keyfile2_id, self.keyfile2_cdata) == b'payload' def test_keyfile2_kfenv(self, tmpdir, monkeypatch): keyfile = tmpdir.join('keyfile') with keyfile.open('w') as fd: fd.write(self.keyfile2_key_file) monkeypatch.setenv('BORG_KEY_FILE', str(keyfile)) monkeypatch.setenv('BORG_PASSPHRASE', 'passphrase') key = KeyfileKey.detect(self.MockRepository(), self.keyfile2_cdata) assert key.decrypt(self.keyfile2_id, self.keyfile2_cdata) == b'payload' def test_keyfile_blake2(self, monkeypatch, keys_dir): with keys_dir.join('keyfile').open('w') as fd: fd.write(self.keyfile_blake2_key_file) monkeypatch.setenv('BORG_PASSPHRASE', 'passphrase') key = Blake2KeyfileKey.detect(self.MockRepository(), self.keyfile_blake2_cdata) assert key.decrypt(self.keyfile_blake2_id, self.keyfile_blake2_cdata) == b'payload' def test_passphrase(self, keys_dir, monkeypatch): monkeypatch.setenv('BORG_PASSPHRASE', 'test') key = PassphraseKey.create(self.MockRepository(), None) assert bytes_to_long(key.enc_cipher.iv, 8) == 0 assert hexlify(key.id_key) == b'793b0717f9d8fb01c751a487e9b827897ceea62409870600013fbc6b4d8d7ca6' assert hexlify(key.enc_hmac_key) == b'b885a05d329a086627412a6142aaeb9f6c54ab7950f996dd65587251f6bc0901' assert hexlify(key.enc_key) == b'2ff3654c6daf7381dbbe718d2b20b4f1ea1e34caa6cc65f6bb3ac376b93fed2a' assert key.chunk_seed == -775740477 manifest = key.encrypt(b'ABC') assert key.extract_nonce(manifest) == 0 manifest2 = key.encrypt(b'ABC') assert manifest != manifest2 assert key.decrypt(None, manifest) == key.decrypt(None, manifest2) assert key.extract_nonce(manifest2) == 1 iv = key.extract_nonce(manifest) key2 = PassphraseKey.detect(self.MockRepository(), manifest) assert bytes_to_long(key2.enc_cipher.iv, 8) == iv + num_aes_blocks(len(manifest) - PassphraseKey.PAYLOAD_OVERHEAD) assert key.id_key == key2.id_key assert key.enc_hmac_key == key2.enc_hmac_key assert key.enc_key == key2.enc_key assert key.chunk_seed == key2.chunk_seed chunk = b'foo' assert hexlify(key.id_hash(chunk)) == 
b'818217cf07d37efad3860766dcdf1d21e401650fed2d76ed1d797d3aae925990' assert chunk == key2.decrypt(key2.id_hash(chunk), key.encrypt(chunk)) def _corrupt_byte(self, key, data, offset): data = bytearray(data) data[offset] ^= 1 with pytest.raises(IntegrityError): key.decrypt(b'', data) def test_decrypt_integrity(self, monkeypatch, keys_dir): with keys_dir.join('keyfile').open('w') as fd: fd.write(self.keyfile2_key_file) monkeypatch.setenv('BORG_PASSPHRASE', 'passphrase') key = KeyfileKey.detect(self.MockRepository(), self.keyfile2_cdata) data = self.keyfile2_cdata for i in range(len(data)): self._corrupt_byte(key, data, i) with pytest.raises(IntegrityError): data = bytearray(self.keyfile2_cdata) id = bytearray(key.id_hash(data)) # corrupt chunk id id[12] = 0 key.decrypt(id, data) def test_roundtrip(self, key): repository = key.repository plaintext = b'foo' encrypted = key.encrypt(plaintext) identified_key_class = identify_key(encrypted) assert identified_key_class == key.__class__ loaded_key = identified_key_class.detect(repository, encrypted) decrypted = loaded_key.decrypt(None, encrypted) assert decrypted == plaintext def test_decrypt_decompress(self, key): plaintext = b'123456789' encrypted = key.encrypt(plaintext) assert key.decrypt(None, encrypted, decompress=False) != plaintext assert key.decrypt(None, encrypted) == plaintext def test_assert_id(self, key): plaintext = b'123456789' id = key.id_hash(plaintext) key.assert_id(id, plaintext) id_changed = bytearray(id) id_changed[0] ^= 1 with pytest.raises(IntegrityError): key.assert_id(id_changed, plaintext) plaintext_changed = plaintext + b'1' with pytest.raises(IntegrityError): key.assert_id(id, plaintext_changed) def test_authenticated_encrypt(self, monkeypatch): monkeypatch.setenv('BORG_PASSPHRASE', 'test') key = AuthenticatedKey.create(self.MockRepository(), self.MockArgs()) assert AuthenticatedKey.id_hash is ID_HMAC_SHA_256.id_hash assert len(key.id_key) == 32 plaintext = b'123456789' authenticated = key.encrypt(plaintext) # 0x07 is the key TYPE, 0x0100 identifies LZ4 compression, 0x90 is part of LZ4 and means that an uncompressed # block of length nine follows (the plaintext). assert authenticated == b'\x07\x01\x00\x90' + plaintext def test_blake2_authenticated_encrypt(self, monkeypatch): monkeypatch.setenv('BORG_PASSPHRASE', 'test') key = Blake2AuthenticatedKey.create(self.MockRepository(), self.MockArgs()) assert Blake2AuthenticatedKey.id_hash is ID_BLAKE2b_256.id_hash assert len(key.id_key) == 128 plaintext = b'123456789' authenticated = key.encrypt(plaintext) # 0x06 is the key TYPE, 0x0100 identifies LZ4 compression, 0x90 is part of LZ4 and means that an uncompressed # block of length nine follows (the plaintext). 
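# (Added note, illustrative: spelled out, the framing asserted on the next line is
#      authenticated[0]   -> key TYPE byte (0x06 here, 0x07 in the SHA-256 variant above)
#      authenticated[1:3] -> the two-byte LZ4 compressor ID, 0x0100
#      authenticated[3]   -> 0x90, the LZ4 token for an uncompressed run of nine bytes
#      authenticated[4:]  -> the nine plaintext bytes
#  so "authenticated" mode stores the LZ4-framed plaintext unencrypted; integrity comes from the
#  MAC-based id_hash, not from anything embedded in this blob.)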
assert authenticated == b'\x06\x01\x00\x90' + plaintext class TestPassphrase: def test_passphrase_new_verification(self, capsys, monkeypatch): monkeypatch.setattr(getpass, 'getpass', lambda prompt: "12aöäü") monkeypatch.setenv('BORG_DISPLAY_PASSPHRASE', 'no') Passphrase.new() out, err = capsys.readouterr() assert "12" not in out assert "12" not in err monkeypatch.setenv('BORG_DISPLAY_PASSPHRASE', 'yes') passphrase = Passphrase.new() out, err = capsys.readouterr() assert "313261c3b6c3a4c3bc" not in out assert "313261c3b6c3a4c3bc" in err assert passphrase == "12aöäü" monkeypatch.setattr(getpass, 'getpass', lambda prompt: "1234/@=") Passphrase.new() out, err = capsys.readouterr() assert "1234/@=" not in out assert "1234/@=" in err def test_passphrase_new_empty(self, capsys, monkeypatch): monkeypatch.delenv('BORG_PASSPHRASE', False) monkeypatch.setattr(getpass, 'getpass', lambda prompt: "") with pytest.raises(PasswordRetriesExceeded): Passphrase.new(allow_empty=False) out, err = capsys.readouterr() assert "must not be blank" in err def test_passphrase_new_retries(self, monkeypatch): monkeypatch.delenv('BORG_PASSPHRASE', False) ascending_numbers = iter(range(20)) monkeypatch.setattr(getpass, 'getpass', lambda prompt: str(next(ascending_numbers))) with pytest.raises(PasswordRetriesExceeded): Passphrase.new() def test_passphrase_repr(self): assert "secret" not in repr(Passphrase("secret")) class TestTAM: @pytest.fixture def key(self, monkeypatch): monkeypatch.setenv('BORG_PASSPHRASE', 'test') return KeyfileKey.create(TestKey.MockRepository(), TestKey.MockArgs()) def test_unpack_future(self, key): blob = b'\xc1\xc1\xc1\xc1foobar' with pytest.raises(UnsupportedManifestError): key.unpack_and_verify_manifest(blob) blob = b'\xc1\xc1\xc1' with pytest.raises((ValueError, msgpack.UnpackException)): key.unpack_and_verify_manifest(blob) def test_missing_when_required(self, key): blob = msgpack.packb({}) with pytest.raises(TAMRequiredError): key.unpack_and_verify_manifest(blob) def test_missing(self, key): blob = msgpack.packb({}) key.tam_required = False unpacked, verified = key.unpack_and_verify_manifest(blob) assert unpacked == {} assert not verified def test_unknown_type_when_required(self, key): blob = msgpack.packb({ 'tam': { 'type': 'HMAC_VOLLBIT', }, }) with pytest.raises(TAMUnsupportedSuiteError): key.unpack_and_verify_manifest(blob) def test_unknown_type(self, key): blob = msgpack.packb({ 'tam': { 'type': 'HMAC_VOLLBIT', }, }) key.tam_required = False unpacked, verified = key.unpack_and_verify_manifest(blob) assert unpacked == {} assert not verified @pytest.mark.parametrize('tam, exc', ( ({}, TAMUnsupportedSuiteError), ({'type': b'\xff'}, TAMUnsupportedSuiteError), (None, TAMInvalid), (1234, TAMInvalid), )) def test_invalid(self, key, tam, exc): blob = msgpack.packb({ 'tam': tam, }) with pytest.raises(exc): key.unpack_and_verify_manifest(blob) @pytest.mark.parametrize('hmac, salt', ( ({}, bytes(64)), (bytes(64), {}), (None, bytes(64)), (bytes(64), None), )) def test_wrong_types(self, key, hmac, salt): data = { 'tam': { 'type': 'HKDF_HMAC_SHA512', 'hmac': hmac, 'salt': salt }, } tam = data['tam'] if hmac is None: del tam['hmac'] if salt is None: del tam['salt'] blob = msgpack.packb(data) with pytest.raises(TAMInvalid): key.unpack_and_verify_manifest(blob) def test_round_trip(self, key): data = {'foo': 'bar'} blob = key.pack_and_authenticate_metadata(data) assert blob.startswith(b'\x82') unpacked = msgpack.unpackb(blob) assert unpacked[b'tam'][b'type'] == b'HKDF_HMAC_SHA512' unpacked, verified = 
key.unpack_and_verify_manifest(blob) assert verified assert unpacked[b'foo'] == b'bar' assert b'tam' not in unpacked @pytest.mark.parametrize('which', (b'hmac', b'salt')) def test_tampered(self, key, which): data = {'foo': 'bar'} blob = key.pack_and_authenticate_metadata(data) assert blob.startswith(b'\x82') unpacked = msgpack.unpackb(blob, object_hook=StableDict) assert len(unpacked[b'tam'][which]) == 64 unpacked[b'tam'][which] = unpacked[b'tam'][which][0:32] + bytes(32) assert len(unpacked[b'tam'][which]) == 64 blob = msgpack.packb(unpacked) with pytest.raises(TAMInvalid): key.unpack_and_verify_manifest(blob) borgbackup-1.1.15/src/borg/testsuite/patterns.py0000644000175000017500000004037413771325506021700 0ustar useruser00000000000000import argparse import io import os.path import sys import pytest from ..patterns import PathFullPattern, PathPrefixPattern, FnmatchPattern, ShellPattern, RegexPattern from ..patterns import load_exclude_file, load_pattern_file from ..patterns import parse_pattern, PatternMatcher def check_patterns(files, pattern, expected): """Utility for testing patterns. """ assert all([f == os.path.normpath(f) for f in files]), "Pattern matchers expect normalized input paths" matched = [f for f in files if pattern.match(f)] assert matched == (files if expected is None else expected) @pytest.mark.parametrize("pattern, expected", [ # "None" means all files, i.e. all match the given pattern ("/", []), ("/home", ["/home"]), ("/home///", ["/home"]), ("/./home", ["/home"]), ("/home/user", ["/home/user"]), ("/home/user2", ["/home/user2"]), ("/home/user/.bashrc", ["/home/user/.bashrc"]), ]) def test_patterns_full(pattern, expected): files = ["/home", "/home/user", "/home/user2", "/home/user/.bashrc", ] check_patterns(files, PathFullPattern(pattern), expected) @pytest.mark.parametrize("pattern, expected", [ # "None" means all files, i.e. all match the given pattern ("", []), ("relative", []), ("relative/path/", ["relative/path"]), ("relative/path", ["relative/path"]), ]) def test_patterns_full_relative(pattern, expected): files = ["relative/path", "relative/path2", ] check_patterns(files, PathFullPattern(pattern), expected) @pytest.mark.parametrize("pattern, expected", [ # "None" means all files, i.e. all match the given pattern ("/", None), ("/./", None), ("", []), ("/home/u", []), ("/home/user", ["/home/user/.profile", "/home/user/.bashrc"]), ("/etc", ["/etc/server/config", "/etc/server/hosts"]), ("///etc//////", ["/etc/server/config", "/etc/server/hosts"]), ("/./home//..//home/user2", ["/home/user2/.profile", "/home/user2/public_html/index.html"]), ("/srv", ["/srv/messages", "/srv/dmesg"]), ]) def test_patterns_prefix(pattern, expected): files = [ "/etc/server/config", "/etc/server/hosts", "/home", "/home/user/.profile", "/home/user/.bashrc", "/home/user2/.profile", "/home/user2/public_html/index.html", "/srv/messages", "/srv/dmesg", ] check_patterns(files, PathPrefixPattern(pattern), expected) @pytest.mark.parametrize("pattern, expected", [ # "None" means all files, i.e. all match the given pattern ("", []), ("foo", []), ("relative", ["relative/path1", "relative/two"]), ("more", ["more/relative"]), ]) def test_patterns_prefix_relative(pattern, expected): files = ["relative/path1", "relative/two", "more/relative"] check_patterns(files, PathPrefixPattern(pattern), expected) @pytest.mark.parametrize("pattern, expected", [ # "None" means all files, i.e. 
all match the given pattern ("/*", None), ("/./*", None), ("*", None), ("*/*", None), ("*///*", None), ("/home/u", []), ("/home/*", ["/home/user/.profile", "/home/user/.bashrc", "/home/user2/.profile", "/home/user2/public_html/index.html", "/home/foo/.thumbnails", "/home/foo/bar/.thumbnails"]), ("/home/user/*", ["/home/user/.profile", "/home/user/.bashrc"]), ("/etc/*", ["/etc/server/config", "/etc/server/hosts"]), ("*/.pr????e", ["/home/user/.profile", "/home/user2/.profile"]), ("///etc//////*", ["/etc/server/config", "/etc/server/hosts"]), ("/./home//..//home/user2/*", ["/home/user2/.profile", "/home/user2/public_html/index.html"]), ("/srv*", ["/srv/messages", "/srv/dmesg"]), ("/home/*/.thumbnails", ["/home/foo/.thumbnails", "/home/foo/bar/.thumbnails"]), ]) def test_patterns_fnmatch(pattern, expected): files = [ "/etc/server/config", "/etc/server/hosts", "/home", "/home/user/.profile", "/home/user/.bashrc", "/home/user2/.profile", "/home/user2/public_html/index.html", "/srv/messages", "/srv/dmesg", "/home/foo/.thumbnails", "/home/foo/bar/.thumbnails", ] check_patterns(files, FnmatchPattern(pattern), expected) @pytest.mark.parametrize("pattern, expected", [ # "None" means all files, i.e. all match the given pattern ("*", None), ("**/*", None), ("/**/*", None), ("/./*", None), ("*/*", None), ("*///*", None), ("/home/u", []), ("/home/*", ["/home/user/.profile", "/home/user/.bashrc", "/home/user2/.profile", "/home/user2/public_html/index.html", "/home/foo/.thumbnails", "/home/foo/bar/.thumbnails"]), ("/home/user/*", ["/home/user/.profile", "/home/user/.bashrc"]), ("/etc/*/*", ["/etc/server/config", "/etc/server/hosts"]), ("/etc/**/*", ["/etc/server/config", "/etc/server/hosts"]), ("/etc/**/*/*", ["/etc/server/config", "/etc/server/hosts"]), ("*/.pr????e", []), ("**/.pr????e", ["/home/user/.profile", "/home/user2/.profile"]), ("///etc//////*", ["/etc/server/config", "/etc/server/hosts"]), ("/./home//..//home/user2/", ["/home/user2/.profile", "/home/user2/public_html/index.html"]), ("/./home//..//home/user2/**/*", ["/home/user2/.profile", "/home/user2/public_html/index.html"]), ("/srv*/", ["/srv/messages", "/srv/dmesg", "/srv2/blafasel"]), ("/srv*", ["/srv", "/srv/messages", "/srv/dmesg", "/srv2", "/srv2/blafasel"]), ("/srv/*", ["/srv/messages", "/srv/dmesg"]), ("/srv2/**", ["/srv2", "/srv2/blafasel"]), ("/srv2/**/", ["/srv2/blafasel"]), ("/home/*/.thumbnails", ["/home/foo/.thumbnails"]), ("/home/*/*/.thumbnails", ["/home/foo/bar/.thumbnails"]), ]) def test_patterns_shell(pattern, expected): files = [ "/etc/server/config", "/etc/server/hosts", "/home", "/home/user/.profile", "/home/user/.bashrc", "/home/user2/.profile", "/home/user2/public_html/index.html", "/srv", "/srv/messages", "/srv/dmesg", "/srv2", "/srv2/blafasel", "/home/foo/.thumbnails", "/home/foo/bar/.thumbnails", ] check_patterns(files, ShellPattern(pattern), expected) @pytest.mark.parametrize("pattern, expected", [ # "None" means all files, i.e. 
all match the given pattern ("", None), (".*", None), ("^/", None), ("^abc$", []), ("^[^/]", []), ("^(?!/srv|/foo|/opt)", ["/home", "/home/user/.profile", "/home/user/.bashrc", "/home/user2/.profile", "/home/user2/public_html/index.html", "/home/foo/.thumbnails", "/home/foo/bar/.thumbnails", ]), ]) def test_patterns_regex(pattern, expected): files = [ '/srv/data', '/foo/bar', '/home', '/home/user/.profile', '/home/user/.bashrc', '/home/user2/.profile', '/home/user2/public_html/index.html', '/opt/log/messages.txt', '/opt/log/dmesg.txt', "/home/foo/.thumbnails", "/home/foo/bar/.thumbnails", ] obj = RegexPattern(pattern) assert str(obj) == pattern assert obj.pattern == pattern check_patterns(files, obj, expected) def test_regex_pattern(): # The forward slash must match the platform-specific path separator assert RegexPattern("^/$").match("/") assert RegexPattern("^/$").match(os.path.sep) assert not RegexPattern(r"^\\$").match("/") def use_normalized_unicode(): return sys.platform in ("darwin",) def _make_test_patterns(pattern): return [PathPrefixPattern(pattern), FnmatchPattern(pattern), RegexPattern("^{}/foo$".format(pattern)), ShellPattern(pattern), ] @pytest.mark.parametrize("pattern", _make_test_patterns("b\N{LATIN SMALL LETTER A WITH ACUTE}")) def test_composed_unicode_pattern(pattern): assert pattern.match("b\N{LATIN SMALL LETTER A WITH ACUTE}/foo") assert pattern.match("ba\N{COMBINING ACUTE ACCENT}/foo") == use_normalized_unicode() @pytest.mark.parametrize("pattern", _make_test_patterns("ba\N{COMBINING ACUTE ACCENT}")) def test_decomposed_unicode_pattern(pattern): assert pattern.match("b\N{LATIN SMALL LETTER A WITH ACUTE}/foo") == use_normalized_unicode() assert pattern.match("ba\N{COMBINING ACUTE ACCENT}/foo") @pytest.mark.parametrize("pattern", _make_test_patterns(str(b"ba\x80", "latin1"))) def test_invalid_unicode_pattern(pattern): assert not pattern.match("ba/foo") assert pattern.match(str(b"ba\x80/foo", "latin1")) @pytest.mark.parametrize("lines, expected", [ # "None" means all files, i.e. 
none excluded ([], None), (["# Comment only"], None), (["*"], []), (["# Comment", "*/something00.txt", " *whitespace* ", # Whitespace before comment " #/ws*", # Empty line "", "# EOF"], ["/more/data", "/home", " #/wsfoobar"]), ([r"re:.*"], []), ([r"re:\s"], ["/data/something00.txt", "/more/data", "/home"]), ([r"re:(.)(\1)"], ["/more/data", "/home", "\tstart/whitespace", "/whitespace/end\t"]), (["", "", "", "# This is a test with mixed pattern styles", # Case-insensitive pattern r"re:(?i)BAR|ME$", "", "*whitespace*", "fm:*/something00*"], ["/more/data"]), ([r" re:^\s "], ["/data/something00.txt", "/more/data", "/home", "/whitespace/end\t"]), ([r" re:\s$ "], ["/data/something00.txt", "/more/data", "/home", " #/wsfoobar", "\tstart/whitespace"]), (["pp:./"], None), (["pp:/"], [" #/wsfoobar", "\tstart/whitespace"]), (["pp:aaabbb"], None), (["pp:/data", "pp: #/", "pp:\tstart", "pp:/whitespace"], ["/more/data", "/home"]), (["/nomatch", "/more/*"], ['/data/something00.txt', '/home', ' #/wsfoobar', '\tstart/whitespace', '/whitespace/end\t']), # the order of exclude patterns shouldn't matter (["/more/*", "/nomatch"], ['/data/something00.txt', '/home', ' #/wsfoobar', '\tstart/whitespace', '/whitespace/end\t']), ]) def test_exclude_patterns_from_file(tmpdir, lines, expected): files = [ '/data/something00.txt', '/more/data', '/home', ' #/wsfoobar', '\tstart/whitespace', '/whitespace/end\t', ] def evaluate(filename): patterns = [] load_exclude_file(open(filename, "rt"), patterns) matcher = PatternMatcher(fallback=True) matcher.add_inclexcl(patterns) return [path for path in files if matcher.match(path)] exclfile = tmpdir.join("exclude.txt") with exclfile.open("wt") as fh: fh.write("\n".join(lines)) assert evaluate(str(exclfile)) == (files if expected is None else expected) @pytest.mark.parametrize("lines, expected_roots, expected_numpatterns", [ # "None" means all files, i.e. 
none excluded ([], [], 0), (["# Comment only"], [], 0), (["- *"], [], 1), (["+fm:*/something00.txt", "-/data"], [], 2), (["R /"], ["/"], 0), (["R /", "# comment"], ["/"], 0), (["# comment", "- /data", "R /home"], ["/home"], 1), ]) def test_load_patterns_from_file(tmpdir, lines, expected_roots, expected_numpatterns): def evaluate(filename): roots = [] inclexclpatterns = [] load_pattern_file(open(filename, "rt"), roots, inclexclpatterns) return roots, len(inclexclpatterns) patternfile = tmpdir.join("patterns.txt") with patternfile.open("wt") as fh: fh.write("\n".join(lines)) roots, numpatterns = evaluate(str(patternfile)) assert roots == expected_roots assert numpatterns == expected_numpatterns def test_switch_patterns_style(): patterns = """\ +0_initial_default_is_shell p fm +1_fnmatch P re +2_regex +3_more_regex P pp +4_pathprefix p fm p sh +5_shell """ pattern_file = io.StringIO(patterns) roots, patterns = [], [] load_pattern_file(pattern_file, roots, patterns) assert len(patterns) == 6 assert isinstance(patterns[0].val, ShellPattern) assert isinstance(patterns[1].val, FnmatchPattern) assert isinstance(patterns[2].val, RegexPattern) assert isinstance(patterns[3].val, RegexPattern) assert isinstance(patterns[4].val, PathPrefixPattern) assert isinstance(patterns[5].val, ShellPattern) @pytest.mark.parametrize("lines", [ (["X /data"]), # illegal pattern type prefix (["/data"]), # need a pattern type prefix ]) def test_load_invalid_patterns_from_file(tmpdir, lines): patternfile = tmpdir.join("patterns.txt") with patternfile.open("wt") as fh: fh.write("\n".join(lines)) filename = str(patternfile) with pytest.raises(argparse.ArgumentTypeError): roots = [] inclexclpatterns = [] load_pattern_file(open(filename, "rt"), roots, inclexclpatterns) @pytest.mark.parametrize("lines, expected", [ # "None" means all files, i.e. 
none excluded ([], None), (["# Comment only"], None), (["- *"], []), # default match type is sh: for patterns -> * doesn't match a / (["-*/something0?.txt"], ['/data', '/data/something00.txt', '/data/subdir/something01.txt', '/home', '/home/leo', '/home/leo/t', '/home/other']), (["-fm:*/something00.txt"], ['/data', '/data/subdir/something01.txt', '/home', '/home/leo', '/home/leo/t', '/home/other']), (["-fm:*/something0?.txt"], ["/data", '/home', '/home/leo', '/home/leo/t', '/home/other']), (["+/*/something0?.txt", "-/data"], ["/data/something00.txt", '/home', '/home/leo', '/home/leo/t', '/home/other']), (["+fm:*/something00.txt", "-/data"], ["/data/something00.txt", '/home', '/home/leo', '/home/leo/t', '/home/other']), # include /home/leo and exclude the rest of /home: (["+/home/leo", "-/home/*"], ['/data', '/data/something00.txt', '/data/subdir/something01.txt', '/home', '/home/leo', '/home/leo/t']), # wrong order, /home/leo is already excluded by -/home/*: (["-/home/*", "+/home/leo"], ['/data', '/data/something00.txt', '/data/subdir/something01.txt', '/home']), (["+fm:/home/leo", "-/home/"], ['/data', '/data/something00.txt', '/data/subdir/something01.txt', '/home', '/home/leo', '/home/leo/t']), ]) def test_inclexcl_patterns_from_file(tmpdir, lines, expected): files = [ '/data', '/data/something00.txt', '/data/subdir/something01.txt', '/home', '/home/leo', '/home/leo/t', '/home/other' ] def evaluate(filename): matcher = PatternMatcher(fallback=True) roots = [] inclexclpatterns = [] load_pattern_file(open(filename, "rt"), roots, inclexclpatterns) matcher.add_inclexcl(inclexclpatterns) return [path for path in files if matcher.match(path)] patternfile = tmpdir.join("patterns.txt") with patternfile.open("wt") as fh: fh.write("\n".join(lines)) assert evaluate(str(patternfile)) == (files if expected is None else expected) @pytest.mark.parametrize("pattern, cls", [ ("", FnmatchPattern), # Default style ("*", FnmatchPattern), ("/data/*", FnmatchPattern), # fnmatch style ("fm:", FnmatchPattern), ("fm:*", FnmatchPattern), ("fm:/data/*", FnmatchPattern), ("fm:fm:/data/*", FnmatchPattern), # Regular expression ("re:", RegexPattern), ("re:.*", RegexPattern), ("re:^/something/", RegexPattern), ("re:re:^/something/", RegexPattern), # Path prefix ("pp:", PathPrefixPattern), ("pp:/", PathPrefixPattern), ("pp:/data/", PathPrefixPattern), ("pp:pp:/data/", PathPrefixPattern), # Shell-pattern style ("sh:", ShellPattern), ("sh:*", ShellPattern), ("sh:/data/*", ShellPattern), ("sh:sh:/data/*", ShellPattern), ]) def test_parse_pattern(pattern, cls): assert isinstance(parse_pattern(pattern), cls) @pytest.mark.parametrize("pattern", ["aa:", "fo:*", "00:", "x1:abc"]) def test_parse_pattern_error(pattern): with pytest.raises(ValueError): parse_pattern(pattern) def test_pattern_matcher(): pm = PatternMatcher() assert pm.fallback is None for i in ["", "foo", "bar"]: assert pm.match(i) is None # add extra entries to aid in testing for target in ["A", "B", "Empty", "FileNotFound"]: pm.is_include_cmd[target] = target pm.add([RegexPattern("^a")], "A") pm.add([RegexPattern("^b"), RegexPattern("^z")], "B") pm.add([RegexPattern("^$")], "Empty") pm.fallback = "FileNotFound" assert pm.match("") == "Empty" assert pm.match("aaa") == "A" assert pm.match("bbb") == "B" assert pm.match("ccc") == "FileNotFound" assert pm.match("xyz") == "FileNotFound" assert pm.match("z") == "B" assert PatternMatcher(fallback="hey!").fallback == "hey!" 
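# A minimal sketch of how the interfaces exercised above fit together in
# practice.  It assumes the names are importable from borg.patterns (the
# module these tests exercise); the paths and pattern strings below are made
# up purely for illustration and are not taken from the test data.
def _pattern_matching_sketch():
    from borg.patterns import PatternMatcher, parse_pattern  # assumed import path

    paths = ['/data/keep/file.txt', '/data/tmp/scratch.tmp', '/home/user/.cache/x']

    matcher = PatternMatcher(fallback=True)                # paths matching nothing are kept
    matcher.add([parse_pattern('pp:/data/tmp')], False)    # path-prefix style: /data/tmp and everything below
    matcher.add([parse_pattern('fm:*/.cache/*')], False)   # fnmatch style exclude

    kept = [p for p in paths if matcher.match(p)]
    assert kept == ['/data/keep/file.txt']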
borgbackup-1.1.15/src/borg/testsuite/compress.py0000644000175000017500000001327513771325506021673 0ustar useruser00000000000000import os import zlib try: import lzma except ImportError: lzma = None import pytest from ..compress import get_compressor, Compressor, CompressionSpec, CNONE, ZLIB, LZ4, LZMA, ZSTD, Auto buffer = bytes(2**16) data = b'fooooooooobaaaaaaaar' * 10 params = dict(name='zlib', level=6) def test_get_compressor(): c = get_compressor(name='none') assert isinstance(c, CNONE) c = get_compressor(name='lz4') assert isinstance(c, LZ4) c = get_compressor(name='zlib') assert isinstance(c, ZLIB) with pytest.raises(KeyError): get_compressor(name='foobar') def test_cnull(): c = get_compressor(name='none') cdata = c.compress(data) assert len(cdata) > len(data) assert data in cdata # it's not compressed and just in there 1:1 assert data == c.decompress(cdata) assert data == Compressor(**params).decompress(cdata) # autodetect def test_lz4(): c = get_compressor(name='lz4') cdata = c.compress(data) assert len(cdata) < len(data) assert data == c.decompress(cdata) assert data == Compressor(**params).decompress(cdata) # autodetect def test_lz4_buffer_allocation(): # test with a rather huge data object to see if buffer allocation / resizing works data = os.urandom(5 * 2**20) * 10 # 50MiB badly compressible data assert len(data) == 50 * 2**20 c = get_compressor(name='lz4') cdata = c.compress(data) assert len(cdata) > len(data) assert data == c.decompress(cdata) def test_zlib(): c = get_compressor(name='zlib') cdata = c.compress(data) assert len(cdata) < len(data) assert data == c.decompress(cdata) assert data == Compressor(**params).decompress(cdata) # autodetect def test_lzma(): if lzma is None: pytest.skip("No lzma support found.") c = get_compressor(name='lzma') cdata = c.compress(data) assert len(cdata) < len(data) assert data == c.decompress(cdata) assert data == Compressor(**params).decompress(cdata) # autodetect def test_zstd(): c = get_compressor(name='zstd') cdata = c.compress(data) assert len(cdata) < len(data) assert data == c.decompress(cdata) assert data == Compressor(**params).decompress(cdata) # autodetect def test_autodetect_invalid(): with pytest.raises(ValueError): Compressor(**params).decompress(b'\xff\xfftotalcrap') with pytest.raises(ValueError): Compressor(**params).decompress(b'\x08\x00notreallyzlib') def test_zlib_compat(): # for compatibility reasons, we do not add an extra header for zlib, # nor do we expect one when decompressing / autodetecting for level in range(10): c = get_compressor(name='zlib', level=level) cdata1 = c.compress(data) cdata2 = zlib.compress(data, level) assert cdata1 == cdata2 data2 = c.decompress(cdata2) assert data == data2 data2 = Compressor(**params).decompress(cdata2) assert data == data2 def test_compressor(): params_list = [ dict(name='none'), dict(name='lz4'), dict(name='zstd', level=1), dict(name='zstd', level=3), # avoiding high zstd levels, memory needs unclear dict(name='zlib', level=0), dict(name='zlib', level=6), dict(name='zlib', level=9), ] if lzma: params_list += [ dict(name='lzma', level=0), dict(name='lzma', level=6), # we do not test lzma on level 9 because of the huge memory needs ] for params in params_list: c = Compressor(**params) assert data == c.decompress(c.compress(data)) def test_auto(): compressor_auto_zlib = CompressionSpec('auto,zlib,9').compressor compressor_lz4 = CompressionSpec('lz4').compressor compressor_zlib = CompressionSpec('zlib,9').compressor data = bytes(500) compressed_auto_zlib = 
compressor_auto_zlib.compress(data) compressed_lz4 = compressor_lz4.compress(data) compressed_zlib = compressor_zlib.compress(data) ratio = len(compressed_zlib) / len(compressed_lz4) assert Compressor.detect(compressed_auto_zlib) == ZLIB if ratio < 0.99 else LZ4 data = b'\x00\xb8\xa3\xa2-O\xe1i\xb6\x12\x03\xc21\xf3\x8a\xf78\\\x01\xa5b\x07\x95\xbeE\xf8\xa3\x9ahm\xb1~' compressed = compressor_auto_zlib.compress(data) assert Compressor.detect(compressed) == CNONE def test_compression_specs(): with pytest.raises(ValueError): CompressionSpec('') assert isinstance(CompressionSpec('none').compressor, CNONE) assert isinstance(CompressionSpec('lz4').compressor, LZ4) zlib = CompressionSpec('zlib').compressor assert isinstance(zlib, ZLIB) assert zlib.level == 6 zlib = CompressionSpec('zlib,0').compressor assert isinstance(zlib, ZLIB) assert zlib.level == 0 zlib = CompressionSpec('zlib,9').compressor assert isinstance(zlib, ZLIB) assert zlib.level == 9 with pytest.raises(ValueError): CompressionSpec('zlib,9,invalid') lzma = CompressionSpec('lzma').compressor assert isinstance(lzma, LZMA) assert lzma.level == 6 lzma = CompressionSpec('lzma,0').compressor assert isinstance(lzma, LZMA) assert lzma.level == 0 lzma = CompressionSpec('lzma,9').compressor assert isinstance(lzma, LZMA) assert lzma.level == 9 zstd = CompressionSpec('zstd').compressor assert isinstance(zstd, ZSTD) assert zstd.level == 3 zstd = CompressionSpec('zstd,1').compressor assert isinstance(zstd, ZSTD) assert zstd.level == 1 zstd = CompressionSpec('zstd,22').compressor assert isinstance(zstd, ZSTD) assert zstd.level == 22 with pytest.raises(ValueError): CompressionSpec('lzma,9,invalid') with pytest.raises(ValueError): CompressionSpec('invalid') borgbackup-1.1.15/src/borg/testsuite/archive.py0000644000175000017500000002063313771325506021455 0ustar useruser00000000000000from collections import OrderedDict from datetime import datetime, timezone from io import StringIO from unittest.mock import Mock import pytest from . 
import BaseTestCase from ..crypto.key import PlaintextKey from ..archive import Archive, CacheChunkBuffer, RobustUnpacker, valid_msgpacked_dict, ITEM_KEYS, Statistics from ..archive import BackupOSError, backup_io, backup_io_iter from ..helpers import Manifest from ..helpers import msgpack from ..item import Item, ArchiveItem @pytest.fixture() def stats(): stats = Statistics() stats.update(20, 10, unique=True) return stats def test_stats_basic(stats): assert stats.osize == 20 assert stats.csize == stats.usize == 10 stats.update(20, 10, unique=False) assert stats.osize == 40 assert stats.csize == 20 assert stats.usize == 10 def tests_stats_progress(stats, monkeypatch, columns=80): monkeypatch.setenv('COLUMNS', str(columns)) out = StringIO() stats.show_progress(stream=out) s = '20 B O 10 B C 10 B D 0 N ' buf = ' ' * (columns - len(s)) assert out.getvalue() == s + buf + "\r" out = StringIO() stats.update(10**3, 0, unique=False) stats.show_progress(item=Item(path='foo'), final=False, stream=out) s = '1.02 kB O 10 B C 10 B D 0 N foo' buf = ' ' * (columns - len(s)) assert out.getvalue() == s + buf + "\r" out = StringIO() stats.show_progress(item=Item(path='foo'*40), final=False, stream=out) s = '1.02 kB O 10 B C 10 B D 0 N foofoofoofoofoofoofoofo...oofoofoofoofoofoofoofoofoo' buf = ' ' * (columns - len(s)) assert out.getvalue() == s + buf + "\r" def test_stats_format(stats): assert str(stats) == """\ This archive: 20 B 10 B 10 B""" s = "{0.osize_fmt}".format(stats) assert s == "20 B" # kind of redundant, but id is variable so we can't match reliably assert repr(stats) == ''.format(id(stats)) class MockCache: class MockRepo: def async_response(self, wait=True): pass def __init__(self): self.objects = {} self.repository = self.MockRepo() def add_chunk(self, id, chunk, stats=None, wait=True): self.objects[id] = chunk return id, len(chunk), len(chunk) class ArchiveTimestampTestCase(BaseTestCase): def _test_timestamp_parsing(self, isoformat, expected): repository = Mock() key = PlaintextKey(repository) manifest = Manifest(repository, key) a = Archive(repository, key, manifest, 'test', create=True) a.metadata = ArchiveItem(time=isoformat) self.assert_equal(a.ts, expected) def test_with_microseconds(self): self._test_timestamp_parsing( '1970-01-01T00:00:01.000001', datetime(1970, 1, 1, 0, 0, 1, 1, timezone.utc)) def test_without_microseconds(self): self._test_timestamp_parsing( '1970-01-01T00:00:01', datetime(1970, 1, 1, 0, 0, 1, 0, timezone.utc)) class ChunkBufferTestCase(BaseTestCase): def test(self): data = [Item(path='p1'), Item(path='p2')] cache = MockCache() key = PlaintextKey(None) chunks = CacheChunkBuffer(cache, key, None) for d in data: chunks.add(d) chunks.flush() chunks.flush(flush=True) self.assert_equal(len(chunks.chunks), 2) unpacker = msgpack.Unpacker() for id in chunks.chunks: unpacker.feed(cache.objects[id]) self.assert_equal(data, [Item(internal_dict=d) for d in unpacker]) def test_partial(self): big = "0123456789abcdefghijklmnopqrstuvwxyz" * 25000 data = [Item(path='full', source=big), Item(path='partial', source=big)] cache = MockCache() key = PlaintextKey(None) chunks = CacheChunkBuffer(cache, key, None) for d in data: chunks.add(d) chunks.flush(flush=False) # the code is expected to leave the last partial chunk in the buffer self.assert_equal(len(chunks.chunks), 3) self.assert_true(chunks.buffer.tell() > 0) # now really flush chunks.flush(flush=True) self.assert_equal(len(chunks.chunks), 4) self.assert_true(chunks.buffer.tell() == 0) unpacker = msgpack.Unpacker() for id in 
chunks.chunks: unpacker.feed(cache.objects[id]) self.assert_equal(data, [Item(internal_dict=d) for d in unpacker]) class RobustUnpackerTestCase(BaseTestCase): def make_chunks(self, items): return b''.join(msgpack.packb({'path': item}) for item in items) def _validator(self, value): return isinstance(value, dict) and value.get(b'path') in (b'foo', b'bar', b'boo', b'baz') def process(self, input): unpacker = RobustUnpacker(validator=self._validator, item_keys=ITEM_KEYS) result = [] for should_sync, chunks in input: if should_sync: unpacker.resync() for data in chunks: unpacker.feed(data) for item in unpacker: result.append(item) return result def test_extra_garbage_no_sync(self): chunks = [(False, [self.make_chunks([b'foo', b'bar'])]), (False, [b'garbage'] + [self.make_chunks([b'boo', b'baz'])])] result = self.process(chunks) self.assert_equal(result, [ {b'path': b'foo'}, {b'path': b'bar'}, 103, 97, 114, 98, 97, 103, 101, {b'path': b'boo'}, {b'path': b'baz'}]) def split(self, left, length): parts = [] while left: parts.append(left[:length]) left = left[length:] return parts def test_correct_stream(self): chunks = self.split(self.make_chunks([b'foo', b'bar', b'boo', b'baz']), 2) input = [(False, chunks)] result = self.process(input) self.assert_equal(result, [{b'path': b'foo'}, {b'path': b'bar'}, {b'path': b'boo'}, {b'path': b'baz'}]) def test_missing_chunk(self): chunks = self.split(self.make_chunks([b'foo', b'bar', b'boo', b'baz']), 4) input = [(False, chunks[:3]), (True, chunks[4:])] result = self.process(input) self.assert_equal(result, [{b'path': b'foo'}, {b'path': b'boo'}, {b'path': b'baz'}]) def test_corrupt_chunk(self): chunks = self.split(self.make_chunks([b'foo', b'bar', b'boo', b'baz']), 4) input = [(False, chunks[:3]), (True, [b'gar', b'bage'] + chunks[3:])] result = self.process(input) self.assert_equal(result, [{b'path': b'foo'}, {b'path': b'boo'}, {b'path': b'baz'}]) @pytest.fixture def item_keys_serialized(): return [msgpack.packb(name) for name in ITEM_KEYS] @pytest.mark.parametrize('packed', [b'', b'x', b'foobar', ] + [msgpack.packb(o) for o in ( [None, 0, 0.0, False, '', {}, [], ()] + [42, 23.42, True, b'foobar', {b'foo': b'bar'}, [b'foo', b'bar'], (b'foo', b'bar')] )]) def test_invalid_msgpacked_item(packed, item_keys_serialized): assert not valid_msgpacked_dict(packed, item_keys_serialized) # pytest-xdist requires always same order for the keys and dicts: IK = sorted(list(ITEM_KEYS)) @pytest.mark.parametrize('packed', [msgpack.packb(o) for o in [ {b'path': b'/a/b/c'}, # small (different msgpack mapping type!) 
OrderedDict((k, b'') for k in IK),  # as big (key count) as it gets
    OrderedDict((k, b'x' * 1000) for k in IK),  # as big (key count and volume) as it gets
]])
def test_valid_msgpacked_items(packed, item_keys_serialized):
    assert valid_msgpacked_dict(packed, item_keys_serialized)


def test_key_length_msgpacked_items():
    key = b'x' * 32  # 31 bytes is the limit for fixstr msgpack type
    data = {key: b''}
    item_keys_serialized = [msgpack.packb(key), ]
    assert valid_msgpacked_dict(msgpack.packb(data), item_keys_serialized)


def test_backup_io():
    with pytest.raises(BackupOSError):
        with backup_io:
            raise OSError(123)


def test_backup_io_iter():
    class Iterator:
        def __init__(self, exc):
            self.exc = exc

        def __next__(self):
            raise self.exc()

    oserror_iterator = Iterator(OSError)
    with pytest.raises(BackupOSError):
        for _ in backup_io_iter(oserror_iterator):
            pass

    normal_iterator = Iterator(StopIteration)
    for _ in backup_io_iter(normal_iterator):
        assert False, 'StopIteration handled incorrectly'
borgbackup-1.1.15/src/borg/testsuite/attic.tar.gz0000644000175000017500000000540613771325506021716 0ustar useruser00000000000000[binary gzip test fixture; contents omitted (not representable as text)]
borgbackup-1.1.15/src/borg/testsuite/helpers.py0000644000175000017500000012363413771325506021503 0ustar useruser00000000000000import hashlib
import io
import os
import shutil
import sys
from argparse import ArgumentTypeError
from datetime import datetime, timezone, timedelta
from time import mktime, strptime, sleep

import pytest

from .. import platform
from ..helpers import Location
from ..helpers import Buffer
from ..helpers import partial_format, format_file_size, parse_file_size, format_timedelta, format_line, PlaceholderError, replace_placeholders
from ..helpers import make_path_safe, clean_lines
from ..helpers import interval, prune_within, prune_split
from ..helpers import get_base_dir, get_cache_dir, get_keys_dir, get_security_dir, get_config_dir
from ..helpers import is_slow_msgpack
from ..helpers import msgpack, msgpack_fallback
from ..helpers import yes, TRUISH, FALSISH, DEFAULTISH
from ..helpers import StableDict, int_to_bigint, bigint_to_int, bin_to_hex
from ..helpers import parse_timestamp, ChunkIteratorFileWrapper, ChunkerParams
from ..helpers import ProgressIndicatorPercent, ProgressIndicatorEndless
from ..helpers import swidth_slice
from ..helpers import chunkit
from ..helpers import safe_ns, safe_s, SUPPORT_32BIT_PLATFORMS
from ..helpers import popen_with_error_handling
from ..helpers import dash_open
from . 
import BaseTestCase, FakeInputs class BigIntTestCase(BaseTestCase): def test_bigint(self): self.assert_equal(int_to_bigint(0), 0) self.assert_equal(int_to_bigint(2**63-1), 2**63-1) self.assert_equal(int_to_bigint(-2**63+1), -2**63+1) self.assert_equal(int_to_bigint(2**63), b'\x00\x00\x00\x00\x00\x00\x00\x80\x00') self.assert_equal(int_to_bigint(-2**63), b'\x00\x00\x00\x00\x00\x00\x00\x80\xff') self.assert_equal(bigint_to_int(int_to_bigint(-2**70)), -2**70) self.assert_equal(bigint_to_int(int_to_bigint(2**70)), 2**70) def test_bin_to_hex(): assert bin_to_hex(b'') == '' assert bin_to_hex(b'\x00\x01\xff') == '0001ff' class TestLocationWithoutEnv: @pytest.fixture def keys_dir(self, tmpdir, monkeypatch): tmpdir = str(tmpdir) monkeypatch.setenv('BORG_KEYS_DIR', tmpdir) if not tmpdir.endswith(os.path.sep): tmpdir += os.path.sep return tmpdir def test_ssh(self, monkeypatch, keys_dir): monkeypatch.delenv('BORG_REPO', raising=False) assert repr(Location('ssh://user@host:1234/some/path::archive')) == \ "Location(proto='ssh', user='user', host='host', port=1234, path='/some/path', archive='archive')" assert Location('ssh://user@host:1234/some/path::archive').to_key_filename() == keys_dir + 'host__some_path' assert repr(Location('ssh://user@host:1234/some/path')) == \ "Location(proto='ssh', user='user', host='host', port=1234, path='/some/path', archive=None)" assert repr(Location('ssh://user@host/some/path')) == \ "Location(proto='ssh', user='user', host='host', port=None, path='/some/path', archive=None)" assert repr(Location('ssh://user@[::]:1234/some/path::archive')) == \ "Location(proto='ssh', user='user', host='::', port=1234, path='/some/path', archive='archive')" assert repr(Location('ssh://user@[::]:1234/some/path')) == \ "Location(proto='ssh', user='user', host='::', port=1234, path='/some/path', archive=None)" assert Location('ssh://user@[::]:1234/some/path').to_key_filename() == keys_dir + '____some_path' assert repr(Location('ssh://user@[::]/some/path')) == \ "Location(proto='ssh', user='user', host='::', port=None, path='/some/path', archive=None)" assert repr(Location('ssh://user@[2001:db8::]:1234/some/path::archive')) == \ "Location(proto='ssh', user='user', host='2001:db8::', port=1234, path='/some/path', archive='archive')" assert repr(Location('ssh://user@[2001:db8::]:1234/some/path')) == \ "Location(proto='ssh', user='user', host='2001:db8::', port=1234, path='/some/path', archive=None)" assert Location('ssh://user@[2001:db8::]:1234/some/path').to_key_filename() == keys_dir + '2001_db8____some_path' assert repr(Location('ssh://user@[2001:db8::]/some/path')) == \ "Location(proto='ssh', user='user', host='2001:db8::', port=None, path='/some/path', archive=None)" assert repr(Location('ssh://user@[2001:db8::c0:ffee]:1234/some/path::archive')) == \ "Location(proto='ssh', user='user', host='2001:db8::c0:ffee', port=1234, path='/some/path', archive='archive')" assert repr(Location('ssh://user@[2001:db8::c0:ffee]:1234/some/path')) == \ "Location(proto='ssh', user='user', host='2001:db8::c0:ffee', port=1234, path='/some/path', archive=None)" assert repr(Location('ssh://user@[2001:db8::c0:ffee]/some/path')) == \ "Location(proto='ssh', user='user', host='2001:db8::c0:ffee', port=None, path='/some/path', archive=None)" assert repr(Location('ssh://user@[2001:db8::192.0.2.1]:1234/some/path::archive')) == \ "Location(proto='ssh', user='user', host='2001:db8::192.0.2.1', port=1234, path='/some/path', archive='archive')" assert repr(Location('ssh://user@[2001:db8::192.0.2.1]:1234/some/path')) == \ 
"Location(proto='ssh', user='user', host='2001:db8::192.0.2.1', port=1234, path='/some/path', archive=None)" assert repr(Location('ssh://user@[2001:db8::192.0.2.1]/some/path')) == \ "Location(proto='ssh', user='user', host='2001:db8::192.0.2.1', port=None, path='/some/path', archive=None)" assert Location('ssh://user@[2001:db8::192.0.2.1]/some/path').to_key_filename() == keys_dir + '2001_db8__192_0_2_1__some_path' def test_file(self, monkeypatch, keys_dir): monkeypatch.delenv('BORG_REPO', raising=False) assert repr(Location('file:///some/path::archive')) == \ "Location(proto='file', user=None, host=None, port=None, path='/some/path', archive='archive')" assert repr(Location('file:///some/path')) == \ "Location(proto='file', user=None, host=None, port=None, path='/some/path', archive=None)" assert Location('file:///some/path').to_key_filename() == keys_dir + 'some_path' def test_scp(self, monkeypatch, keys_dir): monkeypatch.delenv('BORG_REPO', raising=False) assert repr(Location('user@host:/some/path::archive')) == \ "Location(proto='ssh', user='user', host='host', port=None, path='/some/path', archive='archive')" assert repr(Location('user@host:/some/path')) == \ "Location(proto='ssh', user='user', host='host', port=None, path='/some/path', archive=None)" assert repr(Location('user@[::]:/some/path::archive')) == \ "Location(proto='ssh', user='user', host='::', port=None, path='/some/path', archive='archive')" assert repr(Location('user@[::]:/some/path')) == \ "Location(proto='ssh', user='user', host='::', port=None, path='/some/path', archive=None)" assert repr(Location('user@[2001:db8::]:/some/path::archive')) == \ "Location(proto='ssh', user='user', host='2001:db8::', port=None, path='/some/path', archive='archive')" assert repr(Location('user@[2001:db8::]:/some/path')) == \ "Location(proto='ssh', user='user', host='2001:db8::', port=None, path='/some/path', archive=None)" assert repr(Location('user@[2001:db8::c0:ffee]:/some/path::archive')) == \ "Location(proto='ssh', user='user', host='2001:db8::c0:ffee', port=None, path='/some/path', archive='archive')" assert repr(Location('user@[2001:db8::c0:ffee]:/some/path')) == \ "Location(proto='ssh', user='user', host='2001:db8::c0:ffee', port=None, path='/some/path', archive=None)" assert repr(Location('user@[2001:db8::192.0.2.1]:/some/path::archive')) == \ "Location(proto='ssh', user='user', host='2001:db8::192.0.2.1', port=None, path='/some/path', archive='archive')" assert repr(Location('user@[2001:db8::192.0.2.1]:/some/path')) == \ "Location(proto='ssh', user='user', host='2001:db8::192.0.2.1', port=None, path='/some/path', archive=None)" assert Location('user@[2001:db8::192.0.2.1]:/some/path').to_key_filename() == keys_dir + '2001_db8__192_0_2_1__some_path' def test_smb(self, monkeypatch, keys_dir): monkeypatch.delenv('BORG_REPO', raising=False) assert repr(Location('file:////server/share/path::archive')) == \ "Location(proto='file', user=None, host=None, port=None, path='//server/share/path', archive='archive')" assert Location('file:////server/share/path::archive').to_key_filename() == keys_dir + 'server_share_path' def test_folder(self, monkeypatch, keys_dir): monkeypatch.delenv('BORG_REPO', raising=False) assert repr(Location('path::archive')) == \ "Location(proto='file', user=None, host=None, port=None, path='path', archive='archive')" assert repr(Location('path')) == \ "Location(proto='file', user=None, host=None, port=None, path='path', archive=None)" assert Location('path').to_key_filename() == keys_dir + 'path' def 
test_long_path(self, monkeypatch, keys_dir): monkeypatch.delenv('BORG_REPO', raising=False) assert Location(os.path.join(*(40 * ['path']))).to_key_filename() == keys_dir + '_'.join(20 * ['path']) + '_' def test_abspath(self, monkeypatch, keys_dir): monkeypatch.delenv('BORG_REPO', raising=False) assert repr(Location('/some/absolute/path::archive')) == \ "Location(proto='file', user=None, host=None, port=None, path='/some/absolute/path', archive='archive')" assert repr(Location('/some/absolute/path')) == \ "Location(proto='file', user=None, host=None, port=None, path='/some/absolute/path', archive=None)" assert Location('/some/absolute/path').to_key_filename() == keys_dir + 'some_absolute_path' assert repr(Location('ssh://user@host/some/path')) == \ "Location(proto='ssh', user='user', host='host', port=None, path='/some/path', archive=None)" assert Location('ssh://user@host/some/path').to_key_filename() == keys_dir + 'host__some_path' def test_relpath(self, monkeypatch, keys_dir): monkeypatch.delenv('BORG_REPO', raising=False) assert repr(Location('some/relative/path::archive')) == \ "Location(proto='file', user=None, host=None, port=None, path='some/relative/path', archive='archive')" assert repr(Location('some/relative/path')) == \ "Location(proto='file', user=None, host=None, port=None, path='some/relative/path', archive=None)" assert Location('some/relative/path').to_key_filename() == keys_dir + 'some_relative_path' assert repr(Location('ssh://user@host/./some/path')) == \ "Location(proto='ssh', user='user', host='host', port=None, path='/./some/path', archive=None)" assert Location('ssh://user@host/./some/path').to_key_filename() == keys_dir + 'host__some_path' assert repr(Location('ssh://user@host/~/some/path')) == \ "Location(proto='ssh', user='user', host='host', port=None, path='/~/some/path', archive=None)" assert Location('ssh://user@host/~/some/path').to_key_filename() == keys_dir + 'host__some_path' assert repr(Location('ssh://user@host/~user/some/path')) == \ "Location(proto='ssh', user='user', host='host', port=None, path='/~user/some/path', archive=None)" assert Location('ssh://user@host/~user/some/path').to_key_filename() == keys_dir + 'host__user_some_path' def test_with_colons(self, monkeypatch, keys_dir): monkeypatch.delenv('BORG_REPO', raising=False) assert repr(Location('/abs/path:w:cols::arch:col')) == \ "Location(proto='file', user=None, host=None, port=None, path='/abs/path:w:cols', archive='arch:col')" assert repr(Location('/abs/path:with:colons::archive')) == \ "Location(proto='file', user=None, host=None, port=None, path='/abs/path:with:colons', archive='archive')" assert repr(Location('/abs/path:with:colons')) == \ "Location(proto='file', user=None, host=None, port=None, path='/abs/path:with:colons', archive=None)" assert Location('/abs/path:with:colons').to_key_filename() == keys_dir + 'abs_path_with_colons' def test_user_parsing(self): # see issue #1930 assert repr(Location('host:path::2016-12-31@23:59:59')) == \ "Location(proto='ssh', user=None, host='host', port=None, path='path', archive='2016-12-31@23:59:59')" assert repr(Location('ssh://host/path::2016-12-31@23:59:59')) == \ "Location(proto='ssh', user=None, host='host', port=None, path='/path', archive='2016-12-31@23:59:59')" def test_with_timestamp(self): assert repr(Location('path::archive-{utcnow}').with_timestamp(datetime(2002, 9, 19, tzinfo=timezone.utc))) == \ "Location(proto='file', user=None, host=None, port=None, path='path', archive='archive-2002-09-19T00:00:00')" def test_underspecified(self, 
monkeypatch): monkeypatch.delenv('BORG_REPO', raising=False) with pytest.raises(ValueError): Location('::archive') with pytest.raises(ValueError): Location('::') with pytest.raises(ValueError): Location() def test_no_slashes(self, monkeypatch): monkeypatch.delenv('BORG_REPO', raising=False) with pytest.raises(ValueError): Location('/some/path/to/repo::archive_name_with/slashes/is_invalid') def test_canonical_path(self, monkeypatch): monkeypatch.delenv('BORG_REPO', raising=False) locations = ['some/path::archive', 'file://some/path::archive', 'host:some/path::archive', 'host:~user/some/path::archive', 'ssh://host/some/path::archive', 'ssh://user@host:1234/some/path::archive'] for location in locations: assert Location(location).canonical_path() == \ Location(Location(location).canonical_path()).canonical_path(), "failed: %s" % location def test_format_path(self, monkeypatch): monkeypatch.delenv('BORG_REPO', raising=False) test_pid = os.getpid() assert repr(Location('/some/path::archive{pid}')) == \ "Location(proto='file', user=None, host=None, port=None, path='/some/path', archive='archive{}')".format(test_pid) location_time1 = Location('/some/path::archive{now:%s}') sleep(1.1) location_time2 = Location('/some/path::archive{now:%s}') assert location_time1.archive != location_time2.archive def test_bad_syntax(self): with pytest.raises(ValueError): # this is invalid due to the 2nd colon, correct: 'ssh://user@host/path' Location('ssh://user@host:/path') class TestLocationWithEnv: def test_ssh(self, monkeypatch): monkeypatch.setenv('BORG_REPO', 'ssh://user@host:1234/some/path') assert repr(Location('::archive')) == \ "Location(proto='ssh', user='user', host='host', port=1234, path='/some/path', archive='archive')" assert repr(Location('::')) == \ "Location(proto='ssh', user='user', host='host', port=1234, path='/some/path', archive=None)" assert repr(Location()) == \ "Location(proto='ssh', user='user', host='host', port=1234, path='/some/path', archive=None)" def test_file(self, monkeypatch): monkeypatch.setenv('BORG_REPO', 'file:///some/path') assert repr(Location('::archive')) == \ "Location(proto='file', user=None, host=None, port=None, path='/some/path', archive='archive')" assert repr(Location('::')) == \ "Location(proto='file', user=None, host=None, port=None, path='/some/path', archive=None)" assert repr(Location()) == \ "Location(proto='file', user=None, host=None, port=None, path='/some/path', archive=None)" def test_scp(self, monkeypatch): monkeypatch.setenv('BORG_REPO', 'user@host:/some/path') assert repr(Location('::archive')) == \ "Location(proto='ssh', user='user', host='host', port=None, path='/some/path', archive='archive')" assert repr(Location('::')) == \ "Location(proto='ssh', user='user', host='host', port=None, path='/some/path', archive=None)" assert repr(Location()) == \ "Location(proto='ssh', user='user', host='host', port=None, path='/some/path', archive=None)" def test_folder(self, monkeypatch): monkeypatch.setenv('BORG_REPO', 'path') assert repr(Location('::archive')) == \ "Location(proto='file', user=None, host=None, port=None, path='path', archive='archive')" assert repr(Location('::')) == \ "Location(proto='file', user=None, host=None, port=None, path='path', archive=None)" assert repr(Location()) == \ "Location(proto='file', user=None, host=None, port=None, path='path', archive=None)" def test_abspath(self, monkeypatch): monkeypatch.setenv('BORG_REPO', '/some/absolute/path') assert repr(Location('::archive')) == \ "Location(proto='file', user=None, host=None, 
port=None, path='/some/absolute/path', archive='archive')" assert repr(Location('::')) == \ "Location(proto='file', user=None, host=None, port=None, path='/some/absolute/path', archive=None)" assert repr(Location()) == \ "Location(proto='file', user=None, host=None, port=None, path='/some/absolute/path', archive=None)" def test_relpath(self, monkeypatch): monkeypatch.setenv('BORG_REPO', 'some/relative/path') assert repr(Location('::archive')) == \ "Location(proto='file', user=None, host=None, port=None, path='some/relative/path', archive='archive')" assert repr(Location('::')) == \ "Location(proto='file', user=None, host=None, port=None, path='some/relative/path', archive=None)" assert repr(Location()) == \ "Location(proto='file', user=None, host=None, port=None, path='some/relative/path', archive=None)" def test_with_colons(self, monkeypatch): monkeypatch.setenv('BORG_REPO', '/abs/path:w:cols') assert repr(Location('::arch:col')) == \ "Location(proto='file', user=None, host=None, port=None, path='/abs/path:w:cols', archive='arch:col')" assert repr(Location('::')) == \ "Location(proto='file', user=None, host=None, port=None, path='/abs/path:w:cols', archive=None)" assert repr(Location()) == \ "Location(proto='file', user=None, host=None, port=None, path='/abs/path:w:cols', archive=None)" def test_no_slashes(self, monkeypatch): monkeypatch.setenv('BORG_REPO', '/some/absolute/path') with pytest.raises(ValueError): Location('::archive_name_with/slashes/is_invalid') class FormatTimedeltaTestCase(BaseTestCase): def test(self): t0 = datetime(2001, 1, 1, 10, 20, 3, 0) t1 = datetime(2001, 1, 1, 12, 20, 4, 100000) self.assert_equal( format_timedelta(t1 - t0), '2 hours 1.10 seconds' ) def test_chunkerparams(): assert ChunkerParams('19,23,21,4095') == (19, 23, 21, 4095) assert ChunkerParams('10,23,16,4095') == (10, 23, 16, 4095) with pytest.raises(ValueError): ChunkerParams('19,24,21,4095') class MakePathSafeTestCase(BaseTestCase): def test(self): self.assert_equal(make_path_safe('/foo/bar'), 'foo/bar') self.assert_equal(make_path_safe('/foo/bar'), 'foo/bar') self.assert_equal(make_path_safe('/f/bar'), 'f/bar') self.assert_equal(make_path_safe('fo/bar'), 'fo/bar') self.assert_equal(make_path_safe('../foo/bar'), 'foo/bar') self.assert_equal(make_path_safe('../../foo/bar'), 'foo/bar') self.assert_equal(make_path_safe('/'), '.') self.assert_equal(make_path_safe('/'), '.') class MockArchive: def __init__(self, ts): self.ts = ts def __repr__(self): return repr(self.ts) class PruneSplitTestCase(BaseTestCase): def test(self): def local_to_UTC(month, day): """Convert noon on the month and day in 2013 to UTC.""" seconds = mktime(strptime('2013-%02d-%02d 12:00' % (month, day), '%Y-%m-%d %H:%M')) return datetime.fromtimestamp(seconds, tz=timezone.utc) def subset(lst, indices): return {lst[i] for i in indices} def dotest(test_archives, n, skip, indices): for ta in test_archives, reversed(test_archives): self.assert_equal(set(prune_split(ta, '%Y-%m', n, skip)), subset(test_archives, indices)) test_pairs = [(1, 1), (2, 1), (2, 28), (3, 1), (3, 2), (3, 31), (5, 1)] test_dates = [local_to_UTC(month, day) for month, day in test_pairs] test_archives = [MockArchive(date) for date in test_dates] dotest(test_archives, 3, [], [6, 5, 2]) dotest(test_archives, -1, [], [6, 5, 2, 0]) dotest(test_archives, 3, [test_archives[6]], [5, 2, 0]) dotest(test_archives, 3, [test_archives[5]], [6, 2, 0]) dotest(test_archives, 3, [test_archives[4]], [6, 5, 2]) dotest(test_archives, 0, [], []) class IntervalTestCase(BaseTestCase): def 
test_interval(self): self.assert_equal(interval('1H'), 1) self.assert_equal(interval('1d'), 24) self.assert_equal(interval('1w'), 168) self.assert_equal(interval('1m'), 744) self.assert_equal(interval('1y'), 8760) def test_interval_time_unit(self): with pytest.raises(ArgumentTypeError) as exc: interval('H') self.assert_equal( exc.value.args, ('Unexpected interval number "": expected an integer greater than 0',)) with pytest.raises(ArgumentTypeError) as exc: interval('-1d') self.assert_equal( exc.value.args, ('Unexpected interval number "-1": expected an integer greater than 0',)) with pytest.raises(ArgumentTypeError) as exc: interval('food') self.assert_equal( exc.value.args, ('Unexpected interval number "foo": expected an integer greater than 0',)) def test_interval_number(self): with pytest.raises(ArgumentTypeError) as exc: interval('5') self.assert_equal( exc.value.args, ("Unexpected interval time unit \"5\": expected one of ['H', 'd', 'w', 'm', 'y']",)) class PruneWithinTestCase(BaseTestCase): def test_prune_within(self): def subset(lst, indices): return {lst[i] for i in indices} def dotest(test_archives, within, indices): for ta in test_archives, reversed(test_archives): self.assert_equal(set(prune_within(ta, interval(within))), subset(test_archives, indices)) # 1 minute, 1.5 hours, 2.5 hours, 3.5 hours, 25 hours, 49 hours test_offsets = [60, 90*60, 150*60, 210*60, 25*60*60, 49*60*60] now = datetime.now(timezone.utc) test_dates = [now - timedelta(seconds=s) for s in test_offsets] test_archives = [MockArchive(date) for date in test_dates] dotest(test_archives, '1H', [0]) dotest(test_archives, '2H', [0, 1]) dotest(test_archives, '3H', [0, 1, 2]) dotest(test_archives, '24H', [0, 1, 2, 3]) dotest(test_archives, '26H', [0, 1, 2, 3, 4]) dotest(test_archives, '2d', [0, 1, 2, 3, 4]) dotest(test_archives, '50H', [0, 1, 2, 3, 4, 5]) dotest(test_archives, '3d', [0, 1, 2, 3, 4, 5]) dotest(test_archives, '1w', [0, 1, 2, 3, 4, 5]) dotest(test_archives, '1m', [0, 1, 2, 3, 4, 5]) dotest(test_archives, '1y', [0, 1, 2, 3, 4, 5]) class StableDictTestCase(BaseTestCase): def test(self): d = StableDict(foo=1, bar=2, boo=3, baz=4) self.assert_equal(list(d.items()), [('bar', 2), ('baz', 4), ('boo', 3), ('foo', 1)]) self.assert_equal(hashlib.md5(msgpack.packb(d)).hexdigest(), 'fc78df42cd60691b3ac3dd2a2b39903f') class TestParseTimestamp(BaseTestCase): def test(self): self.assert_equal(parse_timestamp('2015-04-19T20:25:00.226410'), datetime(2015, 4, 19, 20, 25, 0, 226410, timezone.utc)) self.assert_equal(parse_timestamp('2015-04-19T20:25:00'), datetime(2015, 4, 19, 20, 25, 0, 0, timezone.utc)) def test_get_base_dir(monkeypatch): """test that get_base_dir respects environment""" monkeypatch.delenv('BORG_BASE_DIR', raising=False) monkeypatch.delenv('HOME', raising=False) monkeypatch.delenv('USER', raising=False) assert get_base_dir() == os.path.expanduser('~') monkeypatch.setenv('USER', 'root') assert get_base_dir() == os.path.expanduser('~root') monkeypatch.setenv('HOME', '/var/tmp/home') assert get_base_dir() == '/var/tmp/home' monkeypatch.setenv('BORG_BASE_DIR', '/var/tmp/base') assert get_base_dir() == '/var/tmp/base' def test_get_config_dir(monkeypatch): """test that get_config_dir respects environment""" monkeypatch.delenv('BORG_CONFIG_DIR', raising=False) monkeypatch.delenv('XDG_CONFIG_HOME', raising=False) assert get_config_dir() == os.path.join(os.path.expanduser('~'), '.config', 'borg') monkeypatch.setenv('XDG_CONFIG_HOME', '/var/tmp/.config') assert get_config_dir() == os.path.join('/var/tmp/.config', 
'borg') monkeypatch.setenv('BORG_CONFIG_DIR', '/var/tmp') assert get_config_dir() == '/var/tmp' def test_get_cache_dir(monkeypatch): """test that get_cache_dir respects environment""" monkeypatch.delenv('BORG_CACHE_DIR', raising=False) monkeypatch.delenv('XDG_CACHE_HOME', raising=False) assert get_cache_dir() == os.path.join(os.path.expanduser('~'), '.cache', 'borg') monkeypatch.setenv('XDG_CACHE_HOME', '/var/tmp/.cache') assert get_cache_dir() == os.path.join('/var/tmp/.cache', 'borg') monkeypatch.setenv('BORG_CACHE_DIR', '/var/tmp') assert get_cache_dir() == '/var/tmp' def test_get_keys_dir(monkeypatch): """test that get_keys_dir respects environment""" monkeypatch.delenv('BORG_KEYS_DIR', raising=False) monkeypatch.delenv('XDG_CONFIG_HOME', raising=False) assert get_keys_dir() == os.path.join(os.path.expanduser('~'), '.config', 'borg', 'keys') monkeypatch.setenv('XDG_CONFIG_HOME', '/var/tmp/.config') assert get_keys_dir() == os.path.join('/var/tmp/.config', 'borg', 'keys') monkeypatch.setenv('BORG_KEYS_DIR', '/var/tmp') assert get_keys_dir() == '/var/tmp' def test_get_security_dir(monkeypatch): """test that get_security_dir respects environment""" monkeypatch.delenv('BORG_SECURITY_DIR', raising=False) monkeypatch.delenv('XDG_CONFIG_HOME', raising=False) assert get_security_dir() == os.path.join(os.path.expanduser('~'), '.config', 'borg', 'security') assert get_security_dir(repository_id='1234') == os.path.join(os.path.expanduser('~'), '.config', 'borg', 'security', '1234') monkeypatch.setenv('XDG_CONFIG_HOME', '/var/tmp/.config') assert get_security_dir() == os.path.join('/var/tmp/.config', 'borg', 'security') monkeypatch.setenv('BORG_SECURITY_DIR', '/var/tmp') assert get_security_dir() == '/var/tmp' def test_file_size(): """test the size formatting routines""" si_size_map = { 0: '0 B', # no rounding necessary for those 1: '1 B', 142: '142 B', 999: '999 B', 1000: '1.00 kB', # rounding starts here 1001: '1.00 kB', # should be rounded away 1234: '1.23 kB', # should be rounded down 1235: '1.24 kB', # should be rounded up 1010: '1.01 kB', # rounded down as well 999990000: '999.99 MB', # rounded down 999990001: '999.99 MB', # rounded down 999995000: '1.00 GB', # rounded up to next unit 10**6: '1.00 MB', # and all the remaining units, megabytes 10**9: '1.00 GB', # gigabytes 10**12: '1.00 TB', # terabytes 10**15: '1.00 PB', # petabytes 10**18: '1.00 EB', # exabytes 10**21: '1.00 ZB', # zottabytes 10**24: '1.00 YB', # yottabytes -1: '-1 B', # negative value -1010: '-1.01 kB', # negative value with rounding } for size, fmt in si_size_map.items(): assert format_file_size(size) == fmt def test_file_size_precision(): assert format_file_size(1234, precision=1) == '1.2 kB' # rounded down assert format_file_size(1254, precision=1) == '1.3 kB' # rounded up assert format_file_size(999990000, precision=1) == '1.0 GB' # and not 999.9 MB or 1000.0 MB def test_file_size_sign(): si_size_map = { 0: '0 B', 1: '+1 B', 1234: '+1.23 kB', -1: '-1 B', -1234: '-1.23 kB', } for size, fmt in si_size_map.items(): assert format_file_size(size, sign=True) == fmt @pytest.mark.parametrize('string,value', ( ('1', 1), ('20', 20), ('5K', 5000), ('1.75M', 1750000), ('1e+9', 1e9), ('-1T', -1e12), )) def test_parse_file_size(string, value): assert parse_file_size(string) == int(value) @pytest.mark.parametrize('string', ( '', '5 Äpfel', '4E', '2229 bit', '1B', )) def test_parse_file_size_invalid(string): with pytest.raises(ValueError): parse_file_size(string) def test_is_slow_msgpack(): saved_packer = msgpack.Packer try: 
msgpack.Packer = msgpack_fallback.Packer assert is_slow_msgpack() finally: msgpack.Packer = saved_packer # this tests that we have fast msgpack on test platform: assert not is_slow_msgpack() class TestBuffer: def test_type(self): buffer = Buffer(bytearray) assert isinstance(buffer.get(), bytearray) buffer = Buffer(bytes) # don't do that in practice assert isinstance(buffer.get(), bytes) def test_len(self): buffer = Buffer(bytearray, size=0) b = buffer.get() assert len(buffer) == len(b) == 0 buffer = Buffer(bytearray, size=1234) b = buffer.get() assert len(buffer) == len(b) == 1234 def test_resize(self): buffer = Buffer(bytearray, size=100) assert len(buffer) == 100 b1 = buffer.get() buffer.resize(200) assert len(buffer) == 200 b2 = buffer.get() assert b2 is not b1 # new, bigger buffer buffer.resize(100) assert len(buffer) >= 100 b3 = buffer.get() assert b3 is b2 # still same buffer (200) buffer.resize(100, init=True) assert len(buffer) == 100 # except on init b4 = buffer.get() assert b4 is not b3 # new, smaller buffer def test_limit(self): buffer = Buffer(bytearray, size=100, limit=200) buffer.resize(200) assert len(buffer) == 200 with pytest.raises(Buffer.MemoryLimitExceeded): buffer.resize(201) assert len(buffer) == 200 def test_get(self): buffer = Buffer(bytearray, size=100, limit=200) b1 = buffer.get(50) assert len(b1) >= 50 # == 100 b2 = buffer.get(100) assert len(b2) >= 100 # == 100 assert b2 is b1 # did not need resizing yet b3 = buffer.get(200) assert len(b3) == 200 assert b3 is not b2 # new, resized buffer with pytest.raises(Buffer.MemoryLimitExceeded): buffer.get(201) # beyond limit assert len(buffer) == 200 def test_yes_input(): inputs = list(TRUISH) input = FakeInputs(inputs) for i in inputs: assert yes(input=input) inputs = list(FALSISH) input = FakeInputs(inputs) for i in inputs: assert not yes(input=input) def test_yes_input_defaults(): inputs = list(DEFAULTISH) input = FakeInputs(inputs) for i in inputs: assert yes(default=True, input=input) input = FakeInputs(inputs) for i in inputs: assert not yes(default=False, input=input) def test_yes_input_custom(): input = FakeInputs(['YES', 'SURE', 'NOPE', ]) assert yes(truish=('YES', ), input=input) assert yes(truish=('SURE', ), input=input) assert not yes(falsish=('NOPE', ), input=input) def test_yes_env(monkeypatch): for value in TRUISH: monkeypatch.setenv('OVERRIDE_THIS', value) assert yes(env_var_override='OVERRIDE_THIS') for value in FALSISH: monkeypatch.setenv('OVERRIDE_THIS', value) assert not yes(env_var_override='OVERRIDE_THIS') def test_yes_env_default(monkeypatch): for value in DEFAULTISH: monkeypatch.setenv('OVERRIDE_THIS', value) assert yes(env_var_override='OVERRIDE_THIS', default=True) assert not yes(env_var_override='OVERRIDE_THIS', default=False) def test_yes_defaults(): input = FakeInputs(['invalid', '', ' ']) assert not yes(input=input) # default=False assert not yes(input=input) assert not yes(input=input) input = FakeInputs(['invalid', '', ' ']) assert yes(default=True, input=input) assert yes(default=True, input=input) assert yes(default=True, input=input) input = FakeInputs([]) assert yes(default=True, input=input) assert not yes(default=False, input=input) with pytest.raises(ValueError): yes(default=None) def test_yes_retry(): input = FakeInputs(['foo', 'bar', TRUISH[0], ]) assert yes(retry_msg='Retry: ', input=input) input = FakeInputs(['foo', 'bar', FALSISH[0], ]) assert not yes(retry_msg='Retry: ', input=input) def test_yes_no_retry(): input = FakeInputs(['foo', 'bar', TRUISH[0], ]) assert not 
yes(retry=False, default=False, input=input) input = FakeInputs(['foo', 'bar', FALSISH[0], ]) assert yes(retry=False, default=True, input=input) def test_yes_output(capfd): input = FakeInputs(['invalid', 'y', 'n']) assert yes(msg='intro-msg', false_msg='false-msg', true_msg='true-msg', retry_msg='retry-msg', input=input) out, err = capfd.readouterr() assert out == '' assert 'intro-msg' in err assert 'retry-msg' in err assert 'true-msg' in err assert not yes(msg='intro-msg', false_msg='false-msg', true_msg='true-msg', retry_msg='retry-msg', input=input) out, err = capfd.readouterr() assert out == '' assert 'intro-msg' in err assert 'retry-msg' not in err assert 'false-msg' in err def test_yes_env_output(capfd, monkeypatch): env_var = 'OVERRIDE_SOMETHING' monkeypatch.setenv(env_var, 'yes') assert yes(env_var_override=env_var) out, err = capfd.readouterr() assert out == '' assert env_var in err assert 'yes' in err def test_progress_percentage_sameline(capfd, monkeypatch): # run the test as if it was in a 4x1 terminal monkeypatch.setenv('COLUMNS', '4') monkeypatch.setenv('LINES', '1') pi = ProgressIndicatorPercent(1000, step=5, start=0, msg="%3.0f%%") pi.logger.setLevel('INFO') pi.show(0) out, err = capfd.readouterr() assert err == ' 0%\r' pi.show(420) pi.show(680) out, err = capfd.readouterr() assert err == ' 42%\r 68%\r' pi.show(1000) out, err = capfd.readouterr() assert err == '100%\r' pi.finish() out, err = capfd.readouterr() assert err == ' ' * 4 + '\r' def test_progress_percentage_step(capfd, monkeypatch): # run the test as if it was in a 4x1 terminal monkeypatch.setenv('COLUMNS', '4') monkeypatch.setenv('LINES', '1') pi = ProgressIndicatorPercent(100, step=2, start=0, msg="%3.0f%%") pi.logger.setLevel('INFO') pi.show() out, err = capfd.readouterr() assert err == ' 0%\r' pi.show() out, err = capfd.readouterr() assert err == '' # no output at 1% as we have step == 2 pi.show() out, err = capfd.readouterr() assert err == ' 2%\r' def test_progress_percentage_quiet(capfd): pi = ProgressIndicatorPercent(1000, step=5, start=0, msg="%3.0f%%") pi.logger.setLevel('WARN') pi.show(0) out, err = capfd.readouterr() assert err == '' pi.show(1000) out, err = capfd.readouterr() assert err == '' pi.finish() out, err = capfd.readouterr() assert err == '' def test_progress_endless(capfd): pi = ProgressIndicatorEndless(step=1, file=sys.stderr) pi.show() out, err = capfd.readouterr() assert err == '.' pi.show() out, err = capfd.readouterr() assert err == '.' pi.finish() out, err = capfd.readouterr() assert err == '\n' def test_progress_endless_step(capfd): pi = ProgressIndicatorEndless(step=2, file=sys.stderr) pi.show() out, err = capfd.readouterr() assert err == '' # no output here as we have step == 2 pi.show() out, err = capfd.readouterr() assert err == '.' pi.show() out, err = capfd.readouterr() assert err == '' # no output here as we have step == 2 pi.show() out, err = capfd.readouterr() assert err == '.' 
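# The progress tests above pin down the ProgressIndicatorPercent contract:
# a line is rendered at most once per `step` percent, it is padded to the
# terminal width and terminated with '\r', and finish() blanks the line.
# A minimal usage sketch follows; the total/step values are made up for
# illustration, and ProgressIndicatorPercent is already imported at the top
# of this module.
def _progress_indicator_sketch():
    pi = ProgressIndicatorPercent(1000, step=5, start=0, msg="%3.0f%%")
    pi.logger.setLevel('INFO')   # output goes through a logger (captured on stderr in the tests above)
    for i in range(1000):
        pi.show(i)               # only prints when another `step` percent has been reached
    pi.finish()                  # overwrites the progress line with blanks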
def test_partial_format(): assert partial_format('{space:10}', {'space': ' '}) == ' ' * 10 assert partial_format('{foobar}', {'bar': 'wrong', 'foobar': 'correct'}) == 'correct' assert partial_format('{unknown_key}', {}) == '{unknown_key}' assert partial_format('{key}{{escaped_key}}', {}) == '{key}{{escaped_key}}' assert partial_format('{{escaped_key}}', {'escaped_key': 1234}) == '{{escaped_key}}' def test_chunk_file_wrapper(): cfw = ChunkIteratorFileWrapper(iter([b'abc', b'def'])) assert cfw.read(2) == b'ab' assert cfw.read(50) == b'cdef' assert cfw.exhausted cfw = ChunkIteratorFileWrapper(iter([])) assert cfw.read(2) == b'' assert cfw.exhausted def test_chunkit(): it = chunkit('abcdefg', 3) assert next(it) == ['a', 'b', 'c'] assert next(it) == ['d', 'e', 'f'] assert next(it) == ['g'] with pytest.raises(StopIteration): next(it) with pytest.raises(StopIteration): next(it) it = chunkit('ab', 3) assert list(it) == [['a', 'b']] it = chunkit('', 3) assert list(it) == [] def test_clean_lines(): conf = """\ #comment data1 #data1 data2 data3 """.splitlines(keepends=True) assert list(clean_lines(conf)) == ['data1 #data1', 'data2', 'data3', ] assert list(clean_lines(conf, lstrip=False)) == ['data1 #data1', 'data2', ' data3', ] assert list(clean_lines(conf, rstrip=False)) == ['data1 #data1\n', 'data2\n', 'data3\n', ] assert list(clean_lines(conf, remove_empty=False)) == ['data1 #data1', 'data2', '', 'data3', ] assert list(clean_lines(conf, remove_comments=False)) == ['#comment', 'data1 #data1', 'data2', 'data3', ] def test_format_line(): data = dict(foo='bar baz') assert format_line('', data) == '' assert format_line('{foo}', data) == 'bar baz' assert format_line('foo{foo}foo', data) == 'foobar bazfoo' def test_format_line_erroneous(): data = dict() with pytest.raises(PlaceholderError): assert format_line('{invalid}', data) with pytest.raises(PlaceholderError): assert format_line('{}', data) with pytest.raises(PlaceholderError): assert format_line('{now!r}', data) with pytest.raises(PlaceholderError): assert format_line('{now.__class__.__module__.__builtins__}', data) def test_replace_placeholders(): now = datetime.now() assert " " not in replace_placeholders('{now}') assert int(replace_placeholders('{now:%Y}')) == now.year def test_override_placeholders(): assert replace_placeholders('{uuid4}', overrides={'uuid4': "overridden"}) == "overridden" def working_swidth(): return platform.swidth('선') == 2 @pytest.mark.skipif(not working_swidth(), reason='swidth() is not supported / active') def test_swidth_slice(): string = '나윤선나윤선나윤선나윤선나윤선' assert swidth_slice(string, 1) == '' assert swidth_slice(string, -1) == '' assert swidth_slice(string, 4) == '나윤' assert swidth_slice(string, -4) == '윤선' @pytest.mark.skipif(not working_swidth(), reason='swidth() is not supported / active') def test_swidth_slice_mixed_characters(): string = '나윤a선나윤선나윤선나윤선나윤선' assert swidth_slice(string, 5) == '나윤a' assert swidth_slice(string, 6) == '나윤a' def test_safe_timestamps(): if SUPPORT_32BIT_PLATFORMS: # ns fit into int64 assert safe_ns(2 ** 64) <= 2 ** 63 - 1 assert safe_ns(-1) == 0 # s fit into int32 assert safe_s(2 ** 64) <= 2 ** 31 - 1 assert safe_s(-1) == 0 # datetime won't fall over its y10k problem beyond_y10k = 2 ** 100 with pytest.raises(OverflowError): datetime.utcfromtimestamp(beyond_y10k) assert datetime.utcfromtimestamp(safe_s(beyond_y10k)) > datetime(2038, 1, 1) assert datetime.utcfromtimestamp(safe_ns(beyond_y10k) / 1000000000) > datetime(2038, 1, 1) else: # ns fit into int64 assert safe_ns(2 ** 64) <= 2 ** 63 - 1 
assert safe_ns(-1) == 0 # s are so that their ns conversion fits into int64 assert safe_s(2 ** 64) * 1000000000 <= 2 ** 63 - 1 assert safe_s(-1) == 0 # datetime won't fall over its y10k problem beyond_y10k = 2 ** 100 with pytest.raises(OverflowError): datetime.utcfromtimestamp(beyond_y10k) assert datetime.utcfromtimestamp(safe_s(beyond_y10k)) > datetime(2262, 1, 1) assert datetime.utcfromtimestamp(safe_ns(beyond_y10k) / 1000000000) > datetime(2262, 1, 1) class TestPopenWithErrorHandling: @pytest.mark.skipif(not shutil.which('test'), reason='"test" binary is needed') def test_simple(self): proc = popen_with_error_handling('test 1') assert proc.wait() == 0 @pytest.mark.skipif(shutil.which('borg-foobar-test-notexist'), reason='"borg-foobar-test-notexist" binary exists (somehow?)') def test_not_found(self): proc = popen_with_error_handling('borg-foobar-test-notexist 1234') assert proc is None @pytest.mark.parametrize('cmd', ( 'mismatched "quote', 'foo --bar="baz', '' )) def test_bad_syntax(self, cmd): proc = popen_with_error_handling(cmd) assert proc is None def test_shell(self): with pytest.raises(AssertionError): popen_with_error_handling('', shell=True) def test_dash_open(): assert dash_open('-', 'r') is sys.stdin assert dash_open('-', 'w') is sys.stdout assert dash_open('-', 'rb') is sys.stdin.buffer assert dash_open('-', 'wb') is sys.stdout.buffer borgbackup-1.1.15/src/borg/testsuite/version.py0000644000175000017500000000340713771325506021521 0ustar useruser00000000000000import pytest from ..version import parse_version, format_version @pytest.mark.parametrize("version_str, version_tuple", [ # setuptools < 8.0 uses "-" ('1.0.0a1.dev204-g8866961.d20170606', (1, 0, 0, -4, 1)), ('1.0.0a1.dev204-g8866961', (1, 0, 0, -4, 1)), ('1.0.0-d20170606', (1, 0, 0, -1)), # setuptools >= 8.0 uses "+" ('1.0.0a1.dev204+g8866961.d20170606', (1, 0, 0, -4, 1)), ('1.0.0a1.dev204+g8866961', (1, 0, 0, -4, 1)), ('1.0.0+d20170606', (1, 0, 0, -1)), # pre-release versions: ('1.0.0a1', (1, 0, 0, -4, 1)), ('1.0.0a2', (1, 0, 0, -4, 2)), ('1.0.0b3', (1, 0, 0, -3, 3)), ('1.0.0rc4', (1, 0, 0, -2, 4)), # release versions: ('0.0.0', (0, 0, 0, -1)), ('0.0.11', (0, 0, 11, -1)), ('0.11.0', (0, 11, 0, -1)), ('11.0.0', (11, 0, 0, -1)), ]) def test_parse_version(version_str, version_tuple): assert parse_version(version_str) == version_tuple def test_parse_version_invalid(): with pytest.raises(ValueError): assert parse_version('') # we require x.y.z versions with pytest.raises(ValueError): assert parse_version('1') # we require x.y.z versions with pytest.raises(ValueError): assert parse_version('1.2') # we require x.y.z versions with pytest.raises(ValueError): assert parse_version('crap') @pytest.mark.parametrize("version_str, version_tuple", [ ('1.0.0a1', (1, 0, 0, -4, 1)), ('1.0.0', (1, 0, 0, -1)), ('1.0.0a2', (1, 0, 0, -4, 2)), ('1.0.0b3', (1, 0, 0, -3, 3)), ('1.0.0rc4', (1, 0, 0, -2, 4)), ('0.0.0', (0, 0, 0, -1)), ('0.0.11', (0, 0, 11, -1)), ('0.11.0', (0, 11, 0, -1)), ('11.0.0', (11, 0, 0, -1)), ]) def test_format_version(version_str, version_tuple): assert format_version(version_tuple) == version_str borgbackup-1.1.15/src/borg/testsuite/benchmark.py0000644000175000017500000000545713771325506021775 0ustar useruser00000000000000""" Do benchmarks using pytest-benchmark. 
Usage: py.test --benchmark-only """ import os import pytest from .archiver import changedir, cmd @pytest.fixture def repo_url(request, tmpdir, monkeypatch): monkeypatch.setenv('BORG_PASSPHRASE', '123456') monkeypatch.setenv('BORG_CHECK_I_KNOW_WHAT_I_AM_DOING', 'YES') monkeypatch.setenv('BORG_DELETE_I_KNOW_WHAT_I_AM_DOING', 'YES') monkeypatch.setenv('BORG_UNKNOWN_UNENCRYPTED_REPO_ACCESS_IS_OK', 'yes') monkeypatch.setenv('BORG_KEYS_DIR', str(tmpdir.join('keys'))) monkeypatch.setenv('BORG_CACHE_DIR', str(tmpdir.join('cache'))) yield str(tmpdir.join('repository')) tmpdir.remove(rec=1) @pytest.fixture(params=["none", "repokey"]) def repo(request, cmd, repo_url): cmd('init', '--encryption', request.param, repo_url) return repo_url @pytest.fixture(scope='session', params=["zeros", "random"]) def testdata(request, tmpdir_factory): count, size = 10, 1000*1000 p = tmpdir_factory.mktemp('data') data_type = request.param if data_type == 'zeros': # do not use a binary zero (\0) to avoid sparse detection def data(size): return b'0' * size elif data_type == 'random': def data(size): return os.urandom(size) else: raise ValueError("data_type must be 'random' or 'zeros'.") for i in range(count): with open(str(p.join(str(i))), "wb") as f: f.write(data(size)) yield str(p) p.remove(rec=1) @pytest.fixture(params=['none', 'lz4']) def archive(request, cmd, repo, testdata): archive_url = repo + '::test' cmd('create', '--compression', request.param, archive_url, testdata) return archive_url def test_create_none(benchmark, cmd, repo, testdata): result, out = benchmark.pedantic(cmd, ('create', '--compression', 'none', repo + '::test', testdata)) assert result == 0 def test_create_lz4(benchmark, cmd, repo, testdata): result, out = benchmark.pedantic(cmd, ('create', '--compression', 'lz4', repo + '::test', testdata)) assert result == 0 def test_extract(benchmark, cmd, archive, tmpdir): with changedir(str(tmpdir)): result, out = benchmark.pedantic(cmd, ('extract', archive)) assert result == 0 def test_delete(benchmark, cmd, archive): result, out = benchmark.pedantic(cmd, ('delete', archive)) assert result == 0 def test_list(benchmark, cmd, archive): result, out = benchmark(cmd, 'list', archive) assert result == 0 def test_info(benchmark, cmd, archive): result, out = benchmark(cmd, 'info', archive) assert result == 0 def test_check(benchmark, cmd, archive): repo = archive.split('::')[0] result, out = benchmark(cmd, 'check', repo) assert result == 0 def test_help(benchmark, cmd): result, out = benchmark(cmd, 'help') assert result == 0 borgbackup-1.1.15/src/borg/testsuite/xattr.py0000644000175000017500000000553113771325506021176 0ustar useruser00000000000000import os import tempfile import unittest import pytest from ..xattr import is_enabled, getxattr, setxattr, listxattr, buffer, split_lstring from . 
import BaseTestCase @unittest.skipUnless(is_enabled(), 'xattr not enabled on filesystem') class XattrTestCase(BaseTestCase): def setUp(self): self.tmpfile = tempfile.NamedTemporaryFile() self.symlink = self.tmpfile.name + '.symlink' os.symlink(self.tmpfile.name, self.symlink) def tearDown(self): os.unlink(self.symlink) def assert_equal_se(self, is_x, want_x): # check 2 xattr lists for equality, but ignore security.selinux attr is_x = set(is_x) - {'security.selinux'} want_x = set(want_x) self.assert_equal(is_x, want_x) def test(self): self.assert_equal_se(listxattr(self.tmpfile.name), []) self.assert_equal_se(listxattr(self.tmpfile.fileno()), []) self.assert_equal_se(listxattr(self.symlink), []) setxattr(self.tmpfile.name, 'user.foo', b'bar') setxattr(self.tmpfile.fileno(), 'user.bar', b'foo') setxattr(self.tmpfile.name, 'user.empty', None) self.assert_equal_se(listxattr(self.tmpfile.name), ['user.foo', 'user.bar', 'user.empty']) self.assert_equal_se(listxattr(self.tmpfile.fileno()), ['user.foo', 'user.bar', 'user.empty']) self.assert_equal_se(listxattr(self.symlink), ['user.foo', 'user.bar', 'user.empty']) self.assert_equal_se(listxattr(self.symlink, follow_symlinks=False), []) self.assert_equal(getxattr(self.tmpfile.name, 'user.foo'), b'bar') self.assert_equal(getxattr(self.tmpfile.fileno(), 'user.foo'), b'bar') self.assert_equal(getxattr(self.symlink, 'user.foo'), b'bar') self.assert_equal(getxattr(self.tmpfile.name, 'user.empty'), None) def test_listxattr_buffer_growth(self): # make it work even with ext4, which imposes rather low limits buffer.resize(size=64, init=True) # xattr raw key list will be > 64 keys = ['user.attr%d' % i for i in range(20)] for key in keys: setxattr(self.tmpfile.name, key, b'x') got_keys = listxattr(self.tmpfile.name) self.assert_equal_se(got_keys, keys) self.assert_true(len(buffer) > 64) def test_getxattr_buffer_growth(self): # make it work even with ext4, which imposes rather low limits buffer.resize(size=64, init=True) value = b'x' * 126 setxattr(self.tmpfile.name, 'user.big', value) got_value = getxattr(self.tmpfile.name, 'user.big') self.assert_equal(value, got_value) self.assert_equal(len(buffer), 128) @pytest.mark.parametrize('lstring, splitted', ( (b'', []), (b'\x00', [b'']), (b'\x01a', [b'a']), (b'\x01a\x02cd', [b'a', b'cd']), )) def test_split_lstring(lstring, splitted): assert split_lstring(lstring) == splitted borgbackup-1.1.15/src/borg/testsuite/remote.py0000644000175000017500000001443413771325506021331 0ustar useruser00000000000000import errno import os import io import time from unittest.mock import patch import pytest from ..remote import SleepingBandwidthLimiter, RepositoryCache, cache_if_remote from ..repository import Repository from ..crypto.key import PlaintextKey from ..compress import CompressionSpec from ..helpers import IntegrityError from .hashindex import H from .key import TestKey class TestSleepingBandwidthLimiter: def expect_write(self, fd, data): self.expected_fd = fd self.expected_data = data def check_write(self, fd, data): assert fd == self.expected_fd assert data == self.expected_data return len(data) def test_write_unlimited(self, monkeypatch): monkeypatch.setattr(os, "write", self.check_write) it = SleepingBandwidthLimiter(0) self.expect_write(5, b"test") it.write(5, b"test") def test_write(self, monkeypatch): monkeypatch.setattr(os, "write", self.check_write) monkeypatch.setattr(time, "monotonic", lambda: now) monkeypatch.setattr(time, "sleep", lambda x: None) now = 100 it = SleepingBandwidthLimiter(100) # all fits 
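# (the sequence below encodes the assumed limiter behaviour: a write that fits the remaining
# per-period quota passes through unchanged, an oversized write is truncated to the remaining
# quota, an exhausted quota makes the next write sleep until it refills - time.sleep is patched
# to a no-op here - and after a long pause the refilled quota is clipped to a maximum.)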
self.expect_write(5, b"test") it.write(5, b"test") # only partial write self.expect_write(5, b"123456") it.write(5, b"1234567890") # sleeps self.expect_write(5, b"123456") it.write(5, b"123456") # long time interval between writes now += 10 self.expect_write(5, b"1") it.write(5, b"1") # long time interval between writes, filling up quota now += 10 self.expect_write(5, b"1") it.write(5, b"1") # long time interval between writes, filling up quota to clip to maximum now += 10 self.expect_write(5, b"1") it.write(5, b"1") class TestRepositoryCache: @pytest.fixture def repository(self, tmpdir): self.repository_location = os.path.join(str(tmpdir), 'repository') with Repository(self.repository_location, exclusive=True, create=True) as repository: repository.put(H(1), b'1234') repository.put(H(2), b'5678') repository.put(H(3), bytes(100)) yield repository @pytest.fixture def cache(self, repository): return RepositoryCache(repository) def test_simple(self, cache: RepositoryCache): # Single get()s are not cached, since they are used for unique objects like archives. assert cache.get(H(1)) == b'1234' assert cache.misses == 1 assert cache.hits == 0 assert list(cache.get_many([H(1)])) == [b'1234'] assert cache.misses == 2 assert cache.hits == 0 assert list(cache.get_many([H(1)])) == [b'1234'] assert cache.misses == 2 assert cache.hits == 1 assert cache.get(H(1)) == b'1234' assert cache.misses == 2 assert cache.hits == 2 def test_backoff(self, cache: RepositoryCache): def query_size_limit(): cache.size_limit = 0 assert list(cache.get_many([H(1), H(2)])) == [b'1234', b'5678'] assert cache.misses == 2 assert cache.evictions == 0 iterator = cache.get_many([H(1), H(3), H(2)]) assert next(iterator) == b'1234' # Force cache to back off qsl = cache.query_size_limit cache.query_size_limit = query_size_limit cache.backoff() cache.query_size_limit = qsl # Evicted H(1) and H(2) assert cache.evictions == 2 assert H(1) not in cache.cache assert H(2) not in cache.cache assert next(iterator) == bytes(100) assert cache.slow_misses == 0 # Since H(2) was in the cache when we called get_many(), but has # been evicted during iterating the generator, it will be a slow miss. assert next(iterator) == b'5678' assert cache.slow_misses == 1 def test_enospc(self, cache: RepositoryCache): class enospc_open: def __init__(self, *args): pass def __enter__(self): return self def __exit__(self, exc_type, exc_val, exc_tb): pass def write(self, data): raise OSError(errno.ENOSPC, 'foo') def truncate(self, n=None): pass iterator = cache.get_many([H(1), H(2), H(3)]) assert next(iterator) == b'1234' with patch('builtins.open', enospc_open): assert next(iterator) == b'5678' assert cache.enospc == 1 # We didn't patch query_size_limit which would set size_limit to some low # value, so nothing was actually evicted. 
assert cache.evictions == 0 assert next(iterator) == bytes(100) @pytest.fixture def key(self, repository, monkeypatch): monkeypatch.setenv('BORG_PASSPHRASE', 'test') key = PlaintextKey.create(repository, TestKey.MockArgs()) key.compressor = CompressionSpec('none').compressor return key def _put_encrypted_object(self, key, repository, data): id_ = key.id_hash(data) repository.put(id_, key.encrypt(data)) return id_ @pytest.fixture def H1(self, key, repository): return self._put_encrypted_object(key, repository, b'1234') @pytest.fixture def H2(self, key, repository): return self._put_encrypted_object(key, repository, b'5678') @pytest.fixture def H3(self, key, repository): return self._put_encrypted_object(key, repository, bytes(100)) @pytest.fixture def decrypted_cache(self, key, repository): return cache_if_remote(repository, decrypted_cache=key, force_cache=True) def test_cache_corruption(self, decrypted_cache: RepositoryCache, H1, H2, H3): list(decrypted_cache.get_many([H1, H2, H3])) iterator = decrypted_cache.get_many([H1, H2, H3]) assert next(iterator) == (7, b'1234') with open(decrypted_cache.key_filename(H2), 'a+b') as fd: fd.seek(-1, io.SEEK_END) corrupted = (int.from_bytes(fd.read(), 'little') ^ 2).to_bytes(1, 'little') fd.seek(-1, io.SEEK_END) fd.write(corrupted) fd.truncate() with pytest.raises(IntegrityError): assert next(iterator) == (7, b'5678') borgbackup-1.1.15/src/borg/testsuite/nonces.py0000644000175000017500000002070113771325506021315 0ustar useruser00000000000000import os.path import pytest from ..crypto import nonces from ..crypto.nonces import NonceManager from ..crypto.key import bin_to_hex from ..helpers import get_security_dir from ..remote import InvalidRPCMethod class TestNonceManager: class MockRepository: class _Location: orig = '/some/place' _location = _Location() id = bytes(32) id_str = bin_to_hex(id) def get_free_nonce(self): return self.next_free def commit_nonce_reservation(self, next_unreserved, start_nonce): assert start_nonce == self.next_free self.next_free = next_unreserved class MockOldRepository(MockRepository): def get_free_nonce(self): raise InvalidRPCMethod("") def commit_nonce_reservation(self, next_unreserved, start_nonce): pytest.fail("commit_nonce_reservation should never be called on an old repository") class MockEncCipher: def __init__(self, iv): self.iv_set = False # placeholder, this is never a valid iv self.iv = iv def reset(self, key, iv): assert key is None assert iv is not False self.iv_set = iv self.iv = iv def expect_iv_and_advance(self, expected_iv, advance): expected_iv = expected_iv.to_bytes(16, byteorder='big') iv_set = self.iv_set assert iv_set == expected_iv self.iv_set = False self.iv = advance.to_bytes(16, byteorder='big') def expect_no_reset_and_advance(self, advance): iv_set = self.iv_set assert iv_set is False self.iv = advance.to_bytes(16, byteorder='big') def setUp(self): self.repository = None def cache_nonce(self): with open(os.path.join(get_security_dir(self.repository.id_str), 'nonce'), "r") as fd: return fd.read() def set_cache_nonce(self, nonce): with open(os.path.join(get_security_dir(self.repository.id_str), 'nonce'), "w") as fd: assert fd.write(nonce) def test_empty_cache_and_old_server(self, monkeypatch): monkeypatch.setattr(nonces, 'NONCE_SPACE_RESERVATION', 0x20) enc_cipher = self.MockEncCipher(0x2000) self.repository = self.MockOldRepository() manager = NonceManager(self.repository, enc_cipher, 0x2000) manager.ensure_reservation(19) enc_cipher.expect_iv_and_advance(0x2000, 0x2013) assert self.cache_nonce() == 
"0000000000002033" def test_empty_cache(self, monkeypatch): monkeypatch.setattr(nonces, 'NONCE_SPACE_RESERVATION', 0x20) enc_cipher = self.MockEncCipher(0x2000) self.repository = self.MockRepository() self.repository.next_free = 0x2000 manager = NonceManager(self.repository, enc_cipher, 0x2000) manager.ensure_reservation(19) enc_cipher.expect_iv_and_advance(0x2000, 0x2013) assert self.cache_nonce() == "0000000000002033" def test_empty_nonce(self, monkeypatch): monkeypatch.setattr(nonces, 'NONCE_SPACE_RESERVATION', 0x20) enc_cipher = self.MockEncCipher(0x2000) self.repository = self.MockRepository() self.repository.next_free = None manager = NonceManager(self.repository, enc_cipher, 0x2000) manager.ensure_reservation(19) enc_cipher.expect_iv_and_advance(0x2000, 0x2000 + 19) assert self.cache_nonce() == "0000000000002033" assert self.repository.next_free == 0x2033 # enough space in reservation manager.ensure_reservation(13) enc_cipher.expect_no_reset_and_advance(0x2000 + 19 + 13) assert self.cache_nonce() == "0000000000002033" assert self.repository.next_free == 0x2033 # just barely enough space in reservation manager.ensure_reservation(19) enc_cipher.expect_no_reset_and_advance(0x2000 + 19 + 13 + 19) assert self.cache_nonce() == "0000000000002033" assert self.repository.next_free == 0x2033 # no space in reservation manager.ensure_reservation(16) enc_cipher.expect_no_reset_and_advance(0x2000 + 19 + 13 + 19 + 16) assert self.cache_nonce() == "0000000000002063" assert self.repository.next_free == 0x2063 # spans reservation boundary manager.ensure_reservation(64) enc_cipher.expect_no_reset_and_advance(0x2000 + 19 + 13 + 19 + 16 + 64) assert self.cache_nonce() == "00000000000020c3" assert self.repository.next_free == 0x20c3 def test_sync_nonce(self, monkeypatch): monkeypatch.setattr(nonces, 'NONCE_SPACE_RESERVATION', 0x20) enc_cipher = self.MockEncCipher(0x2000) self.repository = self.MockRepository() self.repository.next_free = 0x2000 self.set_cache_nonce("0000000000002000") manager = NonceManager(self.repository, enc_cipher, 0x2000) manager.ensure_reservation(19) enc_cipher.expect_iv_and_advance(0x2000, 0x2000 + 19) assert self.cache_nonce() == "0000000000002033" assert self.repository.next_free == 0x2033 def test_server_just_upgraded(self, monkeypatch): monkeypatch.setattr(nonces, 'NONCE_SPACE_RESERVATION', 0x20) enc_cipher = self.MockEncCipher(0x2000) self.repository = self.MockRepository() self.repository.next_free = None self.set_cache_nonce("0000000000002000") manager = NonceManager(self.repository, enc_cipher, 0x2000) manager.ensure_reservation(19) enc_cipher.expect_iv_and_advance(0x2000, 0x2000 + 19) assert self.cache_nonce() == "0000000000002033" assert self.repository.next_free == 0x2033 def test_transaction_abort_no_cache(self, monkeypatch): monkeypatch.setattr(nonces, 'NONCE_SPACE_RESERVATION', 0x20) enc_cipher = self.MockEncCipher(0x1000) self.repository = self.MockRepository() self.repository.next_free = 0x2000 manager = NonceManager(self.repository, enc_cipher, 0x2000) manager.ensure_reservation(19) enc_cipher.expect_iv_and_advance(0x2000, 0x2000 + 19) assert self.cache_nonce() == "0000000000002033" assert self.repository.next_free == 0x2033 def test_transaction_abort_old_server(self, monkeypatch): monkeypatch.setattr(nonces, 'NONCE_SPACE_RESERVATION', 0x20) enc_cipher = self.MockEncCipher(0x1000) self.repository = self.MockOldRepository() self.set_cache_nonce("0000000000002000") manager = NonceManager(self.repository, enc_cipher, 0x2000) manager.ensure_reservation(19) 
enc_cipher.expect_iv_and_advance(0x2000, 0x2000 + 19) assert self.cache_nonce() == "0000000000002033" def test_transaction_abort_on_other_client(self, monkeypatch): monkeypatch.setattr(nonces, 'NONCE_SPACE_RESERVATION', 0x20) enc_cipher = self.MockEncCipher(0x1000) self.repository = self.MockRepository() self.repository.next_free = 0x2000 self.set_cache_nonce("0000000000001000") manager = NonceManager(self.repository, enc_cipher, 0x2000) manager.ensure_reservation(19) enc_cipher.expect_iv_and_advance(0x2000, 0x2000 + 19) assert self.cache_nonce() == "0000000000002033" assert self.repository.next_free == 0x2033 def test_interleaved(self, monkeypatch): monkeypatch.setattr(nonces, 'NONCE_SPACE_RESERVATION', 0x20) enc_cipher = self.MockEncCipher(0x2000) self.repository = self.MockRepository() self.repository.next_free = 0x2000 self.set_cache_nonce("0000000000002000") manager = NonceManager(self.repository, enc_cipher, 0x2000) manager.ensure_reservation(19) enc_cipher.expect_iv_and_advance(0x2000, 0x2000 + 19) assert self.cache_nonce() == "0000000000002033" assert self.repository.next_free == 0x2033 # somehow the clients unlocks, another client reserves and this client relocks self.repository.next_free = 0x4000 # enough space in reservation manager.ensure_reservation(12) enc_cipher.expect_no_reset_and_advance(0x2000 + 19 + 12) assert self.cache_nonce() == "0000000000002033" assert self.repository.next_free == 0x4000 # spans reservation boundary manager.ensure_reservation(21) enc_cipher.expect_iv_and_advance(0x4000, 0x4000 + 21) assert self.cache_nonce() == "0000000000004035" assert self.repository.next_free == 0x4035 borgbackup-1.1.15/src/borg/testsuite/archiver.py0000644000175000017500000061362513771325506021650 0ustar useruser00000000000000import argparse import errno import io import json import logging import os import pstats import random import shutil import socket import stat import subprocess import sys import tempfile import time import re import unittest from binascii import unhexlify, b2a_base64 from configparser import ConfigParser from datetime import datetime from datetime import timezone from datetime import timedelta from hashlib import sha256 from io import BytesIO, StringIO from unittest.mock import patch import pytest try: import llfuse except ImportError: pass import borg from .. import xattr, helpers, platform from ..archive import Archive, ChunkBuffer, flags_noatime, flags_normal from ..archiver import Archiver, parse_storage_quota, PURE_PYTHON_MSGPACK_WARNING from ..cache import Cache, LocalCache from ..constants import * # NOQA from ..crypto.low_level import bytes_to_long, num_aes_blocks from ..crypto.key import KeyfileKeyBase, RepoKey, KeyfileKey, Passphrase, TAMRequiredError from ..crypto.keymanager import RepoIdMismatch, NotABorgKeyFile from ..crypto.file_integrity import FileIntegrityError from ..helpers import Location, get_security_dir from ..helpers import Manifest, MandatoryFeatureUnsupported from ..helpers import EXIT_SUCCESS, EXIT_WARNING, EXIT_ERROR from ..helpers import bin_to_hex from ..helpers import MAX_S from ..helpers import msgpack from ..nanorst import RstToTextLazy, rst_to_terminal from ..patterns import IECommand, PatternMatcher, parse_pattern from ..item import Item from ..locking import LockFailed from ..logger import setup_logging from ..remote import RemoteRepository, PathNotAllowed from ..repository import Repository from . import has_lchflags, has_llfuse from . import BaseTestCase, changedir, environment_variable, no_selinux from . 
import are_symlinks_supported, are_hardlinks_supported, are_fifos_supported, is_utime_fully_supported, is_birthtime_fully_supported from .platform import fakeroot_detected from .upgrader import make_attic_repo from . import key src_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), '..')) def exec_cmd(*args, archiver=None, fork=False, exe=None, input=b'', binary_output=False, **kw): if fork: try: if exe is None: borg = (sys.executable, '-m', 'borg.archiver') elif isinstance(exe, str): borg = (exe, ) elif not isinstance(exe, tuple): raise ValueError('exe must be None, a tuple or a str') output = subprocess.check_output(borg + args, stderr=subprocess.STDOUT, input=input) ret = 0 except subprocess.CalledProcessError as e: output = e.output ret = e.returncode except SystemExit as e: # possibly raised by argparse output = '' ret = e.code if binary_output: return ret, output else: return ret, os.fsdecode(output) else: stdin, stdout, stderr = sys.stdin, sys.stdout, sys.stderr try: sys.stdin = StringIO(input.decode()) sys.stdin.buffer = BytesIO(input) output = BytesIO() # Always use utf-8 here, to simply .decode() below output_text = sys.stdout = sys.stderr = io.TextIOWrapper(output, encoding='utf-8') if archiver is None: archiver = Archiver() archiver.prerun_checks = lambda *args: None archiver.exit_code = EXIT_SUCCESS helpers.exit_code = EXIT_SUCCESS try: args = archiver.parse_args(list(args)) # argparse parsing may raise SystemExit when the command line is bad or # actions that abort early (eg. --help) where given. Catch this and return # the error code as-if we invoked a Borg binary. except SystemExit as e: output_text.flush() return e.code, output.getvalue() if binary_output else output.getvalue().decode() ret = archiver.run(args) output_text.flush() return ret, output.getvalue() if binary_output else output.getvalue().decode() finally: sys.stdin, sys.stdout, sys.stderr = stdin, stdout, stderr def have_gnutar(): if not shutil.which('tar'): return False popen = subprocess.Popen(['tar', '--version'], stdout=subprocess.PIPE) stdout, stderr = popen.communicate() return b'GNU tar' in stdout # check if the binary "borg.exe" is available (for local testing a symlink to virtualenv/bin/borg should do) try: exec_cmd('help', exe='borg.exe', fork=True) BORG_EXES = ['python', 'binary', ] except FileNotFoundError: BORG_EXES = ['python', ] @pytest.fixture(params=BORG_EXES) def cmd(request): if request.param == 'python': exe = None elif request.param == 'binary': exe = 'borg.exe' else: raise ValueError("param must be 'python' or 'binary'") def exec_fn(*args, **kw): return exec_cmd(*args, exe=exe, fork=True, **kw) return exec_fn def test_return_codes(cmd, tmpdir): repo = tmpdir.mkdir('repo') input = tmpdir.mkdir('input') output = tmpdir.mkdir('output') input.join('test_file').write('content') rc, out = cmd('init', '--encryption=none', '%s' % str(repo)) assert rc == EXIT_SUCCESS rc, out = cmd('create', '%s::archive' % repo, str(input)) assert rc == EXIT_SUCCESS with changedir(str(output)): rc, out = cmd('extract', '%s::archive' % repo) assert rc == EXIT_SUCCESS rc, out = cmd('extract', '%s::archive' % repo, 'does/not/match') assert rc == EXIT_WARNING # pattern did not match rc, out = cmd('create', '%s::archive' % repo, str(input)) assert rc == EXIT_ERROR # duplicate archive name """ test_disk_full is very slow and not recommended to be included in daily testing. for this test, an empty, writable 16MB filesystem mounted on DF_MOUNT is required. 
for speed and other reasons, it is recommended that the underlying block device is in RAM, not a magnetic or flash disk. assuming /tmp is a tmpfs (in memory filesystem), one can use this: dd if=/dev/zero of=/tmp/borg-disk bs=16M count=1 mkfs.ext4 /tmp/borg-disk mkdir /tmp/borg-mount sudo mount /tmp/borg-disk /tmp/borg-mount if the directory does not exist, the test will be skipped. """ DF_MOUNT = '/tmp/borg-mount' @pytest.mark.skipif(not os.path.exists(DF_MOUNT), reason="needs a 16MB fs mounted on %s" % DF_MOUNT) def test_disk_full(cmd): def make_files(dir, count, size, rnd=True): shutil.rmtree(dir, ignore_errors=True) os.mkdir(dir) if rnd: count = random.randint(1, count) if size > 1: size = random.randint(1, size) for i in range(count): fn = os.path.join(dir, "file%03d" % i) with open(fn, 'wb') as f: data = os.urandom(size) f.write(data) with environment_variable(BORG_CHECK_I_KNOW_WHAT_I_AM_DOING='YES'): mount = DF_MOUNT assert os.path.exists(mount) repo = os.path.join(mount, 'repo') input = os.path.join(mount, 'input') reserve = os.path.join(mount, 'reserve') for j in range(100): shutil.rmtree(repo, ignore_errors=True) shutil.rmtree(input, ignore_errors=True) # keep some space and some inodes in reserve that we can free up later: make_files(reserve, 80, 100000, rnd=False) rc, out = cmd('init', repo) if rc != EXIT_SUCCESS: print('init', rc, out) assert rc == EXIT_SUCCESS try: success, i = True, 0 while success: i += 1 try: make_files(input, 20, 200000) except OSError as err: if err.errno == errno.ENOSPC: # already out of space break raise try: rc, out = cmd('create', '%s::test%03d' % (repo, i), input) success = rc == EXIT_SUCCESS if not success: print('create', rc, out) finally: # make sure repo is not locked shutil.rmtree(os.path.join(repo, 'lock.exclusive'), ignore_errors=True) os.remove(os.path.join(repo, 'lock.roster')) finally: # now some error happened, likely we are out of disk space. 
# free some space so we can expect borg to be able to work normally: shutil.rmtree(reserve, ignore_errors=True) rc, out = cmd('list', repo) if rc != EXIT_SUCCESS: print('list', rc, out) rc, out = cmd('check', '--repair', repo) if rc != EXIT_SUCCESS: print('check', rc, out) assert rc == EXIT_SUCCESS class ArchiverTestCaseBase(BaseTestCase): EXE = None # python source based FORK_DEFAULT = False prefix = '' def setUp(self): os.environ['BORG_CHECK_I_KNOW_WHAT_I_AM_DOING'] = 'YES' os.environ['BORG_DELETE_I_KNOW_WHAT_I_AM_DOING'] = 'YES' os.environ['BORG_PASSPHRASE'] = 'waytooeasyonlyfortests' self.archiver = not self.FORK_DEFAULT and Archiver() or None self.tmpdir = tempfile.mkdtemp() self.repository_path = os.path.join(self.tmpdir, 'repository') self.repository_location = self.prefix + self.repository_path self.input_path = os.path.join(self.tmpdir, 'input') self.output_path = os.path.join(self.tmpdir, 'output') self.keys_path = os.path.join(self.tmpdir, 'keys') self.cache_path = os.path.join(self.tmpdir, 'cache') self.exclude_file_path = os.path.join(self.tmpdir, 'excludes') self.patterns_file_path = os.path.join(self.tmpdir, 'patterns') os.environ['BORG_KEYS_DIR'] = self.keys_path os.environ['BORG_CACHE_DIR'] = self.cache_path os.mkdir(self.input_path) os.chmod(self.input_path, 0o777) # avoid troubles with fakeroot / FUSE os.mkdir(self.output_path) os.mkdir(self.keys_path) os.mkdir(self.cache_path) with open(self.exclude_file_path, 'wb') as fd: fd.write(b'input/file2\n# A comment line, then a blank line\n\n') with open(self.patterns_file_path, 'wb') as fd: fd.write(b'+input/file_important\n- input/file*\n# A comment line, then a blank line\n\n') self._old_wd = os.getcwd() os.chdir(self.tmpdir) def tearDown(self): os.chdir(self._old_wd) # note: ignore_errors=True as workaround for issue #862 shutil.rmtree(self.tmpdir, ignore_errors=True) setup_logging() def cmd(self, *args, **kw): exit_code = kw.pop('exit_code', 0) fork = kw.pop('fork', None) binary_output = kw.get('binary_output', False) if fork is None: fork = self.FORK_DEFAULT ret, output = exec_cmd(*args, fork=fork, exe=self.EXE, archiver=self.archiver, **kw) if ret != exit_code: print(output) self.assert_equal(ret, exit_code) # if tests are run with the pure-python msgpack, there will be warnings about # this in the output, which would make a lot of tests fail. 
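# to keep assertions stable, every output line containing that warning is therefore stripped
# below before the output is handed back to the individual test.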
pp_msg = PURE_PYTHON_MSGPACK_WARNING.encode() if binary_output else PURE_PYTHON_MSGPACK_WARNING empty = b'' if binary_output else '' output = empty.join(line for line in output.splitlines(keepends=True) if pp_msg not in line) return output def create_src_archive(self, name): self.cmd('create', '--compression=lz4', self.repository_location + '::' + name, src_dir) def open_archive(self, name): repository = Repository(self.repository_path, exclusive=True) with repository: manifest, key = Manifest.load(repository, Manifest.NO_OPERATION_CHECK) archive = Archive(repository, key, manifest, name) return archive, repository def open_repository(self): return Repository(self.repository_path, exclusive=True) def create_regular_file(self, name, size=0, contents=None): assert not (size != 0 and contents and len(contents) != size), 'size and contents do not match' filename = os.path.join(self.input_path, name) if not os.path.exists(os.path.dirname(filename)): os.makedirs(os.path.dirname(filename)) with open(filename, 'wb') as fd: if contents is None: contents = b'X' * size fd.write(contents) def create_test_files(self): """Create a minimal test case including all supported file types """ # File self.create_regular_file('file1', size=1024 * 80) self.create_regular_file('flagfile', size=1024) # Directory self.create_regular_file('dir2/file2', size=1024 * 80) # File mode os.chmod('input/file1', 0o4755) # Hard link if are_hardlinks_supported(): os.link(os.path.join(self.input_path, 'file1'), os.path.join(self.input_path, 'hardlink')) # Symlink if are_symlinks_supported(): os.symlink('somewhere', os.path.join(self.input_path, 'link1')) self.create_regular_file('fusexattr', size=1) if not xattr.XATTR_FAKEROOT and xattr.is_enabled(self.input_path): # ironically, due to the way how fakeroot works, comparing FUSE file xattrs to orig file xattrs # will FAIL if fakeroot supports xattrs, thus we only set the xattr if XATTR_FAKEROOT is False. # This is because fakeroot with xattr-support does not propagate xattrs of the underlying file # into "fakeroot space". Because the xattrs exposed by borgfs are these of an underlying file # (from fakeroots point of view) they are invisible to the test process inside the fakeroot. xattr.setxattr(os.path.join(self.input_path, 'fusexattr'), 'user.foo', b'bar') xattr.setxattr(os.path.join(self.input_path, 'fusexattr'), 'user.empty', b'') # XXX this always fails for me # ubuntu 14.04, on a TMP dir filesystem with user_xattr, using fakeroot # same for newer ubuntu and centos. # if this is supported just on specific platform, platform should be checked first, # so that the test setup for all tests using it does not fail here always for others. # xattr.setxattr(os.path.join(self.input_path, 'link1'), 'user.foo_symlink', b'bar_symlink', follow_symlinks=False) # FIFO node if are_fifos_supported(): os.mkfifo(os.path.join(self.input_path, 'fifo1')) if has_lchflags: platform.set_flags(os.path.join(self.input_path, 'flagfile'), stat.UF_NODUMP) try: # Block device os.mknod('input/bdev', 0o600 | stat.S_IFBLK, os.makedev(10, 20)) # Char device os.mknod('input/cdev', 0o600 | stat.S_IFCHR, os.makedev(30, 40)) # File mode os.chmod('input/dir2', 0o555) # if we take away write perms, we need root to remove contents # File owner os.chown('input/file1', 100, 200) # raises OSError invalid argument on cygwin have_root = True # we have (fake)root except PermissionError: have_root = False except OSError as e: # Note: ENOSYS "Function not implemented" happens as non-root on Win 10 Linux Subsystem. 
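# EINVAL (e.g. from os.chown on cygwin) and ENOSYS are treated like PermissionError above:
# the test files are still usable, we merely record that no (fake)root powers are available;
# any other OSError is re-raised.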
if e.errno not in (errno.EINVAL, errno.ENOSYS): raise have_root = False time.sleep(1) # "empty" must have newer timestamp than other files self.create_regular_file('empty', size=0) return have_root class ArchiverTestCase(ArchiverTestCaseBase): def test_basic_functionality(self): have_root = self.create_test_files() # fork required to test show-rc output output = self.cmd('init', '--encryption=repokey', '--show-version', '--show-rc', self.repository_location, fork=True) self.assert_in('borgbackup version', output) self.assert_in('terminating with success status, rc 0', output) self.cmd('create', '--exclude-nodump', self.repository_location + '::test', 'input') output = self.cmd('create', '--exclude-nodump', '--stats', self.repository_location + '::test.2', 'input') self.assert_in('Archive name: test.2', output) self.assert_in('This archive: ', output) with changedir('output'): self.cmd('extract', self.repository_location + '::test') list_output = self.cmd('list', '--short', self.repository_location) self.assert_in('test', list_output) self.assert_in('test.2', list_output) expected = [ 'input', 'input/bdev', 'input/cdev', 'input/dir2', 'input/dir2/file2', 'input/empty', 'input/file1', 'input/flagfile', ] if are_fifos_supported(): expected.append('input/fifo1') if are_symlinks_supported(): expected.append('input/link1') if are_hardlinks_supported(): expected.append('input/hardlink') if not have_root: # we could not create these device files without (fake)root expected.remove('input/bdev') expected.remove('input/cdev') if has_lchflags: # remove the file we did not backup, so input and output become equal expected.remove('input/flagfile') # this file is UF_NODUMP os.remove(os.path.join('input', 'flagfile')) list_output = self.cmd('list', '--short', self.repository_location + '::test') for name in expected: self.assert_in(name, list_output) self.assert_dirs_equal('input', 'output/input') info_output = self.cmd('info', self.repository_location + '::test') item_count = 4 if has_lchflags else 5 # one file is UF_NODUMP self.assert_in('Number of files: %d' % item_count, info_output) shutil.rmtree(self.cache_path) info_output2 = self.cmd('info', self.repository_location + '::test') def filter(output): # filter for interesting "info" output, ignore cache rebuilding related stuff prefixes = ['Name:', 'Fingerprint:', 'Number of files:', 'This archive:', 'All archives:', 'Chunk index:', ] result = [] for line in output.splitlines(): for prefix in prefixes: if line.startswith(prefix): result.append(line) return '\n'.join(result) # the interesting parts of info_output2 and info_output should be same self.assert_equal(filter(info_output), filter(info_output2)) def test_init_parent_dirs(self): parent_path = os.path.join(self.tmpdir, 'parent1', 'parent2') repository_path = os.path.join(parent_path, 'repository') repository_location = self.prefix + repository_path with pytest.raises(Repository.ParentPathDoesNotExist): # normal borg init does NOT create missing parent dirs self.cmd('init', '--encryption=none', repository_location) # but if told so, it does: self.cmd('init', '--encryption=none', '--make-parent-dirs', repository_location) assert os.path.exists(parent_path) def test_unix_socket(self): self.cmd('init', '--encryption=repokey', self.repository_location) try: sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) sock.bind(os.path.join(self.input_path, 'unix-socket')) except PermissionError as err: if err.errno == errno.EPERM: pytest.skip('unix sockets disabled or not supported') elif err.errno == 
errno.EACCES: pytest.skip('permission denied to create unix sockets') self.cmd('create', self.repository_location + '::test', 'input') sock.close() with changedir('output'): self.cmd('extract', self.repository_location + '::test') assert not os.path.exists('input/unix-socket') @pytest.mark.skipif(not are_symlinks_supported(), reason='symlinks not supported') def test_symlink_extract(self): self.create_test_files() self.cmd('init', '--encryption=repokey', self.repository_location) self.cmd('create', self.repository_location + '::test', 'input') with changedir('output'): self.cmd('extract', self.repository_location + '::test') assert os.readlink('input/link1') == 'somewhere' @pytest.mark.skipif(not is_utime_fully_supported(), reason='cannot properly setup and execute test without utime') def test_atime(self): def has_noatime(some_file): atime_before = os.stat(some_file).st_atime_ns try: with open(os.open(some_file, flags_noatime)) as file: file.read() except PermissionError: return False else: atime_after = os.stat(some_file).st_atime_ns noatime_used = flags_noatime != flags_normal return noatime_used and atime_before == atime_after self.create_test_files() atime, mtime = 123456780, 234567890 have_noatime = has_noatime('input/file1') os.utime('input/file1', (atime, mtime)) self.cmd('init', '--encryption=repokey', self.repository_location) self.cmd('create', self.repository_location + '::test', 'input') with changedir('output'): self.cmd('extract', self.repository_location + '::test') sti = os.stat('input/file1') sto = os.stat('output/input/file1') assert sti.st_mtime_ns == sto.st_mtime_ns == mtime * 1e9 if have_noatime: assert sti.st_atime_ns == sto.st_atime_ns == atime * 1e9 else: # it touched the input file's atime while backing it up assert sto.st_atime_ns == atime * 1e9 @pytest.mark.skipif(not is_utime_fully_supported(), reason='cannot properly setup and execute test without utime') @pytest.mark.skipif(not is_birthtime_fully_supported(), reason='cannot properly setup and execute test without birthtime') def test_birthtime(self): self.create_test_files() birthtime, mtime, atime = 946598400, 946684800, 946771200 os.utime('input/file1', (atime, birthtime)) os.utime('input/file1', (atime, mtime)) self.cmd('init', '--encryption=repokey', self.repository_location) self.cmd('create', self.repository_location + '::test', 'input') with changedir('output'): self.cmd('extract', self.repository_location + '::test') sti = os.stat('input/file1') sto = os.stat('output/input/file1') assert int(sti.st_birthtime * 1e9) == int(sto.st_birthtime * 1e9) == birthtime * 1e9 assert sti.st_mtime_ns == sto.st_mtime_ns == mtime * 1e9 @pytest.mark.skipif(not is_utime_fully_supported(), reason='cannot properly setup and execute test without utime') @pytest.mark.skipif(not is_birthtime_fully_supported(), reason='cannot properly setup and execute test without birthtime') def test_nobirthtime(self): self.create_test_files() birthtime, mtime, atime = 946598400, 946684800, 946771200 os.utime('input/file1', (atime, birthtime)) os.utime('input/file1', (atime, mtime)) self.cmd('init', '--encryption=repokey', self.repository_location) self.cmd('create', '--nobirthtime', self.repository_location + '::test', 'input') with changedir('output'): self.cmd('extract', self.repository_location + '::test') sti = os.stat('input/file1') sto = os.stat('output/input/file1') assert int(sti.st_birthtime * 1e9) == birthtime * 1e9 assert int(sto.st_birthtime * 1e9) == mtime * 1e9 assert sti.st_mtime_ns == sto.st_mtime_ns == mtime * 1e9 def 
_extract_repository_id(self, path): with Repository(self.repository_path) as repository: return repository.id def _set_repository_id(self, path, id): config = ConfigParser(interpolation=None) config.read(os.path.join(path, 'config')) config.set('repository', 'id', bin_to_hex(id)) with open(os.path.join(path, 'config'), 'w') as fd: config.write(fd) with Repository(self.repository_path) as repository: return repository.id def test_sparse_file(self): def is_sparse(fn, total_size, hole_size): st = os.stat(fn) assert st.st_size == total_size sparse = True if sparse and hasattr(st, 'st_blocks') and st.st_blocks * 512 >= st.st_size: sparse = False if sparse and hasattr(os, 'SEEK_HOLE') and hasattr(os, 'SEEK_DATA'): with open(fn, 'rb') as fd: # only check if the first hole is as expected, because the 2nd hole check # is problematic on xfs due to its "dynamic speculative EOF preallocation try: if fd.seek(0, os.SEEK_HOLE) != 0: sparse = False if fd.seek(0, os.SEEK_DATA) != hole_size: sparse = False except OSError: # OS/FS does not really support SEEK_HOLE/SEEK_DATA sparse = False return sparse filename = os.path.join(self.input_path, 'sparse') content = b'foobar' hole_size = 5 * (1 << CHUNK_MAX_EXP) # 5 full chunker buffers total_size = hole_size + len(content) + hole_size with open(filename, 'wb') as fd: # create a file that has a hole at the beginning and end (if the # OS and filesystem supports sparse files) fd.seek(hole_size, 1) fd.write(content) fd.seek(hole_size, 1) pos = fd.tell() fd.truncate(pos) # we first check if we could create a sparse input file: sparse_support = is_sparse(filename, total_size, hole_size) if sparse_support: # we could create a sparse input file, so creating a backup of it and # extracting it again (as sparse) should also work: self.cmd('init', '--encryption=repokey', self.repository_location) self.cmd('create', self.repository_location + '::test', 'input') with changedir(self.output_path): self.cmd('extract', '--sparse', self.repository_location + '::test') self.assert_dirs_equal('input', 'output/input') filename = os.path.join(self.output_path, 'input', 'sparse') with open(filename, 'rb') as fd: # check if file contents are as expected self.assert_equal(fd.read(hole_size), b'\0' * hole_size) self.assert_equal(fd.read(len(content)), content) self.assert_equal(fd.read(hole_size), b'\0' * hole_size) self.assert_true(is_sparse(filename, total_size, hole_size)) def test_unusual_filenames(self): filenames = ['normal', 'with some blanks', '(with_parens)', ] for filename in filenames: filename = os.path.join(self.input_path, filename) with open(filename, 'wb'): pass self.cmd('init', '--encryption=repokey', self.repository_location) self.cmd('create', self.repository_location + '::test', 'input') for filename in filenames: with changedir('output'): self.cmd('extract', self.repository_location + '::test', os.path.join('input', filename)) assert os.path.exists(os.path.join('output', 'input', filename)) def test_repository_swap_detection(self): self.create_test_files() os.environ['BORG_PASSPHRASE'] = 'passphrase' self.cmd('init', '--encryption=repokey', self.repository_location) repository_id = self._extract_repository_id(self.repository_path) self.cmd('create', self.repository_location + '::test', 'input') shutil.rmtree(self.repository_path) self.cmd('init', '--encryption=none', self.repository_location) self._set_repository_id(self.repository_path, repository_id) self.assert_equal(repository_id, self._extract_repository_id(self.repository_path)) if self.FORK_DEFAULT: 
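# pattern used by many tests in this class: when borg runs as a separate (forked / binary)
# process we can only check its exit code, while in-process we can assert the exact exception.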
self.cmd('create', self.repository_location + '::test.2', 'input', exit_code=EXIT_ERROR) else: with pytest.raises(Cache.EncryptionMethodMismatch): self.cmd('create', self.repository_location + '::test.2', 'input') def test_repository_swap_detection2(self): self.create_test_files() self.cmd('init', '--encryption=none', self.repository_location + '_unencrypted') os.environ['BORG_PASSPHRASE'] = 'passphrase' self.cmd('init', '--encryption=repokey', self.repository_location + '_encrypted') self.cmd('create', self.repository_location + '_encrypted::test', 'input') shutil.rmtree(self.repository_path + '_encrypted') os.rename(self.repository_path + '_unencrypted', self.repository_path + '_encrypted') if self.FORK_DEFAULT: self.cmd('create', self.repository_location + '_encrypted::test.2', 'input', exit_code=EXIT_ERROR) else: with pytest.raises(Cache.RepositoryAccessAborted): self.cmd('create', self.repository_location + '_encrypted::test.2', 'input') def test_repository_swap_detection_no_cache(self): self.create_test_files() os.environ['BORG_PASSPHRASE'] = 'passphrase' self.cmd('init', '--encryption=repokey', self.repository_location) repository_id = self._extract_repository_id(self.repository_path) self.cmd('create', self.repository_location + '::test', 'input') shutil.rmtree(self.repository_path) self.cmd('init', '--encryption=none', self.repository_location) self._set_repository_id(self.repository_path, repository_id) self.assert_equal(repository_id, self._extract_repository_id(self.repository_path)) self.cmd('delete', '--cache-only', self.repository_location) if self.FORK_DEFAULT: self.cmd('create', self.repository_location + '::test.2', 'input', exit_code=EXIT_ERROR) else: with pytest.raises(Cache.EncryptionMethodMismatch): self.cmd('create', self.repository_location + '::test.2', 'input') def test_repository_swap_detection2_no_cache(self): self.create_test_files() self.cmd('init', '--encryption=none', self.repository_location + '_unencrypted') os.environ['BORG_PASSPHRASE'] = 'passphrase' self.cmd('init', '--encryption=repokey', self.repository_location + '_encrypted') self.cmd('create', self.repository_location + '_encrypted::test', 'input') self.cmd('delete', '--cache-only', self.repository_location + '_unencrypted') self.cmd('delete', '--cache-only', self.repository_location + '_encrypted') shutil.rmtree(self.repository_path + '_encrypted') os.rename(self.repository_path + '_unencrypted', self.repository_path + '_encrypted') if self.FORK_DEFAULT: self.cmd('create', self.repository_location + '_encrypted::test.2', 'input', exit_code=EXIT_ERROR) else: with pytest.raises(Cache.RepositoryAccessAborted): self.cmd('create', self.repository_location + '_encrypted::test.2', 'input') def test_repository_swap_detection_repokey_blank_passphrase(self): # Check that a repokey repo with a blank passphrase is considered like a plaintext repo. 
self.create_test_files() # User initializes her repository with her passphrase self.cmd('init', '--encryption=repokey', self.repository_location) self.cmd('create', self.repository_location + '::test', 'input') # Attacker replaces it with her own repository, which is encrypted but has no passphrase set shutil.rmtree(self.repository_path) with environment_variable(BORG_PASSPHRASE=''): self.cmd('init', '--encryption=repokey', self.repository_location) # Delete cache & security database, AKA switch to user perspective self.cmd('delete', '--cache-only', self.repository_location) repository_id = bin_to_hex(self._extract_repository_id(self.repository_path)) shutil.rmtree(get_security_dir(repository_id)) with environment_variable(BORG_PASSPHRASE=None): # This is the part were the user would be tricked, e.g. she assumes that BORG_PASSPHRASE # is set, while it isn't. Previously this raised no warning, # since the repository is, technically, encrypted. if self.FORK_DEFAULT: self.cmd('create', self.repository_location + '::test.2', 'input', exit_code=EXIT_ERROR) else: with pytest.raises(Cache.CacheInitAbortedError): self.cmd('create', self.repository_location + '::test.2', 'input') def test_repository_move(self): self.cmd('init', '--encryption=repokey', self.repository_location) repository_id = bin_to_hex(self._extract_repository_id(self.repository_path)) os.rename(self.repository_path, self.repository_path + '_new') with environment_variable(BORG_RELOCATED_REPO_ACCESS_IS_OK='yes'): self.cmd('info', self.repository_location + '_new') security_dir = get_security_dir(repository_id) with open(os.path.join(security_dir, 'location')) as fd: location = fd.read() assert location == Location(self.repository_location + '_new').canonical_path() # Needs no confirmation anymore self.cmd('info', self.repository_location + '_new') shutil.rmtree(self.cache_path) self.cmd('info', self.repository_location + '_new') shutil.rmtree(security_dir) self.cmd('info', self.repository_location + '_new') for file in ('location', 'key-type', 'manifest-timestamp'): assert os.path.exists(os.path.join(security_dir, file)) def test_security_dir_compat(self): self.cmd('init', '--encryption=repokey', self.repository_location) repository_id = bin_to_hex(self._extract_repository_id(self.repository_path)) security_dir = get_security_dir(repository_id) with open(os.path.join(security_dir, 'location'), 'w') as fd: fd.write('something outdated') # This is fine, because the cache still has the correct information. security_dir and cache can disagree # if older versions are used to confirm a renamed repository. self.cmd('info', self.repository_location) def test_unknown_unencrypted(self): self.cmd('init', '--encryption=none', self.repository_location) repository_id = bin_to_hex(self._extract_repository_id(self.repository_path)) security_dir = get_security_dir(repository_id) # Ok: repository is known self.cmd('info', self.repository_location) # Ok: repository is still known (through security_dir) shutil.rmtree(self.cache_path) self.cmd('info', self.repository_location) # Needs confirmation: cache and security dir both gone (eg. 
another host or rm -rf ~) shutil.rmtree(self.cache_path) shutil.rmtree(security_dir) if self.FORK_DEFAULT: self.cmd('info', self.repository_location, exit_code=EXIT_ERROR) else: with pytest.raises(Cache.CacheInitAbortedError): self.cmd('info', self.repository_location) with environment_variable(BORG_UNKNOWN_UNENCRYPTED_REPO_ACCESS_IS_OK='yes'): self.cmd('info', self.repository_location) def test_strip_components(self): self.cmd('init', '--encryption=repokey', self.repository_location) self.create_regular_file('dir/file') self.cmd('create', self.repository_location + '::test', 'input') with changedir('output'): self.cmd('extract', self.repository_location + '::test', '--strip-components', '3') self.assert_true(not os.path.exists('file')) with self.assert_creates_file('file'): self.cmd('extract', self.repository_location + '::test', '--strip-components', '2') with self.assert_creates_file('dir/file'): self.cmd('extract', self.repository_location + '::test', '--strip-components', '1') with self.assert_creates_file('input/dir/file'): self.cmd('extract', self.repository_location + '::test', '--strip-components', '0') def _extract_hardlinks_setup(self): os.mkdir(os.path.join(self.input_path, 'dir1')) os.mkdir(os.path.join(self.input_path, 'dir1/subdir')) self.create_regular_file('source', contents=b'123456') os.link(os.path.join(self.input_path, 'source'), os.path.join(self.input_path, 'abba')) os.link(os.path.join(self.input_path, 'source'), os.path.join(self.input_path, 'dir1/hardlink')) os.link(os.path.join(self.input_path, 'source'), os.path.join(self.input_path, 'dir1/subdir/hardlink')) self.create_regular_file('dir1/source2') os.link(os.path.join(self.input_path, 'dir1/source2'), os.path.join(self.input_path, 'dir1/aaaa')) self.cmd('init', '--encryption=repokey', self.repository_location) self.cmd('create', self.repository_location + '::test', 'input') requires_hardlinks = pytest.mark.skipif(not are_hardlinks_supported(), reason='hardlinks not supported') @requires_hardlinks @unittest.skipUnless(has_llfuse, 'llfuse not installed') def test_fuse_mount_hardlinks(self): self._extract_hardlinks_setup() mountpoint = os.path.join(self.tmpdir, 'mountpoint') # we need to get rid of permissions checking because fakeroot causes issues with it. # On all platforms, borg defaults to "default_permissions" and we need to get rid of it via "ignore_permissions". # On macOS (darwin), we additionally need "defer_permissions" to switch off the checks in osxfuse. 
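# the option lists built below are presumably passed on by self.fuse_mount() as extra
# arguments to the mount command, ending up as FUSE-style "-o ..." mount options.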
if sys.platform == 'darwin': ignore_perms = ['-o', 'ignore_permissions,defer_permissions'] else: ignore_perms = ['-o', 'ignore_permissions'] with self.fuse_mount(self.repository_location + '::test', mountpoint, '--strip-components=2', *ignore_perms), \ changedir(mountpoint): assert os.stat('hardlink').st_nlink == 2 assert os.stat('subdir/hardlink').st_nlink == 2 assert open('subdir/hardlink', 'rb').read() == b'123456' assert os.stat('aaaa').st_nlink == 2 assert os.stat('source2').st_nlink == 2 with self.fuse_mount(self.repository_location + '::test', mountpoint, 'input/dir1', *ignore_perms), \ changedir(mountpoint): assert os.stat('input/dir1/hardlink').st_nlink == 2 assert os.stat('input/dir1/subdir/hardlink').st_nlink == 2 assert open('input/dir1/subdir/hardlink', 'rb').read() == b'123456' assert os.stat('input/dir1/aaaa').st_nlink == 2 assert os.stat('input/dir1/source2').st_nlink == 2 with self.fuse_mount(self.repository_location + '::test', mountpoint, *ignore_perms), \ changedir(mountpoint): assert os.stat('input/source').st_nlink == 4 assert os.stat('input/abba').st_nlink == 4 assert os.stat('input/dir1/hardlink').st_nlink == 4 assert os.stat('input/dir1/subdir/hardlink').st_nlink == 4 assert open('input/dir1/subdir/hardlink', 'rb').read() == b'123456' @requires_hardlinks def test_extract_hardlinks1(self): self._extract_hardlinks_setup() with changedir('output'): self.cmd('extract', self.repository_location + '::test') assert os.stat('input/source').st_nlink == 4 assert os.stat('input/abba').st_nlink == 4 assert os.stat('input/dir1/hardlink').st_nlink == 4 assert os.stat('input/dir1/subdir/hardlink').st_nlink == 4 assert open('input/dir1/subdir/hardlink', 'rb').read() == b'123456' @requires_hardlinks def test_extract_hardlinks2(self): self._extract_hardlinks_setup() with changedir('output'): self.cmd('extract', self.repository_location + '::test', '--strip-components', '2') assert os.stat('hardlink').st_nlink == 2 assert os.stat('subdir/hardlink').st_nlink == 2 assert open('subdir/hardlink', 'rb').read() == b'123456' assert os.stat('aaaa').st_nlink == 2 assert os.stat('source2').st_nlink == 2 with changedir('output'): self.cmd('extract', self.repository_location + '::test', 'input/dir1') assert os.stat('input/dir1/hardlink').st_nlink == 2 assert os.stat('input/dir1/subdir/hardlink').st_nlink == 2 assert open('input/dir1/subdir/hardlink', 'rb').read() == b'123456' assert os.stat('input/dir1/aaaa').st_nlink == 2 assert os.stat('input/dir1/source2').st_nlink == 2 def test_extract_include_exclude(self): self.cmd('init', '--encryption=repokey', self.repository_location) self.create_regular_file('file1', size=1024 * 80) self.create_regular_file('file2', size=1024 * 80) self.create_regular_file('file3', size=1024 * 80) self.create_regular_file('file4', size=1024 * 80) self.cmd('create', '--exclude=input/file4', self.repository_location + '::test', 'input') with changedir('output'): self.cmd('extract', self.repository_location + '::test', 'input/file1', ) self.assert_equal(sorted(os.listdir('output/input')), ['file1']) with changedir('output'): self.cmd('extract', '--exclude=input/file2', self.repository_location + '::test') self.assert_equal(sorted(os.listdir('output/input')), ['file1', 'file3']) with changedir('output'): self.cmd('extract', '--exclude-from=' + self.exclude_file_path, self.repository_location + '::test') self.assert_equal(sorted(os.listdir('output/input')), ['file1', 'file3']) def test_extract_include_exclude_regex(self): self.cmd('init', '--encryption=repokey', 
self.repository_location) self.create_regular_file('file1', size=1024 * 80) self.create_regular_file('file2', size=1024 * 80) self.create_regular_file('file3', size=1024 * 80) self.create_regular_file('file4', size=1024 * 80) self.create_regular_file('file333', size=1024 * 80) # Create with regular expression exclusion for file4 self.cmd('create', '--exclude=re:input/file4$', self.repository_location + '::test', 'input') with changedir('output'): self.cmd('extract', self.repository_location + '::test') self.assert_equal(sorted(os.listdir('output/input')), ['file1', 'file2', 'file3', 'file333']) shutil.rmtree('output/input') # Extract with regular expression exclusion with changedir('output'): self.cmd('extract', '--exclude=re:file3+', self.repository_location + '::test') self.assert_equal(sorted(os.listdir('output/input')), ['file1', 'file2']) shutil.rmtree('output/input') # Combine --exclude with fnmatch and regular expression with changedir('output'): self.cmd('extract', '--exclude=input/file2', '--exclude=re:file[01]', self.repository_location + '::test') self.assert_equal(sorted(os.listdir('output/input')), ['file3', 'file333']) shutil.rmtree('output/input') # Combine --exclude-from and regular expression exclusion with changedir('output'): self.cmd('extract', '--exclude-from=' + self.exclude_file_path, '--exclude=re:file1', '--exclude=re:file(\\d)\\1\\1$', self.repository_location + '::test') self.assert_equal(sorted(os.listdir('output/input')), ['file3']) def test_extract_include_exclude_regex_from_file(self): self.cmd('init', '--encryption=repokey', self.repository_location) self.create_regular_file('file1', size=1024 * 80) self.create_regular_file('file2', size=1024 * 80) self.create_regular_file('file3', size=1024 * 80) self.create_regular_file('file4', size=1024 * 80) self.create_regular_file('file333', size=1024 * 80) self.create_regular_file('aa:something', size=1024 * 80) # Create while excluding using mixed pattern styles with open(self.exclude_file_path, 'wb') as fd: fd.write(b're:input/file4$\n') fd.write(b'fm:*aa:*thing\n') self.cmd('create', '--exclude-from=' + self.exclude_file_path, self.repository_location + '::test', 'input') with changedir('output'): self.cmd('extract', self.repository_location + '::test') self.assert_equal(sorted(os.listdir('output/input')), ['file1', 'file2', 'file3', 'file333']) shutil.rmtree('output/input') # Exclude using regular expression with open(self.exclude_file_path, 'wb') as fd: fd.write(b're:file3+\n') with changedir('output'): self.cmd('extract', '--exclude-from=' + self.exclude_file_path, self.repository_location + '::test') self.assert_equal(sorted(os.listdir('output/input')), ['file1', 'file2']) shutil.rmtree('output/input') # Mixed exclude pattern styles with open(self.exclude_file_path, 'wb') as fd: fd.write(b're:file(\\d)\\1\\1$\n') fd.write(b'fm:nothingwillmatchthis\n') fd.write(b'*/file1\n') fd.write(b're:file2$\n') with changedir('output'): self.cmd('extract', '--exclude-from=' + self.exclude_file_path, self.repository_location + '::test') self.assert_equal(sorted(os.listdir('output/input')), ['file3']) def test_extract_with_pattern(self): self.cmd("init", '--encryption=repokey', self.repository_location) self.create_regular_file("file1", size=1024 * 80) self.create_regular_file("file2", size=1024 * 80) self.create_regular_file("file3", size=1024 * 80) self.create_regular_file("file4", size=1024 * 80) self.create_regular_file("file333", size=1024 * 80) self.cmd("create", self.repository_location + "::test", "input") # Extract 
everything with regular expression with changedir("output"): self.cmd("extract", self.repository_location + "::test", "re:.*") self.assert_equal(sorted(os.listdir("output/input")), ["file1", "file2", "file3", "file333", "file4"]) shutil.rmtree("output/input") # Extract with pattern while also excluding files with changedir("output"): self.cmd("extract", "--exclude=re:file[34]$", self.repository_location + "::test", r"re:file\d$") self.assert_equal(sorted(os.listdir("output/input")), ["file1", "file2"]) shutil.rmtree("output/input") # Combine --exclude with pattern for extraction with changedir("output"): self.cmd("extract", "--exclude=input/file1", self.repository_location + "::test", "re:file[12]$") self.assert_equal(sorted(os.listdir("output/input")), ["file2"]) shutil.rmtree("output/input") # Multiple pattern with changedir("output"): self.cmd("extract", self.repository_location + "::test", "fm:input/file1", "fm:*file33*", "input/file2") self.assert_equal(sorted(os.listdir("output/input")), ["file1", "file2", "file333"]) def test_extract_list_output(self): self.cmd('init', '--encryption=repokey', self.repository_location) self.create_regular_file('file', size=1024 * 80) self.cmd('create', self.repository_location + '::test', 'input') with changedir('output'): output = self.cmd('extract', self.repository_location + '::test') self.assert_not_in("input/file", output) shutil.rmtree('output/input') with changedir('output'): output = self.cmd('extract', '--info', self.repository_location + '::test') self.assert_not_in("input/file", output) shutil.rmtree('output/input') with changedir('output'): output = self.cmd('extract', '--list', self.repository_location + '::test') self.assert_in("input/file", output) shutil.rmtree('output/input') with changedir('output'): output = self.cmd('extract', '--list', '--info', self.repository_location + '::test') self.assert_in("input/file", output) def test_extract_progress(self): self.cmd('init', '--encryption=repokey', self.repository_location) self.create_regular_file('file', size=1024 * 80) self.cmd('create', self.repository_location + '::test', 'input') with changedir('output'): output = self.cmd('extract', self.repository_location + '::test', '--progress') assert 'Extracting:' in output def _create_test_caches(self): self.cmd('init', '--encryption=repokey', self.repository_location) self.create_regular_file('file1', size=1024 * 80) self.create_regular_file('cache1/%s' % CACHE_TAG_NAME, contents=CACHE_TAG_CONTENTS + b' extra stuff') self.create_regular_file('cache2/%s' % CACHE_TAG_NAME, contents=b'invalid signature') os.mkdir('input/cache3') if are_hardlinks_supported(): os.link('input/cache1/%s' % CACHE_TAG_NAME, 'input/cache3/%s' % CACHE_TAG_NAME) else: self.create_regular_file('cache3/%s' % CACHE_TAG_NAME, contents=CACHE_TAG_CONTENTS + b' extra stuff') def test_create_stdin(self): self.cmd('init', '--encryption=repokey', self.repository_location) input_data = b'\x00foo\n\nbar\n \n' self.cmd('create', self.repository_location + '::test', '-', input=input_data) item = json.loads(self.cmd('list', '--json-lines', self.repository_location + '::test')) assert item['uid'] == 0 assert item['gid'] == 0 assert item['size'] == len(input_data) assert item['path'] == 'stdin' extracted_data = self.cmd('extract', '--stdout', self.repository_location + '::test', binary_output=True) assert extracted_data == input_data def test_create_without_root(self): """test create without a root""" self.cmd('init', '--encryption=repokey', self.repository_location) 
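        # without any PATH arguments, 'create' must fail with exit code 2 (error)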
        self.cmd('create', self.repository_location + '::test', exit_code=2)

    def test_create_pattern_root(self):
        """test create with only a root pattern"""
        self.cmd('init', '--encryption=repokey', self.repository_location)
        self.create_regular_file('file1', size=1024 * 80)
        self.create_regular_file('file2', size=1024 * 80)
        output = self.cmd('create', '-v', '--list', '--pattern=R input', self.repository_location + '::test')
        self.assert_in("A input/file1", output)
        self.assert_in("A input/file2", output)

    def test_create_pattern(self):
        """test file patterns during create"""
        self.cmd('init', '--encryption=repokey', self.repository_location)
        self.create_regular_file('file1', size=1024 * 80)
        self.create_regular_file('file2', size=1024 * 80)
        self.create_regular_file('file_important', size=1024 * 80)
        output = self.cmd('create', '-v', '--list',
                          '--pattern=+input/file_important', '--pattern=-input/file*',
                          self.repository_location + '::test', 'input')
        self.assert_in("A input/file_important", output)
        self.assert_in('x input/file1', output)
        self.assert_in('x input/file2', output)

    def test_create_pattern_file(self):
        """test file patterns during create"""
        self.cmd('init', '--encryption=repokey', self.repository_location)
        self.create_regular_file('file1', size=1024 * 80)
        self.create_regular_file('file2', size=1024 * 80)
        self.create_regular_file('otherfile', size=1024 * 80)
        self.create_regular_file('file_important', size=1024 * 80)
        output = self.cmd('create', '-v', '--list',
                          '--pattern=-input/otherfile', '--patterns-from=' + self.patterns_file_path,
                          self.repository_location + '::test', 'input')
        self.assert_in("A input/file_important", output)
        self.assert_in('x input/file1', output)
        self.assert_in('x input/file2', output)
        self.assert_in('x input/otherfile', output)

    def test_create_pattern_exclude_folder_but_recurse(self):
        """test when patterns exclude a parent folder, but include a child"""
        self.patterns_file_path2 = os.path.join(self.tmpdir, 'patterns2')
        with open(self.patterns_file_path2, 'wb') as fd:
            fd.write(b'+ input/x/b\n- input/x*\n')
        self.cmd('init', '--encryption=repokey', self.repository_location)
        self.create_regular_file('x/a/foo_a', size=1024 * 80)
        self.create_regular_file('x/b/foo_b', size=1024 * 80)
        self.create_regular_file('y/foo_y', size=1024 * 80)
        output = self.cmd('create', '-v', '--list',
                          '--patterns-from=' + self.patterns_file_path2,
                          self.repository_location + '::test', 'input')
        self.assert_in('x input/x/a/foo_a', output)
        self.assert_in("A input/x/b/foo_b", output)
        self.assert_in('A input/y/foo_y', output)

    def test_create_pattern_exclude_folder_no_recurse(self):
        """test when patterns exclude a parent folder with '!' (no recurse), so a child include pattern has no effect"""
        self.patterns_file_path2 = os.path.join(self.tmpdir, 'patterns2')
        with open(self.patterns_file_path2, 'wb') as fd:
            fd.write(b'+ input/x/b\n! 
input/x*\n') self.cmd('init', '--encryption=repokey', self.repository_location) self.create_regular_file('x/a/foo_a', size=1024 * 80) self.create_regular_file('x/b/foo_b', size=1024 * 80) self.create_regular_file('y/foo_y', size=1024 * 80) output = self.cmd('create', '-v', '--list', '--patterns-from=' + self.patterns_file_path2, self.repository_location + '::test', 'input') self.assert_not_in('input/x/a/foo_a', output) self.assert_not_in('input/x/a', output) self.assert_in('A input/y/foo_y', output) def test_create_pattern_intermediate_folders_first(self): """test that intermediate folders appear first when patterns exclude a parent folder but include a child""" self.patterns_file_path2 = os.path.join(self.tmpdir, 'patterns2') with open(self.patterns_file_path2, 'wb') as fd: fd.write(b'+ input/x/a\n+ input/x/b\n- input/x*\n') self.cmd('init', '--encryption=repokey', self.repository_location) self.create_regular_file('x/a/foo_a', size=1024 * 80) self.create_regular_file('x/b/foo_b', size=1024 * 80) with changedir('input'): self.cmd('create', '--patterns-from=' + self.patterns_file_path2, self.repository_location + '::test', '.') # list the archive and verify that the "intermediate" folders appear before # their contents out = self.cmd('list', '--format', '{type} {path}{NL}', self.repository_location + '::test') out_list = out.splitlines() self.assert_in('d x/a', out_list) self.assert_in('d x/b', out_list) assert out_list.index('d x/a') < out_list.index('- x/a/foo_a') assert out_list.index('d x/b') < out_list.index('- x/b/foo_b') def test_create_no_cache_sync(self): self.create_test_files() self.cmd('init', '--encryption=repokey', self.repository_location) self.cmd('delete', '--cache-only', self.repository_location) create_json = json.loads(self.cmd('create', '--no-cache-sync', self.repository_location + '::test', 'input', '--json', '--error')) # ignore experimental warning info_json = json.loads(self.cmd('info', self.repository_location + '::test', '--json')) create_stats = create_json['cache']['stats'] info_stats = info_json['cache']['stats'] assert create_stats == info_stats self.cmd('delete', '--cache-only', self.repository_location) self.cmd('create', '--no-cache-sync', self.repository_location + '::test2', 'input') self.cmd('info', self.repository_location) self.cmd('check', self.repository_location) def test_extract_pattern_opt(self): self.cmd('init', '--encryption=repokey', self.repository_location) self.create_regular_file('file1', size=1024 * 80) self.create_regular_file('file2', size=1024 * 80) self.create_regular_file('file_important', size=1024 * 80) self.cmd('create', self.repository_location + '::test', 'input') with changedir('output'): self.cmd('extract', '--pattern=+input/file_important', '--pattern=-input/file*', self.repository_location + '::test') self.assert_equal(sorted(os.listdir('output/input')), ['file_important']) def _assert_test_caches(self): with changedir('output'): self.cmd('extract', self.repository_location + '::test') self.assert_equal(sorted(os.listdir('output/input')), ['cache2', 'file1']) self.assert_equal(sorted(os.listdir('output/input/cache2')), [CACHE_TAG_NAME]) def test_exclude_caches(self): self._create_test_caches() self.cmd('create', '--exclude-caches', self.repository_location + '::test', 'input') self._assert_test_caches() def test_recreate_exclude_caches(self): self._create_test_caches() self.cmd('create', self.repository_location + '::test', 'input') self.cmd('recreate', '--exclude-caches', self.repository_location + '::test') 
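        # after recreate, the archive must look as if it had been created with --exclude-caches in the first place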
self._assert_test_caches() def _create_test_tagged(self): self.cmd('init', '--encryption=repokey', self.repository_location) self.create_regular_file('file1', size=1024 * 80) self.create_regular_file('tagged1/.NOBACKUP') self.create_regular_file('tagged2/00-NOBACKUP') self.create_regular_file('tagged3/.NOBACKUP/file2', size=1024) def _assert_test_tagged(self): with changedir('output'): self.cmd('extract', self.repository_location + '::test') self.assert_equal(sorted(os.listdir('output/input')), ['file1']) def test_exclude_tagged(self): self._create_test_tagged() self.cmd('create', '--exclude-if-present', '.NOBACKUP', '--exclude-if-present', '00-NOBACKUP', self.repository_location + '::test', 'input') self._assert_test_tagged() def test_recreate_exclude_tagged(self): self._create_test_tagged() self.cmd('create', self.repository_location + '::test', 'input') self.cmd('recreate', '--exclude-if-present', '.NOBACKUP', '--exclude-if-present', '00-NOBACKUP', self.repository_location + '::test') self._assert_test_tagged() def _create_test_keep_tagged(self): self.cmd('init', '--encryption=repokey', self.repository_location) self.create_regular_file('file0', size=1024) self.create_regular_file('tagged1/.NOBACKUP1') self.create_regular_file('tagged1/file1', size=1024) self.create_regular_file('tagged2/.NOBACKUP2/subfile1', size=1024) self.create_regular_file('tagged2/file2', size=1024) self.create_regular_file('tagged3/%s' % CACHE_TAG_NAME, contents=CACHE_TAG_CONTENTS + b' extra stuff') self.create_regular_file('tagged3/file3', size=1024) self.create_regular_file('taggedall/.NOBACKUP1') self.create_regular_file('taggedall/.NOBACKUP2/subfile1', size=1024) self.create_regular_file('taggedall/%s' % CACHE_TAG_NAME, contents=CACHE_TAG_CONTENTS + b' extra stuff') self.create_regular_file('taggedall/file4', size=1024) def _assert_test_keep_tagged(self): with changedir('output'): self.cmd('extract', self.repository_location + '::test') self.assert_equal(sorted(os.listdir('output/input')), ['file0', 'tagged1', 'tagged2', 'tagged3', 'taggedall']) self.assert_equal(os.listdir('output/input/tagged1'), ['.NOBACKUP1']) self.assert_equal(os.listdir('output/input/tagged2'), ['.NOBACKUP2']) self.assert_equal(os.listdir('output/input/tagged3'), [CACHE_TAG_NAME]) self.assert_equal(sorted(os.listdir('output/input/taggedall')), ['.NOBACKUP1', '.NOBACKUP2', CACHE_TAG_NAME, ]) def test_exclude_keep_tagged_deprecation(self): self.cmd('init', '--encryption=repokey', self.repository_location) output_warn = self.cmd('create', '--exclude-caches', '--keep-tag-files', self.repository_location + '::test', src_dir) self.assert_in('--keep-tag-files" has been deprecated.', output_warn) def test_exclude_keep_tagged(self): self._create_test_keep_tagged() self.cmd('create', '--exclude-if-present', '.NOBACKUP1', '--exclude-if-present', '.NOBACKUP2', '--exclude-caches', '--keep-exclude-tags', self.repository_location + '::test', 'input') self._assert_test_keep_tagged() def test_recreate_exclude_keep_tagged(self): self._create_test_keep_tagged() self.cmd('create', self.repository_location + '::test', 'input') self.cmd('recreate', '--exclude-if-present', '.NOBACKUP1', '--exclude-if-present', '.NOBACKUP2', '--exclude-caches', '--keep-exclude-tags', self.repository_location + '::test') self._assert_test_keep_tagged() @pytest.mark.skipif(not are_hardlinks_supported(), reason='hardlinks not supported') def test_recreate_hardlinked_tags(self): # test for issue #4911 self.cmd('init', '--encryption=none', self.repository_location) 
self.create_regular_file('file1', contents=CACHE_TAG_CONTENTS) # "wrong" filename, but correct tag contents os.mkdir(os.path.join(self.input_path, 'subdir')) # to make sure the tag is encountered *after* file1 os.link(os.path.join(self.input_path, 'file1'), os.path.join(self.input_path, 'subdir', CACHE_TAG_NAME)) # correct tag name, hardlink to file1 self.cmd('create', self.repository_location + '::test', 'input') # in the "test" archive, we now have, in this order: # - a regular file item for "file1" # - a hardlink item for "CACHEDIR.TAG" referring back to file1 for its contents self.cmd('recreate', '--exclude-caches', '--keep-exclude-tags', self.repository_location + '::test') # if issue #4911 is present, the recreate will crash with a KeyError for "input/file1" @pytest.mark.skipif(not xattr.XATTR_FAKEROOT, reason='Linux capabilities test, requires fakeroot >= 1.20.2') def test_extract_capabilities(self): fchown = os.fchown # We need to manually patch chown to get the behaviour Linux has, since fakeroot does not # accurately model the interaction of chown(2) and Linux capabilities, i.e. it does not remove them. def patched_fchown(fd, uid, gid): xattr.setxattr(fd, 'security.capability', None, follow_symlinks=False) fchown(fd, uid, gid) # The capability descriptor used here is valid and taken from a /usr/bin/ping capabilities = b'\x01\x00\x00\x02\x00 \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' self.create_regular_file('file') xattr.setxattr('input/file', 'security.capability', capabilities) self.cmd('init', '--encryption=repokey', self.repository_location) self.cmd('create', self.repository_location + '::test', 'input') with changedir('output'): with patch.object(os, 'fchown', patched_fchown): self.cmd('extract', self.repository_location + '::test') assert xattr.getxattr('input/file', 'security.capability') == capabilities @pytest.mark.skipif(not xattr.XATTR_FAKEROOT, reason='xattr not supported on this system or on this version of' 'fakeroot') def test_extract_xattrs_errors(self): def patched_setxattr_E2BIG(*args, **kwargs): raise OSError(errno.E2BIG, 'E2BIG') def patched_setxattr_ENOTSUP(*args, **kwargs): raise OSError(errno.ENOTSUP, 'ENOTSUP') def patched_setxattr_EACCES(*args, **kwargs): raise OSError(errno.EACCES, 'EACCES') self.create_regular_file('file') xattr.setxattr('input/file', 'user.attribute', 'value') self.cmd('init', self.repository_location, '-e' 'none') self.cmd('create', self.repository_location + '::test', 'input') with changedir('output'): input_abspath = os.path.abspath('input/file') with patch.object(xattr, 'setxattr', patched_setxattr_E2BIG): out = self.cmd('extract', self.repository_location + '::test', exit_code=EXIT_WARNING) assert out == (input_abspath + ': when setting extended attribute user.attribute: too big for this filesystem\n') os.remove(input_abspath) with patch.object(xattr, 'setxattr', patched_setxattr_ENOTSUP): out = self.cmd('extract', self.repository_location + '::test', exit_code=EXIT_WARNING) assert out == (input_abspath + ': when setting extended attribute user.attribute: xattrs not supported on this filesystem\n') os.remove(input_abspath) with patch.object(xattr, 'setxattr', patched_setxattr_EACCES): out = self.cmd('extract', self.repository_location + '::test', exit_code=EXIT_WARNING) assert out == (input_abspath + ': when setting extended attribute user.attribute: Permission denied\n') assert os.path.isfile(input_abspath) def test_path_normalization(self): self.cmd('init', '--encryption=repokey', self.repository_location) 
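        # back up via a path containing '..' and '.' components and verify the archived paths come out normalized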
self.create_regular_file('dir1/dir2/file', size=1024 * 80) with changedir('input/dir1/dir2'): self.cmd('create', self.repository_location + '::test', '../../../input/dir1/../dir1/dir2/..') output = self.cmd('list', self.repository_location + '::test') self.assert_not_in('..', output) self.assert_in(' input/dir1/dir2/file', output) def test_exclude_normalization(self): self.cmd('init', '--encryption=repokey', self.repository_location) self.create_regular_file('file1', size=1024 * 80) self.create_regular_file('file2', size=1024 * 80) with changedir('input'): self.cmd('create', '--exclude=file1', self.repository_location + '::test1', '.') with changedir('output'): self.cmd('extract', self.repository_location + '::test1') self.assert_equal(sorted(os.listdir('output')), ['file2']) with changedir('input'): self.cmd('create', '--exclude=./file1', self.repository_location + '::test2', '.') with changedir('output'): self.cmd('extract', self.repository_location + '::test2') self.assert_equal(sorted(os.listdir('output')), ['file2']) self.cmd('create', '--exclude=input/./file1', self.repository_location + '::test3', 'input') with changedir('output'): self.cmd('extract', self.repository_location + '::test3') self.assert_equal(sorted(os.listdir('output/input')), ['file2']) def test_repeated_files(self): self.create_regular_file('file1', size=1024 * 80) self.cmd('init', '--encryption=repokey', self.repository_location) self.cmd('create', self.repository_location + '::test', 'input', 'input') def test_overwrite(self): self.create_regular_file('file1', size=1024 * 80) self.create_regular_file('dir2/file2', size=1024 * 80) self.cmd('init', '--encryption=repokey', self.repository_location) self.cmd('create', self.repository_location + '::test', 'input') # Overwriting regular files and directories should be supported os.mkdir('output/input') os.mkdir('output/input/file1') os.mkdir('output/input/dir2') with changedir('output'): self.cmd('extract', self.repository_location + '::test') self.assert_dirs_equal('input', 'output/input') # But non-empty dirs should fail os.unlink('output/input/file1') os.mkdir('output/input/file1') os.mkdir('output/input/file1/dir') with changedir('output'): self.cmd('extract', self.repository_location + '::test', exit_code=1) def test_rename(self): self.create_regular_file('file1', size=1024 * 80) self.create_regular_file('dir2/file2', size=1024 * 80) self.cmd('init', '--encryption=repokey', self.repository_location) self.cmd('create', self.repository_location + '::test', 'input') self.cmd('create', self.repository_location + '::test.2', 'input') self.cmd('extract', '--dry-run', self.repository_location + '::test') self.cmd('extract', '--dry-run', self.repository_location + '::test.2') self.cmd('rename', self.repository_location + '::test', 'test.3') self.cmd('extract', '--dry-run', self.repository_location + '::test.2') self.cmd('rename', self.repository_location + '::test.2', 'test.4') self.cmd('extract', '--dry-run', self.repository_location + '::test.3') self.cmd('extract', '--dry-run', self.repository_location + '::test.4') # Make sure both archives have been renamed with Repository(self.repository_path) as repository: manifest, key = Manifest.load(repository, Manifest.NO_OPERATION_CHECK) self.assert_equal(len(manifest.archives), 2) self.assert_in('test.3', manifest.archives) self.assert_in('test.4', manifest.archives) def test_info(self): self.create_regular_file('file1', size=1024 * 80) self.cmd('init', '--encryption=repokey', self.repository_location) self.cmd('create', 
self.repository_location + '::test', 'input') info_repo = self.cmd('info', self.repository_location) assert 'All archives:' in info_repo info_archive = self.cmd('info', self.repository_location + '::test') assert 'Archive name: test\n' in info_archive info_archive = self.cmd('info', '--first', '1', self.repository_location) assert 'Archive name: test\n' in info_archive def test_info_json(self): self.create_regular_file('file1', size=1024 * 80) self.cmd('init', '--encryption=repokey', self.repository_location) self.cmd('create', self.repository_location + '::test', 'input') info_repo = json.loads(self.cmd('info', '--json', self.repository_location)) repository = info_repo['repository'] assert len(repository['id']) == 64 assert 'last_modified' in repository assert datetime.strptime(repository['last_modified'], ISO_FORMAT) # must not raise assert info_repo['encryption']['mode'] == 'repokey' assert 'keyfile' not in info_repo['encryption'] cache = info_repo['cache'] stats = cache['stats'] assert all(isinstance(o, int) for o in stats.values()) assert all(key in stats for key in ('total_chunks', 'total_csize', 'total_size', 'total_unique_chunks', 'unique_csize', 'unique_size')) info_archive = json.loads(self.cmd('info', '--json', self.repository_location + '::test')) assert info_repo['repository'] == info_archive['repository'] assert info_repo['cache'] == info_archive['cache'] archives = info_archive['archives'] assert len(archives) == 1 archive = archives[0] assert archive['name'] == 'test' assert isinstance(archive['command_line'], list) assert isinstance(archive['duration'], float) assert len(archive['id']) == 64 assert 'stats' in archive assert datetime.strptime(archive['start'], ISO_FORMAT) assert datetime.strptime(archive['end'], ISO_FORMAT) def test_comment(self): self.create_regular_file('file1', size=1024 * 80) self.cmd('init', '--encryption=repokey', self.repository_location) self.cmd('create', self.repository_location + '::test1', 'input') self.cmd('create', '--comment', 'this is the comment', self.repository_location + '::test2', 'input') self.cmd('create', '--comment', '"deleted" comment', self.repository_location + '::test3', 'input') self.cmd('create', '--comment', 'preserved comment', self.repository_location + '::test4', 'input') assert 'Comment: \n' in self.cmd('info', self.repository_location + '::test1') assert 'Comment: this is the comment' in self.cmd('info', self.repository_location + '::test2') self.cmd('recreate', self.repository_location + '::test1', '--comment', 'added comment') self.cmd('recreate', self.repository_location + '::test2', '--comment', 'modified comment') self.cmd('recreate', self.repository_location + '::test3', '--comment', '') self.cmd('recreate', self.repository_location + '::test4', '12345') assert 'Comment: added comment' in self.cmd('info', self.repository_location + '::test1') assert 'Comment: modified comment' in self.cmd('info', self.repository_location + '::test2') assert 'Comment: \n' in self.cmd('info', self.repository_location + '::test3') assert 'Comment: preserved comment' in self.cmd('info', self.repository_location + '::test4') def test_delete(self): self.create_regular_file('file1', size=1024 * 80) self.create_regular_file('dir2/file2', size=1024 * 80) self.cmd('init', '--encryption=repokey', self.repository_location) self.cmd('create', self.repository_location + '::test', 'input') self.cmd('create', self.repository_location + '::test.2', 'input') self.cmd('create', self.repository_location + '::test.3', 'input') self.cmd('create', 
self.repository_location + '::another_test.1', 'input') self.cmd('create', self.repository_location + '::another_test.2', 'input') self.cmd('extract', '--dry-run', self.repository_location + '::test') self.cmd('extract', '--dry-run', self.repository_location + '::test.2') self.cmd('delete', '--prefix', 'another_', self.repository_location) self.cmd('delete', '--last', '1', self.repository_location) self.cmd('delete', self.repository_location + '::test') self.cmd('extract', '--dry-run', self.repository_location + '::test.2') output = self.cmd('delete', '--stats', self.repository_location + '::test.2') self.assert_in('Deleted data:', output) # Make sure all data except the manifest has been deleted with Repository(self.repository_path) as repository: self.assert_equal(len(repository), 1) def test_delete_multiple(self): self.create_regular_file('file1', size=1024 * 80) self.cmd('init', '--encryption=repokey', self.repository_location) self.cmd('create', self.repository_location + '::test1', 'input') self.cmd('create', self.repository_location + '::test2', 'input') self.cmd('create', self.repository_location + '::test3', 'input') self.cmd('delete', self.repository_location + '::test1', 'test2') self.cmd('extract', '--dry-run', self.repository_location + '::test3') self.cmd('delete', self.repository_location, 'test3') assert not self.cmd('list', self.repository_location) def test_delete_repo(self): self.create_regular_file('file1', size=1024 * 80) self.create_regular_file('dir2/file2', size=1024 * 80) self.cmd('init', '--encryption=repokey', self.repository_location) self.cmd('create', self.repository_location + '::test', 'input') self.cmd('create', self.repository_location + '::test.2', 'input') os.environ['BORG_DELETE_I_KNOW_WHAT_I_AM_DOING'] = 'no' self.cmd('delete', self.repository_location, exit_code=2) assert os.path.exists(self.repository_path) os.environ['BORG_DELETE_I_KNOW_WHAT_I_AM_DOING'] = 'YES' self.cmd('delete', self.repository_location) # Make sure the repo is gone self.assertFalse(os.path.exists(self.repository_path)) def test_delete_force(self): self.cmd('init', '--encryption=none', self.repository_location) self.create_src_archive('test') with Repository(self.repository_path, exclusive=True) as repository: manifest, key = Manifest.load(repository, Manifest.NO_OPERATION_CHECK) archive = Archive(repository, key, manifest, 'test') for item in archive.iter_items(): if item.path.endswith('testsuite/archiver.py'): repository.delete(item.chunks[-1].id) break else: assert False # missed the file repository.commit() output = self.cmd('delete', '--force', self.repository_location + '::test') self.assert_in('deleted archive was corrupted', output) self.cmd('check', '--repair', self.repository_location) output = self.cmd('list', self.repository_location) self.assert_not_in('test', output) def test_delete_double_force(self): self.cmd('init', '--encryption=none', self.repository_location) self.create_src_archive('test') with Repository(self.repository_path, exclusive=True) as repository: manifest, key = Manifest.load(repository, Manifest.NO_OPERATION_CHECK) archive = Archive(repository, key, manifest, 'test') id = archive.metadata.items[0] repository.put(id, b'corrupted items metadata stream chunk') repository.commit() self.cmd('delete', '--force', '--force', self.repository_location + '::test') self.cmd('check', '--repair', self.repository_location) output = self.cmd('list', self.repository_location) self.assert_not_in('test', output) def test_corrupted_repository(self): self.cmd('init', 
'--encryption=repokey', self.repository_location) self.create_src_archive('test') self.cmd('extract', '--dry-run', self.repository_location + '::test') output = self.cmd('check', '--show-version', self.repository_location) self.assert_in('borgbackup version', output) # implied output even without --info given self.assert_not_in('Starting repository check', output) # --info not given for root logger name = sorted(os.listdir(os.path.join(self.tmpdir, 'repository', 'data', '0')), reverse=True)[1] with open(os.path.join(self.tmpdir, 'repository', 'data', '0', name), 'r+b') as fd: fd.seek(100) fd.write(b'XXXX') output = self.cmd('check', '--info', self.repository_location, exit_code=1) self.assert_in('Starting repository check', output) # --info given for root logger def test_readonly_check(self): self.cmd('init', '--encryption=repokey', self.repository_location) self.create_src_archive('test') with self.read_only(self.repository_path): # verify that command normally doesn't work with read-only repo if self.FORK_DEFAULT: self.cmd('check', '--verify-data', self.repository_location, exit_code=EXIT_ERROR) else: with pytest.raises((LockFailed, RemoteRepository.RPCError)) as excinfo: self.cmd('check', '--verify-data', self.repository_location) if isinstance(excinfo.value, RemoteRepository.RPCError): assert excinfo.value.exception_class == 'LockFailed' # verify that command works with read-only repo when using --bypass-lock self.cmd('check', '--verify-data', self.repository_location, '--bypass-lock') def test_readonly_diff(self): self.cmd('init', '--encryption=repokey', self.repository_location) self.create_src_archive('a') self.create_src_archive('b') with self.read_only(self.repository_path): # verify that command normally doesn't work with read-only repo if self.FORK_DEFAULT: self.cmd('diff', '%s::a' % self.repository_location, 'b', exit_code=EXIT_ERROR) else: with pytest.raises((LockFailed, RemoteRepository.RPCError)) as excinfo: self.cmd('diff', '%s::a' % self.repository_location, 'b') if isinstance(excinfo.value, RemoteRepository.RPCError): assert excinfo.value.exception_class == 'LockFailed' # verify that command works with read-only repo when using --bypass-lock self.cmd('diff', '%s::a' % self.repository_location, 'b', '--bypass-lock') def test_readonly_export_tar(self): self.cmd('init', '--encryption=repokey', self.repository_location) self.create_src_archive('test') with self.read_only(self.repository_path): # verify that command normally doesn't work with read-only repo if self.FORK_DEFAULT: self.cmd('export-tar', '%s::test' % self.repository_location, 'test.tar', exit_code=EXIT_ERROR) else: with pytest.raises((LockFailed, RemoteRepository.RPCError)) as excinfo: self.cmd('export-tar', '%s::test' % self.repository_location, 'test.tar') if isinstance(excinfo.value, RemoteRepository.RPCError): assert excinfo.value.exception_class == 'LockFailed' # verify that command works with read-only repo when using --bypass-lock self.cmd('export-tar', '%s::test' % self.repository_location, 'test.tar', '--bypass-lock') def test_readonly_extract(self): self.cmd('init', '--encryption=repokey', self.repository_location) self.create_src_archive('test') with self.read_only(self.repository_path): # verify that command normally doesn't work with read-only repo if self.FORK_DEFAULT: self.cmd('extract', '%s::test' % self.repository_location, exit_code=EXIT_ERROR) else: with pytest.raises((LockFailed, RemoteRepository.RPCError)) as excinfo: self.cmd('extract', '%s::test' % self.repository_location) if 
isinstance(excinfo.value, RemoteRepository.RPCError): assert excinfo.value.exception_class == 'LockFailed' # verify that command works with read-only repo when using --bypass-lock self.cmd('extract', '%s::test' % self.repository_location, '--bypass-lock') def test_readonly_info(self): self.cmd('init', '--encryption=repokey', self.repository_location) self.create_src_archive('test') with self.read_only(self.repository_path): # verify that command normally doesn't work with read-only repo if self.FORK_DEFAULT: self.cmd('info', self.repository_location, exit_code=EXIT_ERROR) else: with pytest.raises((LockFailed, RemoteRepository.RPCError)) as excinfo: self.cmd('info', self.repository_location) if isinstance(excinfo.value, RemoteRepository.RPCError): assert excinfo.value.exception_class == 'LockFailed' # verify that command works with read-only repo when using --bypass-lock self.cmd('info', self.repository_location, '--bypass-lock') def test_readonly_list(self): self.cmd('init', '--encryption=repokey', self.repository_location) self.create_src_archive('test') with self.read_only(self.repository_path): # verify that command normally doesn't work with read-only repo if self.FORK_DEFAULT: self.cmd('list', self.repository_location, exit_code=EXIT_ERROR) else: with pytest.raises((LockFailed, RemoteRepository.RPCError)) as excinfo: self.cmd('list', self.repository_location) if isinstance(excinfo.value, RemoteRepository.RPCError): assert excinfo.value.exception_class == 'LockFailed' # verify that command works with read-only repo when using --bypass-lock self.cmd('list', self.repository_location, '--bypass-lock') @unittest.skipUnless(has_llfuse, 'llfuse not installed') def test_readonly_mount(self): self.cmd('init', '--encryption=repokey', self.repository_location) self.create_src_archive('test') with self.read_only(self.repository_path): # verify that command normally doesn't work with read-only repo if self.FORK_DEFAULT: with self.fuse_mount(self.repository_location, exit_code=EXIT_ERROR): pass else: with pytest.raises((LockFailed, RemoteRepository.RPCError)) as excinfo: # self.fuse_mount always assumes fork=True, so for this test we have to manually set fork=False with self.fuse_mount(self.repository_location, fork=False): pass if isinstance(excinfo.value, RemoteRepository.RPCError): assert excinfo.value.exception_class == 'LockFailed' # verify that command works with read-only repo when using --bypass-lock with self.fuse_mount(self.repository_location, None, '--bypass-lock'): pass @pytest.mark.skipif('BORG_TESTS_IGNORE_MODES' in os.environ, reason='modes unreliable') def test_umask(self): self.create_regular_file('file1', size=1024 * 80) self.cmd('init', '--encryption=repokey', self.repository_location) self.cmd('create', self.repository_location + '::test', 'input') mode = os.stat(self.repository_path).st_mode self.assertEqual(stat.S_IMODE(mode), 0o700) def test_create_dry_run(self): self.cmd('init', '--encryption=repokey', self.repository_location) self.cmd('create', '--dry-run', self.repository_location + '::test', 'input') # Make sure no archive has been created with Repository(self.repository_path) as repository: manifest, key = Manifest.load(repository, Manifest.NO_OPERATION_CHECK) self.assert_equal(len(manifest.archives), 0) def add_unknown_feature(self, operation): with Repository(self.repository_path, exclusive=True) as repository: manifest, key = Manifest.load(repository, Manifest.NO_OPERATION_CHECK) manifest.config[b'feature_flags'] = {operation.value.encode(): {b'mandatory': 
[b'unknown-feature']}} manifest.write() repository.commit() def cmd_raises_unknown_feature(self, args): if self.FORK_DEFAULT: self.cmd(*args, exit_code=EXIT_ERROR) else: with pytest.raises(MandatoryFeatureUnsupported) as excinfo: self.cmd(*args) assert excinfo.value.args == (['unknown-feature'],) def test_unknown_feature_on_create(self): print(self.cmd('init', '--encryption=repokey', self.repository_location)) self.add_unknown_feature(Manifest.Operation.WRITE) self.cmd_raises_unknown_feature(['create', self.repository_location + '::test', 'input']) def test_unknown_feature_on_cache_sync(self): self.cmd('init', '--encryption=repokey', self.repository_location) self.cmd('delete', '--cache-only', self.repository_location) self.add_unknown_feature(Manifest.Operation.READ) self.cmd_raises_unknown_feature(['create', self.repository_location + '::test', 'input']) def test_unknown_feature_on_change_passphrase(self): print(self.cmd('init', '--encryption=repokey', self.repository_location)) self.add_unknown_feature(Manifest.Operation.CHECK) self.cmd_raises_unknown_feature(['change-passphrase', self.repository_location]) def test_unknown_feature_on_read(self): print(self.cmd('init', '--encryption=repokey', self.repository_location)) self.cmd('create', self.repository_location + '::test', 'input') self.add_unknown_feature(Manifest.Operation.READ) with changedir('output'): self.cmd_raises_unknown_feature(['extract', self.repository_location + '::test']) self.cmd_raises_unknown_feature(['list', self.repository_location]) self.cmd_raises_unknown_feature(['info', self.repository_location + '::test']) def test_unknown_feature_on_rename(self): print(self.cmd('init', '--encryption=repokey', self.repository_location)) self.cmd('create', self.repository_location + '::test', 'input') self.add_unknown_feature(Manifest.Operation.CHECK) self.cmd_raises_unknown_feature(['rename', self.repository_location + '::test', 'other']) def test_unknown_feature_on_delete(self): print(self.cmd('init', '--encryption=repokey', self.repository_location)) self.cmd('create', self.repository_location + '::test', 'input') self.add_unknown_feature(Manifest.Operation.DELETE) # delete of an archive raises self.cmd_raises_unknown_feature(['delete', self.repository_location + '::test']) self.cmd_raises_unknown_feature(['prune', '--keep-daily=3', self.repository_location]) # delete of the whole repository ignores features self.cmd('delete', self.repository_location) @unittest.skipUnless(has_llfuse, 'llfuse not installed') def test_unknown_feature_on_mount(self): self.cmd('init', '--encryption=repokey', self.repository_location) self.cmd('create', self.repository_location + '::test', 'input') self.add_unknown_feature(Manifest.Operation.READ) mountpoint = os.path.join(self.tmpdir, 'mountpoint') os.mkdir(mountpoint) # XXX this might hang if it doesn't raise an error self.cmd_raises_unknown_feature(['mount', self.repository_location + '::test', mountpoint]) @pytest.mark.allow_cache_wipe def test_unknown_mandatory_feature_in_cache(self): if self.prefix: path_prefix = 'ssh://__testsuite__' else: path_prefix = '' print(self.cmd('init', '--encryption=repokey', self.repository_location)) with Repository(self.repository_path, exclusive=True) as repository: if path_prefix: repository._location = Location(self.repository_location) manifest, key = Manifest.load(repository, Manifest.NO_OPERATION_CHECK) with Cache(repository, key, manifest) as cache: cache.begin_txn() cache.cache_config.mandatory_features = set(['unknown-feature']) cache.commit() if 
self.FORK_DEFAULT: self.cmd('create', self.repository_location + '::test', 'input') else: called = False wipe_cache_safe = LocalCache.wipe_cache def wipe_wrapper(*args): nonlocal called called = True wipe_cache_safe(*args) with patch.object(LocalCache, 'wipe_cache', wipe_wrapper): self.cmd('create', self.repository_location + '::test', 'input') assert called with Repository(self.repository_path, exclusive=True) as repository: if path_prefix: repository._location = Location(self.repository_location) manifest, key = Manifest.load(repository, Manifest.NO_OPERATION_CHECK) with Cache(repository, key, manifest) as cache: assert cache.cache_config.mandatory_features == set([]) def test_progress_on(self): self.create_regular_file('file1', size=1024 * 80) self.cmd('init', '--encryption=repokey', self.repository_location) output = self.cmd('create', '--progress', self.repository_location + '::test4', 'input') self.assert_in("\r", output) def test_progress_off(self): self.create_regular_file('file1', size=1024 * 80) self.cmd('init', '--encryption=repokey', self.repository_location) output = self.cmd('create', self.repository_location + '::test5', 'input') self.assert_not_in("\r", output) def test_file_status(self): """test that various file status show expected results clearly incomplete: only tests for the weird "unchanged" status for now""" self.create_regular_file('file1', size=1024 * 80) time.sleep(1) # file2 must have newer timestamps than file1 self.create_regular_file('file2', size=1024 * 80) self.cmd('init', '--encryption=repokey', self.repository_location) output = self.cmd('create', '--list', self.repository_location + '::test', 'input') self.assert_in("A input/file1", output) self.assert_in("A input/file2", output) # should find first file as unmodified output = self.cmd('create', '--list', self.repository_location + '::test1', 'input') self.assert_in("U input/file1", output) # this is expected, although surprising, for why, see: # https://borgbackup.readthedocs.org/en/latest/faq.html#i-am-seeing-a-added-status-for-a-unchanged-file self.assert_in("A input/file2", output) def test_file_status_cs_cache_mode(self): """test that a changed file with faked "previous" mtime still gets backed up in ctime,size cache_mode""" self.create_regular_file('file1', contents=b'123') time.sleep(1) # file2 must have newer timestamps than file1 self.create_regular_file('file2', size=10) self.cmd('init', '--encryption=repokey', self.repository_location) output = self.cmd('create', '--list', '--files-cache=ctime,size', self.repository_location + '::test1', 'input') # modify file1, but cheat with the mtime (and atime) and also keep same size: st = os.stat('input/file1') self.create_regular_file('file1', contents=b'321') os.utime('input/file1', ns=(st.st_atime_ns, st.st_mtime_ns)) # this mode uses ctime for change detection, so it should find file1 as modified output = self.cmd('create', '--list', '--files-cache=ctime,size', self.repository_location + '::test2', 'input') self.assert_in("M input/file1", output) def test_file_status_ms_cache_mode(self): """test that a chmod'ed file with no content changes does not get chunked again in mtime,size cache_mode""" self.create_regular_file('file1', size=10) time.sleep(1) # file2 must have newer timestamps than file1 self.create_regular_file('file2', size=10) self.cmd('init', '--encryption=repokey', self.repository_location) output = self.cmd('create', '--list', '--files-cache=mtime,size', self.repository_location + '::test1', 'input') # change mode of file1, no content 
change: st = os.stat('input/file1') os.chmod('input/file1', st.st_mode ^ stat.S_IRWXO) # this triggers a ctime change, but mtime is unchanged # this mode uses mtime for change detection, so it should find file1 as unmodified output = self.cmd('create', '--list', '--files-cache=mtime,size', self.repository_location + '::test2', 'input') self.assert_in("U input/file1", output) def test_file_status_rc_cache_mode(self): """test that files get rechunked unconditionally in rechunk,ctime cache mode""" self.create_regular_file('file1', size=10) time.sleep(1) # file2 must have newer timestamps than file1 self.create_regular_file('file2', size=10) self.cmd('init', '--encryption=repokey', self.repository_location) output = self.cmd('create', '--list', '--files-cache=rechunk,ctime', self.repository_location + '::test1', 'input') # no changes here, but this mode rechunks unconditionally output = self.cmd('create', '--list', '--files-cache=rechunk,ctime', self.repository_location + '::test2', 'input') self.assert_in("A input/file1", output) def test_file_status_excluded(self): """test that excluded paths are listed""" self.create_regular_file('file1', size=1024 * 80) time.sleep(1) # file2 must have newer timestamps than file1 self.create_regular_file('file2', size=1024 * 80) if has_lchflags: self.create_regular_file('file3', size=1024 * 80) platform.set_flags(os.path.join(self.input_path, 'file3'), stat.UF_NODUMP) self.cmd('init', '--encryption=repokey', self.repository_location) output = self.cmd('create', '--list', '--exclude-nodump', self.repository_location + '::test', 'input') self.assert_in("A input/file1", output) self.assert_in("A input/file2", output) if has_lchflags: self.assert_in("x input/file3", output) # should find second file as excluded output = self.cmd('create', '--list', '--exclude-nodump', self.repository_location + '::test1', 'input', '--exclude', '*/file2') self.assert_in("U input/file1", output) self.assert_in("x input/file2", output) if has_lchflags: self.assert_in("x input/file3", output) def test_create_json(self): self.create_regular_file('file1', size=1024 * 80) self.cmd('init', '--encryption=repokey', self.repository_location) create_info = json.loads(self.cmd('create', '--json', self.repository_location + '::test', 'input')) # The usual keys assert 'encryption' in create_info assert 'repository' in create_info assert 'cache' in create_info assert 'last_modified' in create_info['repository'] archive = create_info['archive'] assert archive['name'] == 'test' assert isinstance(archive['command_line'], list) assert isinstance(archive['duration'], float) assert len(archive['id']) == 64 assert 'stats' in archive def test_create_topical(self): self.create_regular_file('file1', size=1024 * 80) time.sleep(1) # file2 must have newer timestamps than file1 self.create_regular_file('file2', size=1024 * 80) self.cmd('init', '--encryption=repokey', self.repository_location) # no listing by default output = self.cmd('create', self.repository_location + '::test', 'input') self.assert_not_in('file1', output) # shouldn't be listed even if unchanged output = self.cmd('create', self.repository_location + '::test0', 'input') self.assert_not_in('file1', output) # should list the file as unchanged output = self.cmd('create', '--list', '--filter=U', self.repository_location + '::test1', 'input') self.assert_in('file1', output) # should *not* list the file as changed output = self.cmd('create', '--list', '--filter=AM', self.repository_location + '::test2', 'input') self.assert_not_in('file1', 
output)
        # change the file
        self.create_regular_file('file1', size=1024 * 100)
        # should list the file as changed
        output = self.cmd('create', '--list', '--filter=AM', self.repository_location + '::test3', 'input')
        self.assert_in('file1', output)

    def test_create_read_special_broken_symlink(self):
        os.symlink('somewhere doesnt exist', os.path.join(self.input_path, 'link'))
        self.cmd('init', '--encryption=repokey', self.repository_location)
        archive = self.repository_location + '::test'
        self.cmd('create', '--read-special', archive, 'input')
        output = self.cmd('list', archive)
        assert 'input/link -> somewhere doesnt exist' in output

    # def test_cmdline_compatibility(self):
    #     self.create_regular_file('file1', size=1024 * 80)
    #     self.cmd('init', '--encryption=repokey', self.repository_location)
    #     self.cmd('create', self.repository_location + '::test', 'input')
    #     output = self.cmd('foo', self.repository_location, '--old')
    #     self.assert_in('"--old" has been deprecated. Use "--new" instead', output)

    def test_prune_repository(self):
        self.cmd('init', '--encryption=repokey', self.repository_location)
        self.cmd('create', self.repository_location + '::test1', src_dir)
        self.cmd('create', self.repository_location + '::test2', src_dir)
        # these are not really checkpoints, but they look like some:
        self.cmd('create', self.repository_location + '::test3.checkpoint', src_dir)
        self.cmd('create', self.repository_location + '::test3.checkpoint.1', src_dir)
        self.cmd('create', self.repository_location + '::test4.checkpoint', src_dir)
        output = self.cmd('prune', '--list', '--dry-run', self.repository_location, '--keep-daily=2')
        self.assert_in('Keeping archive: test2', output)
        self.assert_in('Would prune: test1', output)
        # must keep the latest non-checkpoint archive:
        self.assert_in('Keeping archive: test2', output)
        # must keep the latest checkpoint archive:
        self.assert_in('Keeping archive: test4.checkpoint', output)
        output = self.cmd('list', self.repository_location)
        self.assert_in('test1', output)
        self.assert_in('test2', output)
        self.assert_in('test3.checkpoint', output)
        self.assert_in('test3.checkpoint.1', output)
        self.assert_in('test4.checkpoint', output)
        self.cmd('prune', self.repository_location, '--keep-daily=2')
        output = self.cmd('list', self.repository_location)
        self.assert_not_in('test1', output)
        # the latest non-checkpoint archive must still be there:
        self.assert_in('test2', output)
        # only the latest checkpoint archive must still be there:
        self.assert_not_in('test3.checkpoint', output)
        self.assert_not_in('test3.checkpoint.1', output)
        self.assert_in('test4.checkpoint', output)
        # now we supersede the latest checkpoint with a successful backup:
        self.cmd('create', self.repository_location + '::test5', src_dir)
        self.cmd('prune', self.repository_location, '--keep-daily=2')
        output = self.cmd('list', self.repository_location)
        # all checkpoints should be gone now:
        self.assert_not_in('checkpoint', output)
        # the latest archive must still be there
        self.assert_in('test5', output)

    def test_prune_repository_save_space(self):
        self.cmd('init', '--encryption=repokey', self.repository_location)
        self.cmd('create', self.repository_location + '::test1', src_dir)
        self.cmd('create', self.repository_location + '::test2', src_dir)
        output = self.cmd('prune', '--list', '--dry-run', self.repository_location, '--keep-daily=2')
        self.assert_in('Keeping archive: test2', output)
        self.assert_in('Would prune: test1', output)
        output = self.cmd('list', self.repository_location)
        self.assert_in('test1', output)
        self.assert_in('test2', output)
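        # --save-space must not change which archives get pruned; it is only expected to trade
        # speed for lower temporary space usage while deleting/compacting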
self.cmd('prune', '--save-space', self.repository_location, '--keep-daily=2') output = self.cmd('list', self.repository_location) self.assert_not_in('test1', output) self.assert_in('test2', output) def test_prune_repository_prefix(self): self.cmd('init', '--encryption=repokey', self.repository_location) self.cmd('create', self.repository_location + '::foo-2015-08-12-10:00', src_dir) self.cmd('create', self.repository_location + '::foo-2015-08-12-20:00', src_dir) self.cmd('create', self.repository_location + '::bar-2015-08-12-10:00', src_dir) self.cmd('create', self.repository_location + '::bar-2015-08-12-20:00', src_dir) output = self.cmd('prune', '--list', '--dry-run', self.repository_location, '--keep-daily=2', '--prefix=foo-') self.assert_in('Keeping archive: foo-2015-08-12-20:00', output) self.assert_in('Would prune: foo-2015-08-12-10:00', output) output = self.cmd('list', self.repository_location) self.assert_in('foo-2015-08-12-10:00', output) self.assert_in('foo-2015-08-12-20:00', output) self.assert_in('bar-2015-08-12-10:00', output) self.assert_in('bar-2015-08-12-20:00', output) self.cmd('prune', self.repository_location, '--keep-daily=2', '--prefix=foo-') output = self.cmd('list', self.repository_location) self.assert_not_in('foo-2015-08-12-10:00', output) self.assert_in('foo-2015-08-12-20:00', output) self.assert_in('bar-2015-08-12-10:00', output) self.assert_in('bar-2015-08-12-20:00', output) def test_prune_repository_glob(self): self.cmd('init', '--encryption=repokey', self.repository_location) self.cmd('create', self.repository_location + '::2015-08-12-10:00-foo', src_dir) self.cmd('create', self.repository_location + '::2015-08-12-20:00-foo', src_dir) self.cmd('create', self.repository_location + '::2015-08-12-10:00-bar', src_dir) self.cmd('create', self.repository_location + '::2015-08-12-20:00-bar', src_dir) output = self.cmd('prune', '--list', '--dry-run', self.repository_location, '--keep-daily=2', '--glob-archives=2015-*-foo') self.assert_in('Keeping archive: 2015-08-12-20:00-foo', output) self.assert_in('Would prune: 2015-08-12-10:00-foo', output) output = self.cmd('list', self.repository_location) self.assert_in('2015-08-12-10:00-foo', output) self.assert_in('2015-08-12-20:00-foo', output) self.assert_in('2015-08-12-10:00-bar', output) self.assert_in('2015-08-12-20:00-bar', output) self.cmd('prune', self.repository_location, '--keep-daily=2', '--glob-archives=2015-*-foo') output = self.cmd('list', self.repository_location) self.assert_not_in('2015-08-12-10:00-foo', output) self.assert_in('2015-08-12-20:00-foo', output) self.assert_in('2015-08-12-10:00-bar', output) self.assert_in('2015-08-12-20:00-bar', output) def test_list_prefix(self): self.cmd('init', '--encryption=repokey', self.repository_location) self.cmd('create', self.repository_location + '::test-1', src_dir) self.cmd('create', self.repository_location + '::something-else-than-test-1', src_dir) self.cmd('create', self.repository_location + '::test-2', src_dir) output = self.cmd('list', '--prefix=test-', self.repository_location) self.assert_in('test-1', output) self.assert_in('test-2', output) self.assert_not_in('something-else', output) def test_list_format(self): self.cmd('init', '--encryption=repokey', self.repository_location) test_archive = self.repository_location + '::test' self.cmd('create', test_archive, src_dir) output_warn = self.cmd('list', '--list-format', '-', test_archive) self.assert_in('--list-format" has been deprecated.', output_warn) output_1 = self.cmd('list', test_archive) output_2 = 
self.cmd('list', '--format', '{mode} {user:6} {group:6} {size:8d} {mtime} {path}{extra}{NEWLINE}', test_archive) output_3 = self.cmd('list', '--format', '{mtime:%s} {path}{NL}', test_archive) self.assertEqual(output_1, output_2) self.assertNotEqual(output_1, output_3) def test_list_repository_format(self): self.cmd('init', '--encryption=repokey', self.repository_location) self.cmd('create', '--comment', 'comment 1', self.repository_location + '::test-1', src_dir) self.cmd('create', '--comment', 'comment 2', self.repository_location + '::test-2', src_dir) output_1 = self.cmd('list', self.repository_location) output_2 = self.cmd('list', '--format', '{archive:<36} {time} [{id}]{NL}', self.repository_location) self.assertEqual(output_1, output_2) output_1 = self.cmd('list', '--short', self.repository_location) self.assertEqual(output_1, 'test-1\ntest-2\n') output_1 = self.cmd('list', '--format', '{barchive}/', self.repository_location) self.assertEqual(output_1, 'test-1/test-2/') output_3 = self.cmd('list', '--format', '{name} {comment}{NL}', self.repository_location) self.assert_in('test-1 comment 1\n', output_3) self.assert_in('test-2 comment 2\n', output_3) def test_list_hash(self): self.create_regular_file('empty_file', size=0) self.create_regular_file('amb', contents=b'a' * 1000000) self.cmd('init', '--encryption=repokey', self.repository_location) test_archive = self.repository_location + '::test' self.cmd('create', test_archive, 'input') output = self.cmd('list', '--format', '{sha256} {path}{NL}', test_archive) assert "cdc76e5c9914fb9281a1c7e284d73e67f1809a48a497200e046d39ccc7112cd0 input/amb" in output assert "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855 input/empty_file" in output def test_list_chunk_counts(self): self.create_regular_file('empty_file', size=0) self.create_regular_file('two_chunks') with open(os.path.join(self.input_path, 'two_chunks'), 'wb') as fd: fd.write(b'abba' * 2000000) fd.write(b'baab' * 2000000) self.cmd('init', '--encryption=repokey', self.repository_location) test_archive = self.repository_location + '::test' self.cmd('create', test_archive, 'input') output = self.cmd('list', '--format', '{num_chunks} {unique_chunks} {path}{NL}', test_archive) assert "0 0 input/empty_file" in output assert "2 2 input/two_chunks" in output def test_list_size(self): self.create_regular_file('compressible_file', size=10000) self.cmd('init', '--encryption=repokey', self.repository_location) test_archive = self.repository_location + '::test' self.cmd('create', '-C', 'lz4', test_archive, 'input') output = self.cmd('list', '--format', '{size} {csize} {dsize} {dcsize} {path}{NL}', test_archive) size, csize, dsize, dcsize, path = output.split("\n")[1].split(" ") assert int(csize) < int(size) assert int(dcsize) < int(dsize) assert int(dsize) <= int(size) assert int(dcsize) <= int(csize) def test_list_json(self): self.create_regular_file('file1', size=1024 * 80) self.cmd('init', '--encryption=repokey', self.repository_location) self.cmd('create', self.repository_location + '::test', 'input') list_repo = json.loads(self.cmd('list', '--json', self.repository_location)) repository = list_repo['repository'] assert len(repository['id']) == 64 assert datetime.strptime(repository['last_modified'], ISO_FORMAT) # must not raise assert list_repo['encryption']['mode'] == 'repokey' assert 'keyfile' not in list_repo['encryption'] archive0 = list_repo['archives'][0] assert datetime.strptime(archive0['time'], ISO_FORMAT) # must not raise list_archive = self.cmd('list', 
'--json-lines', self.repository_location + '::test') items = [json.loads(s) for s in list_archive.splitlines()] assert len(items) == 2 file1 = items[1] assert file1['path'] == 'input/file1' assert file1['size'] == 81920 assert datetime.strptime(file1['mtime'], ISO_FORMAT) # must not raise list_archive = self.cmd('list', '--json-lines', '--format={sha256}', self.repository_location + '::test') items = [json.loads(s) for s in list_archive.splitlines()] assert len(items) == 2 file1 = items[1] assert file1['path'] == 'input/file1' assert file1['sha256'] == 'b2915eb69f260d8d3c25249195f2c8f4f716ea82ec760ae929732c0262442b2b' def test_list_json_args(self): self.cmd('init', '--encryption=repokey', self.repository_location) self.cmd('list', '--json-lines', self.repository_location, exit_code=2) self.cmd('list', '--json', self.repository_location + '::archive', exit_code=2) def test_log_json(self): self.create_test_files() self.cmd('init', '--encryption=repokey', self.repository_location) log = self.cmd('create', '--log-json', self.repository_location + '::test', 'input', '--list', '--debug') messages = {} # type -> message, one of each kind for line in log.splitlines(): msg = json.loads(line) messages[msg['type']] = msg file_status = messages['file_status'] assert 'status' in file_status assert file_status['path'].startswith('input') log_message = messages['log_message'] assert isinstance(log_message['time'], float) assert log_message['levelname'] == 'DEBUG' # there should only be DEBUG messages assert isinstance(log_message['message'], str) def test_debug_profile(self): self.create_test_files() self.cmd('init', '--encryption=repokey', self.repository_location) self.cmd('create', self.repository_location + '::test', 'input', '--debug-profile=create.prof') self.cmd('debug', 'convert-profile', 'create.prof', 'create.pyprof') stats = pstats.Stats('create.pyprof') stats.strip_dirs() stats.sort_stats('cumtime') self.cmd('create', self.repository_location + '::test2', 'input', '--debug-profile=create.pyprof') stats = pstats.Stats('create.pyprof') # Only do this on trusted data! 
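        # (the converted .pyprof file is loaded via marshal, which must not be used on untrusted input)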
stats.strip_dirs() stats.sort_stats('cumtime') def test_common_options(self): self.create_test_files() self.cmd('init', '--encryption=repokey', self.repository_location) log = self.cmd('--debug', 'create', self.repository_location + '::test', 'input') assert 'security: read previous location' in log def _get_sizes(self, compression, compressible, size=10000): if compressible: contents = b'X' * size else: contents = os.urandom(size) self.create_regular_file('file', contents=contents) self.cmd('init', '--encryption=none', self.repository_location) archive = self.repository_location + '::test' self.cmd('create', '-C', compression, archive, 'input') output = self.cmd('list', '--format', '{size} {csize} {path}{NL}', archive) size, csize, path = output.split("\n")[1].split(" ") return int(size), int(csize) def test_compression_none_compressible(self): size, csize = self._get_sizes('none', compressible=True) assert csize >= size assert csize == size + 3 def test_compression_none_uncompressible(self): size, csize = self._get_sizes('none', compressible=False) assert csize >= size assert csize == size + 3 def test_compression_zlib_compressible(self): size, csize = self._get_sizes('zlib', compressible=True) assert csize < size * 0.1 assert csize == 35 def test_compression_zlib_uncompressible(self): size, csize = self._get_sizes('zlib', compressible=False) assert csize >= size def test_compression_auto_compressible(self): size, csize = self._get_sizes('auto,zlib', compressible=True) assert csize < size * 0.1 assert csize == 35 # same as compression 'zlib' def test_compression_auto_uncompressible(self): size, csize = self._get_sizes('auto,zlib', compressible=False) assert csize >= size assert csize == size + 3 # same as compression 'none' def test_compression_lz4_compressible(self): size, csize = self._get_sizes('lz4', compressible=True) assert csize < size * 0.1 def test_compression_lz4_uncompressible(self): size, csize = self._get_sizes('lz4', compressible=False) assert csize >= size def test_compression_lzma_compressible(self): size, csize = self._get_sizes('lzma', compressible=True) assert csize < size * 0.1 def test_compression_lzma_uncompressible(self): size, csize = self._get_sizes('lzma', compressible=False) assert csize >= size def test_change_passphrase(self): self.cmd('init', '--encryption=repokey', self.repository_location) os.environ['BORG_NEW_PASSPHRASE'] = 'newpassphrase' # here we have both BORG_PASSPHRASE and BORG_NEW_PASSPHRASE set: self.cmd('change-passphrase', self.repository_location) os.environ['BORG_PASSPHRASE'] = 'newpassphrase' self.cmd('list', self.repository_location) def test_break_lock(self): self.cmd('init', '--encryption=repokey', self.repository_location) self.cmd('break-lock', self.repository_location) def test_usage(self): self.cmd() self.cmd('-h') def test_help(self): assert 'Borg' in self.cmd('help') assert 'patterns' in self.cmd('help', 'patterns') assert 'Initialize' in self.cmd('help', 'init') assert 'positional arguments' not in self.cmd('help', 'init', '--epilog-only') assert 'This command initializes' not in self.cmd('help', 'init', '--usage-only') @unittest.skipUnless(has_llfuse, 'llfuse not installed') def test_fuse(self): def has_noatime(some_file): atime_before = os.stat(some_file).st_atime_ns try: os.close(os.open(some_file, flags_noatime)) except PermissionError: return False else: atime_after = os.stat(some_file).st_atime_ns noatime_used = flags_noatime != flags_normal return noatime_used and atime_before == atime_after self.cmd('init', 
'--encryption=repokey', self.repository_location) self.create_test_files() have_noatime = has_noatime('input/file1') self.cmd('create', '--exclude-nodump', self.repository_location + '::archive', 'input') self.cmd('create', '--exclude-nodump', self.repository_location + '::archive2', 'input') if has_lchflags: # remove the file we did not backup, so input and output become equal os.remove(os.path.join('input', 'flagfile')) mountpoint = os.path.join(self.tmpdir, 'mountpoint') # mount the whole repository, archive contents shall show up in archivename subdirs of mountpoint: with self.fuse_mount(self.repository_location, mountpoint): # bsdflags are not supported by the FUSE mount # we also ignore xattrs here, they are tested separately self.assert_dirs_equal(self.input_path, os.path.join(mountpoint, 'archive', 'input'), ignore_bsdflags=True, ignore_xattrs=True) self.assert_dirs_equal(self.input_path, os.path.join(mountpoint, 'archive2', 'input'), ignore_bsdflags=True, ignore_xattrs=True) # mount only 1 archive, its contents shall show up directly in mountpoint: with self.fuse_mount(self.repository_location + '::archive', mountpoint): self.assert_dirs_equal(self.input_path, os.path.join(mountpoint, 'input'), ignore_bsdflags=True, ignore_xattrs=True) # regular file in_fn = 'input/file1' out_fn = os.path.join(mountpoint, 'input', 'file1') # stat sti1 = os.stat(in_fn) sto1 = os.stat(out_fn) assert sti1.st_mode == sto1.st_mode assert sti1.st_uid == sto1.st_uid assert sti1.st_gid == sto1.st_gid assert sti1.st_size == sto1.st_size if have_noatime: assert sti1.st_atime == sto1.st_atime assert sti1.st_ctime == sto1.st_ctime assert sti1.st_mtime == sto1.st_mtime if are_hardlinks_supported(): # note: there is another hardlink to this, see below assert sti1.st_nlink == sto1.st_nlink == 2 # read with open(in_fn, 'rb') as in_f, open(out_fn, 'rb') as out_f: assert in_f.read() == out_f.read() # hardlink (to 'input/file1') if are_hardlinks_supported(): in_fn = 'input/hardlink' out_fn = os.path.join(mountpoint, 'input', 'hardlink') sti2 = os.stat(in_fn) sto2 = os.stat(out_fn) assert sti2.st_nlink == sto2.st_nlink == 2 assert sto1.st_ino == sto2.st_ino # symlink if are_symlinks_supported(): in_fn = 'input/link1' out_fn = os.path.join(mountpoint, 'input', 'link1') sti = os.stat(in_fn, follow_symlinks=False) sto = os.stat(out_fn, follow_symlinks=False) assert sti.st_size == len('somewhere') assert sto.st_size == len('somewhere') assert stat.S_ISLNK(sti.st_mode) assert stat.S_ISLNK(sto.st_mode) assert os.readlink(in_fn) == os.readlink(out_fn) # FIFO if are_fifos_supported(): out_fn = os.path.join(mountpoint, 'input', 'fifo1') sto = os.stat(out_fn) assert stat.S_ISFIFO(sto.st_mode) # list/read xattrs try: in_fn = 'input/fusexattr' out_fn = os.path.join(mountpoint, 'input', 'fusexattr') if not xattr.XATTR_FAKEROOT and xattr.is_enabled(self.input_path): assert sorted(no_selinux(xattr.listxattr(out_fn))) == ['user.empty', 'user.foo', ] assert xattr.getxattr(out_fn, 'user.foo') == b'bar' # Special case: getxattr returns None (not b'') when reading an empty xattr. 
assert xattr.getxattr(out_fn, 'user.empty') is None else: assert no_selinux(xattr.listxattr(out_fn)) == [] try: xattr.getxattr(out_fn, 'user.foo') except OSError as e: assert e.errno == llfuse.ENOATTR else: assert False, "expected OSError(ENOATTR), but no error was raised" except OSError as err: if sys.platform.startswith(('freebsd', )) and err.errno == errno.ENOTSUP: # some systems have no xattr support on FUSE pass else: raise @unittest.skipUnless(has_llfuse, 'llfuse not installed') def test_fuse_versions_view(self): self.cmd('init', '--encryption=repokey', self.repository_location) self.create_regular_file('test', contents=b'first') if are_hardlinks_supported(): self.create_regular_file('hardlink1', contents=b'123456') os.link('input/hardlink1', 'input/hardlink2') os.link('input/hardlink1', 'input/hardlink3') self.cmd('create', self.repository_location + '::archive1', 'input') self.create_regular_file('test', contents=b'second') self.cmd('create', self.repository_location + '::archive2', 'input') mountpoint = os.path.join(self.tmpdir, 'mountpoint') # mount the whole repository, archive contents shall show up in versioned view: with self.fuse_mount(self.repository_location, mountpoint, '-o', 'versions'): path = os.path.join(mountpoint, 'input', 'test') # filename shows up as directory ... files = os.listdir(path) assert all(f.startswith('test.') for f in files) # ... with files test.xxxxx in there assert {b'first', b'second'} == {open(os.path.join(path, f), 'rb').read() for f in files} if are_hardlinks_supported(): hl1 = os.path.join(mountpoint, 'input', 'hardlink1', 'hardlink1.00001') hl2 = os.path.join(mountpoint, 'input', 'hardlink2', 'hardlink2.00001') hl3 = os.path.join(mountpoint, 'input', 'hardlink3', 'hardlink3.00001') assert os.stat(hl1).st_ino == os.stat(hl2).st_ino == os.stat(hl3).st_ino assert open(hl3, 'rb').read() == b'123456' # similar again, but exclude the hardlink master: with self.fuse_mount(self.repository_location, mountpoint, '-o', 'versions', '-e', 'input/hardlink1'): if are_hardlinks_supported(): hl2 = os.path.join(mountpoint, 'input', 'hardlink2', 'hardlink2.00001') hl3 = os.path.join(mountpoint, 'input', 'hardlink3', 'hardlink3.00001') assert os.stat(hl2).st_ino == os.stat(hl3).st_ino assert open(hl3, 'rb').read() == b'123456' @unittest.skipUnless(has_llfuse, 'llfuse not installed') def test_fuse_allow_damaged_files(self): self.cmd('init', '--encryption=repokey', self.repository_location) self.create_src_archive('archive') # Get rid of a chunk and repair it archive, repository = self.open_archive('archive') with repository: for item in archive.iter_items(): if item.path.endswith('testsuite/archiver.py'): repository.delete(item.chunks[-1].id) path = item.path # store full path for later break else: assert False # missed the file repository.commit() self.cmd('check', '--repair', self.repository_location, exit_code=0) mountpoint = os.path.join(self.tmpdir, 'mountpoint') with self.fuse_mount(self.repository_location + '::archive', mountpoint): with pytest.raises(OSError) as excinfo: open(os.path.join(mountpoint, path)) assert excinfo.value.errno == errno.EIO with self.fuse_mount(self.repository_location + '::archive', mountpoint, '-o', 'allow_damaged_files'): open(os.path.join(mountpoint, path)).close() @unittest.skipUnless(has_llfuse, 'llfuse not installed') def test_fuse_mount_options(self): self.cmd('init', '--encryption=repokey', self.repository_location) self.create_src_archive('arch11') self.create_src_archive('arch12') self.create_src_archive('arch21') 
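# note (added): four source archives across two name prefixes (arch1*, arch2*), so the --first/--last/--prefix mount options exercised below each select a distinct subset of archive names.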
self.create_src_archive('arch22') mountpoint = os.path.join(self.tmpdir, 'mountpoint') with self.fuse_mount(self.repository_location, mountpoint, '--first=2', '--sort=name'): assert sorted(os.listdir(os.path.join(mountpoint))) == ['arch11', 'arch12'] with self.fuse_mount(self.repository_location, mountpoint, '--last=2', '--sort=name'): assert sorted(os.listdir(os.path.join(mountpoint))) == ['arch21', 'arch22'] with self.fuse_mount(self.repository_location, mountpoint, '--prefix=arch1'): assert sorted(os.listdir(os.path.join(mountpoint))) == ['arch11', 'arch12'] with self.fuse_mount(self.repository_location, mountpoint, '--prefix=arch2'): assert sorted(os.listdir(os.path.join(mountpoint))) == ['arch21', 'arch22'] with self.fuse_mount(self.repository_location, mountpoint, '--prefix=arch'): assert sorted(os.listdir(os.path.join(mountpoint))) == ['arch11', 'arch12', 'arch21', 'arch22'] with self.fuse_mount(self.repository_location, mountpoint, '--prefix=nope'): assert sorted(os.listdir(os.path.join(mountpoint))) == [] def verify_aes_counter_uniqueness(self, method): seen = set() # Chunks already seen used = set() # counter values already used def verify_uniqueness(): with Repository(self.repository_path) as repository: for id, _ in repository.open_index(repository.get_transaction_id()).iteritems(): data = repository.get(id) hash = sha256(data).digest() if hash not in seen: seen.add(hash) num_blocks = num_aes_blocks(len(data) - 41) nonce = bytes_to_long(data[33:41]) for counter in range(nonce, nonce + num_blocks): self.assert_not_in(counter, used) used.add(counter) self.create_test_files() os.environ['BORG_PASSPHRASE'] = 'passphrase' self.cmd('init', '--encryption=' + method, self.repository_location) verify_uniqueness() self.cmd('create', self.repository_location + '::test', 'input') verify_uniqueness() self.cmd('create', self.repository_location + '::test.2', 'input') verify_uniqueness() self.cmd('delete', self.repository_location + '::test.2') verify_uniqueness() def test_aes_counter_uniqueness_keyfile(self): self.verify_aes_counter_uniqueness('keyfile') def test_aes_counter_uniqueness_passphrase(self): self.verify_aes_counter_uniqueness('repokey') def test_debug_dump_archive_items(self): self.create_test_files() self.cmd('init', '--encryption=repokey', self.repository_location) self.cmd('create', self.repository_location + '::test', 'input') with changedir('output'): output = self.cmd('debug', 'dump-archive-items', self.repository_location + '::test') output_dir = sorted(os.listdir('output')) assert len(output_dir) > 0 and output_dir[0].startswith('000000_') assert 'Done.' in output def test_debug_dump_repo_objs(self): self.create_test_files() self.cmd('init', '--encryption=repokey', self.repository_location) self.cmd('create', self.repository_location + '::test', 'input') with changedir('output'): output = self.cmd('debug', 'dump-repo-objs', self.repository_location) output_dir = sorted(os.listdir('output')) assert len(output_dir) > 0 and output_dir[0].startswith('00000000_') assert 'Done.' 
in output def test_debug_put_get_delete_obj(self): self.cmd('init', '--encryption=repokey', self.repository_location) data = b'some data' hexkey = sha256(data).hexdigest() self.create_regular_file('file', contents=data) output = self.cmd('debug', 'put-obj', self.repository_location, 'input/file') assert hexkey in output output = self.cmd('debug', 'get-obj', self.repository_location, hexkey, 'output/file') assert hexkey in output with open('output/file', 'rb') as f: data_read = f.read() assert data == data_read output = self.cmd('debug', 'delete-obj', self.repository_location, hexkey) assert "deleted" in output output = self.cmd('debug', 'delete-obj', self.repository_location, hexkey) assert "not found" in output output = self.cmd('debug', 'delete-obj', self.repository_location, 'invalid') assert "is invalid" in output def test_init_interrupt(self): def raise_eof(*args): raise EOFError with patch.object(KeyfileKeyBase, 'create', raise_eof): self.cmd('init', '--encryption=repokey', self.repository_location, exit_code=1) assert not os.path.exists(self.repository_location) def test_init_requires_encryption_option(self): self.cmd('init', self.repository_location, exit_code=2) def test_init_nested_repositories(self): self.cmd('init', '--encryption=repokey', self.repository_location) if self.FORK_DEFAULT: self.cmd('init', '--encryption=repokey', self.repository_location + '/nested', exit_code=2) else: with pytest.raises(Repository.AlreadyExists): self.cmd('init', '--encryption=repokey', self.repository_location + '/nested') def check_cache(self): # First run a regular borg check self.cmd('check', self.repository_location) # Then check that the cache on disk matches exactly what's in the repo. with self.open_repository() as repository: manifest, key = Manifest.load(repository, Manifest.NO_OPERATION_CHECK) with Cache(repository, key, manifest, sync=False) as cache: original_chunks = cache.chunks Cache.destroy(repository) with Cache(repository, key, manifest) as cache: correct_chunks = cache.chunks assert original_chunks is not correct_chunks seen = set() for id, (refcount, size, csize) in correct_chunks.iteritems(): o_refcount, o_size, o_csize = original_chunks[id] assert refcount == o_refcount assert size == o_size assert csize == o_csize seen.add(id) for id, (refcount, size, csize) in original_chunks.iteritems(): assert id in seen def test_check_cache(self): self.cmd('init', '--encryption=repokey', self.repository_location) self.cmd('create', self.repository_location + '::test', 'input') with self.open_repository() as repository: manifest, key = Manifest.load(repository, Manifest.NO_OPERATION_CHECK) with Cache(repository, key, manifest, sync=False) as cache: cache.begin_txn() cache.chunks.incref(list(cache.chunks.iteritems())[0][0]) cache.commit() with pytest.raises(AssertionError): self.check_cache() def test_recreate_target_rc(self): self.cmd('init', '--encryption=repokey', self.repository_location) output = self.cmd('recreate', self.repository_location, '--target=asdf', exit_code=2) assert 'Need to specify single archive' in output def test_recreate_target(self): self.create_test_files() self.cmd('init', '--encryption=repokey', self.repository_location) self.check_cache() archive = self.repository_location + '::test0' self.cmd('create', archive, 'input') self.check_cache() original_archive = self.cmd('list', self.repository_location) self.cmd('recreate', archive, 'input/dir2', '-e', 'input/dir2/file3', '--target=new-archive') self.check_cache() archives = self.cmd('list', 
self.repository_location) assert original_archive in archives assert 'new-archive' in archives archive = self.repository_location + '::new-archive' listing = self.cmd('list', '--short', archive) assert 'file1' not in listing assert 'dir2/file2' in listing assert 'dir2/file3' not in listing def test_recreate_basic(self): self.create_test_files() self.create_regular_file('dir2/file3', size=1024 * 80) self.cmd('init', '--encryption=repokey', self.repository_location) archive = self.repository_location + '::test0' self.cmd('create', archive, 'input') self.cmd('recreate', archive, 'input/dir2', '-e', 'input/dir2/file3') self.check_cache() listing = self.cmd('list', '--short', archive) assert 'file1' not in listing assert 'dir2/file2' in listing assert 'dir2/file3' not in listing @pytest.mark.skipif(not are_hardlinks_supported(), reason='hardlinks not supported') def test_recreate_subtree_hardlinks(self): # This is essentially the same problem set as in test_extract_hardlinks self._extract_hardlinks_setup() self.cmd('create', self.repository_location + '::test2', 'input') self.cmd('recreate', self.repository_location + '::test', 'input/dir1') self.check_cache() with changedir('output'): self.cmd('extract', self.repository_location + '::test') assert os.stat('input/dir1/hardlink').st_nlink == 2 assert os.stat('input/dir1/subdir/hardlink').st_nlink == 2 assert os.stat('input/dir1/aaaa').st_nlink == 2 assert os.stat('input/dir1/source2').st_nlink == 2 with changedir('output'): self.cmd('extract', self.repository_location + '::test2') assert os.stat('input/dir1/hardlink').st_nlink == 4 def test_recreate_rechunkify(self): with open(os.path.join(self.input_path, 'large_file'), 'wb') as fd: fd.write(b'a' * 280) fd.write(b'b' * 280) self.cmd('init', '--encryption=repokey', self.repository_location) self.cmd('create', '--chunker-params', '7,9,8,128', self.repository_location + '::test1', 'input') self.cmd('create', self.repository_location + '::test2', 'input', '--files-cache=disabled') list = self.cmd('list', self.repository_location + '::test1', 'input/large_file', '--format', '{num_chunks} {unique_chunks}') num_chunks, unique_chunks = map(int, list.split(' ')) # test1 and test2 do not deduplicate assert num_chunks == unique_chunks self.cmd('recreate', self.repository_location, '--chunker-params', 'default') self.check_cache() # test1 and test2 do deduplicate after recreate assert int(self.cmd('list', self.repository_location + '::test1', 'input/large_file', '--format={size}')) assert not int(self.cmd('list', self.repository_location + '::test1', 'input/large_file', '--format', '{unique_chunks}')) def test_recreate_recompress(self): self.create_regular_file('compressible', size=10000) self.cmd('init', '--encryption=repokey', self.repository_location) self.cmd('create', self.repository_location + '::test', 'input', '-C', 'none') file_list = self.cmd('list', self.repository_location + '::test', 'input/compressible', '--format', '{size} {csize} {sha256}') size, csize, sha256_before = file_list.split(' ') assert int(csize) >= int(size) # >= due to metadata overhead self.cmd('recreate', self.repository_location, '-C', 'lz4', '--recompress') self.check_cache() file_list = self.cmd('list', self.repository_location + '::test', 'input/compressible', '--format', '{size} {csize} {sha256}') size, csize, sha256_after = file_list.split(' ') assert int(csize) < int(size) assert sha256_before == sha256_after def test_recreate_timestamp(self): local_timezone = datetime.now(timezone(timedelta(0))).astimezone().tzinfo 
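# note (added): local_timezone is used below to convert the 1970-01-02 UTC reference into the local-time date that 'borg info' prints, so the Time (start)/(end) assertions hold in any timezone.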
self.create_test_files() self.cmd('init', '--encryption=repokey', self.repository_location) archive = self.repository_location + '::test0' self.cmd('create', archive, 'input') self.cmd('recreate', '--timestamp', "1970-01-02T00:00:00", '--comment', 'test', archive) info = self.cmd('info', archive).splitlines() dtime = datetime(1970, 1, 2) + local_timezone.utcoffset(None) s_time = dtime.strftime("%Y-%m-%d") assert any([re.search(r'Time \(start\).+ %s' % s_time, item) for item in info]) assert any([re.search(r'Time \(end\).+ %s' % s_time, item) for item in info]) def test_recreate_dry_run(self): self.create_regular_file('compressible', size=10000) self.cmd('init', '--encryption=repokey', self.repository_location) self.cmd('create', self.repository_location + '::test', 'input') archives_before = self.cmd('list', self.repository_location + '::test') self.cmd('recreate', self.repository_location, '-n', '-e', 'input/compressible') self.check_cache() archives_after = self.cmd('list', self.repository_location + '::test') assert archives_after == archives_before def test_recreate_skips_nothing_to_do(self): self.create_regular_file('file1', size=1024 * 80) self.cmd('init', '--encryption=repokey', self.repository_location) self.cmd('create', self.repository_location + '::test', 'input') info_before = self.cmd('info', self.repository_location + '::test') self.cmd('recreate', self.repository_location, '--chunker-params', 'default') self.check_cache() info_after = self.cmd('info', self.repository_location + '::test') assert info_before == info_after # includes archive ID def test_with_lock(self): self.cmd('init', '--encryption=repokey', self.repository_location) lock_path = os.path.join(self.repository_path, 'lock.exclusive') cmd = 'python3', '-c', 'import os, sys; sys.exit(42 if os.path.exists("%s") else 23)' % lock_path self.cmd('with-lock', self.repository_location, *cmd, fork=True, exit_code=42) def test_recreate_list_output(self): self.cmd('init', '--encryption=repokey', self.repository_location) self.create_regular_file('file1', size=0) self.create_regular_file('file2', size=0) self.create_regular_file('file3', size=0) self.create_regular_file('file4', size=0) self.create_regular_file('file5', size=0) self.cmd('create', self.repository_location + '::test', 'input') output = self.cmd('recreate', '--list', '--info', self.repository_location + '::test', '-e', 'input/file2') self.check_cache() self.assert_in("input/file1", output) self.assert_in("x input/file2", output) output = self.cmd('recreate', '--list', self.repository_location + '::test', '-e', 'input/file3') self.check_cache() self.assert_in("input/file1", output) self.assert_in("x input/file3", output) output = self.cmd('recreate', self.repository_location + '::test', '-e', 'input/file4') self.check_cache() self.assert_not_in("input/file1", output) self.assert_not_in("x input/file4", output) output = self.cmd('recreate', '--info', self.repository_location + '::test', '-e', 'input/file5') self.check_cache() self.assert_not_in("input/file1", output) self.assert_not_in("x input/file5", output) def test_bad_filters(self): self.cmd('init', '--encryption=repokey', self.repository_location) self.cmd('create', self.repository_location + '::test', 'input') self.cmd('delete', '--first', '1', '--last', '1', self.repository_location, fork=True, exit_code=2) def test_key_export_keyfile(self): export_file = self.output_path + '/exported' self.cmd('init', self.repository_location, '--encryption', 'keyfile') repo_id = 
self._extract_repository_id(self.repository_path) self.cmd('key', 'export', self.repository_location, export_file) with open(export_file, 'r') as fd: export_contents = fd.read() assert export_contents.startswith('BORG_KEY ' + bin_to_hex(repo_id) + '\n') key_file = self.keys_path + '/' + os.listdir(self.keys_path)[0] with open(key_file, 'r') as fd: key_contents = fd.read() assert key_contents == export_contents os.unlink(key_file) self.cmd('key', 'import', self.repository_location, export_file) with open(key_file, 'r') as fd: key_contents2 = fd.read() assert key_contents2 == key_contents def test_key_export_repokey(self): export_file = self.output_path + '/exported' self.cmd('init', self.repository_location, '--encryption', 'repokey') repo_id = self._extract_repository_id(self.repository_path) self.cmd('key', 'export', self.repository_location, export_file) with open(export_file, 'r') as fd: export_contents = fd.read() assert export_contents.startswith('BORG_KEY ' + bin_to_hex(repo_id) + '\n') with Repository(self.repository_path) as repository: repo_key = RepoKey(repository) repo_key.load(None, Passphrase.env_passphrase()) backup_key = KeyfileKey(key.TestKey.MockRepository()) backup_key.load(export_file, Passphrase.env_passphrase()) assert repo_key.enc_key == backup_key.enc_key with Repository(self.repository_path) as repository: repository.save_key(b'') self.cmd('key', 'import', self.repository_location, export_file) with Repository(self.repository_path) as repository: repo_key2 = RepoKey(repository) repo_key2.load(None, Passphrase.env_passphrase()) assert repo_key2.enc_key == repo_key2.enc_key def test_key_export_qr(self): export_file = self.output_path + '/exported.html' self.cmd('init', self.repository_location, '--encryption', 'repokey') repo_id = self._extract_repository_id(self.repository_path) self.cmd('key', 'export', '--qr-html', self.repository_location, export_file) with open(export_file, 'r', encoding='utf-8') as fd: export_contents = fd.read() assert bin_to_hex(repo_id) in export_contents assert export_contents.startswith('') assert export_contents.endswith('\n') def test_key_export_directory(self): export_directory = self.output_path + '/exported' os.mkdir(export_directory) self.cmd('init', self.repository_location, '--encryption', 'repokey') self.cmd('key', 'export', self.repository_location, export_directory, exit_code=EXIT_ERROR) def test_key_import_errors(self): export_file = self.output_path + '/exported' self.cmd('init', self.repository_location, '--encryption', 'keyfile') self.cmd('key', 'import', self.repository_location, export_file, exit_code=EXIT_ERROR) with open(export_file, 'w') as fd: fd.write('something not a key\n') if self.FORK_DEFAULT: self.cmd('key', 'import', self.repository_location, export_file, exit_code=2) else: with pytest.raises(NotABorgKeyFile): self.cmd('key', 'import', self.repository_location, export_file) with open(export_file, 'w') as fd: fd.write('BORG_KEY a0a0a0\n') if self.FORK_DEFAULT: self.cmd('key', 'import', self.repository_location, export_file, exit_code=2) else: with pytest.raises(RepoIdMismatch): self.cmd('key', 'import', self.repository_location, export_file) def test_key_export_paperkey(self): repo_id = 'e294423506da4e1ea76e8dcdf1a3919624ae3ae496fddf905610c351d3f09239' export_file = self.output_path + '/exported' self.cmd('init', self.repository_location, '--encryption', 'keyfile') self._set_repository_id(self.repository_path, unhexlify(repo_id)) key_file = self.keys_path + '/' + os.listdir(self.keys_path)[0] with open(key_file, 
'w') as fd: fd.write(KeyfileKey.FILE_ID + ' ' + repo_id + '\n') fd.write(b2a_base64(b'abcdefghijklmnopqrstu').decode()) self.cmd('key', 'export', '--paper', self.repository_location, export_file) with open(export_file, 'r') as fd: export_contents = fd.read() assert export_contents == """To restore key use borg key import --paper /path/to/repo BORG PAPER KEY v1 id: 2 / e29442 3506da 4e1ea7 / 25f62a 5a3d41 - 02 1: 616263 646566 676869 6a6b6c 6d6e6f 707172 - 6d 2: 737475 - 88 """ def test_key_import_paperkey(self): repo_id = 'e294423506da4e1ea76e8dcdf1a3919624ae3ae496fddf905610c351d3f09239' self.cmd('init', self.repository_location, '--encryption', 'keyfile') self._set_repository_id(self.repository_path, unhexlify(repo_id)) key_file = self.keys_path + '/' + os.listdir(self.keys_path)[0] with open(key_file, 'w') as fd: fd.write(KeyfileKey.FILE_ID + ' ' + repo_id + '\n') fd.write(b2a_base64(b'abcdefghijklmnopqrstu').decode()) typed_input = ( b'2 / e29442 3506da 4e1ea7 / 25f62a 5a3d41 02\n' # Forgot to type "-" b'2 / e29442 3506da 4e1ea7 25f62a 5a3d41 - 02\n' # Forgot to type second "/" b'2 / e29442 3506da 4e1ea7 / 25f62a 5a3d42 - 02\n' # Typo (..42 not ..41) b'2 / e29442 3506da 4e1ea7 / 25f62a 5a3d41 - 02\n' # Correct! Congratulations b'616263 646566 676869 6a6b6c 6d6e6f 707172 - 6d\n' b'\n\n' # Abort [yN] => N b'737475 88\n' # missing "-" b'73747i - 88\n' # typo b'73747 - 88\n' # missing nibble b'73 74 75 - 89\n' # line checksum mismatch b'00a1 - 88\n' # line hash collision - overall hash mismatch, have to start over b'2 / e29442 3506da 4e1ea7 / 25f62a 5a3d41 - 02\n' b'616263 646566 676869 6a6b6c 6d6e6f 707172 - 6d\n' b'73 74 75 - 88\n' ) # In case that this has to change, here is a quick way to find a colliding line hash: # # from hashlib import sha256 # hash_fn = lambda x: sha256(b'\x00\x02' + x).hexdigest()[:2] # for i in range(1000): # if hash_fn(i.to_bytes(2, byteorder='big')) == '88': # 88 = line hash # print(i.to_bytes(2, 'big')) # break self.cmd('key', 'import', '--paper', self.repository_location, input=typed_input) # Test abort paths typed_input = b'\ny\n' self.cmd('key', 'import', '--paper', self.repository_location, input=typed_input) typed_input = b'2 / e29442 3506da 4e1ea7 / 25f62a 5a3d41 - 02\n\ny\n' self.cmd('key', 'import', '--paper', self.repository_location, input=typed_input) def test_debug_dump_manifest(self): self.create_regular_file('file1', size=1024 * 80) self.cmd('init', '--encryption=repokey', self.repository_location) self.cmd('create', self.repository_location + '::test', 'input') dump_file = self.output_path + '/dump' output = self.cmd('debug', 'dump-manifest', self.repository_location, dump_file) assert output == "" with open(dump_file, "r") as f: result = json.load(f) assert 'archives' in result assert 'config' in result assert 'item_keys' in result assert 'timestamp' in result assert 'version' in result def test_debug_dump_archive(self): self.create_regular_file('file1', size=1024 * 80) self.cmd('init', '--encryption=repokey', self.repository_location) self.cmd('create', self.repository_location + '::test', 'input') dump_file = self.output_path + '/dump' output = self.cmd('debug', 'dump-archive', self.repository_location + "::test", dump_file) assert output == "" with open(dump_file, "r") as f: result = json.load(f) assert '_name' in result assert '_manifest_entry' in result assert '_meta' in result assert '_items' in result def test_debug_refcount_obj(self): self.cmd('init', '--encryption=repokey', self.repository_location) output = self.cmd('debug', 
'refcount-obj', self.repository_location, '0' * 64).strip() assert output == 'object 0000000000000000000000000000000000000000000000000000000000000000 not found [info from chunks cache].' create_json = json.loads(self.cmd('create', '--json', self.repository_location + '::test', 'input')) archive_id = create_json['archive']['id'] output = self.cmd('debug', 'refcount-obj', self.repository_location, archive_id).strip() assert output == 'object ' + archive_id + ' has 1 referrers [info from chunks cache].' # Invalid IDs do not abort or return an error output = self.cmd('debug', 'refcount-obj', self.repository_location, '124', 'xyza').strip() assert output == 'object id 124 is invalid.\nobject id xyza is invalid.' def test_debug_info(self): output = self.cmd('debug', 'info') assert 'CRC implementation' in output assert 'Python' in output def test_benchmark_crud(self): self.cmd('init', '--encryption=repokey', self.repository_location) with environment_variable(_BORG_BENCHMARK_CRUD_TEST='YES'): self.cmd('benchmark', 'crud', self.repository_location, self.input_path) def test_config(self): self.create_test_files() os.unlink('input/flagfile') self.cmd('init', '--encryption=repokey', self.repository_location) output = self.cmd('config', '--list', self.repository_location) self.assert_in('[repository]', output) self.assert_in('version', output) self.assert_in('segments_per_dir', output) self.assert_in('storage_quota', output) self.assert_in('append_only', output) self.assert_in('additional_free_space', output) self.assert_in('id', output) for cfg_key, cfg_value in [ ('additional_free_space', '2G'), ('repository.append_only', '1'), ]: output = self.cmd('config', self.repository_location, cfg_key) assert output == '0' + '\n' self.cmd('config', self.repository_location, cfg_key, cfg_value) output = self.cmd('config', self.repository_location, cfg_key) assert output == cfg_value + '\n' self.cmd('config', '--delete', self.repository_location, cfg_key) self.cmd('config', self.repository_location, cfg_key, exit_code=1) self.cmd('config', '--list', '--delete', self.repository_location, exit_code=2) self.cmd('config', self.repository_location, exit_code=2) self.cmd('config', self.repository_location, 'invalid-option', exit_code=1) requires_gnutar = pytest.mark.skipif(not have_gnutar(), reason='GNU tar must be installed for this test.') requires_gzip = pytest.mark.skipif(not shutil.which('gzip'), reason='gzip must be installed for this test.') @requires_gnutar def test_export_tar(self): self.create_test_files() os.unlink('input/flagfile') self.cmd('init', '--encryption=repokey', self.repository_location) self.cmd('create', self.repository_location + '::test', 'input') self.cmd('export-tar', self.repository_location + '::test', 'simple.tar', '--progress') with changedir('output'): # This probably assumes GNU tar. Note -p switch to extract permissions regardless of umask. 
subprocess.check_call(['tar', 'xpf', '../simple.tar', '--warning=no-timestamp']) self.assert_dirs_equal('input', 'output/input', ignore_bsdflags=True, ignore_xattrs=True, ignore_ns=True) @requires_gnutar @requires_gzip def test_export_tar_gz(self): if not shutil.which('gzip'): pytest.skip('gzip is not installed') self.create_test_files() os.unlink('input/flagfile') self.cmd('init', '--encryption=repokey', self.repository_location) self.cmd('create', self.repository_location + '::test', 'input') list = self.cmd('export-tar', self.repository_location + '::test', 'simple.tar.gz', '--list') assert 'input/file1\n' in list assert 'input/dir2\n' in list with changedir('output'): subprocess.check_call(['tar', 'xpf', '../simple.tar.gz', '--warning=no-timestamp']) self.assert_dirs_equal('input', 'output/input', ignore_bsdflags=True, ignore_xattrs=True, ignore_ns=True) @requires_gnutar def test_export_tar_strip_components(self): if not shutil.which('gzip'): pytest.skip('gzip is not installed') self.create_test_files() os.unlink('input/flagfile') self.cmd('init', '--encryption=repokey', self.repository_location) self.cmd('create', self.repository_location + '::test', 'input') list = self.cmd('export-tar', self.repository_location + '::test', 'simple.tar', '--strip-components=1', '--list') # --list's path are those before processing with --strip-components assert 'input/file1\n' in list assert 'input/dir2\n' in list with changedir('output'): subprocess.check_call(['tar', 'xpf', '../simple.tar', '--warning=no-timestamp']) self.assert_dirs_equal('input', 'output/', ignore_bsdflags=True, ignore_xattrs=True, ignore_ns=True) @requires_hardlinks @requires_gnutar def test_export_tar_strip_components_links(self): self._extract_hardlinks_setup() self.cmd('export-tar', self.repository_location + '::test', 'output.tar', '--strip-components=2') with changedir('output'): subprocess.check_call(['tar', 'xpf', '../output.tar', '--warning=no-timestamp']) assert os.stat('hardlink').st_nlink == 2 assert os.stat('subdir/hardlink').st_nlink == 2 assert os.stat('aaaa').st_nlink == 2 assert os.stat('source2').st_nlink == 2 @requires_hardlinks @requires_gnutar def test_extract_hardlinks_tar(self): self._extract_hardlinks_setup() self.cmd('export-tar', self.repository_location + '::test', 'output.tar', 'input/dir1') with changedir('output'): subprocess.check_call(['tar', 'xpf', '../output.tar', '--warning=no-timestamp']) assert os.stat('input/dir1/hardlink').st_nlink == 2 assert os.stat('input/dir1/subdir/hardlink').st_nlink == 2 assert os.stat('input/dir1/aaaa').st_nlink == 2 assert os.stat('input/dir1/source2').st_nlink == 2 def test_detect_attic_repo(self): path = make_attic_repo(self.repository_path) cmds = [ ['create', path + '::test', self.tmpdir], ['extract', path + '::test'], ['check', path], ['rename', path + '::test', 'newname'], ['list', path], ['delete', path], ['prune', path], ['info', path + '::test'], ['key', 'export', path, 'exported'], ['key', 'import', path, 'import'], ['change-passphrase', path], ['break-lock', path], ] for args in cmds: output = self.cmd(*args, fork=True, exit_code=2) assert 'Attic repository detected.' 
in output @unittest.skipUnless('binary' in BORG_EXES, 'no borg.exe available') class ArchiverTestCaseBinary(ArchiverTestCase): EXE = 'borg.exe' FORK_DEFAULT = True @unittest.skip('does not raise Exception, but sets rc==2') def test_init_parent_dirs(self): pass @unittest.skip('patches objects') def test_init_interrupt(self): pass @unittest.skip('patches objects') def test_extract_capabilities(self): pass @unittest.skip('patches objects') def test_extract_xattrs_errors(self): pass @unittest.skip('test_basic_functionality seems incompatible with fakeroot and/or the binary.') def test_basic_functionality(self): pass @unittest.skip('test_overwrite seems incompatible with fakeroot and/or the binary.') def test_overwrite(self): pass def test_fuse(self): if fakeroot_detected(): unittest.skip('test_fuse with the binary is not compatible with fakeroot') else: super().test_fuse() class ArchiverCheckTestCase(ArchiverTestCaseBase): def setUp(self): super().setUp() with patch.object(ChunkBuffer, 'BUFFER_SIZE', 10): self.cmd('init', '--encryption=repokey', self.repository_location) self.create_src_archive('archive1') self.create_src_archive('archive2') def test_check_usage(self): output = self.cmd('check', '-v', '--progress', self.repository_location, exit_code=0) self.assert_in('Starting repository check', output) self.assert_in('Starting archive consistency check', output) self.assert_in('Checking segments', output) # reset logging to new process default to avoid need for fork=True on next check logging.getLogger('borg.output.progress').setLevel(logging.NOTSET) output = self.cmd('check', '-v', '--repository-only', self.repository_location, exit_code=0) self.assert_in('Starting repository check', output) self.assert_not_in('Starting archive consistency check', output) self.assert_not_in('Checking segments', output) output = self.cmd('check', '-v', '--archives-only', self.repository_location, exit_code=0) self.assert_not_in('Starting repository check', output) self.assert_in('Starting archive consistency check', output) output = self.cmd('check', '-v', '--archives-only', '--prefix=archive2', self.repository_location, exit_code=0) self.assert_not_in('archive1', output) output = self.cmd('check', '-v', '--archives-only', '--first=1', self.repository_location, exit_code=0) self.assert_in('archive1', output) self.assert_not_in('archive2', output) output = self.cmd('check', '-v', '--archives-only', '--last=1', self.repository_location, exit_code=0) self.assert_not_in('archive1', output) self.assert_in('archive2', output) def test_missing_file_chunk(self): archive, repository = self.open_archive('archive1') with repository: for item in archive.iter_items(): if item.path.endswith('testsuite/archiver.py'): valid_chunks = item.chunks killed_chunk = valid_chunks[-1] repository.delete(killed_chunk.id) break else: self.fail('should not happen') repository.commit() self.cmd('check', self.repository_location, exit_code=1) output = self.cmd('check', '--repair', self.repository_location, exit_code=0) self.assert_in('New missing file chunk detected', output) self.cmd('check', self.repository_location, exit_code=0) output = self.cmd('list', '--format={health}#{path}{LF}', self.repository_location + '::archive1', exit_code=0) self.assert_in('broken#', output) # check that the file in the old archives has now a different chunk list without the killed chunk for archive_name in ('archive1', 'archive2'): archive, repository = self.open_archive(archive_name) with repository: for item in archive.iter_items(): if 
item.path.endswith('testsuite/archiver.py'): self.assert_not_equal(valid_chunks, item.chunks) self.assert_not_in(killed_chunk, item.chunks) break else: self.fail('should not happen') # do a fresh backup (that will include the killed chunk) with patch.object(ChunkBuffer, 'BUFFER_SIZE', 10): self.create_src_archive('archive3') # check should be able to heal the file now: output = self.cmd('check', '-v', '--repair', self.repository_location, exit_code=0) self.assert_in('Healed previously missing file chunk', output) self.assert_in('testsuite/archiver.py: Completely healed previously damaged file!', output) # check that the file in the old archives has the correct chunks again for archive_name in ('archive1', 'archive2'): archive, repository = self.open_archive(archive_name) with repository: for item in archive.iter_items(): if item.path.endswith('testsuite/archiver.py'): self.assert_equal(valid_chunks, item.chunks) break else: self.fail('should not happen') # list is also all-healthy again output = self.cmd('list', '--format={health}#{path}{LF}', self.repository_location + '::archive1', exit_code=0) self.assert_not_in('broken#', output) def test_missing_archive_item_chunk(self): archive, repository = self.open_archive('archive1') with repository: repository.delete(archive.metadata.items[0]) repository.commit() self.cmd('check', self.repository_location, exit_code=1) self.cmd('check', '--repair', self.repository_location, exit_code=0) self.cmd('check', self.repository_location, exit_code=0) def test_missing_archive_metadata(self): archive, repository = self.open_archive('archive1') with repository: repository.delete(archive.id) repository.commit() self.cmd('check', self.repository_location, exit_code=1) self.cmd('check', '--repair', self.repository_location, exit_code=0) self.cmd('check', self.repository_location, exit_code=0) def test_missing_manifest(self): archive, repository = self.open_archive('archive1') with repository: repository.delete(Manifest.MANIFEST_ID) repository.commit() self.cmd('check', self.repository_location, exit_code=1) output = self.cmd('check', '-v', '--repair', self.repository_location, exit_code=0) self.assert_in('archive1', output) self.assert_in('archive2', output) self.cmd('check', self.repository_location, exit_code=0) def test_corrupted_manifest(self): archive, repository = self.open_archive('archive1') with repository: manifest = repository.get(Manifest.MANIFEST_ID) corrupted_manifest = manifest + b'corrupted!' repository.put(Manifest.MANIFEST_ID, corrupted_manifest) repository.commit() self.cmd('check', self.repository_location, exit_code=1) output = self.cmd('check', '-v', '--repair', self.repository_location, exit_code=0) self.assert_in('archive1', output) self.assert_in('archive2', output) self.cmd('check', self.repository_location, exit_code=0) def test_manifest_rebuild_corrupted_chunk(self): archive, repository = self.open_archive('archive1') with repository: manifest = repository.get(Manifest.MANIFEST_ID) corrupted_manifest = manifest + b'corrupted!' repository.put(Manifest.MANIFEST_ID, corrupted_manifest) chunk = repository.get(archive.id) corrupted_chunk = chunk + b'corrupted!' 
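# note (added): at this point both the manifest and archive1's metadata chunk are corrupted; the repair below can only rebuild the manifest from archive2, which is what the output assertion checks.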
repository.put(archive.id, corrupted_chunk) repository.commit() self.cmd('check', self.repository_location, exit_code=1) output = self.cmd('check', '-v', '--repair', self.repository_location, exit_code=0) self.assert_in('archive2', output) self.cmd('check', self.repository_location, exit_code=0) def test_manifest_rebuild_duplicate_archive(self): archive, repository = self.open_archive('archive1') key = archive.key with repository: manifest = repository.get(Manifest.MANIFEST_ID) corrupted_manifest = manifest + b'corrupted!' repository.put(Manifest.MANIFEST_ID, corrupted_manifest) archive = msgpack.packb({ 'cmdline': [], 'items': [], 'hostname': 'foo', 'username': 'bar', 'name': 'archive1', 'time': '2016-12-15T18:49:51.849711', 'version': 1, }) archive_id = key.id_hash(archive) repository.put(archive_id, key.encrypt(archive)) repository.commit() self.cmd('check', self.repository_location, exit_code=1) self.cmd('check', '--repair', self.repository_location, exit_code=0) output = self.cmd('list', self.repository_location) self.assert_in('archive1', output) self.assert_in('archive1.1', output) self.assert_in('archive2', output) def test_extra_chunks(self): self.cmd('check', self.repository_location, exit_code=0) with Repository(self.repository_location, exclusive=True) as repository: repository.put(b'01234567890123456789012345678901', b'xxxx') repository.commit() self.cmd('check', self.repository_location, exit_code=1) self.cmd('check', self.repository_location, exit_code=1) self.cmd('check', '--repair', self.repository_location, exit_code=0) self.cmd('check', self.repository_location, exit_code=0) self.cmd('extract', '--dry-run', self.repository_location + '::archive1', exit_code=0) def _test_verify_data(self, *init_args): shutil.rmtree(self.repository_path) self.cmd('init', self.repository_location, *init_args) self.create_src_archive('archive1') archive, repository = self.open_archive('archive1') with repository: for item in archive.iter_items(): if item.path.endswith('testsuite/archiver.py'): chunk = item.chunks[-1] data = repository.get(chunk.id) + b'1234' repository.put(chunk.id, data) break repository.commit() self.cmd('check', self.repository_location, exit_code=0) output = self.cmd('check', '--verify-data', self.repository_location, exit_code=1) assert bin_to_hex(chunk.id) + ', integrity error' in output # repair (heal is tested in another test) output = self.cmd('check', '--repair', '--verify-data', self.repository_location, exit_code=0) assert bin_to_hex(chunk.id) + ', integrity error' in output assert 'testsuite/archiver.py: New missing file chunk detected' in output def test_verify_data(self): self._test_verify_data('--encryption', 'repokey') def test_verify_data_unencrypted(self): self._test_verify_data('--encryption', 'none') def test_empty_repository(self): with Repository(self.repository_location, exclusive=True) as repository: for id_ in repository.list(): repository.delete(id_) repository.commit() self.cmd('check', self.repository_location, exit_code=1) def test_attic013_acl_bug(self): # Attic up to release 0.13 contained a bug where every item unintentionally received # a b'acl'=None key-value pair. # This bug can still live on in Borg repositories (through borg upgrade). class Attic013Item: def as_dict(self): return { # These are required b'path': '1234', b'mtime': 0, b'mode': 0, b'user': b'0', b'group': b'0', b'uid': 0, b'gid': 0, # acl is the offending key. 
b'acl': None, } archive, repository = self.open_archive('archive1') with repository: manifest, key = Manifest.load(repository, Manifest.NO_OPERATION_CHECK) with Cache(repository, key, manifest) as cache: archive = Archive(repository, key, manifest, '0.13', cache=cache, create=True) archive.items_buffer.add(Attic013Item()) archive.save() self.cmd('check', self.repository_location, exit_code=0) self.cmd('list', self.repository_location + '::0.13', exit_code=0) class ManifestAuthenticationTest(ArchiverTestCaseBase): def spoof_manifest(self, repository): with repository: _, key = Manifest.load(repository, Manifest.NO_OPERATION_CHECK) repository.put(Manifest.MANIFEST_ID, key.encrypt(msgpack.packb({ 'version': 1, 'archives': {}, 'config': {}, 'timestamp': (datetime.utcnow() + timedelta(days=1)).strftime(ISO_FORMAT), }))) repository.commit() def test_fresh_init_tam_required(self): self.cmd('init', '--encryption=repokey', self.repository_location) repository = Repository(self.repository_path, exclusive=True) with repository: manifest, key = Manifest.load(repository, Manifest.NO_OPERATION_CHECK) repository.put(Manifest.MANIFEST_ID, key.encrypt(msgpack.packb({ 'version': 1, 'archives': {}, 'timestamp': (datetime.utcnow() + timedelta(days=1)).strftime(ISO_FORMAT), }))) repository.commit() with pytest.raises(TAMRequiredError): self.cmd('list', self.repository_location) def test_not_required(self): self.cmd('init', '--encryption=repokey', self.repository_location) self.create_src_archive('archive1234') repository = Repository(self.repository_path, exclusive=True) with repository: shutil.rmtree(get_security_dir(bin_to_hex(repository.id))) _, key = Manifest.load(repository, Manifest.NO_OPERATION_CHECK) key.tam_required = False key.change_passphrase(key._passphrase) manifest = msgpack.unpackb(key.decrypt(None, repository.get(Manifest.MANIFEST_ID))) del manifest[b'tam'] repository.put(Manifest.MANIFEST_ID, key.encrypt(msgpack.packb(manifest))) repository.commit() output = self.cmd('list', '--debug', self.repository_location) assert 'archive1234' in output assert 'TAM not found and not required' in output # Run upgrade self.cmd('upgrade', '--tam', self.repository_location) # Manifest must be authenticated now output = self.cmd('list', '--debug', self.repository_location) assert 'archive1234' in output assert 'TAM-verified manifest' in output # Try to spoof / modify pre-1.0.9 self.spoof_manifest(repository) # Fails with pytest.raises(TAMRequiredError): self.cmd('list', self.repository_location) # Force upgrade self.cmd('upgrade', '--tam', '--force', self.repository_location) self.cmd('list', self.repository_location) def test_disable(self): self.cmd('init', '--encryption=repokey', self.repository_location) self.create_src_archive('archive1234') self.cmd('upgrade', '--disable-tam', self.repository_location) repository = Repository(self.repository_path, exclusive=True) self.spoof_manifest(repository) assert not self.cmd('list', self.repository_location) def test_disable2(self): self.cmd('init', '--encryption=repokey', self.repository_location) self.create_src_archive('archive1234') repository = Repository(self.repository_path, exclusive=True) self.spoof_manifest(repository) self.cmd('upgrade', '--disable-tam', self.repository_location) assert not self.cmd('list', self.repository_location) class RemoteArchiverTestCase(ArchiverTestCase): prefix = '__testsuite__:' def open_repository(self): return RemoteRepository(Location(self.repository_location)) def test_remote_repo_restrict_to_path(self): # restricted to 
repo directory itself: with patch.object(RemoteRepository, 'extra_test_args', ['--restrict-to-path', self.repository_path]): self.cmd('init', '--encryption=repokey', self.repository_location) # restricted to repo directory itself, fail for other directories with same prefix: with patch.object(RemoteRepository, 'extra_test_args', ['--restrict-to-path', self.repository_path]): with pytest.raises(PathNotAllowed): self.cmd('init', '--encryption=repokey', self.repository_location + '_0') # restricted to a completely different path: with patch.object(RemoteRepository, 'extra_test_args', ['--restrict-to-path', '/foo']): with pytest.raises(PathNotAllowed): self.cmd('init', '--encryption=repokey', self.repository_location + '_1') path_prefix = os.path.dirname(self.repository_path) # restrict to repo directory's parent directory: with patch.object(RemoteRepository, 'extra_test_args', ['--restrict-to-path', path_prefix]): self.cmd('init', '--encryption=repokey', self.repository_location + '_2') # restrict to repo directory's parent directory and another directory: with patch.object(RemoteRepository, 'extra_test_args', ['--restrict-to-path', '/foo', '--restrict-to-path', path_prefix]): self.cmd('init', '--encryption=repokey', self.repository_location + '_3') def test_remote_repo_restrict_to_repository(self): # restricted to repo directory itself: with patch.object(RemoteRepository, 'extra_test_args', ['--restrict-to-repository', self.repository_path]): self.cmd('init', '--encryption=repokey', self.repository_location) parent_path = os.path.join(self.repository_path, '..') with patch.object(RemoteRepository, 'extra_test_args', ['--restrict-to-repository', parent_path]): with pytest.raises(PathNotAllowed): self.cmd('init', '--encryption=repokey', self.repository_location) @unittest.skip('only works locally') def test_debug_put_get_delete_obj(self): pass @unittest.skip('only works locally') def test_config(self): pass def test_strip_components_doesnt_leak(self): self.cmd('init', '--encryption=repokey', self.repository_location) self.create_regular_file('dir/file', contents=b"test file contents 1") self.create_regular_file('dir/file2', contents=b"test file contents 2") self.create_regular_file('skipped-file1', contents=b"test file contents 3") self.create_regular_file('skipped-file2', contents=b"test file contents 4") self.create_regular_file('skipped-file3', contents=b"test file contents 5") self.cmd('create', self.repository_location + '::test', 'input') marker = 'cached responses left in RemoteRepository' with changedir('output'): res = self.cmd('extract', "--debug", self.repository_location + '::test', '--strip-components', '3') self.assert_true(marker not in res) with self.assert_creates_file('file'): res = self.cmd('extract', "--debug", self.repository_location + '::test', '--strip-components', '2') self.assert_true(marker not in res) with self.assert_creates_file('dir/file'): res = self.cmd('extract', "--debug", self.repository_location + '::test', '--strip-components', '1') self.assert_true(marker not in res) with self.assert_creates_file('input/dir/file'): res = self.cmd('extract', "--debug", self.repository_location + '::test', '--strip-components', '0') self.assert_true(marker not in res) class ArchiverCorruptionTestCase(ArchiverTestCaseBase): def setUp(self): super().setUp() self.create_test_files() self.cmd('init', '--encryption=repokey', self.repository_location) self.cache_path = json.loads(self.cmd('info', self.repository_location, '--json'))['cache']['path'] def corrupt(self, file, 
amount=1): with open(file, 'r+b') as fd: fd.seek(-amount, io.SEEK_END) corrupted = bytes(255-c for c in fd.read(amount)) fd.seek(-amount, io.SEEK_END) fd.write(corrupted) def test_cache_chunks(self): self.corrupt(os.path.join(self.cache_path, 'chunks')) if self.FORK_DEFAULT: out = self.cmd('info', self.repository_location, exit_code=2) assert 'failed integrity check' in out else: with pytest.raises(FileIntegrityError): self.cmd('info', self.repository_location) def test_cache_files(self): self.cmd('create', self.repository_location + '::test', 'input') self.corrupt(os.path.join(self.cache_path, 'files')) out = self.cmd('create', self.repository_location + '::test1', 'input') # borg warns about the corrupt files cache, but then continues without files cache. assert 'files cache is corrupted' in out def test_chunks_archive(self): self.cmd('create', self.repository_location + '::test1', 'input') # Find ID of test1 so we can corrupt it later :) target_id = self.cmd('list', self.repository_location, '--format={id}{LF}').strip() self.cmd('create', self.repository_location + '::test2', 'input') # Force cache sync, creating archive chunks of test1 and test2 in chunks.archive.d self.cmd('delete', '--cache-only', self.repository_location) self.cmd('info', self.repository_location, '--json') chunks_archive = os.path.join(self.cache_path, 'chunks.archive.d') assert len(os.listdir(chunks_archive)) == 4 # two archives, one chunks cache and one .integrity file each self.corrupt(os.path.join(chunks_archive, target_id + '.compact')) # Trigger cache sync by changing the manifest ID in the cache config config_path = os.path.join(self.cache_path, 'config') config = ConfigParser(interpolation=None) config.read(config_path) config.set('cache', 'manifest', bin_to_hex(bytes(32))) with open(config_path, 'w') as fd: config.write(fd) # Cache sync notices corrupted archive chunks, but automatically recovers. out = self.cmd('create', '-v', self.repository_location + '::test3', 'input', exit_code=1) assert 'Reading cached archive chunk index for test1' in out assert 'Cached archive chunk index of test1 is corrupted' in out assert 'Fetching and building archive index for test1' in out def test_old_version_interfered(self): # Modify the main manifest ID without touching the manifest ID in the integrity section. # This happens if a version without integrity checking modifies the cache. config_path = os.path.join(self.cache_path, 'config') config = ConfigParser(interpolation=None) config.read(config_path) config.set('cache', 'manifest', bin_to_hex(bytes(32))) with open(config_path, 'w') as fd: config.write(fd) out = self.cmd('info', self.repository_location) assert 'Cache integrity data not available: old Borg version modified the cache.' 
in out class DiffArchiverTestCase(ArchiverTestCaseBase): def test_basic_functionality(self): # Initialize test folder self.create_test_files() self.cmd('init', '--encryption=repokey', self.repository_location) # Setup files for the first snapshot self.create_regular_file('file_unchanged', size=128) self.create_regular_file('file_removed', size=256) self.create_regular_file('file_removed2', size=512) self.create_regular_file('file_replaced', size=1024) os.mkdir('input/dir_replaced_with_file') os.chmod('input/dir_replaced_with_file', stat.S_IFDIR | 0o755) os.mkdir('input/dir_removed') if are_symlinks_supported(): os.mkdir('input/dir_replaced_with_link') os.symlink('input/dir_replaced_with_file', 'input/link_changed') os.symlink('input/file_unchanged', 'input/link_removed') os.symlink('input/file_removed2', 'input/link_target_removed') os.symlink('input/empty', 'input/link_target_contents_changed') os.symlink('input/empty', 'input/link_replaced_by_file') if are_hardlinks_supported(): os.link('input/file_replaced', 'input/hardlink_target_replaced') os.link('input/empty', 'input/hardlink_contents_changed') os.link('input/file_removed', 'input/hardlink_removed') os.link('input/file_removed2', 'input/hardlink_target_removed') # Create the first snapshot self.cmd('create', self.repository_location + '::test0', 'input') # Setup files for the second snapshot self.create_regular_file('file_added', size=2048) self.create_regular_file('file_empty_added', size=0) os.unlink('input/file_replaced') self.create_regular_file('file_replaced', contents=b'0' * 4096) os.unlink('input/file_removed') os.unlink('input/file_removed2') os.rmdir('input/dir_replaced_with_file') self.create_regular_file('dir_replaced_with_file', size=8192) os.chmod('input/dir_replaced_with_file', stat.S_IFREG | 0o755) os.mkdir('input/dir_added') os.rmdir('input/dir_removed') if are_symlinks_supported(): os.rmdir('input/dir_replaced_with_link') os.symlink('input/dir_added', 'input/dir_replaced_with_link') os.unlink('input/link_changed') os.symlink('input/dir_added', 'input/link_changed') os.symlink('input/dir_added', 'input/link_added') os.unlink('input/link_replaced_by_file') self.create_regular_file('link_replaced_by_file', size=16384) os.unlink('input/link_removed') if are_hardlinks_supported(): os.unlink('input/hardlink_removed') os.link('input/file_added', 'input/hardlink_added') with open('input/empty', 'ab') as fd: fd.write(b'appended_data') # Create the second snapshot self.cmd('create', self.repository_location + '::test1a', 'input') self.cmd('create', '--chunker-params', '16,18,17,4095', self.repository_location + '::test1b', 'input') def do_asserts(output, can_compare_ids): # File contents changed (deleted and replaced with a new file) change = 'B' if can_compare_ids else '{:<19}'.format('modified') assert 'file_replaced' in output # added to debug #3494 assert '{} input/file_replaced'.format(change) in output # File unchanged assert 'input/file_unchanged' not in output # Directory replaced with a regular file if 'BORG_TESTS_IGNORE_MODES' not in os.environ: assert '[drwxr-xr-x -> -rwxr-xr-x] input/dir_replaced_with_file' in output # Basic directory cases assert 'added directory input/dir_added' in output assert 'removed directory input/dir_removed' in output if are_symlinks_supported(): # Basic symlink cases assert 'changed link input/link_changed' in output assert 'added link input/link_added' in output assert 'removed link input/link_removed' in output # Symlink replacing or being replaced assert '] 
input/dir_replaced_with_link' in output assert '] input/link_replaced_by_file' in output # Symlink target removed. Should not affect the symlink at all. assert 'input/link_target_removed' not in output # The inode has two links and the file contents changed. Borg # should notice the changes in both links. However, the symlink # pointing to the file is not changed. change = '0 B' if can_compare_ids else '{:<19}'.format('modified') assert '{} input/empty'.format(change) in output if are_hardlinks_supported(): assert '{} input/hardlink_contents_changed'.format(change) in output if are_symlinks_supported(): assert 'input/link_target_contents_changed' not in output # Added a new file and a hard link to it. Both links to the same # inode should appear as separate files. assert 'added 2.05 kB input/file_added' in output if are_hardlinks_supported(): assert 'added 2.05 kB input/hardlink_added' in output # check if a diff between non-existent and empty new file is found assert 'added 0 B input/file_empty_added' in output # The inode has two links and both of them are deleted. They should # appear as two deleted files. assert 'removed 256 B input/file_removed' in output if are_hardlinks_supported(): assert 'removed 256 B input/hardlink_removed' in output # Another link (marked previously as the source in borg) to the # same inode was removed. This should not change this link at all. if are_hardlinks_supported(): assert 'input/hardlink_target_removed' not in output # Another link (marked previously as the source in borg) to the # same inode was replaced with a new regular file. This should not # change this link at all. if are_hardlinks_supported(): assert 'input/hardlink_target_replaced' not in output do_asserts(self.cmd('diff', self.repository_location + '::test0', 'test1a'), True) # We expect exit_code=1 due to the chunker params warning do_asserts(self.cmd('diff', self.repository_location + '::test0', 'test1b', exit_code=1), False) def test_sort_option(self): self.cmd('init', '--encryption=repokey', self.repository_location) self.create_regular_file('a_file_removed', size=8) self.create_regular_file('f_file_removed', size=16) self.create_regular_file('c_file_changed', size=32) self.create_regular_file('e_file_changed', size=64) self.cmd('create', self.repository_location + '::test0', 'input') os.unlink('input/a_file_removed') os.unlink('input/f_file_removed') os.unlink('input/c_file_changed') os.unlink('input/e_file_changed') self.create_regular_file('c_file_changed', size=512) self.create_regular_file('e_file_changed', size=1024) self.create_regular_file('b_file_added', size=128) self.create_regular_file('d_file_added', size=256) self.cmd('create', self.repository_location + '::test1', 'input') output = self.cmd('diff', '--sort', self.repository_location + '::test0', 'test1') expected = [ 'a_file_removed', 'b_file_added', 'c_file_changed', 'd_file_added', 'e_file_changed', 'f_file_removed', ] assert all(x in line for x, line in zip(expected, output.splitlines())) def test_get_args(): archiver = Archiver() # everything normal: # first param is argv as produced by ssh forced command, # second param is like from SSH_ORIGINAL_COMMAND env variable args = archiver.get_args(['borg', 'serve', '--restrict-to-path=/p1', '--restrict-to-path=/p2', ], 'borg serve --info --umask=0027') assert args.func == archiver.do_serve assert args.restrict_to_paths == ['/p1', '/p2'] assert args.umask == 0o027 assert args.log_level == 'info' # similar, but with --restrict-to-repository args = archiver.get_args(['borg', 
'serve', '--restrict-to-repository=/r1', '--restrict-to-repository=/r2', ], 'borg serve --info --umask=0027') assert args.restrict_to_repositories == ['/r1', '/r2'] # trying to cheat - break out of path restriction args = archiver.get_args(['borg', 'serve', '--restrict-to-path=/p1', '--restrict-to-path=/p2', ], 'borg serve --restrict-to-path=/') assert args.restrict_to_paths == ['/p1', '/p2'] # trying to cheat - break out of repository restriction args = archiver.get_args(['borg', 'serve', '--restrict-to-repository=/r1', '--restrict-to-repository=/r2', ], 'borg serve --restrict-to-repository=/') assert args.restrict_to_repositories == ['/r1', '/r2'] # trying to cheat - break below repository restriction args = archiver.get_args(['borg', 'serve', '--restrict-to-repository=/r1', '--restrict-to-repository=/r2', ], 'borg serve --restrict-to-repository=/r1/below') assert args.restrict_to_repositories == ['/r1', '/r2'] # trying to cheat - try to execute different subcommand args = archiver.get_args(['borg', 'serve', '--restrict-to-path=/p1', '--restrict-to-path=/p2', ], 'borg init --encryption=repokey /') assert args.func == archiver.do_serve # Check that environment variables in the forced command don't cause issues. If the command # were not forced, environment variables would be interpreted by the shell, but this does not # happen for forced commands - we get the verbatim command line and need to deal with env vars. args = archiver.get_args(['borg', 'serve', ], 'BORG_HOSTNAME_IS_UNIQUE=yes borg serve --info') assert args.func == archiver.do_serve def test_compare_chunk_contents(): def ccc(a, b): chunks_a = [data for data in a] chunks_b = [data for data in b] compare1 = Archiver.compare_chunk_contents(iter(chunks_a), iter(chunks_b)) compare2 = Archiver.compare_chunk_contents(iter(chunks_b), iter(chunks_a)) assert compare1 == compare2 return compare1 assert ccc([ b'1234', b'567A', b'bC' ], [ b'1', b'23', b'4567A', b'b', b'C' ]) # one iterator exhausted before the other assert not ccc([ b'12345', ], [ b'1234', b'56' ]) # content mismatch assert not ccc([ b'1234', b'65' ], [ b'1234', b'56' ]) # first is the prefix of second assert not ccc([ b'1234', b'56' ], [ b'1234', b'565' ]) class TestBuildFilter: @staticmethod def peek_and_store_hardlink_masters(item, matched): pass def test_basic(self): matcher = PatternMatcher() matcher.add([parse_pattern('included')], IECommand.Include) filter = Archiver.build_filter(matcher, self.peek_and_store_hardlink_masters, 0) assert filter(Item(path='included')) assert filter(Item(path='included/file')) assert not filter(Item(path='something else')) def test_empty(self): matcher = PatternMatcher(fallback=True) filter = Archiver.build_filter(matcher, self.peek_and_store_hardlink_masters, 0) assert filter(Item(path='anything')) def test_strip_components(self): matcher = PatternMatcher(fallback=True) filter = Archiver.build_filter(matcher, self.peek_and_store_hardlink_masters, strip_components=1) assert not filter(Item(path='shallow')) assert not filter(Item(path='shallow/')) # can this even happen? paths are normalized... 
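# Paths with no more components than the strip_components count are excluded by the filter
# (nothing of their path would remain after stripping); deeper paths must still pass: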
assert filter(Item(path='deep enough/file')) assert filter(Item(path='something/dir/file')) class TestCommonOptions: @staticmethod def define_common_options(add_common_option): add_common_option('-h', '--help', action='help', help='show this help message and exit') add_common_option('--critical', dest='log_level', help='foo', action='store_const', const='critical', default='warning') add_common_option('--error', dest='log_level', help='foo', action='store_const', const='error', default='warning') add_common_option('--append', dest='append', help='foo', action='append', metavar='TOPIC', default=[]) add_common_option('-p', '--progress', dest='progress', action='store_true', help='foo') add_common_option('--lock-wait', dest='lock_wait', type=int, metavar='N', default=1, help='(default: %(default)d).') @pytest.fixture def basic_parser(self): parser = argparse.ArgumentParser(prog='test', description='test parser', add_help=False) parser.common_options = Archiver.CommonOptions(self.define_common_options, suffix_precedence=('_level0', '_level1')) return parser @pytest.fixture def subparsers(self, basic_parser): return basic_parser.add_subparsers(title='required arguments', metavar='') @pytest.fixture def parser(self, basic_parser): basic_parser.common_options.add_common_group(basic_parser, '_level0', provide_defaults=True) return basic_parser @pytest.fixture def common_parser(self, parser): common_parser = argparse.ArgumentParser(add_help=False, prog='test') parser.common_options.add_common_group(common_parser, '_level1') return common_parser @pytest.fixture def parse_vars_from_line(self, parser, subparsers, common_parser): subparser = subparsers.add_parser('subcommand', parents=[common_parser], add_help=False, description='foo', epilog='bar', help='baz', formatter_class=argparse.RawDescriptionHelpFormatter) subparser.set_defaults(func=1234) subparser.add_argument('--append-only', dest='append_only', action='store_true') def parse_vars_from_line(*line): print(line) args = parser.parse_args(line) parser.common_options.resolve(args) return vars(args) return parse_vars_from_line def test_simple(self, parse_vars_from_line): assert parse_vars_from_line('--error') == { 'append': [], 'lock_wait': 1, 'log_level': 'error', 'progress': False } assert parse_vars_from_line('--error', 'subcommand', '--critical') == { 'append': [], 'lock_wait': 1, 'log_level': 'critical', 'progress': False, 'append_only': False, 'func': 1234, } with pytest.raises(SystemExit): parse_vars_from_line('--append-only', 'subcommand') assert parse_vars_from_line('--append=foo', '--append', 'bar', 'subcommand', '--append', 'baz') == { 'append': ['foo', 'bar', 'baz'], 'lock_wait': 1, 'log_level': 'warning', 'progress': False, 'append_only': False, 'func': 1234, } @pytest.mark.parametrize('position', ('before', 'after', 'both')) @pytest.mark.parametrize('flag,args_key,args_value', ( ('-p', 'progress', True), ('--lock-wait=3', 'lock_wait', 3), )) def test_flag_position_independence(self, parse_vars_from_line, position, flag, args_key, args_value): line = [] if position in ('before', 'both'): line.append(flag) line.append('subcommand') if position in ('after', 'both'): line.append(flag) result = { 'append': [], 'lock_wait': 1, 'log_level': 'warning', 'progress': False, 'append_only': False, 'func': 1234, } result[args_key] = args_value assert parse_vars_from_line(*line) == result def test_parse_storage_quota(): assert parse_storage_quota('50M') == 50 * 1000**2 with pytest.raises(argparse.ArgumentTypeError): parse_storage_quota('5M') def 
get_all_parsers(): """ Return dict mapping command to parser. """ parser = Archiver(prog='borg').build_parser() borgfs_parser = Archiver(prog='borgfs').build_parser() parsers = {} def discover_level(prefix, parser, Archiver, extra_choices=None): choices = {} for action in parser._actions: if action.choices is not None and 'SubParsersAction' in str(action.__class__): for cmd, parser in action.choices.items(): choices[prefix + cmd] = parser if extra_choices is not None: choices.update(extra_choices) if prefix and not choices: return for command, parser in sorted(choices.items()): discover_level(command + " ", parser, Archiver) parsers[command] = parser discover_level("", parser, Archiver, {'borgfs': borgfs_parser}) return parsers @pytest.mark.parametrize('command, parser', list(get_all_parsers().items())) def test_help_formatting(command, parser): if isinstance(parser.epilog, RstToTextLazy): assert parser.epilog.rst @pytest.mark.parametrize('topic, helptext', list(Archiver.helptext.items())) def test_help_formatting_helptexts(topic, helptext): assert str(rst_to_terminal(helptext)) borgbackup-1.1.15/src/borg/testsuite/checksums.py0000644000175000017500000000326513771325506022023 0ustar useruser00000000000000import os import zlib from binascii import unhexlify import pytest from ..algorithms import checksums from ..helpers import bin_to_hex crc32_implementations = [checksums.crc32_slice_by_8] if checksums.have_clmul: crc32_implementations.append(checksums.crc32_clmul) @pytest.mark.parametrize('implementation', crc32_implementations) def test_crc32(implementation): # This includes many critical values, like misc. length and misc. aligned start addresses. data = os.urandom(300) mv = memoryview(data) initial_crc = 0x12345678 for start in range(0, 4): # 4B / int32 alignment, head processing for length in [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 127, 128, 129, 130, 131, 132, 133, 134, 135, 255, 256, 257, ]: d = mv[start:start+length] assert zlib.crc32(d, initial_crc) == implementation(d, initial_crc) def test_xxh64(): assert bin_to_hex(checksums.xxh64(b'test', 123)) == '2b81b9401bef86cf' assert bin_to_hex(checksums.xxh64(b'test')) == '4fdcca5ddb678139' assert bin_to_hex(checksums.xxh64(unhexlify( '6f663f01c118abdea553373d5eae44e7dac3b6829b46b9bbeff202b6c592c22d724' 'fb3d25a347cca6c5b8f20d567e4bb04b9cfa85d17f691590f9a9d32e8ccc9102e9d' 'cf8a7e6716280cd642ce48d03fdf114c9f57c20d9472bb0f81c147645e6fa3d331'))) == '35d5d2f545d9511a' def test_streaming_xxh64(): hasher = checksums.StreamingXXH64(123) hasher.update(b'te') hasher.update(b'st') assert bin_to_hex(hasher.digest()) == hasher.hexdigest() == '2b81b9401bef86cf' borgbackup-1.1.15/src/borg/testsuite/hashindex.py0000644000175000017500000004673213771325506022017 0ustar useruser00000000000000import base64 import hashlib import io import os import tempfile import zlib from ..hashindex import NSIndex, ChunkIndex, ChunkIndexEntry from .. import hashindex from ..crypto.file_integrity import IntegrityCheckedFile, FileIntegrityError from . import BaseTestCase # Note: these tests are part of the self test, do not use or import py.test functionality here. # See borg.selftest for details. 
If you add/remove test methods, update SELFTEST_COUNT def H(x): # make some 32byte long thing that depends on x return bytes('%-0.32d' % x, 'ascii') def H2(x): # like H(x), but with pseudo-random distribution of the output value return hashlib.sha256(H(x)).digest() class HashIndexTestCase(BaseTestCase): def _generic_test(self, cls, make_value, sha): idx = cls() self.assert_equal(len(idx), 0) # Test set for x in range(100): idx[H(x)] = make_value(x) self.assert_equal(len(idx), 100) for x in range(100): self.assert_equal(idx[H(x)], make_value(x)) # Test update for x in range(100): idx[H(x)] = make_value(x * 2) self.assert_equal(len(idx), 100) for x in range(100): self.assert_equal(idx[H(x)], make_value(x * 2)) # Test delete for x in range(50): del idx[H(x)] # Test some keys still in there for x in range(50, 100): assert H(x) in idx # Test some keys not there any more for x in range(50): assert H(x) not in idx # Test delete non-existing key for x in range(50): self.assert_raises(KeyError, idx.__delitem__, H(x)) self.assert_equal(len(idx), 50) idx_name = tempfile.NamedTemporaryFile() idx.write(idx_name.name) del idx # Verify file contents with open(idx_name.name, 'rb') as fd: self.assert_equal(hashlib.sha256(fd.read()).hexdigest(), sha) # Make sure we can open the file idx = cls.read(idx_name.name) self.assert_equal(len(idx), 50) for x in range(50, 100): self.assert_equal(idx[H(x)], make_value(x * 2)) idx.clear() self.assert_equal(len(idx), 0) idx.write(idx_name.name) del idx self.assert_equal(len(cls.read(idx_name.name)), 0) def test_nsindex(self): self._generic_test(NSIndex, lambda x: (x, x), '85f72b036c692c8266e4f51ccf0cff2147204282b5e316ae508d30a448d88fef') def test_chunkindex(self): self._generic_test(ChunkIndex, lambda x: (x, x, x), 'c83fdf33755fc37879285f2ecfc5d1f63b97577494902126b6fb6f3e4d852488') def test_resize(self): n = 2000 # Must be >= MIN_BUCKETS idx_name = tempfile.NamedTemporaryFile() idx = NSIndex() idx.write(idx_name.name) initial_size = os.path.getsize(idx_name.name) self.assert_equal(len(idx), 0) for x in range(n): idx[H(x)] = x, x idx.write(idx_name.name) self.assert_true(initial_size < os.path.getsize(idx_name.name)) for x in range(n): del idx[H(x)] self.assert_equal(len(idx), 0) idx.write(idx_name.name) self.assert_equal(initial_size, os.path.getsize(idx_name.name)) def test_iteritems(self): idx = NSIndex() for x in range(100): idx[H(x)] = x, x iterator = idx.iteritems() all = list(iterator) self.assert_equal(len(all), 100) # iterator is already exhausted by list(): self.assert_raises(StopIteration, next, iterator) second_half = list(idx.iteritems(marker=all[49][0])) self.assert_equal(len(second_half), 50) self.assert_equal(second_half, all[50:]) def test_chunkindex_merge(self): idx1 = ChunkIndex() idx1[H(1)] = 1, 100, 100 idx1[H(2)] = 2, 200, 200 idx1[H(3)] = 3, 300, 300 # no H(4) entry idx2 = ChunkIndex() idx2[H(1)] = 4, 100, 100 idx2[H(2)] = 5, 200, 200 # no H(3) entry idx2[H(4)] = 6, 400, 400 idx1.merge(idx2) assert idx1[H(1)] == (5, 100, 100) assert idx1[H(2)] == (7, 200, 200) assert idx1[H(3)] == (3, 300, 300) assert idx1[H(4)] == (6, 400, 400) def test_chunkindex_summarize(self): idx = ChunkIndex() idx[H(1)] = 1, 1000, 100 idx[H(2)] = 2, 2000, 200 idx[H(3)] = 3, 3000, 300 size, csize, unique_size, unique_csize, unique_chunks, chunks = idx.summarize() assert size == 1000 + 2 * 2000 + 3 * 3000 assert csize == 100 + 2 * 200 + 3 * 300 assert unique_size == 1000 + 2000 + 3000 assert unique_csize == 100 + 200 + 300 assert chunks == 1 + 2 + 3 assert unique_chunks == 3 
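# Sketch (comments only, not part of the selftest): a ChunkIndex entry maps a 32-byte id to a
# (refcount, size, csize) triple. merge() adds the refcounts of entries present in both
# indexes, and summarize() weights size/csize by refcount, while the unique_* values count
# every chunk once, e.g.:
#   idx = ChunkIndex()
#   idx[H(1)] = 1, 1000, 100   # refcount 1
#   idx[H(2)] = 2, 2000, 200   # refcount 2
#   size, csize, unique_size, unique_csize, unique_chunks, chunks = idx.summarize()
#   # size == 1*1000 + 2*2000, unique_size == 1000 + 2000, chunks == 3, unique_chunks == 2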
class HashIndexExtraTestCase(BaseTestCase): """These tests are separate because they should not become part of the selftest. """ def test_chunk_indexer(self): # see _hashindex.c hash_sizes, we want to be close to the max. load # because interesting errors happen there. key_count = int(65537 * ChunkIndex.MAX_LOAD_FACTOR) - 10 index = ChunkIndex(key_count) all_keys = [hashlib.sha256(H(k)).digest() for k in range(key_count)] # we're gonna delete 1/3 of all_keys, so let's split them 2/3 and 1/3: keys, to_delete_keys = all_keys[0:(2*key_count//3)], all_keys[(2*key_count//3):] for i, key in enumerate(keys): index[key] = (i, i, i) for i, key in enumerate(to_delete_keys): index[key] = (i, i, i) for key in to_delete_keys: del index[key] for i, key in enumerate(keys): assert index[key] == (i, i, i) for key in to_delete_keys: assert index.get(key) is None # now delete every key still in the index for key in keys: del index[key] # the index should now be empty assert list(index.iteritems()) == [] class HashIndexSizeTestCase(BaseTestCase): def test_size_on_disk(self): idx = ChunkIndex() assert idx.size() == 18 + 1031 * (32 + 3 * 4) def test_size_on_disk_accurate(self): idx = ChunkIndex() for i in range(1234): idx[H(i)] = i, i**2, i**3 with tempfile.NamedTemporaryFile() as file: idx.write(file.name) size = os.path.getsize(file.name) assert idx.size() == size class HashIndexRefcountingTestCase(BaseTestCase): def test_chunkindex_limit(self): idx = ChunkIndex() idx[H(1)] = ChunkIndex.MAX_VALUE - 1, 1, 2 # 5 is arbitray, any number of incref/decrefs shouldn't move it once it's limited for i in range(5): # first incref to move it to the limit refcount, *_ = idx.incref(H(1)) assert refcount == ChunkIndex.MAX_VALUE for i in range(5): refcount, *_ = idx.decref(H(1)) assert refcount == ChunkIndex.MAX_VALUE def _merge(self, refcounta, refcountb): def merge(refcount1, refcount2): idx1 = ChunkIndex() idx1[H(1)] = refcount1, 1, 2 idx2 = ChunkIndex() idx2[H(1)] = refcount2, 1, 2 idx1.merge(idx2) refcount, *_ = idx1[H(1)] return refcount result = merge(refcounta, refcountb) # check for commutativity assert result == merge(refcountb, refcounta) return result def test_chunkindex_merge_limit1(self): # Check that it does *not* limit at MAX_VALUE - 1 # (MAX_VALUE is odd) half = ChunkIndex.MAX_VALUE // 2 assert self._merge(half, half) == ChunkIndex.MAX_VALUE - 1 def test_chunkindex_merge_limit2(self): # 3000000000 + 2000000000 > MAX_VALUE assert self._merge(3000000000, 2000000000) == ChunkIndex.MAX_VALUE def test_chunkindex_merge_limit3(self): # Crossover point: both addition and limit semantics will yield the same result half = ChunkIndex.MAX_VALUE // 2 assert self._merge(half + 1, half) == ChunkIndex.MAX_VALUE def test_chunkindex_merge_limit4(self): # Beyond crossover, result of addition would be 2**31 half = ChunkIndex.MAX_VALUE // 2 assert self._merge(half + 2, half) == ChunkIndex.MAX_VALUE assert self._merge(half + 1, half + 1) == ChunkIndex.MAX_VALUE def test_chunkindex_add(self): idx1 = ChunkIndex() idx1.add(H(1), 5, 6, 7) assert idx1[H(1)] == (5, 6, 7) idx1.add(H(1), 1, 2, 3) assert idx1[H(1)] == (6, 2, 3) def test_incref_limit(self): idx1 = ChunkIndex() idx1[H(1)] = (ChunkIndex.MAX_VALUE, 6, 7) idx1.incref(H(1)) refcount, *_ = idx1[H(1)] assert refcount == ChunkIndex.MAX_VALUE def test_decref_limit(self): idx1 = ChunkIndex() idx1[H(1)] = ChunkIndex.MAX_VALUE, 6, 7 idx1.decref(H(1)) refcount, *_ = idx1[H(1)] assert refcount == ChunkIndex.MAX_VALUE def test_decref_zero(self): idx1 = ChunkIndex() idx1[H(1)] = 0, 0, 0 
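# the entry was stored with refcount 0, so decrementing it further is invalid and must raise: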
with self.assert_raises(AssertionError): idx1.decref(H(1)) def test_incref_decref(self): idx1 = ChunkIndex() idx1.add(H(1), 5, 6, 7) assert idx1[H(1)] == (5, 6, 7) idx1.incref(H(1)) assert idx1[H(1)] == (6, 6, 7) idx1.decref(H(1)) assert idx1[H(1)] == (5, 6, 7) def test_setitem_raises(self): idx1 = ChunkIndex() with self.assert_raises(AssertionError): idx1[H(1)] = ChunkIndex.MAX_VALUE + 1, 0, 0 def test_keyerror(self): idx = ChunkIndex() with self.assert_raises(KeyError): idx.incref(H(1)) with self.assert_raises(KeyError): idx.decref(H(1)) with self.assert_raises(KeyError): idx[H(1)] with self.assert_raises(OverflowError): idx.add(H(1), -1, 0, 0) class HashIndexDataTestCase(BaseTestCase): # This bytestring was created with 1.0-maint at c2f9533 HASHINDEX = b'eJzt0L0NgmAUhtHLT0LDEI6AuAEhMVYmVnSuYefC7AB3Aj9KNedJbnfyFne6P67P27w0EdG1Eac+Cm1ZybAsy7Isy7Isy7Isy7I' \ b'sy7Isy7Isy7Isy7Isy7Isy7Isy7Isy7Isy7Isy7Isy7Isy7Isy7Isy7Isy7Isy7Isy7Isy7Isy7Isy7Isy7Isy7LsL9nhc+cqTZ' \ b'3XlO2Ys++Du5fX+l1/YFmWZVmWZVmWZVmWZVmWZVmWZVmWZVmWZVmWZVmWZVmWZVmWZVmWZVmWZVmWZVmWZVn2/+0O2rYccw==' def _serialize_hashindex(self, idx): with tempfile.TemporaryDirectory() as tempdir: file = os.path.join(tempdir, 'idx') idx.write(file) with open(file, 'rb') as f: return self._pack(f.read()) def _deserialize_hashindex(self, bytestring): with tempfile.TemporaryDirectory() as tempdir: file = os.path.join(tempdir, 'idx') with open(file, 'wb') as f: f.write(self._unpack(bytestring)) return ChunkIndex.read(file) def _pack(self, bytestring): return base64.b64encode(zlib.compress(bytestring)) def _unpack(self, bytestring): return zlib.decompress(base64.b64decode(bytestring)) def test_identical_creation(self): idx1 = ChunkIndex() idx1[H(1)] = 1, 2, 3 idx1[H(2)] = 2**31 - 1, 0, 0 idx1[H(3)] = 4294962296, 0, 0 # 4294962296 is -5000 interpreted as an uint32_t serialized = self._serialize_hashindex(idx1) assert self._unpack(serialized) == self._unpack(self.HASHINDEX) def test_read_known_good(self): idx1 = self._deserialize_hashindex(self.HASHINDEX) assert idx1[H(1)] == (1, 2, 3) assert idx1[H(2)] == (2**31 - 1, 0, 0) assert idx1[H(3)] == (4294962296, 0, 0) idx2 = ChunkIndex() idx2[H(3)] = 2**32 - 123456, 6, 7 idx1.merge(idx2) assert idx1[H(3)] == (ChunkIndex.MAX_VALUE, 6, 7) class HashIndexIntegrityTestCase(HashIndexDataTestCase): def write_integrity_checked_index(self, tempdir): idx = self._deserialize_hashindex(self.HASHINDEX) file = os.path.join(tempdir, 'idx') with IntegrityCheckedFile(path=file, write=True) as fd: idx.write(fd) integrity_data = fd.integrity_data assert 'final' in integrity_data assert 'HashHeader' in integrity_data return file, integrity_data def test_integrity_checked_file(self): with tempfile.TemporaryDirectory() as tempdir: file, integrity_data = self.write_integrity_checked_index(tempdir) with open(file, 'r+b') as fd: fd.write(b'Foo') with self.assert_raises(FileIntegrityError): with IntegrityCheckedFile(path=file, write=False, integrity_data=integrity_data) as fd: ChunkIndex.read(fd) class HashIndexCompactTestCase(HashIndexDataTestCase): def index(self, num_entries, num_buckets): index_data = io.BytesIO() index_data.write(b'BORG_IDX') # num_entries index_data.write(num_entries.to_bytes(4, 'little')) # num_buckets index_data.write(num_buckets.to_bytes(4, 'little')) # key_size index_data.write((32).to_bytes(1, 'little')) # value_size index_data.write((3 * 4).to_bytes(1, 'little')) self.index_data = index_data def index_from_data(self): self.index_data.seek(0) index = ChunkIndex.read(self.index_data) return index def 
index_to_data(self, index): data = io.BytesIO() index.write(data) return data.getvalue() def index_from_data_compact_to_data(self): index = self.index_from_data() index.compact() compact_index = self.index_to_data(index) return compact_index def write_entry(self, key, *values): self.index_data.write(key) for value in values: self.index_data.write(value.to_bytes(4, 'little')) def write_empty(self, key): self.write_entry(key, 0xffffffff, 0, 0) def write_deleted(self, key): self.write_entry(key, 0xfffffffe, 0, 0) def test_simple(self): self.index(num_entries=3, num_buckets=6) self.write_entry(H2(0), 1, 2, 3) self.write_deleted(H2(1)) self.write_empty(H2(2)) self.write_entry(H2(3), 5, 6, 7) self.write_entry(H2(4), 8, 9, 10) self.write_empty(H2(5)) compact_index = self.index_from_data_compact_to_data() self.index(num_entries=3, num_buckets=3) self.write_entry(H2(0), 1, 2, 3) self.write_entry(H2(3), 5, 6, 7) self.write_entry(H2(4), 8, 9, 10) assert compact_index == self.index_data.getvalue() def test_first_empty(self): self.index(num_entries=3, num_buckets=6) self.write_deleted(H2(1)) self.write_entry(H2(0), 1, 2, 3) self.write_empty(H2(2)) self.write_entry(H2(3), 5, 6, 7) self.write_entry(H2(4), 8, 9, 10) self.write_empty(H2(5)) compact_index = self.index_from_data_compact_to_data() self.index(num_entries=3, num_buckets=3) self.write_entry(H2(0), 1, 2, 3) self.write_entry(H2(3), 5, 6, 7) self.write_entry(H2(4), 8, 9, 10) assert compact_index == self.index_data.getvalue() def test_last_used(self): self.index(num_entries=3, num_buckets=6) self.write_deleted(H2(1)) self.write_entry(H2(0), 1, 2, 3) self.write_empty(H2(2)) self.write_entry(H2(3), 5, 6, 7) self.write_empty(H2(5)) self.write_entry(H2(4), 8, 9, 10) compact_index = self.index_from_data_compact_to_data() self.index(num_entries=3, num_buckets=3) self.write_entry(H2(0), 1, 2, 3) self.write_entry(H2(3), 5, 6, 7) self.write_entry(H2(4), 8, 9, 10) assert compact_index == self.index_data.getvalue() def test_too_few_empty_slots(self): self.index(num_entries=3, num_buckets=6) self.write_deleted(H2(1)) self.write_entry(H2(0), 1, 2, 3) self.write_entry(H2(3), 5, 6, 7) self.write_empty(H2(2)) self.write_empty(H2(5)) self.write_entry(H2(4), 8, 9, 10) compact_index = self.index_from_data_compact_to_data() self.index(num_entries=3, num_buckets=3) self.write_entry(H2(0), 1, 2, 3) self.write_entry(H2(3), 5, 6, 7) self.write_entry(H2(4), 8, 9, 10) assert compact_index == self.index_data.getvalue() def test_empty(self): self.index(num_entries=0, num_buckets=6) self.write_deleted(H2(1)) self.write_empty(H2(0)) self.write_deleted(H2(3)) self.write_empty(H2(2)) self.write_empty(H2(5)) self.write_deleted(H2(4)) compact_index = self.index_from_data_compact_to_data() self.index(num_entries=0, num_buckets=0) assert compact_index == self.index_data.getvalue() def test_merge(self): master = ChunkIndex() idx1 = ChunkIndex() idx1[H(1)] = 1, 100, 100 idx1[H(2)] = 2, 200, 200 idx1[H(3)] = 3, 300, 300 idx1.compact() assert idx1.size() == 18 + 3 * (32 + 3 * 4) master.merge(idx1) assert master[H(1)] == (1, 100, 100) assert master[H(2)] == (2, 200, 200) assert master[H(3)] == (3, 300, 300) class NSIndexTestCase(BaseTestCase): def test_nsindex_segment_limit(self): idx = NSIndex() with self.assert_raises(AssertionError): idx[H(1)] = NSIndex.MAX_VALUE + 1, 0 assert H(1) not in idx idx[H(2)] = NSIndex.MAX_VALUE, 0 assert H(2) in idx class AllIndexTestCase(BaseTestCase): def test_max_load_factor(self): assert NSIndex.MAX_LOAD_FACTOR < 1.0 assert ChunkIndex.MAX_LOAD_FACTOR < 
1.0 class IndexCorruptionTestCase(BaseTestCase): def test_bug_4829(self): from struct import pack def HH(x, y): # make some 32byte long thing that depends on x and y. # same x will mean a collision in the hashtable as bucket index is computed from # first 4 bytes. giving a specific x targets bucket index x. # y is to create different keys and does not go into the bucket index calculation. # so, same x + different y --> collision return pack(' BorgBackup Printable Key Template

BorgBackup Printable Key Backup
To restore, either scan the QR code below, decode it and import it using
borg key import /path/to/repo scannedfile
Or run
borg key import --paper /path/to/repo
and type in the text below.
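(For context: a paper/QR key backup like this is normally produced beforehand with the export
counterpart of the command above, e.g. "borg key export --paper /path/to/repo" for the plain
text form or "borg key export --qr-html /path/to/repo backup.html" for a printable HTML page;
see "borg key export --help" on your installed version for the exact options.)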

Notes:
borgbackup-1.1.15/src/borg/cache_sync/0000755000175000017500000000000013771325773017532 5ustar useruser00000000000000borgbackup-1.1.15/src/borg/cache_sync/cache_sync.c0000644000175000017500000000772413771325506022001 0ustar useruser00000000000000/* * Borg cache synchronizer, * high level interface. * * These routines parse msgpacked item metadata and update a HashIndex * with all chunks that are referenced from the items. * * This file only contains some initialization and buffer management. * * The parser is split in two parts, somewhat similar to lexer/parser combinations: * * unpack_template.h munches msgpack and calls a specific callback for each object * encountered (e.g. beginning of a map, an integer, a string, a map item etc.). * * unpack.h implements these callbacks and uses another state machine to * extract chunk references from it. */ #include "unpack.h" typedef struct { unpack_context ctx; char *buf; size_t head; size_t tail; size_t size; } CacheSyncCtx; static CacheSyncCtx * cache_sync_init(HashIndex *chunks) { CacheSyncCtx *ctx; if (!(ctx = (CacheSyncCtx*)malloc(sizeof(CacheSyncCtx)))) { return NULL; } unpack_init(&ctx->ctx); /* needs to be set only once */ ctx->ctx.user.chunks = chunks; ctx->ctx.user.parts.size = 0; ctx->ctx.user.parts.csize = 0; ctx->ctx.user.parts.num_files = 0; ctx->ctx.user.totals.size = 0; ctx->ctx.user.totals.csize = 0; ctx->ctx.user.totals.num_files = 0; ctx->buf = NULL; ctx->head = 0; ctx->tail = 0; ctx->size = 0; return ctx; } static void cache_sync_free(CacheSyncCtx *ctx) { if(ctx->buf) { free(ctx->buf); } free(ctx); } static const char * cache_sync_error(const CacheSyncCtx *ctx) { return ctx->ctx.user.last_error; } static uint64_t cache_sync_num_files_totals(const CacheSyncCtx *ctx) { return ctx->ctx.user.totals.num_files; } static uint64_t cache_sync_num_files_parts(const CacheSyncCtx *ctx) { return ctx->ctx.user.parts.num_files; } static uint64_t cache_sync_size_totals(const CacheSyncCtx *ctx) { return ctx->ctx.user.totals.size; } static uint64_t cache_sync_size_parts(const CacheSyncCtx *ctx) { return ctx->ctx.user.parts.size; } static uint64_t cache_sync_csize_totals(const CacheSyncCtx *ctx) { return ctx->ctx.user.totals.csize; } static uint64_t cache_sync_csize_parts(const CacheSyncCtx *ctx) { return ctx->ctx.user.parts.csize; } /** * feed data to the cache synchronizer * 0 = abort, 1 = continue * abort is a regular condition, check cache_sync_error */ static int cache_sync_feed(CacheSyncCtx *ctx, void *data, uint32_t length) { size_t new_size; int ret; char *new_buf; if(ctx->tail + length > ctx->size) { if((ctx->tail - ctx->head) + length <= ctx->size) { /* | XXXXX| -> move data in buffer backwards -> |XXXXX | */ memmove(ctx->buf, ctx->buf + ctx->head, ctx->tail - ctx->head); ctx->tail -= ctx->head; ctx->head = 0; } else { /* must expand buffer to fit all data */ new_size = (ctx->tail - ctx->head) + length; new_buf = (char*) malloc(new_size); if(!new_buf) { ctx->ctx.user.last_error = "cache_sync_feed: unable to allocate buffer"; return 0; } if(ctx->buf) { memcpy(new_buf, ctx->buf + ctx->head, ctx->tail - ctx->head); free(ctx->buf); } ctx->buf = new_buf; ctx->tail -= ctx->head; ctx->head = 0; ctx->size = new_size; } } memcpy(ctx->buf + ctx->tail, data, length); ctx->tail += length; while(1) { if(ctx->head >= ctx->tail) { return 1; /* request more bytes */ } ret = unpack_execute(&ctx->ctx, ctx->buf, ctx->tail, &ctx->head); if(ret == 1) { unpack_init(&ctx->ctx); continue; } else if(ret == 0) { return 1; } else { if(!ctx->ctx.user.last_error) { 
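            /* unpack_execute() reported an error; if none of the unpack.h callbacks recorded
               a specific message, fall back to a generic one so cache_sync_error() still has
               something meaningful to return. */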
ctx->ctx.user.last_error = "Unknown error"; } return 0; } } /* unreachable */ return 1; } borgbackup-1.1.15/src/borg/cache_sync/unpack.h0000644000175000017500000002723513771325506021167 0ustar useruser00000000000000/* * Borg cache synchronizer, * based on a MessagePack for Python unpacking routine * * Copyright (C) 2009 Naoki INADA * Copyright (c) 2017 Marian Beermann * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /* * This limits the depth of the structures we can unpack, i.e. how many containers * are nestable. */ #define MSGPACK_EMBED_STACK_SIZE (16) #include "unpack_define.h" // 2**32 - 1025 #define _MAX_VALUE ( (uint32_t) 4294966271UL ) #define MIN(x, y) ((x) < (y) ? (x): (y)) #ifdef DEBUG #define SET_LAST_ERROR(msg) \ fprintf(stderr, "cache_sync parse error: %s\n", (msg)); \ u->last_error = (msg); #else #define SET_LAST_ERROR(msg) \ u->last_error = (msg); #endif typedef struct unpack_user { /* Item.chunks and Item.part are at the top level; we don't care about anything else, * only need to track the current level to navigate arbitrary and unknown structure. * To discern keys from everything else on the top level we use expect_map_item_end. */ int level; const char *last_error; HashIndex *chunks; /* * We don't care about most stuff. This flag tells us whether we're at the chunks structure, * meaning: * {'foo': 'bar', 'chunks': [...], 'stuff': ... } * ^-HERE-^ */ int inside_chunks; /* is this item a .part file (created for checkpointing inside files)? */ int part; /* does this item have a chunks list in it? 
*/ int has_chunks; enum { /* the next thing is a map key at the Item root level, * and it might be the "chunks" or "part" key we're looking for */ expect_map_key, /* blocking state to expect_map_key * { 'stuff': , 'chunks': [ * emk -> emie -> -> -> -> emk ecb eeboce * (nested containers are tracked via level) * emk=expect_map_key, emie=expect_map_item_end, ecb=expect_chunks_begin, * eeboce=expect_entry_begin_or_chunks_end */ expect_map_item_end, /* next thing must be the chunks array (array) */ expect_chunks_begin, /* next thing must either be another CLE (array) or end of Item.chunks (array_end) */ expect_entry_begin_or_chunks_end, /* * processing ChunkListEntry tuple: * expect_key, expect_size, expect_csize, expect_entry_end */ /* next thing must be the key (raw, l=32) */ expect_key, /* next thing must be the size (int) */ expect_size, /* next thing must be the csize (int) */ expect_csize, /* next thing must be the end of the CLE (array_end) */ expect_entry_end, expect_item_begin } expect; /* collect values here for current chunklist entry */ struct { char key[32]; uint32_t csize; uint32_t size; } current; /* summing up chunks sizes here within a single item */ struct { uint64_t size, csize; } item; /* total sizes and files count coming from all files */ struct { uint64_t size, csize, num_files; } totals; /* total sizes and files count coming from part files */ struct { uint64_t size, csize, num_files; } parts; } unpack_user; struct unpack_context; typedef struct unpack_context unpack_context; typedef int (*execute_fn)(unpack_context *ctx, const char* data, size_t len, size_t* off); #define UNEXPECTED(what) \ if(u->inside_chunks || u->expect == expect_map_key) { \ SET_LAST_ERROR("Unexpected object: " what); \ return -1; \ } static inline void unpack_init_user_state(unpack_user *u) { u->last_error = NULL; u->level = 0; u->inside_chunks = false; u->expect = expect_item_begin; } static inline int unpack_callback_uint64(unpack_user* u, int64_t d) { switch(u->expect) { case expect_size: u->current.size = d; u->expect = expect_csize; break; case expect_csize: u->current.csize = d; u->expect = expect_entry_end; break; default: UNEXPECTED("integer"); } return 0; } static inline int unpack_callback_uint32(unpack_user* u, uint32_t d) { return unpack_callback_uint64(u, d); } static inline int unpack_callback_uint16(unpack_user* u, uint16_t d) { return unpack_callback_uint64(u, d); } static inline int unpack_callback_uint8(unpack_user* u, uint8_t d) { return unpack_callback_uint64(u, d); } static inline int unpack_callback_int64(unpack_user* u, uint64_t d) { return unpack_callback_uint64(u, d); } static inline int unpack_callback_int32(unpack_user* u, int32_t d) { return unpack_callback_uint64(u, d); } static inline int unpack_callback_int16(unpack_user* u, int16_t d) { return unpack_callback_uint64(u, d); } static inline int unpack_callback_int8(unpack_user* u, int8_t d) { return unpack_callback_uint64(u, d); } /* Ain't got anything to do with those floats */ static inline int unpack_callback_double(unpack_user* u, double d) { (void)d; UNEXPECTED("double"); return 0; } static inline int unpack_callback_float(unpack_user* u, float d) { (void)d; UNEXPECTED("float"); return 0; } /* nil/true/false — I/don't/care */ static inline int unpack_callback_nil(unpack_user* u) { UNEXPECTED("nil"); return 0; } static inline int unpack_callback_true(unpack_user* u) { UNEXPECTED("true"); return 0; } static inline int unpack_callback_false(unpack_user* u) { UNEXPECTED("false"); return 0; } static inline int 
unpack_callback_array(unpack_user* u, unsigned int n) { switch(u->expect) { case expect_chunks_begin: /* b'chunks': [ * ^ */ u->expect = expect_entry_begin_or_chunks_end; break; case expect_entry_begin_or_chunks_end: /* b'chunks': [ ( * ^ */ if(n != 3) { SET_LAST_ERROR("Invalid chunk list entry length"); return -1; } u->expect = expect_key; break; default: if(u->inside_chunks) { SET_LAST_ERROR("Unexpected array start"); return -1; } else { u->level++; return 0; } } return 0; } static inline int unpack_callback_array_item(unpack_user* u, unsigned int current) { (void)u; (void)current; return 0; } static inline int unpack_callback_array_end(unpack_user* u) { uint32_t *cache_entry; uint32_t cache_values[3]; uint64_t refcount; switch(u->expect) { case expect_entry_end: /* b'chunks': [ ( b'1234...', 123, 345 ) * ^ */ cache_entry = (uint32_t*) hashindex_get(u->chunks, u->current.key); if(cache_entry) { refcount = _le32toh(cache_entry[0]); if(refcount > _MAX_VALUE) { SET_LAST_ERROR("invalid reference count"); return -1; } refcount += 1; cache_entry[0] = _htole32(MIN(refcount, _MAX_VALUE)); } else { /* refcount, size, csize */ cache_values[0] = _htole32(1); cache_values[1] = _htole32(u->current.size); cache_values[2] = _htole32(u->current.csize); if(!hashindex_set(u->chunks, u->current.key, cache_values)) { SET_LAST_ERROR("hashindex_set failed"); return -1; } } u->item.size += u->current.size; u->item.csize += u->current.csize; u->expect = expect_entry_begin_or_chunks_end; break; case expect_entry_begin_or_chunks_end: /* b'chunks': [ ] * ^ */ /* end of Item.chunks */ u->inside_chunks = 0; u->expect = expect_map_item_end; break; default: if(u->inside_chunks) { SET_LAST_ERROR("Invalid state transition (unexpected array end)"); return -1; } else { u->level--; return 0; } } return 0; } static inline int unpack_callback_map(unpack_user* u, unsigned int n) { (void)n; if(u->level == 0) { if(u->expect != expect_item_begin) { SET_LAST_ERROR("Invalid state transition"); /* unreachable */ return -1; } /* This begins a new Item */ u->expect = expect_map_key; u->part = 0; u->has_chunks = 0; u->item.size = 0; u->item.csize = 0; } if(u->inside_chunks) { UNEXPECTED("map"); } u->level++; return 0; } static inline int unpack_callback_map_item(unpack_user* u, unsigned int current) { (void)u; (void)current; if(u->level == 1) { switch(u->expect) { case expect_map_item_end: u->expect = expect_map_key; break; default: SET_LAST_ERROR("Unexpected map item"); return -1; } } return 0; } static inline int unpack_callback_map_end(unpack_user* u) { u->level--; if(u->inside_chunks) { SET_LAST_ERROR("Unexpected map end"); return -1; } if(u->level == 0) { /* This ends processing of an Item */ if(u->has_chunks) { if(u->part) { u->parts.num_files += 1; u->parts.size += u->item.size; u->parts.csize += u->item.csize; } u->totals.num_files += 1; u->totals.size += u->item.size; u->totals.csize += u->item.csize; } } return 0; } static inline int unpack_callback_raw(unpack_user* u, const char* b, const char* p, unsigned int length) { /* raw = what Borg uses for binary stuff and strings as well */ /* Note: p points to an internal buffer which contains l bytes. 
*/ (void)b; switch(u->expect) { case expect_key: if(length != 32) { SET_LAST_ERROR("Incorrect key length"); return -1; } memcpy(u->current.key, p, 32); u->expect = expect_size; break; case expect_map_key: if(length == 6 && !memcmp("chunks", p, 6)) { u->expect = expect_chunks_begin; u->inside_chunks = 1; u->has_chunks = 1; } else if(length == 4 && !memcmp("part", p, 4)) { u->expect = expect_map_item_end; u->part = 1; } else { u->expect = expect_map_item_end; } break; default: if(u->inside_chunks) { SET_LAST_ERROR("Unexpected bytes in chunks structure"); return -1; } } return 0; } static inline int unpack_callback_bin(unpack_user* u, const char* b, const char* p, unsigned int length) { (void)u; (void)b; (void)p; (void)length; UNEXPECTED("bin"); return 0; } static inline int unpack_callback_ext(unpack_user* u, const char* base, const char* pos, unsigned int length) { (void)u; (void)base; (void)pos; (void)length; UNEXPECTED("ext"); return 0; } #include "unpack_template.h" borgbackup-1.1.15/src/borg/cache_sync/sysdep.h0000644000175000017500000001450113771325506021205 0ustar useruser00000000000000/* * MessagePack system dependencies * * Copyright (C) 2008-2010 FURUHASHI Sadayuki * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #ifndef MSGPACK_SYSDEP_H__ #define MSGPACK_SYSDEP_H__ #include #include #if defined(_MSC_VER) && _MSC_VER < 1600 typedef __int8 int8_t; typedef unsigned __int8 uint8_t; typedef __int16 int16_t; typedef unsigned __int16 uint16_t; typedef __int32 int32_t; typedef unsigned __int32 uint32_t; typedef __int64 int64_t; typedef unsigned __int64 uint64_t; #elif defined(_MSC_VER) // && _MSC_VER >= 1600 #include #else #include #include #endif #ifdef _WIN32 #define _msgpack_atomic_counter_header typedef long _msgpack_atomic_counter_t; #define _msgpack_sync_decr_and_fetch(ptr) InterlockedDecrement(ptr) #define _msgpack_sync_incr_and_fetch(ptr) InterlockedIncrement(ptr) #elif defined(__GNUC__) && ((__GNUC__*10 + __GNUC_MINOR__) < 41) #define _msgpack_atomic_counter_header "gcc_atomic.h" #else typedef unsigned int _msgpack_atomic_counter_t; #define _msgpack_sync_decr_and_fetch(ptr) __sync_sub_and_fetch(ptr, 1) #define _msgpack_sync_incr_and_fetch(ptr) __sync_add_and_fetch(ptr, 1) #endif #ifdef _WIN32 #ifdef __cplusplus /* numeric_limits::min,max */ #ifdef max #undef max #endif #ifdef min #undef min #endif #endif #else #include /* __BYTE_ORDER */ #endif #if !defined(__LITTLE_ENDIAN__) && !defined(__BIG_ENDIAN__) #if __BYTE_ORDER == __LITTLE_ENDIAN #define __LITTLE_ENDIAN__ #elif __BYTE_ORDER == __BIG_ENDIAN #define __BIG_ENDIAN__ #elif _WIN32 #define __LITTLE_ENDIAN__ #endif #endif #ifdef __LITTLE_ENDIAN__ #ifdef _WIN32 # if defined(ntohs) # define _msgpack_be16(x) ntohs(x) # elif defined(_byteswap_ushort) || (defined(_MSC_VER) && _MSC_VER >= 1400) # define _msgpack_be16(x) ((uint16_t)_byteswap_ushort((unsigned short)x)) # else # define _msgpack_be16(x) ( \ ((((uint16_t)x) << 8) ) | \ ((((uint16_t)x) >> 8) ) ) # endif #else # define _msgpack_be16(x) ntohs(x) #endif #ifdef _WIN32 # if defined(ntohl) # 
define _msgpack_be32(x) ntohl(x) # elif defined(_byteswap_ulong) || (defined(_MSC_VER) && _MSC_VER >= 1400) # define _msgpack_be32(x) ((uint32_t)_byteswap_ulong((unsigned long)x)) # else # define _msgpack_be32(x) \ ( ((((uint32_t)x) << 24) ) | \ ((((uint32_t)x) << 8) & 0x00ff0000U ) | \ ((((uint32_t)x) >> 8) & 0x0000ff00U ) | \ ((((uint32_t)x) >> 24) ) ) # endif #else # define _msgpack_be32(x) ntohl(x) #endif #if defined(_byteswap_uint64) || (defined(_MSC_VER) && _MSC_VER >= 1400) # define _msgpack_be64(x) (_byteswap_uint64(x)) #elif defined(bswap_64) # define _msgpack_be64(x) bswap_64(x) #elif defined(__DARWIN_OSSwapInt64) # define _msgpack_be64(x) __DARWIN_OSSwapInt64(x) #else #define _msgpack_be64(x) \ ( ((((uint64_t)x) << 56) ) | \ ((((uint64_t)x) << 40) & 0x00ff000000000000ULL ) | \ ((((uint64_t)x) << 24) & 0x0000ff0000000000ULL ) | \ ((((uint64_t)x) << 8) & 0x000000ff00000000ULL ) | \ ((((uint64_t)x) >> 8) & 0x00000000ff000000ULL ) | \ ((((uint64_t)x) >> 24) & 0x0000000000ff0000ULL ) | \ ((((uint64_t)x) >> 40) & 0x000000000000ff00ULL ) | \ ((((uint64_t)x) >> 56) ) ) #endif #define _msgpack_load16(cast, from) ((cast)( \ (((uint16_t)((uint8_t*)(from))[0]) << 8) | \ (((uint16_t)((uint8_t*)(from))[1]) ) )) #define _msgpack_load32(cast, from) ((cast)( \ (((uint32_t)((uint8_t*)(from))[0]) << 24) | \ (((uint32_t)((uint8_t*)(from))[1]) << 16) | \ (((uint32_t)((uint8_t*)(from))[2]) << 8) | \ (((uint32_t)((uint8_t*)(from))[3]) ) )) #define _msgpack_load64(cast, from) ((cast)( \ (((uint64_t)((uint8_t*)(from))[0]) << 56) | \ (((uint64_t)((uint8_t*)(from))[1]) << 48) | \ (((uint64_t)((uint8_t*)(from))[2]) << 40) | \ (((uint64_t)((uint8_t*)(from))[3]) << 32) | \ (((uint64_t)((uint8_t*)(from))[4]) << 24) | \ (((uint64_t)((uint8_t*)(from))[5]) << 16) | \ (((uint64_t)((uint8_t*)(from))[6]) << 8) | \ (((uint64_t)((uint8_t*)(from))[7]) ) )) #else #define _msgpack_be16(x) (x) #define _msgpack_be32(x) (x) #define _msgpack_be64(x) (x) #define _msgpack_load16(cast, from) ((cast)( \ (((uint16_t)((uint8_t*)from)[0]) << 8) | \ (((uint16_t)((uint8_t*)from)[1]) ) )) #define _msgpack_load32(cast, from) ((cast)( \ (((uint32_t)((uint8_t*)from)[0]) << 24) | \ (((uint32_t)((uint8_t*)from)[1]) << 16) | \ (((uint32_t)((uint8_t*)from)[2]) << 8) | \ (((uint32_t)((uint8_t*)from)[3]) ) )) #define _msgpack_load64(cast, from) ((cast)( \ (((uint64_t)((uint8_t*)from)[0]) << 56) | \ (((uint64_t)((uint8_t*)from)[1]) << 48) | \ (((uint64_t)((uint8_t*)from)[2]) << 40) | \ (((uint64_t)((uint8_t*)from)[3]) << 32) | \ (((uint64_t)((uint8_t*)from)[4]) << 24) | \ (((uint64_t)((uint8_t*)from)[5]) << 16) | \ (((uint64_t)((uint8_t*)from)[6]) << 8) | \ (((uint64_t)((uint8_t*)from)[7]) ) )) #endif #define _msgpack_store16(to, num) \ do { uint16_t val = _msgpack_be16(num); memcpy(to, &val, 2); } while(0) #define _msgpack_store32(to, num) \ do { uint32_t val = _msgpack_be32(num); memcpy(to, &val, 4); } while(0) #define _msgpack_store64(to, num) \ do { uint64_t val = _msgpack_be64(num); memcpy(to, &val, 8); } while(0) /* #define _msgpack_load16(cast, from) \ ({ cast val; memcpy(&val, (char*)from, 2); _msgpack_be16(val); }) #define _msgpack_load32(cast, from) \ ({ cast val; memcpy(&val, (char*)from, 4); _msgpack_be32(val); }) #define _msgpack_load64(cast, from) \ ({ cast val; memcpy(&val, (char*)from, 8); _msgpack_be64(val); }) */ #endif /* msgpack/sysdep.h */ borgbackup-1.1.15/src/borg/cache_sync/unpack_template.h0000644000175000017500000003006213771325506023052 0ustar useruser00000000000000/* * MessagePack unpacking routine template * * 
Copyright (C) 2008-2010 FURUHASHI Sadayuki * Copyright (c) 2017 Marian Beermann * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * * * This has been slightly adapted from the vanilla msgpack-{c, python} version. * Since cache_sync does not intend to build an output data structure, * msgpack_unpack_object and all of its uses was removed. */ #ifndef USE_CASE_RANGE #if !defined(_MSC_VER) #define USE_CASE_RANGE #endif #endif typedef struct unpack_stack { size_t size; size_t count; unsigned int ct; } unpack_stack; struct unpack_context { unpack_user user; unsigned int cs; unsigned int trail; unsigned int top; unpack_stack stack[MSGPACK_EMBED_STACK_SIZE]; }; static inline void unpack_init(unpack_context* ctx) { ctx->cs = CS_HEADER; ctx->trail = 0; ctx->top = 0; unpack_init_user_state(&ctx->user); } #define construct 1 static inline int unpack_execute(unpack_context* ctx, const char* data, size_t len, size_t* off) { const unsigned char* p = (unsigned char*)data + *off; const unsigned char* const pe = (unsigned char*)data + len; const void* n = NULL; unsigned int trail = ctx->trail; unsigned int cs = ctx->cs; unsigned int top = ctx->top; unpack_stack* stack = ctx->stack; unpack_user* user = &ctx->user; unpack_stack* c = NULL; int ret; assert(len >= *off); #define construct_cb(name) \ construct && unpack_callback ## name #define push_simple_value(func) \ if(construct_cb(func)(user) < 0) { goto _failed; } \ goto _push #define push_fixed_value(func, arg) \ if(construct_cb(func)(user, arg) < 0) { goto _failed; } \ goto _push #define push_variable_value(func, base, pos, len) \ if(construct_cb(func)(user, \ (const char*)base, (const char*)pos, len) < 0) { goto _failed; } \ goto _push #define again_fixed_trail(_cs, trail_len) \ trail = trail_len; \ cs = _cs; \ goto _fixed_trail_again #define again_fixed_trail_if_zero(_cs, trail_len, ifzero) \ trail = trail_len; \ if(trail == 0) { goto ifzero; } \ cs = _cs; \ goto _fixed_trail_again #define start_container(func, count_, ct_) \ if(top >= MSGPACK_EMBED_STACK_SIZE) { goto _failed; } /* FIXME */ \ if(construct_cb(func)(user, count_) < 0) { goto _failed; } \ if((count_) == 0) { \ if (construct_cb(func##_end)(user) < 0) { goto _failed; } \ goto _push; } \ stack[top].ct = ct_; \ stack[top].size = count_; \ stack[top].count = 0; \ ++top; \ goto _header_again #define NEXT_CS(p) ((unsigned int)*p & 0x1f) #ifdef USE_CASE_RANGE #define SWITCH_RANGE_BEGIN switch(*p) { #define SWITCH_RANGE(FROM, TO) case FROM ... 
TO: #define SWITCH_RANGE_DEFAULT default: #define SWITCH_RANGE_END } #else #define SWITCH_RANGE_BEGIN { if(0) { #define SWITCH_RANGE(FROM, TO) } else if(FROM <= *p && *p <= TO) { #define SWITCH_RANGE_DEFAULT } else { #define SWITCH_RANGE_END } } #endif if(p == pe) { goto _out; } do { switch(cs) { case CS_HEADER: SWITCH_RANGE_BEGIN SWITCH_RANGE(0x00, 0x7f) // Positive Fixnum push_fixed_value(_uint8, *(uint8_t*)p); SWITCH_RANGE(0xe0, 0xff) // Negative Fixnum push_fixed_value(_int8, *(int8_t*)p); SWITCH_RANGE(0xc0, 0xdf) // Variable switch(*p) { case 0xc0: // nil push_simple_value(_nil); //case 0xc1: // never used case 0xc2: // false push_simple_value(_false); case 0xc3: // true push_simple_value(_true); case 0xc4: // bin 8 again_fixed_trail(NEXT_CS(p), 1); case 0xc5: // bin 16 again_fixed_trail(NEXT_CS(p), 2); case 0xc6: // bin 32 again_fixed_trail(NEXT_CS(p), 4); case 0xc7: // ext 8 again_fixed_trail(NEXT_CS(p), 1); case 0xc8: // ext 16 again_fixed_trail(NEXT_CS(p), 2); case 0xc9: // ext 32 again_fixed_trail(NEXT_CS(p), 4); case 0xca: // float case 0xcb: // double case 0xcc: // unsigned int 8 case 0xcd: // unsigned int 16 case 0xce: // unsigned int 32 case 0xcf: // unsigned int 64 case 0xd0: // signed int 8 case 0xd1: // signed int 16 case 0xd2: // signed int 32 case 0xd3: // signed int 64 again_fixed_trail(NEXT_CS(p), 1 << (((unsigned int)*p) & 0x03)); case 0xd4: // fixext 1 case 0xd5: // fixext 2 case 0xd6: // fixext 4 case 0xd7: // fixext 8 again_fixed_trail_if_zero(ACS_EXT_VALUE, (1 << (((unsigned int)*p) & 0x03))+1, _ext_zero); case 0xd8: // fixext 16 again_fixed_trail_if_zero(ACS_EXT_VALUE, 16+1, _ext_zero); case 0xd9: // str 8 again_fixed_trail(NEXT_CS(p), 1); case 0xda: // raw 16 case 0xdb: // raw 32 case 0xdc: // array 16 case 0xdd: // array 32 case 0xde: // map 16 case 0xdf: // map 32 again_fixed_trail(NEXT_CS(p), 2 << (((unsigned int)*p) & 0x01)); default: goto _failed; } SWITCH_RANGE(0xa0, 0xbf) // FixRaw again_fixed_trail_if_zero(ACS_RAW_VALUE, ((unsigned int)*p & 0x1f), _raw_zero); SWITCH_RANGE(0x90, 0x9f) // FixArray start_container(_array, ((unsigned int)*p) & 0x0f, CT_ARRAY_ITEM); SWITCH_RANGE(0x80, 0x8f) // FixMap start_container(_map, ((unsigned int)*p) & 0x0f, CT_MAP_KEY); SWITCH_RANGE_DEFAULT goto _failed; SWITCH_RANGE_END // end CS_HEADER _fixed_trail_again: ++p; default: if((size_t)(pe - p) < trail) { goto _out; } n = p; p += trail - 1; switch(cs) { case CS_EXT_8: again_fixed_trail_if_zero(ACS_EXT_VALUE, *(uint8_t*)n+1, _ext_zero); case CS_EXT_16: again_fixed_trail_if_zero(ACS_EXT_VALUE, _msgpack_load16(uint16_t,n)+1, _ext_zero); case CS_EXT_32: again_fixed_trail_if_zero(ACS_EXT_VALUE, _msgpack_load32(uint32_t,n)+1, _ext_zero); case CS_FLOAT: { union { uint32_t i; float f; } mem; mem.i = _msgpack_load32(uint32_t,n); push_fixed_value(_float, mem.f); } case CS_DOUBLE: { union { uint64_t i; double f; } mem; mem.i = _msgpack_load64(uint64_t,n); #if defined(__arm__) && !(__ARM_EABI__) // arm-oabi // https://github.com/msgpack/msgpack-perl/pull/1 mem.i = (mem.i & 0xFFFFFFFFUL) << 32UL | (mem.i >> 32UL); #endif push_fixed_value(_double, mem.f); } case CS_UINT_8: push_fixed_value(_uint8, *(uint8_t*)n); case CS_UINT_16: push_fixed_value(_uint16, _msgpack_load16(uint16_t,n)); case CS_UINT_32: push_fixed_value(_uint32, _msgpack_load32(uint32_t,n)); case CS_UINT_64: push_fixed_value(_uint64, _msgpack_load64(uint64_t,n)); case CS_INT_8: push_fixed_value(_int8, *(int8_t*)n); case CS_INT_16: push_fixed_value(_int16, _msgpack_load16(int16_t,n)); case CS_INT_32: 
push_fixed_value(_int32, _msgpack_load32(int32_t,n)); case CS_INT_64: push_fixed_value(_int64, _msgpack_load64(int64_t,n)); case CS_BIN_8: again_fixed_trail_if_zero(ACS_BIN_VALUE, *(uint8_t*)n, _bin_zero); case CS_BIN_16: again_fixed_trail_if_zero(ACS_BIN_VALUE, _msgpack_load16(uint16_t,n), _bin_zero); case CS_BIN_32: again_fixed_trail_if_zero(ACS_BIN_VALUE, _msgpack_load32(uint32_t,n), _bin_zero); case ACS_BIN_VALUE: _bin_zero: push_variable_value(_bin, data, n, trail); case CS_RAW_8: again_fixed_trail_if_zero(ACS_RAW_VALUE, *(uint8_t*)n, _raw_zero); case CS_RAW_16: again_fixed_trail_if_zero(ACS_RAW_VALUE, _msgpack_load16(uint16_t,n), _raw_zero); case CS_RAW_32: again_fixed_trail_if_zero(ACS_RAW_VALUE, _msgpack_load32(uint32_t,n), _raw_zero); case ACS_RAW_VALUE: _raw_zero: push_variable_value(_raw, data, n, trail); case ACS_EXT_VALUE: _ext_zero: push_variable_value(_ext, data, n, trail); case CS_ARRAY_16: start_container(_array, _msgpack_load16(uint16_t,n), CT_ARRAY_ITEM); case CS_ARRAY_32: /* FIXME security guard */ start_container(_array, _msgpack_load32(uint32_t,n), CT_ARRAY_ITEM); case CS_MAP_16: start_container(_map, _msgpack_load16(uint16_t,n), CT_MAP_KEY); case CS_MAP_32: /* FIXME security guard */ start_container(_map, _msgpack_load32(uint32_t,n), CT_MAP_KEY); default: goto _failed; } } _push: if(top == 0) { goto _finish; } c = &stack[top-1]; switch(c->ct) { case CT_ARRAY_ITEM: if(construct_cb(_array_item)(user, c->count) < 0) { goto _failed; } if(++c->count == c->size) { if (construct_cb(_array_end)(user) < 0) { goto _failed; } --top; /*printf("stack pop %d\n", top);*/ goto _push; } goto _header_again; case CT_MAP_KEY: c->ct = CT_MAP_VALUE; goto _header_again; case CT_MAP_VALUE: if(construct_cb(_map_item)(user, c->count) < 0) { goto _failed; } if(++c->count == c->size) { if (construct_cb(_map_end)(user) < 0) { goto _failed; } --top; /*printf("stack pop %d\n", top);*/ goto _push; } c->ct = CT_MAP_KEY; goto _header_again; default: goto _failed; } _header_again: cs = CS_HEADER; ++p; } while(p != pe); goto _out; _finish: if (!construct) unpack_callback_nil(user); ++p; ret = 1; /* printf("-- finish --\n"); */ goto _end; _failed: /* printf("** FAILED **\n"); */ ret = -1; goto _end; _out: ret = 0; goto _end; _end: ctx->cs = cs; ctx->trail = trail; ctx->top = top; *off = p - (const unsigned char*)data; return ret; #undef construct_cb } #undef SWITCH_RANGE_BEGIN #undef SWITCH_RANGE #undef SWITCH_RANGE_DEFAULT #undef SWITCH_RANGE_END #undef push_simple_value #undef push_fixed_value #undef push_variable_value #undef again_fixed_trail #undef again_fixed_trail_if_zero #undef start_container #undef construct #undef NEXT_CS /* vim: set ts=4 sw=4 sts=4 expandtab */ borgbackup-1.1.15/src/borg/cache_sync/unpack_define.h0000644000175000017500000000446713771325506022503 0ustar useruser00000000000000/* * MessagePack unpacking routine template * * Copyright (C) 2008-2010 FURUHASHI Sadayuki * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ #ifndef MSGPACK_UNPACK_DEFINE_H__ #define MSGPACK_UNPACK_DEFINE_H__ #include "sysdep.h" #include #include #include #include #ifdef __cplusplus extern "C" { #endif #ifndef MSGPACK_EMBED_STACK_SIZE #define MSGPACK_EMBED_STACK_SIZE 32 #endif // CS is first byte & 0x1f typedef enum { CS_HEADER = 0x00, // nil //CS_ = 0x01, //CS_ = 0x02, // false //CS_ = 0x03, // true CS_BIN_8 = 0x04, CS_BIN_16 = 0x05, CS_BIN_32 = 0x06, CS_EXT_8 = 0x07, CS_EXT_16 = 0x08, CS_EXT_32 = 0x09, CS_FLOAT = 0x0a, CS_DOUBLE = 0x0b, CS_UINT_8 = 0x0c, CS_UINT_16 = 0x0d, CS_UINT_32 = 0x0e, CS_UINT_64 = 0x0f, CS_INT_8 = 0x10, CS_INT_16 = 0x11, CS_INT_32 = 0x12, CS_INT_64 = 0x13, //CS_FIXEXT1 = 0x14, //CS_FIXEXT2 = 0x15, //CS_FIXEXT4 = 0x16, //CS_FIXEXT8 = 0x17, //CS_FIXEXT16 = 0x18, CS_RAW_8 = 0x19, CS_RAW_16 = 0x1a, CS_RAW_32 = 0x1b, CS_ARRAY_16 = 0x1c, CS_ARRAY_32 = 0x1d, CS_MAP_16 = 0x1e, CS_MAP_32 = 0x1f, ACS_RAW_VALUE, ACS_BIN_VALUE, ACS_EXT_VALUE, } msgpack_unpack_state; typedef enum { CT_ARRAY_ITEM, CT_MAP_KEY, CT_MAP_VALUE, } msgpack_container_type; #ifdef __cplusplus } #endif #endif /* msgpack/unpack_define.h */ borgbackup-1.1.15/src/borg/hashindex.c0000644000175000017500000210734113771325772017561 0ustar useruser00000000000000/* Generated by Cython 0.29.21 */ #define PY_SSIZE_T_CLEAN #include "Python.h" #ifndef Py_PYTHON_H #error Python headers needed to compile C extensions, please install development version of Python. #elif PY_VERSION_HEX < 0x02060000 || (0x03000000 <= PY_VERSION_HEX && PY_VERSION_HEX < 0x03030000) #error Cython requires Python 2.6+ or Python 3.3+. #else #define CYTHON_ABI "0_29_21" #define CYTHON_HEX_VERSION 0x001D15F0 #define CYTHON_FUTURE_DIVISION 1 #include #ifndef offsetof #define offsetof(type, member) ( (size_t) & ((type*)0) -> member ) #endif #if !defined(WIN32) && !defined(MS_WINDOWS) #ifndef __stdcall #define __stdcall #endif #ifndef __cdecl #define __cdecl #endif #ifndef __fastcall #define __fastcall #endif #endif #ifndef DL_IMPORT #define DL_IMPORT(t) t #endif #ifndef DL_EXPORT #define DL_EXPORT(t) t #endif #define __PYX_COMMA , #ifndef HAVE_LONG_LONG #if PY_VERSION_HEX >= 0x02070000 #define HAVE_LONG_LONG #endif #endif #ifndef PY_LONG_LONG #define PY_LONG_LONG LONG_LONG #endif #ifndef Py_HUGE_VAL #define Py_HUGE_VAL HUGE_VAL #endif #ifdef PYPY_VERSION #define CYTHON_COMPILING_IN_PYPY 1 #define CYTHON_COMPILING_IN_PYSTON 0 #define CYTHON_COMPILING_IN_CPYTHON 0 #undef CYTHON_USE_TYPE_SLOTS #define CYTHON_USE_TYPE_SLOTS 0 #undef CYTHON_USE_PYTYPE_LOOKUP #define CYTHON_USE_PYTYPE_LOOKUP 0 #if PY_VERSION_HEX < 0x03050000 #undef CYTHON_USE_ASYNC_SLOTS #define CYTHON_USE_ASYNC_SLOTS 0 #elif !defined(CYTHON_USE_ASYNC_SLOTS) #define CYTHON_USE_ASYNC_SLOTS 1 #endif #undef CYTHON_USE_PYLIST_INTERNALS #define CYTHON_USE_PYLIST_INTERNALS 0 #undef CYTHON_USE_UNICODE_INTERNALS #define CYTHON_USE_UNICODE_INTERNALS 0 #undef CYTHON_USE_UNICODE_WRITER #define CYTHON_USE_UNICODE_WRITER 0 #undef CYTHON_USE_PYLONG_INTERNALS #define CYTHON_USE_PYLONG_INTERNALS 0 #undef CYTHON_AVOID_BORROWED_REFS #define CYTHON_AVOID_BORROWED_REFS 1 #undef CYTHON_ASSUME_SAFE_MACROS #define CYTHON_ASSUME_SAFE_MACROS 0 #undef CYTHON_UNPACK_METHODS #define CYTHON_UNPACK_METHODS 0 #undef CYTHON_FAST_THREAD_STATE #define CYTHON_FAST_THREAD_STATE 0 #undef CYTHON_FAST_PYCALL #define CYTHON_FAST_PYCALL 0 #undef CYTHON_PEP489_MULTI_PHASE_INIT #define CYTHON_PEP489_MULTI_PHASE_INIT 0 #undef CYTHON_USE_TP_FINALIZE #define CYTHON_USE_TP_FINALIZE 0 #undef CYTHON_USE_DICT_VERSIONS #define CYTHON_USE_DICT_VERSIONS 0 #undef 
CYTHON_USE_EXC_INFO_STACK #define CYTHON_USE_EXC_INFO_STACK 0 #elif defined(PYSTON_VERSION) #define CYTHON_COMPILING_IN_PYPY 0 #define CYTHON_COMPILING_IN_PYSTON 1 #define CYTHON_COMPILING_IN_CPYTHON 0 #ifndef CYTHON_USE_TYPE_SLOTS #define CYTHON_USE_TYPE_SLOTS 1 #endif #undef CYTHON_USE_PYTYPE_LOOKUP #define CYTHON_USE_PYTYPE_LOOKUP 0 #undef CYTHON_USE_ASYNC_SLOTS #define CYTHON_USE_ASYNC_SLOTS 0 #undef CYTHON_USE_PYLIST_INTERNALS #define CYTHON_USE_PYLIST_INTERNALS 0 #ifndef CYTHON_USE_UNICODE_INTERNALS #define CYTHON_USE_UNICODE_INTERNALS 1 #endif #undef CYTHON_USE_UNICODE_WRITER #define CYTHON_USE_UNICODE_WRITER 0 #undef CYTHON_USE_PYLONG_INTERNALS #define CYTHON_USE_PYLONG_INTERNALS 0 #ifndef CYTHON_AVOID_BORROWED_REFS #define CYTHON_AVOID_BORROWED_REFS 0 #endif #ifndef CYTHON_ASSUME_SAFE_MACROS #define CYTHON_ASSUME_SAFE_MACROS 1 #endif #ifndef CYTHON_UNPACK_METHODS #define CYTHON_UNPACK_METHODS 1 #endif #undef CYTHON_FAST_THREAD_STATE #define CYTHON_FAST_THREAD_STATE 0 #undef CYTHON_FAST_PYCALL #define CYTHON_FAST_PYCALL 0 #undef CYTHON_PEP489_MULTI_PHASE_INIT #define CYTHON_PEP489_MULTI_PHASE_INIT 0 #undef CYTHON_USE_TP_FINALIZE #define CYTHON_USE_TP_FINALIZE 0 #undef CYTHON_USE_DICT_VERSIONS #define CYTHON_USE_DICT_VERSIONS 0 #undef CYTHON_USE_EXC_INFO_STACK #define CYTHON_USE_EXC_INFO_STACK 0 #else #define CYTHON_COMPILING_IN_PYPY 0 #define CYTHON_COMPILING_IN_PYSTON 0 #define CYTHON_COMPILING_IN_CPYTHON 1 #ifndef CYTHON_USE_TYPE_SLOTS #define CYTHON_USE_TYPE_SLOTS 1 #endif #if PY_VERSION_HEX < 0x02070000 #undef CYTHON_USE_PYTYPE_LOOKUP #define CYTHON_USE_PYTYPE_LOOKUP 0 #elif !defined(CYTHON_USE_PYTYPE_LOOKUP) #define CYTHON_USE_PYTYPE_LOOKUP 1 #endif #if PY_MAJOR_VERSION < 3 #undef CYTHON_USE_ASYNC_SLOTS #define CYTHON_USE_ASYNC_SLOTS 0 #elif !defined(CYTHON_USE_ASYNC_SLOTS) #define CYTHON_USE_ASYNC_SLOTS 1 #endif #if PY_VERSION_HEX < 0x02070000 #undef CYTHON_USE_PYLONG_INTERNALS #define CYTHON_USE_PYLONG_INTERNALS 0 #elif !defined(CYTHON_USE_PYLONG_INTERNALS) #define CYTHON_USE_PYLONG_INTERNALS 1 #endif #ifndef CYTHON_USE_PYLIST_INTERNALS #define CYTHON_USE_PYLIST_INTERNALS 1 #endif #ifndef CYTHON_USE_UNICODE_INTERNALS #define CYTHON_USE_UNICODE_INTERNALS 1 #endif #if PY_VERSION_HEX < 0x030300F0 #undef CYTHON_USE_UNICODE_WRITER #define CYTHON_USE_UNICODE_WRITER 0 #elif !defined(CYTHON_USE_UNICODE_WRITER) #define CYTHON_USE_UNICODE_WRITER 1 #endif #ifndef CYTHON_AVOID_BORROWED_REFS #define CYTHON_AVOID_BORROWED_REFS 0 #endif #ifndef CYTHON_ASSUME_SAFE_MACROS #define CYTHON_ASSUME_SAFE_MACROS 1 #endif #ifndef CYTHON_UNPACK_METHODS #define CYTHON_UNPACK_METHODS 1 #endif #ifndef CYTHON_FAST_THREAD_STATE #define CYTHON_FAST_THREAD_STATE 1 #endif #ifndef CYTHON_FAST_PYCALL #define CYTHON_FAST_PYCALL 1 #endif #ifndef CYTHON_PEP489_MULTI_PHASE_INIT #define CYTHON_PEP489_MULTI_PHASE_INIT (PY_VERSION_HEX >= 0x03050000) #endif #ifndef CYTHON_USE_TP_FINALIZE #define CYTHON_USE_TP_FINALIZE (PY_VERSION_HEX >= 0x030400a1) #endif #ifndef CYTHON_USE_DICT_VERSIONS #define CYTHON_USE_DICT_VERSIONS (PY_VERSION_HEX >= 0x030600B1) #endif #ifndef CYTHON_USE_EXC_INFO_STACK #define CYTHON_USE_EXC_INFO_STACK (PY_VERSION_HEX >= 0x030700A3) #endif #endif #if !defined(CYTHON_FAST_PYCCALL) #define CYTHON_FAST_PYCCALL (CYTHON_FAST_PYCALL && PY_VERSION_HEX >= 0x030600B1) #endif #if CYTHON_USE_PYLONG_INTERNALS #include "longintrepr.h" #undef SHIFT #undef BASE #undef MASK #ifdef SIZEOF_VOID_P enum { __pyx_check_sizeof_voidp = 1 / (int)(SIZEOF_VOID_P == sizeof(void*)) }; #endif #endif #ifndef __has_attribute 
#define __has_attribute(x) 0 #endif #ifndef __has_cpp_attribute #define __has_cpp_attribute(x) 0 #endif #ifndef CYTHON_RESTRICT #if defined(__GNUC__) #define CYTHON_RESTRICT __restrict__ #elif defined(_MSC_VER) && _MSC_VER >= 1400 #define CYTHON_RESTRICT __restrict #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L #define CYTHON_RESTRICT restrict #else #define CYTHON_RESTRICT #endif #endif #ifndef CYTHON_UNUSED # if defined(__GNUC__) # if !(defined(__cplusplus)) || (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4)) # define CYTHON_UNUSED __attribute__ ((__unused__)) # else # define CYTHON_UNUSED # endif # elif defined(__ICC) || (defined(__INTEL_COMPILER) && !defined(_MSC_VER)) # define CYTHON_UNUSED __attribute__ ((__unused__)) # else # define CYTHON_UNUSED # endif #endif #ifndef CYTHON_MAYBE_UNUSED_VAR # if defined(__cplusplus) template void CYTHON_MAYBE_UNUSED_VAR( const T& ) { } # else # define CYTHON_MAYBE_UNUSED_VAR(x) (void)(x) # endif #endif #ifndef CYTHON_NCP_UNUSED # if CYTHON_COMPILING_IN_CPYTHON # define CYTHON_NCP_UNUSED # else # define CYTHON_NCP_UNUSED CYTHON_UNUSED # endif #endif #define __Pyx_void_to_None(void_result) ((void)(void_result), Py_INCREF(Py_None), Py_None) #ifdef _MSC_VER #ifndef _MSC_STDINT_H_ #if _MSC_VER < 1300 typedef unsigned char uint8_t; typedef unsigned int uint32_t; #else typedef unsigned __int8 uint8_t; typedef unsigned __int32 uint32_t; #endif #endif #else #include #endif #ifndef CYTHON_FALLTHROUGH #if defined(__cplusplus) && __cplusplus >= 201103L #if __has_cpp_attribute(fallthrough) #define CYTHON_FALLTHROUGH [[fallthrough]] #elif __has_cpp_attribute(clang::fallthrough) #define CYTHON_FALLTHROUGH [[clang::fallthrough]] #elif __has_cpp_attribute(gnu::fallthrough) #define CYTHON_FALLTHROUGH [[gnu::fallthrough]] #endif #endif #ifndef CYTHON_FALLTHROUGH #if __has_attribute(fallthrough) #define CYTHON_FALLTHROUGH __attribute__((fallthrough)) #else #define CYTHON_FALLTHROUGH #endif #endif #if defined(__clang__ ) && defined(__apple_build_version__) #if __apple_build_version__ < 7000000 #undef CYTHON_FALLTHROUGH #define CYTHON_FALLTHROUGH #endif #endif #endif #ifndef CYTHON_INLINE #if defined(__clang__) #define CYTHON_INLINE __inline__ __attribute__ ((__unused__)) #elif defined(__GNUC__) #define CYTHON_INLINE __inline__ #elif defined(_MSC_VER) #define CYTHON_INLINE __inline #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L #define CYTHON_INLINE inline #else #define CYTHON_INLINE #endif #endif #if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX < 0x02070600 && !defined(Py_OptimizeFlag) #define Py_OptimizeFlag 0 #endif #define __PYX_BUILD_PY_SSIZE_T "n" #define CYTHON_FORMAT_SSIZE_T "z" #if PY_MAJOR_VERSION < 3 #define __Pyx_BUILTIN_MODULE_NAME "__builtin__" #define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\ PyCode_New(a+k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) #define __Pyx_DefaultClassType PyClass_Type #else #define __Pyx_BUILTIN_MODULE_NAME "builtins" #if PY_VERSION_HEX >= 0x030800A4 && PY_VERSION_HEX < 0x030800B2 #define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\ PyCode_New(a, 0, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) #else #define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\ PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) #endif #define __Pyx_DefaultClassType PyType_Type #endif #ifndef Py_TPFLAGS_CHECKTYPES #define Py_TPFLAGS_CHECKTYPES 0 #endif 
#ifndef Py_TPFLAGS_HAVE_INDEX #define Py_TPFLAGS_HAVE_INDEX 0 #endif #ifndef Py_TPFLAGS_HAVE_NEWBUFFER #define Py_TPFLAGS_HAVE_NEWBUFFER 0 #endif #ifndef Py_TPFLAGS_HAVE_FINALIZE #define Py_TPFLAGS_HAVE_FINALIZE 0 #endif #ifndef METH_STACKLESS #define METH_STACKLESS 0 #endif #if PY_VERSION_HEX <= 0x030700A3 || !defined(METH_FASTCALL) #ifndef METH_FASTCALL #define METH_FASTCALL 0x80 #endif typedef PyObject *(*__Pyx_PyCFunctionFast) (PyObject *self, PyObject *const *args, Py_ssize_t nargs); typedef PyObject *(*__Pyx_PyCFunctionFastWithKeywords) (PyObject *self, PyObject *const *args, Py_ssize_t nargs, PyObject *kwnames); #else #define __Pyx_PyCFunctionFast _PyCFunctionFast #define __Pyx_PyCFunctionFastWithKeywords _PyCFunctionFastWithKeywords #endif #if CYTHON_FAST_PYCCALL #define __Pyx_PyFastCFunction_Check(func)\ ((PyCFunction_Check(func) && (METH_FASTCALL == (PyCFunction_GET_FLAGS(func) & ~(METH_CLASS | METH_STATIC | METH_COEXIST | METH_KEYWORDS | METH_STACKLESS))))) #else #define __Pyx_PyFastCFunction_Check(func) 0 #endif #if CYTHON_COMPILING_IN_PYPY && !defined(PyObject_Malloc) #define PyObject_Malloc(s) PyMem_Malloc(s) #define PyObject_Free(p) PyMem_Free(p) #define PyObject_Realloc(p) PyMem_Realloc(p) #endif #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX < 0x030400A1 #define PyMem_RawMalloc(n) PyMem_Malloc(n) #define PyMem_RawRealloc(p, n) PyMem_Realloc(p, n) #define PyMem_RawFree(p) PyMem_Free(p) #endif #if CYTHON_COMPILING_IN_PYSTON #define __Pyx_PyCode_HasFreeVars(co) PyCode_HasFreeVars(co) #define __Pyx_PyFrame_SetLineNumber(frame, lineno) PyFrame_SetLineNumber(frame, lineno) #else #define __Pyx_PyCode_HasFreeVars(co) (PyCode_GetNumFree(co) > 0) #define __Pyx_PyFrame_SetLineNumber(frame, lineno) (frame)->f_lineno = (lineno) #endif #if !CYTHON_FAST_THREAD_STATE || PY_VERSION_HEX < 0x02070000 #define __Pyx_PyThreadState_Current PyThreadState_GET() #elif PY_VERSION_HEX >= 0x03060000 #define __Pyx_PyThreadState_Current _PyThreadState_UncheckedGet() #elif PY_VERSION_HEX >= 0x03000000 #define __Pyx_PyThreadState_Current PyThreadState_GET() #else #define __Pyx_PyThreadState_Current _PyThreadState_Current #endif #if PY_VERSION_HEX < 0x030700A2 && !defined(PyThread_tss_create) && !defined(Py_tss_NEEDS_INIT) #include "pythread.h" #define Py_tss_NEEDS_INIT 0 typedef int Py_tss_t; static CYTHON_INLINE int PyThread_tss_create(Py_tss_t *key) { *key = PyThread_create_key(); return 0; } static CYTHON_INLINE Py_tss_t * PyThread_tss_alloc(void) { Py_tss_t *key = (Py_tss_t *)PyObject_Malloc(sizeof(Py_tss_t)); *key = Py_tss_NEEDS_INIT; return key; } static CYTHON_INLINE void PyThread_tss_free(Py_tss_t *key) { PyObject_Free(key); } static CYTHON_INLINE int PyThread_tss_is_created(Py_tss_t *key) { return *key != Py_tss_NEEDS_INIT; } static CYTHON_INLINE void PyThread_tss_delete(Py_tss_t *key) { PyThread_delete_key(*key); *key = Py_tss_NEEDS_INIT; } static CYTHON_INLINE int PyThread_tss_set(Py_tss_t *key, void *value) { return PyThread_set_key_value(*key, value); } static CYTHON_INLINE void * PyThread_tss_get(Py_tss_t *key) { return PyThread_get_key_value(*key); } #endif #if CYTHON_COMPILING_IN_CPYTHON || defined(_PyDict_NewPresized) #define __Pyx_PyDict_NewPresized(n) ((n <= 8) ? 
PyDict_New() : _PyDict_NewPresized(n)) #else #define __Pyx_PyDict_NewPresized(n) PyDict_New() #endif #if PY_MAJOR_VERSION >= 3 || CYTHON_FUTURE_DIVISION #define __Pyx_PyNumber_Divide(x,y) PyNumber_TrueDivide(x,y) #define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceTrueDivide(x,y) #else #define __Pyx_PyNumber_Divide(x,y) PyNumber_Divide(x,y) #define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceDivide(x,y) #endif #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030500A1 && CYTHON_USE_UNICODE_INTERNALS #define __Pyx_PyDict_GetItemStr(dict, name) _PyDict_GetItem_KnownHash(dict, name, ((PyASCIIObject *) name)->hash) #else #define __Pyx_PyDict_GetItemStr(dict, name) PyDict_GetItem(dict, name) #endif #if PY_VERSION_HEX > 0x03030000 && defined(PyUnicode_KIND) #define CYTHON_PEP393_ENABLED 1 #define __Pyx_PyUnicode_READY(op) (likely(PyUnicode_IS_READY(op)) ?\ 0 : _PyUnicode_Ready((PyObject *)(op))) #define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_LENGTH(u) #define __Pyx_PyUnicode_READ_CHAR(u, i) PyUnicode_READ_CHAR(u, i) #define __Pyx_PyUnicode_MAX_CHAR_VALUE(u) PyUnicode_MAX_CHAR_VALUE(u) #define __Pyx_PyUnicode_KIND(u) PyUnicode_KIND(u) #define __Pyx_PyUnicode_DATA(u) PyUnicode_DATA(u) #define __Pyx_PyUnicode_READ(k, d, i) PyUnicode_READ(k, d, i) #define __Pyx_PyUnicode_WRITE(k, d, i, ch) PyUnicode_WRITE(k, d, i, ch) #if defined(PyUnicode_IS_READY) && defined(PyUnicode_GET_SIZE) #define __Pyx_PyUnicode_IS_TRUE(u) (0 != (likely(PyUnicode_IS_READY(u)) ? PyUnicode_GET_LENGTH(u) : PyUnicode_GET_SIZE(u))) #else #define __Pyx_PyUnicode_IS_TRUE(u) (0 != PyUnicode_GET_LENGTH(u)) #endif #else #define CYTHON_PEP393_ENABLED 0 #define PyUnicode_1BYTE_KIND 1 #define PyUnicode_2BYTE_KIND 2 #define PyUnicode_4BYTE_KIND 4 #define __Pyx_PyUnicode_READY(op) (0) #define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_SIZE(u) #define __Pyx_PyUnicode_READ_CHAR(u, i) ((Py_UCS4)(PyUnicode_AS_UNICODE(u)[i])) #define __Pyx_PyUnicode_MAX_CHAR_VALUE(u) ((sizeof(Py_UNICODE) == 2) ? 65535 : 1114111) #define __Pyx_PyUnicode_KIND(u) (sizeof(Py_UNICODE)) #define __Pyx_PyUnicode_DATA(u) ((void*)PyUnicode_AS_UNICODE(u)) #define __Pyx_PyUnicode_READ(k, d, i) ((void)(k), (Py_UCS4)(((Py_UNICODE*)d)[i])) #define __Pyx_PyUnicode_WRITE(k, d, i, ch) (((void)(k)), ((Py_UNICODE*)d)[i] = ch) #define __Pyx_PyUnicode_IS_TRUE(u) (0 != PyUnicode_GET_SIZE(u)) #endif #if CYTHON_COMPILING_IN_PYPY #define __Pyx_PyUnicode_Concat(a, b) PyNumber_Add(a, b) #define __Pyx_PyUnicode_ConcatSafe(a, b) PyNumber_Add(a, b) #else #define __Pyx_PyUnicode_Concat(a, b) PyUnicode_Concat(a, b) #define __Pyx_PyUnicode_ConcatSafe(a, b) ((unlikely((a) == Py_None) || unlikely((b) == Py_None)) ?\ PyNumber_Add(a, b) : __Pyx_PyUnicode_Concat(a, b)) #endif #if CYTHON_COMPILING_IN_PYPY && !defined(PyUnicode_Contains) #define PyUnicode_Contains(u, s) PySequence_Contains(u, s) #endif #if CYTHON_COMPILING_IN_PYPY && !defined(PyByteArray_Check) #define PyByteArray_Check(obj) PyObject_TypeCheck(obj, &PyByteArray_Type) #endif #if CYTHON_COMPILING_IN_PYPY && !defined(PyObject_Format) #define PyObject_Format(obj, fmt) PyObject_CallMethod(obj, "__format__", "O", fmt) #endif #define __Pyx_PyString_FormatSafe(a, b) ((unlikely((a) == Py_None || (PyString_Check(b) && !PyString_CheckExact(b)))) ? PyNumber_Remainder(a, b) : __Pyx_PyString_Format(a, b)) #define __Pyx_PyUnicode_FormatSafe(a, b) ((unlikely((a) == Py_None || (PyUnicode_Check(b) && !PyUnicode_CheckExact(b)))) ? 
PyNumber_Remainder(a, b) : PyUnicode_Format(a, b)) #if PY_MAJOR_VERSION >= 3 #define __Pyx_PyString_Format(a, b) PyUnicode_Format(a, b) #else #define __Pyx_PyString_Format(a, b) PyString_Format(a, b) #endif #if PY_MAJOR_VERSION < 3 && !defined(PyObject_ASCII) #define PyObject_ASCII(o) PyObject_Repr(o) #endif #if PY_MAJOR_VERSION >= 3 #define PyBaseString_Type PyUnicode_Type #define PyStringObject PyUnicodeObject #define PyString_Type PyUnicode_Type #define PyString_Check PyUnicode_Check #define PyString_CheckExact PyUnicode_CheckExact #ifndef PyObject_Unicode #define PyObject_Unicode PyObject_Str #endif #endif #if PY_MAJOR_VERSION >= 3 #define __Pyx_PyBaseString_Check(obj) PyUnicode_Check(obj) #define __Pyx_PyBaseString_CheckExact(obj) PyUnicode_CheckExact(obj) #else #define __Pyx_PyBaseString_Check(obj) (PyString_Check(obj) || PyUnicode_Check(obj)) #define __Pyx_PyBaseString_CheckExact(obj) (PyString_CheckExact(obj) || PyUnicode_CheckExact(obj)) #endif #ifndef PySet_CheckExact #define PySet_CheckExact(obj) (Py_TYPE(obj) == &PySet_Type) #endif #if PY_VERSION_HEX >= 0x030900A4 #define __Pyx_SET_REFCNT(obj, refcnt) Py_SET_REFCNT(obj, refcnt) #define __Pyx_SET_SIZE(obj, size) Py_SET_SIZE(obj, size) #else #define __Pyx_SET_REFCNT(obj, refcnt) Py_REFCNT(obj) = (refcnt) #define __Pyx_SET_SIZE(obj, size) Py_SIZE(obj) = (size) #endif #if CYTHON_ASSUME_SAFE_MACROS #define __Pyx_PySequence_SIZE(seq) Py_SIZE(seq) #else #define __Pyx_PySequence_SIZE(seq) PySequence_Size(seq) #endif #if PY_MAJOR_VERSION >= 3 #define PyIntObject PyLongObject #define PyInt_Type PyLong_Type #define PyInt_Check(op) PyLong_Check(op) #define PyInt_CheckExact(op) PyLong_CheckExact(op) #define PyInt_FromString PyLong_FromString #define PyInt_FromUnicode PyLong_FromUnicode #define PyInt_FromLong PyLong_FromLong #define PyInt_FromSize_t PyLong_FromSize_t #define PyInt_FromSsize_t PyLong_FromSsize_t #define PyInt_AsLong PyLong_AsLong #define PyInt_AS_LONG PyLong_AS_LONG #define PyInt_AsSsize_t PyLong_AsSsize_t #define PyInt_AsUnsignedLongMask PyLong_AsUnsignedLongMask #define PyInt_AsUnsignedLongLongMask PyLong_AsUnsignedLongLongMask #define PyNumber_Int PyNumber_Long #endif #if PY_MAJOR_VERSION >= 3 #define PyBoolObject PyLongObject #endif #if PY_MAJOR_VERSION >= 3 && CYTHON_COMPILING_IN_PYPY #ifndef PyUnicode_InternFromString #define PyUnicode_InternFromString(s) PyUnicode_FromString(s) #endif #endif #if PY_VERSION_HEX < 0x030200A4 typedef long Py_hash_t; #define __Pyx_PyInt_FromHash_t PyInt_FromLong #define __Pyx_PyInt_AsHash_t PyInt_AsLong #else #define __Pyx_PyInt_FromHash_t PyInt_FromSsize_t #define __Pyx_PyInt_AsHash_t PyInt_AsSsize_t #endif #if PY_MAJOR_VERSION >= 3 #define __Pyx_PyMethod_New(func, self, klass) ((self) ? 
((void)(klass), PyMethod_New(func, self)) : __Pyx_NewRef(func)) #else #define __Pyx_PyMethod_New(func, self, klass) PyMethod_New(func, self, klass) #endif #if CYTHON_USE_ASYNC_SLOTS #if PY_VERSION_HEX >= 0x030500B1 #define __Pyx_PyAsyncMethodsStruct PyAsyncMethods #define __Pyx_PyType_AsAsync(obj) (Py_TYPE(obj)->tp_as_async) #else #define __Pyx_PyType_AsAsync(obj) ((__Pyx_PyAsyncMethodsStruct*) (Py_TYPE(obj)->tp_reserved)) #endif #else #define __Pyx_PyType_AsAsync(obj) NULL #endif #ifndef __Pyx_PyAsyncMethodsStruct typedef struct { unaryfunc am_await; unaryfunc am_aiter; unaryfunc am_anext; } __Pyx_PyAsyncMethodsStruct; #endif #if defined(WIN32) || defined(MS_WINDOWS) #define _USE_MATH_DEFINES #endif #include #ifdef NAN #define __PYX_NAN() ((float) NAN) #else static CYTHON_INLINE float __PYX_NAN() { float value; memset(&value, 0xFF, sizeof(value)); return value; } #endif #if defined(__CYGWIN__) && defined(_LDBL_EQ_DBL) #define __Pyx_truncl trunc #else #define __Pyx_truncl truncl #endif #define __PYX_MARK_ERR_POS(f_index, lineno) \ { __pyx_filename = __pyx_f[f_index]; (void)__pyx_filename; __pyx_lineno = lineno; (void)__pyx_lineno; __pyx_clineno = __LINE__; (void)__pyx_clineno; } #define __PYX_ERR(f_index, lineno, Ln_error) \ { __PYX_MARK_ERR_POS(f_index, lineno) goto Ln_error; } #ifndef __PYX_EXTERN_C #ifdef __cplusplus #define __PYX_EXTERN_C extern "C" #else #define __PYX_EXTERN_C extern #endif #endif #define __PYX_HAVE__borg__hashindex #define __PYX_HAVE_API__borg__hashindex /* Early includes */ #include #include #include #include #include "_hashindex.c" #include "cache_sync/cache_sync.c" #ifdef _OPENMP #include #endif /* _OPENMP */ #if defined(PYREX_WITHOUT_ASSERTIONS) && !defined(CYTHON_WITHOUT_ASSERTIONS) #define CYTHON_WITHOUT_ASSERTIONS #endif typedef struct {PyObject **p; const char *s; const Py_ssize_t n; const char* encoding; const char is_unicode; const char is_str; const char intern; } __Pyx_StringTabEntry; #define __PYX_DEFAULT_STRING_ENCODING_IS_ASCII 0 #define __PYX_DEFAULT_STRING_ENCODING_IS_UTF8 0 #define __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT (PY_MAJOR_VERSION >= 3 && __PYX_DEFAULT_STRING_ENCODING_IS_UTF8) #define __PYX_DEFAULT_STRING_ENCODING "" #define __Pyx_PyObject_FromString __Pyx_PyBytes_FromString #define __Pyx_PyObject_FromStringAndSize __Pyx_PyBytes_FromStringAndSize #define __Pyx_uchar_cast(c) ((unsigned char)c) #define __Pyx_long_cast(x) ((long)x) #define __Pyx_fits_Py_ssize_t(v, type, is_signed) (\ (sizeof(type) < sizeof(Py_ssize_t)) ||\ (sizeof(type) > sizeof(Py_ssize_t) &&\ likely(v < (type)PY_SSIZE_T_MAX ||\ v == (type)PY_SSIZE_T_MAX) &&\ (!is_signed || likely(v > (type)PY_SSIZE_T_MIN ||\ v == (type)PY_SSIZE_T_MIN))) ||\ (sizeof(type) == sizeof(Py_ssize_t) &&\ (is_signed || likely(v < (type)PY_SSIZE_T_MAX ||\ v == (type)PY_SSIZE_T_MAX))) ) static CYTHON_INLINE int __Pyx_is_valid_index(Py_ssize_t i, Py_ssize_t limit) { return (size_t) i < (size_t) limit; } #if defined (__cplusplus) && __cplusplus >= 201103L #include #define __Pyx_sst_abs(value) std::abs(value) #elif SIZEOF_INT >= SIZEOF_SIZE_T #define __Pyx_sst_abs(value) abs(value) #elif SIZEOF_LONG >= SIZEOF_SIZE_T #define __Pyx_sst_abs(value) labs(value) #elif defined (_MSC_VER) #define __Pyx_sst_abs(value) ((Py_ssize_t)_abs64(value)) #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L #define __Pyx_sst_abs(value) llabs(value) #elif defined (__GNUC__) #define __Pyx_sst_abs(value) __builtin_llabs(value) #else #define __Pyx_sst_abs(value) ((value<0) ? 
-value : value) #endif static CYTHON_INLINE const char* __Pyx_PyObject_AsString(PyObject*); static CYTHON_INLINE const char* __Pyx_PyObject_AsStringAndSize(PyObject*, Py_ssize_t* length); #define __Pyx_PyByteArray_FromString(s) PyByteArray_FromStringAndSize((const char*)s, strlen((const char*)s)) #define __Pyx_PyByteArray_FromStringAndSize(s, l) PyByteArray_FromStringAndSize((const char*)s, l) #define __Pyx_PyBytes_FromString PyBytes_FromString #define __Pyx_PyBytes_FromStringAndSize PyBytes_FromStringAndSize static CYTHON_INLINE PyObject* __Pyx_PyUnicode_FromString(const char*); #if PY_MAJOR_VERSION < 3 #define __Pyx_PyStr_FromString __Pyx_PyBytes_FromString #define __Pyx_PyStr_FromStringAndSize __Pyx_PyBytes_FromStringAndSize #else #define __Pyx_PyStr_FromString __Pyx_PyUnicode_FromString #define __Pyx_PyStr_FromStringAndSize __Pyx_PyUnicode_FromStringAndSize #endif #define __Pyx_PyBytes_AsWritableString(s) ((char*) PyBytes_AS_STRING(s)) #define __Pyx_PyBytes_AsWritableSString(s) ((signed char*) PyBytes_AS_STRING(s)) #define __Pyx_PyBytes_AsWritableUString(s) ((unsigned char*) PyBytes_AS_STRING(s)) #define __Pyx_PyBytes_AsString(s) ((const char*) PyBytes_AS_STRING(s)) #define __Pyx_PyBytes_AsSString(s) ((const signed char*) PyBytes_AS_STRING(s)) #define __Pyx_PyBytes_AsUString(s) ((const unsigned char*) PyBytes_AS_STRING(s)) #define __Pyx_PyObject_AsWritableString(s) ((char*) __Pyx_PyObject_AsString(s)) #define __Pyx_PyObject_AsWritableSString(s) ((signed char*) __Pyx_PyObject_AsString(s)) #define __Pyx_PyObject_AsWritableUString(s) ((unsigned char*) __Pyx_PyObject_AsString(s)) #define __Pyx_PyObject_AsSString(s) ((const signed char*) __Pyx_PyObject_AsString(s)) #define __Pyx_PyObject_AsUString(s) ((const unsigned char*) __Pyx_PyObject_AsString(s)) #define __Pyx_PyObject_FromCString(s) __Pyx_PyObject_FromString((const char*)s) #define __Pyx_PyBytes_FromCString(s) __Pyx_PyBytes_FromString((const char*)s) #define __Pyx_PyByteArray_FromCString(s) __Pyx_PyByteArray_FromString((const char*)s) #define __Pyx_PyStr_FromCString(s) __Pyx_PyStr_FromString((const char*)s) #define __Pyx_PyUnicode_FromCString(s) __Pyx_PyUnicode_FromString((const char*)s) static CYTHON_INLINE size_t __Pyx_Py_UNICODE_strlen(const Py_UNICODE *u) { const Py_UNICODE *u_end = u; while (*u_end++) ; return (size_t)(u_end - u - 1); } #define __Pyx_PyUnicode_FromUnicode(u) PyUnicode_FromUnicode(u, __Pyx_Py_UNICODE_strlen(u)) #define __Pyx_PyUnicode_FromUnicodeAndLength PyUnicode_FromUnicode #define __Pyx_PyUnicode_AsUnicode PyUnicode_AsUnicode #define __Pyx_NewRef(obj) (Py_INCREF(obj), obj) #define __Pyx_Owned_Py_None(b) __Pyx_NewRef(Py_None) static CYTHON_INLINE PyObject * __Pyx_PyBool_FromLong(long b); static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject*); static CYTHON_INLINE int __Pyx_PyObject_IsTrueAndDecref(PyObject*); static CYTHON_INLINE PyObject* __Pyx_PyNumber_IntOrLong(PyObject* x); #define __Pyx_PySequence_Tuple(obj)\ (likely(PyTuple_CheckExact(obj)) ? __Pyx_NewRef(obj) : PySequence_Tuple(obj)) static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject*); static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t); #if CYTHON_ASSUME_SAFE_MACROS #define __pyx_PyFloat_AsDouble(x) (PyFloat_CheckExact(x) ? PyFloat_AS_DOUBLE(x) : PyFloat_AsDouble(x)) #else #define __pyx_PyFloat_AsDouble(x) PyFloat_AsDouble(x) #endif #define __pyx_PyFloat_AsFloat(x) ((float) __pyx_PyFloat_AsDouble(x)) #if PY_MAJOR_VERSION >= 3 #define __Pyx_PyNumber_Int(x) (PyLong_CheckExact(x) ? 
__Pyx_NewRef(x) : PyNumber_Long(x)) #else #define __Pyx_PyNumber_Int(x) (PyInt_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Int(x)) #endif #define __Pyx_PyNumber_Float(x) (PyFloat_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Float(x)) #if PY_MAJOR_VERSION < 3 && __PYX_DEFAULT_STRING_ENCODING_IS_ASCII static int __Pyx_sys_getdefaultencoding_not_ascii; static int __Pyx_init_sys_getdefaultencoding_params(void) { PyObject* sys; PyObject* default_encoding = NULL; PyObject* ascii_chars_u = NULL; PyObject* ascii_chars_b = NULL; const char* default_encoding_c; sys = PyImport_ImportModule("sys"); if (!sys) goto bad; default_encoding = PyObject_CallMethod(sys, (char*) "getdefaultencoding", NULL); Py_DECREF(sys); if (!default_encoding) goto bad; default_encoding_c = PyBytes_AsString(default_encoding); if (!default_encoding_c) goto bad; if (strcmp(default_encoding_c, "ascii") == 0) { __Pyx_sys_getdefaultencoding_not_ascii = 0; } else { char ascii_chars[128]; int c; for (c = 0; c < 128; c++) { ascii_chars[c] = c; } __Pyx_sys_getdefaultencoding_not_ascii = 1; ascii_chars_u = PyUnicode_DecodeASCII(ascii_chars, 128, NULL); if (!ascii_chars_u) goto bad; ascii_chars_b = PyUnicode_AsEncodedString(ascii_chars_u, default_encoding_c, NULL); if (!ascii_chars_b || !PyBytes_Check(ascii_chars_b) || memcmp(ascii_chars, PyBytes_AS_STRING(ascii_chars_b), 128) != 0) { PyErr_Format( PyExc_ValueError, "This module compiled with c_string_encoding=ascii, but default encoding '%.200s' is not a superset of ascii.", default_encoding_c); goto bad; } Py_DECREF(ascii_chars_u); Py_DECREF(ascii_chars_b); } Py_DECREF(default_encoding); return 0; bad: Py_XDECREF(default_encoding); Py_XDECREF(ascii_chars_u); Py_XDECREF(ascii_chars_b); return -1; } #endif #if __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT && PY_MAJOR_VERSION >= 3 #define __Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_DecodeUTF8(c_str, size, NULL) #else #define __Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_Decode(c_str, size, __PYX_DEFAULT_STRING_ENCODING, NULL) #if __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT static char* __PYX_DEFAULT_STRING_ENCODING; static int __Pyx_init_sys_getdefaultencoding_params(void) { PyObject* sys; PyObject* default_encoding = NULL; char* default_encoding_c; sys = PyImport_ImportModule("sys"); if (!sys) goto bad; default_encoding = PyObject_CallMethod(sys, (char*) (const char*) "getdefaultencoding", NULL); Py_DECREF(sys); if (!default_encoding) goto bad; default_encoding_c = PyBytes_AsString(default_encoding); if (!default_encoding_c) goto bad; __PYX_DEFAULT_STRING_ENCODING = (char*) malloc(strlen(default_encoding_c) + 1); if (!__PYX_DEFAULT_STRING_ENCODING) goto bad; strcpy(__PYX_DEFAULT_STRING_ENCODING, default_encoding_c); Py_DECREF(default_encoding); return 0; bad: Py_XDECREF(default_encoding); return -1; } #endif #endif /* Test for GCC > 2.95 */ #if defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))) #define likely(x) __builtin_expect(!!(x), 1) #define unlikely(x) __builtin_expect(!!(x), 0) #else /* !__GNUC__ or GCC < 2.95 */ #define likely(x) (x) #define unlikely(x) (x) #endif /* __GNUC__ */ static CYTHON_INLINE void __Pyx_pretend_to_initialize(void* ptr) { (void)ptr; } static PyObject *__pyx_m = NULL; static PyObject *__pyx_d; static PyObject *__pyx_b; static PyObject *__pyx_cython_runtime = NULL; static PyObject *__pyx_empty_tuple; static PyObject *__pyx_empty_bytes; static PyObject *__pyx_empty_unicode; static int __pyx_lineno; static int __pyx_clineno = 0; static const char * __pyx_cfilenm= 
__FILE__; static const char *__pyx_filename; static const char *__pyx_f[] = { "src/borg/hashindex.pyx", "stringsource", "type.pxd", }; /*--- Type declarations ---*/ struct __pyx_obj_4borg_9hashindex_IndexBase; struct __pyx_obj_4borg_9hashindex_FuseVersionsIndex; struct __pyx_obj_4borg_9hashindex_NSIndex; struct __pyx_obj_4borg_9hashindex_NSKeyIterator; struct __pyx_obj_4borg_9hashindex_ChunkIndex; struct __pyx_obj_4borg_9hashindex_ChunkKeyIterator; struct __pyx_obj_4borg_9hashindex_CacheSynchronizer; /* "borg/hashindex.pyx":84 * * @cython.internal * cdef class IndexBase: # <<<<<<<<<<<<<< * cdef HashIndex *index * cdef int key_size */ struct __pyx_obj_4borg_9hashindex_IndexBase { PyObject_HEAD HashIndex *index; int key_size; }; /* "borg/hashindex.pyx":169 * * * cdef class FuseVersionsIndex(IndexBase): # <<<<<<<<<<<<<< * # 4 byte version + 16 byte file contents hash * value_size = 20 */ struct __pyx_obj_4borg_9hashindex_FuseVersionsIndex { struct __pyx_obj_4borg_9hashindex_IndexBase __pyx_base; }; /* "borg/hashindex.pyx":199 * * * cdef class NSIndex(IndexBase): # <<<<<<<<<<<<<< * * value_size = 8 */ struct __pyx_obj_4borg_9hashindex_NSIndex { struct __pyx_obj_4borg_9hashindex_IndexBase __pyx_base; }; /* "borg/hashindex.pyx":244 * * * cdef class NSKeyIterator: # <<<<<<<<<<<<<< * cdef NSIndex idx * cdef HashIndex *index */ struct __pyx_obj_4borg_9hashindex_NSKeyIterator { PyObject_HEAD struct __pyx_obj_4borg_9hashindex_NSIndex *idx; HashIndex *index; void const *key; int key_size; int exhausted; }; /* "borg/hashindex.pyx":275 * * * cdef class ChunkIndex(IndexBase): # <<<<<<<<<<<<<< * """ * Mapping of 32 byte keys to (refcount, size, csize), which are all 32-bit unsigned. */ struct __pyx_obj_4borg_9hashindex_ChunkIndex { struct __pyx_obj_4borg_9hashindex_IndexBase __pyx_base; struct __pyx_vtabstruct_4borg_9hashindex_ChunkIndex *__pyx_vtab; }; /* "borg/hashindex.pyx":474 * * * cdef class ChunkKeyIterator: # <<<<<<<<<<<<<< * cdef ChunkIndex idx * cdef HashIndex *index */ struct __pyx_obj_4borg_9hashindex_ChunkKeyIterator { PyObject_HEAD struct __pyx_obj_4borg_9hashindex_ChunkIndex *idx; HashIndex *index; void const *key; int key_size; int exhausted; }; /* "borg/hashindex.pyx":508 * * * cdef class CacheSynchronizer: # <<<<<<<<<<<<<< * cdef ChunkIndex chunks * cdef CacheSyncCtx *sync */ struct __pyx_obj_4borg_9hashindex_CacheSynchronizer { PyObject_HEAD struct __pyx_obj_4borg_9hashindex_ChunkIndex *chunks; CacheSyncCtx *sync; }; /* "borg/hashindex.pyx":275 * * * cdef class ChunkIndex(IndexBase): # <<<<<<<<<<<<<< * """ * Mapping of 32 byte keys to (refcount, size, csize), which are all 32-bit unsigned. 
*/ struct __pyx_vtabstruct_4borg_9hashindex_ChunkIndex { PyObject *(*_add)(struct __pyx_obj_4borg_9hashindex_ChunkIndex *, void *, uint32_t *); }; static struct __pyx_vtabstruct_4borg_9hashindex_ChunkIndex *__pyx_vtabptr_4borg_9hashindex_ChunkIndex; /* --- Runtime support code (head) --- */ /* Refnanny.proto */ #ifndef CYTHON_REFNANNY #define CYTHON_REFNANNY 0 #endif #if CYTHON_REFNANNY typedef struct { void (*INCREF)(void*, PyObject*, int); void (*DECREF)(void*, PyObject*, int); void (*GOTREF)(void*, PyObject*, int); void (*GIVEREF)(void*, PyObject*, int); void* (*SetupContext)(const char*, int, const char*); void (*FinishContext)(void**); } __Pyx_RefNannyAPIStruct; static __Pyx_RefNannyAPIStruct *__Pyx_RefNanny = NULL; static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname); #define __Pyx_RefNannyDeclarations void *__pyx_refnanny = NULL; #ifdef WITH_THREAD #define __Pyx_RefNannySetupContext(name, acquire_gil)\ if (acquire_gil) {\ PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();\ __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__);\ PyGILState_Release(__pyx_gilstate_save);\ } else {\ __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__);\ } #else #define __Pyx_RefNannySetupContext(name, acquire_gil)\ __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__) #endif #define __Pyx_RefNannyFinishContext()\ __Pyx_RefNanny->FinishContext(&__pyx_refnanny) #define __Pyx_INCREF(r) __Pyx_RefNanny->INCREF(__pyx_refnanny, (PyObject *)(r), __LINE__) #define __Pyx_DECREF(r) __Pyx_RefNanny->DECREF(__pyx_refnanny, (PyObject *)(r), __LINE__) #define __Pyx_GOTREF(r) __Pyx_RefNanny->GOTREF(__pyx_refnanny, (PyObject *)(r), __LINE__) #define __Pyx_GIVEREF(r) __Pyx_RefNanny->GIVEREF(__pyx_refnanny, (PyObject *)(r), __LINE__) #define __Pyx_XINCREF(r) do { if((r) != NULL) {__Pyx_INCREF(r); }} while(0) #define __Pyx_XDECREF(r) do { if((r) != NULL) {__Pyx_DECREF(r); }} while(0) #define __Pyx_XGOTREF(r) do { if((r) != NULL) {__Pyx_GOTREF(r); }} while(0) #define __Pyx_XGIVEREF(r) do { if((r) != NULL) {__Pyx_GIVEREF(r);}} while(0) #else #define __Pyx_RefNannyDeclarations #define __Pyx_RefNannySetupContext(name, acquire_gil) #define __Pyx_RefNannyFinishContext() #define __Pyx_INCREF(r) Py_INCREF(r) #define __Pyx_DECREF(r) Py_DECREF(r) #define __Pyx_GOTREF(r) #define __Pyx_GIVEREF(r) #define __Pyx_XINCREF(r) Py_XINCREF(r) #define __Pyx_XDECREF(r) Py_XDECREF(r) #define __Pyx_XGOTREF(r) #define __Pyx_XGIVEREF(r) #endif #define __Pyx_XDECREF_SET(r, v) do {\ PyObject *tmp = (PyObject *) r;\ r = v; __Pyx_XDECREF(tmp);\ } while (0) #define __Pyx_DECREF_SET(r, v) do {\ PyObject *tmp = (PyObject *) r;\ r = v; __Pyx_DECREF(tmp);\ } while (0) #define __Pyx_CLEAR(r) do { PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);} while(0) #define __Pyx_XCLEAR(r) do { if((r) != NULL) {PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);}} while(0) /* PyObjectGetAttrStr.proto */ #if CYTHON_USE_TYPE_SLOTS static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStr(PyObject* obj, PyObject* attr_name); #else #define __Pyx_PyObject_GetAttrStr(o,n) PyObject_GetAttr(o,n) #endif /* GetBuiltinName.proto */ static PyObject *__Pyx_GetBuiltinName(PyObject *name); /* RaiseDoubleKeywords.proto */ static void __Pyx_RaiseDoubleKeywordsError(const char* func_name, PyObject* kw_name); /* ParseKeywords.proto */ static int __Pyx_ParseOptionalKeywords(PyObject *kwds, PyObject **argnames[],\ PyObject *kwds2, PyObject *values[], Py_ssize_t num_pos_args,\ 
const char* function_name); /* RaiseArgTupleInvalid.proto */ static void __Pyx_RaiseArgtupleInvalid(const char* func_name, int exact, Py_ssize_t num_min, Py_ssize_t num_max, Py_ssize_t num_found); /* PyObjectCall.proto */ #if CYTHON_COMPILING_IN_CPYTHON static CYTHON_INLINE PyObject* __Pyx_PyObject_Call(PyObject *func, PyObject *arg, PyObject *kw); #else #define __Pyx_PyObject_Call(func, arg, kw) PyObject_Call(func, arg, kw) #endif /* PyObjectLookupSpecial.proto */ #if CYTHON_USE_PYTYPE_LOOKUP && CYTHON_USE_TYPE_SLOTS static CYTHON_INLINE PyObject* __Pyx_PyObject_LookupSpecial(PyObject* obj, PyObject* attr_name) { PyObject *res; PyTypeObject *tp = Py_TYPE(obj); #if PY_MAJOR_VERSION < 3 if (unlikely(PyInstance_Check(obj))) return __Pyx_PyObject_GetAttrStr(obj, attr_name); #endif res = _PyType_Lookup(tp, attr_name); if (likely(res)) { descrgetfunc f = Py_TYPE(res)->tp_descr_get; if (!f) { Py_INCREF(res); } else { res = f(res, obj, (PyObject *)tp); } } else { PyErr_SetObject(PyExc_AttributeError, attr_name); } return res; } #else #define __Pyx_PyObject_LookupSpecial(o,n) __Pyx_PyObject_GetAttrStr(o,n) #endif /* PyFunctionFastCall.proto */ #if CYTHON_FAST_PYCALL #define __Pyx_PyFunction_FastCall(func, args, nargs)\ __Pyx_PyFunction_FastCallDict((func), (args), (nargs), NULL) #if 1 || PY_VERSION_HEX < 0x030600B1 static PyObject *__Pyx_PyFunction_FastCallDict(PyObject *func, PyObject **args, Py_ssize_t nargs, PyObject *kwargs); #else #define __Pyx_PyFunction_FastCallDict(func, args, nargs, kwargs) _PyFunction_FastCallDict(func, args, nargs, kwargs) #endif #define __Pyx_BUILD_ASSERT_EXPR(cond)\ (sizeof(char [1 - 2*!(cond)]) - 1) #ifndef Py_MEMBER_SIZE #define Py_MEMBER_SIZE(type, member) sizeof(((type *)0)->member) #endif static size_t __pyx_pyframe_localsplus_offset = 0; #include "frameobject.h" #define __Pxy_PyFrame_Initialize_Offsets()\ ((void)__Pyx_BUILD_ASSERT_EXPR(sizeof(PyFrameObject) == offsetof(PyFrameObject, f_localsplus) + Py_MEMBER_SIZE(PyFrameObject, f_localsplus)),\ (void)(__pyx_pyframe_localsplus_offset = ((size_t)PyFrame_Type.tp_basicsize) - Py_MEMBER_SIZE(PyFrameObject, f_localsplus))) #define __Pyx_PyFrame_GetLocalsplus(frame)\ (assert(__pyx_pyframe_localsplus_offset), (PyObject **)(((char *)(frame)) + __pyx_pyframe_localsplus_offset)) #endif /* PyObjectCallMethO.proto */ #if CYTHON_COMPILING_IN_CPYTHON static CYTHON_INLINE PyObject* __Pyx_PyObject_CallMethO(PyObject *func, PyObject *arg); #endif /* PyObjectCallNoArg.proto */ #if CYTHON_COMPILING_IN_CPYTHON static CYTHON_INLINE PyObject* __Pyx_PyObject_CallNoArg(PyObject *func); #else #define __Pyx_PyObject_CallNoArg(func) __Pyx_PyObject_Call(func, __pyx_empty_tuple, NULL) #endif /* PyCFunctionFastCall.proto */ #if CYTHON_FAST_PYCCALL static CYTHON_INLINE PyObject *__Pyx_PyCFunction_FastCall(PyObject *func, PyObject **args, Py_ssize_t nargs); #else #define __Pyx_PyCFunction_FastCall(func, args, nargs) (assert(0), NULL) #endif /* PyObjectCallOneArg.proto */ static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg); /* GetTopmostException.proto */ #if CYTHON_USE_EXC_INFO_STACK static _PyErr_StackItem * __Pyx_PyErr_GetTopmostException(PyThreadState *tstate); #endif /* PyThreadStateGet.proto */ #if CYTHON_FAST_THREAD_STATE #define __Pyx_PyThreadState_declare PyThreadState *__pyx_tstate; #define __Pyx_PyThreadState_assign __pyx_tstate = __Pyx_PyThreadState_Current; #define __Pyx_PyErr_Occurred() __pyx_tstate->curexc_type #else #define __Pyx_PyThreadState_declare #define __Pyx_PyThreadState_assign #define 
__Pyx_PyErr_Occurred() PyErr_Occurred() #endif /* SaveResetException.proto */ #if CYTHON_FAST_THREAD_STATE #define __Pyx_ExceptionSave(type, value, tb) __Pyx__ExceptionSave(__pyx_tstate, type, value, tb) static CYTHON_INLINE void __Pyx__ExceptionSave(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb); #define __Pyx_ExceptionReset(type, value, tb) __Pyx__ExceptionReset(__pyx_tstate, type, value, tb) static CYTHON_INLINE void __Pyx__ExceptionReset(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb); #else #define __Pyx_ExceptionSave(type, value, tb) PyErr_GetExcInfo(type, value, tb) #define __Pyx_ExceptionReset(type, value, tb) PyErr_SetExcInfo(type, value, tb) #endif /* GetException.proto */ #if CYTHON_FAST_THREAD_STATE #define __Pyx_GetException(type, value, tb) __Pyx__GetException(__pyx_tstate, type, value, tb) static int __Pyx__GetException(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb); #else static int __Pyx_GetException(PyObject **type, PyObject **value, PyObject **tb); #endif /* PyErrFetchRestore.proto */ #if CYTHON_FAST_THREAD_STATE #define __Pyx_PyErr_Clear() __Pyx_ErrRestore(NULL, NULL, NULL) #define __Pyx_ErrRestoreWithState(type, value, tb) __Pyx_ErrRestoreInState(PyThreadState_GET(), type, value, tb) #define __Pyx_ErrFetchWithState(type, value, tb) __Pyx_ErrFetchInState(PyThreadState_GET(), type, value, tb) #define __Pyx_ErrRestore(type, value, tb) __Pyx_ErrRestoreInState(__pyx_tstate, type, value, tb) #define __Pyx_ErrFetch(type, value, tb) __Pyx_ErrFetchInState(__pyx_tstate, type, value, tb) static CYTHON_INLINE void __Pyx_ErrRestoreInState(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb); static CYTHON_INLINE void __Pyx_ErrFetchInState(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb); #if CYTHON_COMPILING_IN_CPYTHON #define __Pyx_PyErr_SetNone(exc) (Py_INCREF(exc), __Pyx_ErrRestore((exc), NULL, NULL)) #else #define __Pyx_PyErr_SetNone(exc) PyErr_SetNone(exc) #endif #else #define __Pyx_PyErr_Clear() PyErr_Clear() #define __Pyx_PyErr_SetNone(exc) PyErr_SetNone(exc) #define __Pyx_ErrRestoreWithState(type, value, tb) PyErr_Restore(type, value, tb) #define __Pyx_ErrFetchWithState(type, value, tb) PyErr_Fetch(type, value, tb) #define __Pyx_ErrRestoreInState(tstate, type, value, tb) PyErr_Restore(type, value, tb) #define __Pyx_ErrFetchInState(tstate, type, value, tb) PyErr_Fetch(type, value, tb) #define __Pyx_ErrRestore(type, value, tb) PyErr_Restore(type, value, tb) #define __Pyx_ErrFetch(type, value, tb) PyErr_Fetch(type, value, tb) #endif /* RaiseException.proto */ static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause); /* PySequenceContains.proto */ static CYTHON_INLINE int __Pyx_PySequence_ContainsTF(PyObject* item, PyObject* seq, int eq) { int result = PySequence_Contains(seq, item); return unlikely(result < 0) ? result : (result == (eq == Py_EQ)); } /* GetItemInt.proto */ #define __Pyx_GetItemInt(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck)\ (__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\ __Pyx_GetItemInt_Fast(o, (Py_ssize_t)i, is_list, wraparound, boundscheck) :\ (is_list ? 
(PyErr_SetString(PyExc_IndexError, "list index out of range"), (PyObject*)NULL) :\ __Pyx_GetItemInt_Generic(o, to_py_func(i)))) #define __Pyx_GetItemInt_List(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck)\ (__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\ __Pyx_GetItemInt_List_Fast(o, (Py_ssize_t)i, wraparound, boundscheck) :\ (PyErr_SetString(PyExc_IndexError, "list index out of range"), (PyObject*)NULL)) static CYTHON_INLINE PyObject *__Pyx_GetItemInt_List_Fast(PyObject *o, Py_ssize_t i, int wraparound, int boundscheck); #define __Pyx_GetItemInt_Tuple(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck)\ (__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\ __Pyx_GetItemInt_Tuple_Fast(o, (Py_ssize_t)i, wraparound, boundscheck) :\ (PyErr_SetString(PyExc_IndexError, "tuple index out of range"), (PyObject*)NULL)) static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Tuple_Fast(PyObject *o, Py_ssize_t i, int wraparound, int boundscheck); static PyObject *__Pyx_GetItemInt_Generic(PyObject *o, PyObject* j); static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Fast(PyObject *o, Py_ssize_t i, int is_list, int wraparound, int boundscheck); /* ObjectGetItem.proto */ #if CYTHON_USE_TYPE_SLOTS static CYTHON_INLINE PyObject *__Pyx_PyObject_GetItem(PyObject *obj, PyObject* key); #else #define __Pyx_PyObject_GetItem(obj, key) PyObject_GetItem(obj, key) #endif /* PyErrExceptionMatches.proto */ #if CYTHON_FAST_THREAD_STATE #define __Pyx_PyErr_ExceptionMatches(err) __Pyx_PyErr_ExceptionMatchesInState(__pyx_tstate, err) static CYTHON_INLINE int __Pyx_PyErr_ExceptionMatchesInState(PyThreadState* tstate, PyObject* err); #else #define __Pyx_PyErr_ExceptionMatches(err) PyErr_ExceptionMatches(err) #endif /* PyDictVersioning.proto */ #if CYTHON_USE_DICT_VERSIONS && CYTHON_USE_TYPE_SLOTS #define __PYX_DICT_VERSION_INIT ((PY_UINT64_T) -1) #define __PYX_GET_DICT_VERSION(dict) (((PyDictObject*)(dict))->ma_version_tag) #define __PYX_UPDATE_DICT_CACHE(dict, value, cache_var, version_var)\ (version_var) = __PYX_GET_DICT_VERSION(dict);\ (cache_var) = (value); #define __PYX_PY_DICT_LOOKUP_IF_MODIFIED(VAR, DICT, LOOKUP) {\ static PY_UINT64_T __pyx_dict_version = 0;\ static PyObject *__pyx_dict_cached_value = NULL;\ if (likely(__PYX_GET_DICT_VERSION(DICT) == __pyx_dict_version)) {\ (VAR) = __pyx_dict_cached_value;\ } else {\ (VAR) = __pyx_dict_cached_value = (LOOKUP);\ __pyx_dict_version = __PYX_GET_DICT_VERSION(DICT);\ }\ } static CYTHON_INLINE PY_UINT64_T __Pyx_get_tp_dict_version(PyObject *obj); static CYTHON_INLINE PY_UINT64_T __Pyx_get_object_dict_version(PyObject *obj); static CYTHON_INLINE int __Pyx_object_dict_version_matches(PyObject* obj, PY_UINT64_T tp_dict_version, PY_UINT64_T obj_dict_version); #else #define __PYX_GET_DICT_VERSION(dict) (0) #define __PYX_UPDATE_DICT_CACHE(dict, value, cache_var, version_var) #define __PYX_PY_DICT_LOOKUP_IF_MODIFIED(VAR, DICT, LOOKUP) (VAR) = (LOOKUP); #endif /* GetModuleGlobalName.proto */ #if CYTHON_USE_DICT_VERSIONS #define __Pyx_GetModuleGlobalName(var, name) {\ static PY_UINT64_T __pyx_dict_version = 0;\ static PyObject *__pyx_dict_cached_value = NULL;\ (var) = (likely(__pyx_dict_version == __PYX_GET_DICT_VERSION(__pyx_d))) ?\ (likely(__pyx_dict_cached_value) ? 
__Pyx_NewRef(__pyx_dict_cached_value) : __Pyx_GetBuiltinName(name)) :\ __Pyx__GetModuleGlobalName(name, &__pyx_dict_version, &__pyx_dict_cached_value);\ } #define __Pyx_GetModuleGlobalNameUncached(var, name) {\ PY_UINT64_T __pyx_dict_version;\ PyObject *__pyx_dict_cached_value;\ (var) = __Pyx__GetModuleGlobalName(name, &__pyx_dict_version, &__pyx_dict_cached_value);\ } static PyObject *__Pyx__GetModuleGlobalName(PyObject *name, PY_UINT64_T *dict_version, PyObject **dict_cached_value); #else #define __Pyx_GetModuleGlobalName(var, name) (var) = __Pyx__GetModuleGlobalName(name) #define __Pyx_GetModuleGlobalNameUncached(var, name) (var) = __Pyx__GetModuleGlobalName(name) static CYTHON_INLINE PyObject *__Pyx__GetModuleGlobalName(PyObject *name); #endif /* ArgTypeTest.proto */ #define __Pyx_ArgTypeTest(obj, type, none_allowed, name, exact)\ ((likely((Py_TYPE(obj) == type) | (none_allowed && (obj == Py_None)))) ? 1 :\ __Pyx__ArgTypeTest(obj, type, name, exact)) static int __Pyx__ArgTypeTest(PyObject *obj, PyTypeObject *type, const char *name, int exact); /* ListAppend.proto */ #if CYTHON_USE_PYLIST_INTERNALS && CYTHON_ASSUME_SAFE_MACROS static CYTHON_INLINE int __Pyx_PyList_Append(PyObject* list, PyObject* x) { PyListObject* L = (PyListObject*) list; Py_ssize_t len = Py_SIZE(list); if (likely(L->allocated > len) & likely(len > (L->allocated >> 1))) { Py_INCREF(x); PyList_SET_ITEM(list, len, x); __Pyx_SET_SIZE(list, len + 1); return 0; } return PyList_Append(list, x); } #else #define __Pyx_PyList_Append(L,x) PyList_Append(L,x) #endif /* ExtTypeTest.proto */ static CYTHON_INLINE int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type); /* IncludeStringH.proto */ #include /* decode_c_string_utf16.proto */ static CYTHON_INLINE PyObject *__Pyx_PyUnicode_DecodeUTF16(const char *s, Py_ssize_t size, const char *errors) { int byteorder = 0; return PyUnicode_DecodeUTF16(s, size, errors, &byteorder); } static CYTHON_INLINE PyObject *__Pyx_PyUnicode_DecodeUTF16LE(const char *s, Py_ssize_t size, const char *errors) { int byteorder = -1; return PyUnicode_DecodeUTF16(s, size, errors, &byteorder); } static CYTHON_INLINE PyObject *__Pyx_PyUnicode_DecodeUTF16BE(const char *s, Py_ssize_t size, const char *errors) { int byteorder = 1; return PyUnicode_DecodeUTF16(s, size, errors, &byteorder); } /* decode_c_string.proto */ static CYTHON_INLINE PyObject* __Pyx_decode_c_string( const char* cstring, Py_ssize_t start, Py_ssize_t stop, const char* encoding, const char* errors, PyObject* (*decode_func)(const char *s, Py_ssize_t size, const char *errors)); /* PyObject_GenericGetAttrNoDict.proto */ #if CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP && PY_VERSION_HEX < 0x03070000 static CYTHON_INLINE PyObject* __Pyx_PyObject_GenericGetAttrNoDict(PyObject* obj, PyObject* attr_name); #else #define __Pyx_PyObject_GenericGetAttrNoDict PyObject_GenericGetAttr #endif /* PyObject_GenericGetAttr.proto */ #if CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP && PY_VERSION_HEX < 0x03070000 static PyObject* __Pyx_PyObject_GenericGetAttr(PyObject* obj, PyObject* attr_name); #else #define __Pyx_PyObject_GenericGetAttr PyObject_GenericGetAttr #endif /* PyObjectGetAttrStrNoError.proto */ static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStrNoError(PyObject* obj, PyObject* attr_name); /* SetupReduce.proto */ static int __Pyx_setup_reduce(PyObject* type_obj); /* SetVTable.proto */ static int __Pyx_SetVtable(PyObject *dict, void *vtable); /* TypeImport.proto */ #ifndef __PYX_HAVE_RT_ImportType_proto #define __PYX_HAVE_RT_ImportType_proto 
enum __Pyx_ImportType_CheckSize { __Pyx_ImportType_CheckSize_Error = 0, __Pyx_ImportType_CheckSize_Warn = 1, __Pyx_ImportType_CheckSize_Ignore = 2 }; static PyTypeObject *__Pyx_ImportType(PyObject* module, const char *module_name, const char *class_name, size_t size, enum __Pyx_ImportType_CheckSize check_size); #endif /* Import.proto */ static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, int level); /* ImportFrom.proto */ static PyObject* __Pyx_ImportFrom(PyObject* module, PyObject* name); /* None.proto */ static CYTHON_INLINE long __Pyx_mod_long(long, long); /* ClassMethod.proto */ #include "descrobject.h" static CYTHON_UNUSED PyObject* __Pyx_Method_ClassMethod(PyObject *method); /* GetNameInClass.proto */ #define __Pyx_GetNameInClass(var, nmspace, name) (var) = __Pyx__GetNameInClass(nmspace, name) static PyObject *__Pyx__GetNameInClass(PyObject *nmspace, PyObject *name); /* CLineInTraceback.proto */ #ifdef CYTHON_CLINE_IN_TRACEBACK #define __Pyx_CLineForTraceback(tstate, c_line) (((CYTHON_CLINE_IN_TRACEBACK)) ? c_line : 0) #else static int __Pyx_CLineForTraceback(PyThreadState *tstate, int c_line); #endif /* CodeObjectCache.proto */ typedef struct { PyCodeObject* code_object; int code_line; } __Pyx_CodeObjectCacheEntry; struct __Pyx_CodeObjectCache { int count; int max_count; __Pyx_CodeObjectCacheEntry* entries; }; static struct __Pyx_CodeObjectCache __pyx_code_cache = {0,0,NULL}; static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line); static PyCodeObject *__pyx_find_code_object(int code_line); static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object); /* AddTraceback.proto */ static void __Pyx_AddTraceback(const char *funcname, int c_line, int py_line, const char *filename); /* CIntToPy.proto */ static CYTHON_INLINE PyObject* __Pyx_PyInt_From_uint32_t(uint32_t value); /* CIntToPy.proto */ static CYTHON_INLINE PyObject* __Pyx_PyInt_From_int(int value); /* CIntToPy.proto */ static CYTHON_INLINE PyObject* __Pyx_PyInt_From_uint64_t(uint64_t value); /* CIntToPy.proto */ static CYTHON_INLINE PyObject* __Pyx_PyInt_From_long(long value); /* CIntFromPy.proto */ static CYTHON_INLINE int __Pyx_PyInt_As_int(PyObject *); /* CIntFromPy.proto */ static CYTHON_INLINE uint32_t __Pyx_PyInt_As_uint32_t(PyObject *); /* CIntFromPy.proto */ static CYTHON_INLINE long __Pyx_PyInt_As_long(PyObject *); /* FastTypeChecks.proto */ #if CYTHON_COMPILING_IN_CPYTHON #define __Pyx_TypeCheck(obj, type) __Pyx_IsSubtype(Py_TYPE(obj), (PyTypeObject *)type) static CYTHON_INLINE int __Pyx_IsSubtype(PyTypeObject *a, PyTypeObject *b); static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches(PyObject *err, PyObject *type); static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches2(PyObject *err, PyObject *type1, PyObject *type2); #else #define __Pyx_TypeCheck(obj, type) PyObject_TypeCheck(obj, (PyTypeObject *)type) #define __Pyx_PyErr_GivenExceptionMatches(err, type) PyErr_GivenExceptionMatches(err, type) #define __Pyx_PyErr_GivenExceptionMatches2(err, type1, type2) (PyErr_GivenExceptionMatches(err, type1) || PyErr_GivenExceptionMatches(err, type2)) #endif #define __Pyx_PyException_Check(obj) __Pyx_TypeCheck(obj, PyExc_Exception) /* CheckBinaryVersion.proto */ static int __Pyx_check_binary_version(void); /* InitStrings.proto */ static int __Pyx_InitStrings(__Pyx_StringTabEntry *t); static PyObject *__pyx_f_4borg_9hashindex_10ChunkIndex__add(struct __pyx_obj_4borg_9hashindex_ChunkIndex *__pyx_v_self, void *__pyx_v_key, uint32_t *__pyx_v_data); /* 
proto*/ /* Module declarations from 'cython' */ /* Module declarations from 'libc.stdint' */ /* Module declarations from 'libc.errno' */ /* Module declarations from 'libc.string' */ /* Module declarations from 'libc.stdio' */ /* Module declarations from '__builtin__' */ /* Module declarations from 'cpython.type' */ static PyTypeObject *__pyx_ptype_7cpython_4type_type = 0; /* Module declarations from 'cpython' */ /* Module declarations from 'cpython.object' */ /* Module declarations from 'cpython.exc' */ /* Module declarations from 'cpython.buffer' */ /* Module declarations from 'cpython.bytes' */ /* Module declarations from 'borg.hashindex' */ static PyTypeObject *__pyx_ptype_4borg_9hashindex_IndexBase = 0; static PyTypeObject *__pyx_ptype_4borg_9hashindex_FuseVersionsIndex = 0; static PyTypeObject *__pyx_ptype_4borg_9hashindex_NSIndex = 0; static PyTypeObject *__pyx_ptype_4borg_9hashindex_NSKeyIterator = 0; static PyTypeObject *__pyx_ptype_4borg_9hashindex_ChunkIndex = 0; static PyTypeObject *__pyx_ptype_4borg_9hashindex_ChunkKeyIterator = 0; static PyTypeObject *__pyx_ptype_4borg_9hashindex_CacheSynchronizer = 0; static PyObject *__pyx_v_4borg_9hashindex__NoDefault = 0; static Py_buffer __pyx_f_4borg_9hashindex_ro_buffer(PyObject *); /*proto*/ #define __Pyx_MODULE_NAME "borg.hashindex" extern int __pyx_module_is_main_borg__hashindex; int __pyx_module_is_main_borg__hashindex = 0; /* Implementation of 'borg.hashindex' */ static PyObject *__pyx_builtin_object; static PyObject *__pyx_builtin_open; static PyObject *__pyx_builtin_KeyError; static PyObject *__pyx_builtin_TypeError; static PyObject *__pyx_builtin_IndexError; static PyObject *__pyx_builtin_StopIteration; static PyObject *__pyx_builtin_ValueError; static const char __pyx_k_os[] = "os"; static const char __pyx_k_rb[] = "rb"; static const char __pyx_k_wb[] = "wb"; static const char __pyx_k_key[] = "key"; static const char __pyx_k_exit[] = "__exit__"; static const char __pyx_k_main[] = "__main__"; static const char __pyx_k_name[] = "__name__"; static const char __pyx_k_open[] = "open"; static const char __pyx_k_path[] = "path"; static const char __pyx_k_read[] = "read"; static const char __pyx_k_refs[] = "refs"; static const char __pyx_k_size[] = "size"; static const char __pyx_k_test[] = "__test__"; static const char __pyx_k_csize[] = "csize"; static const char __pyx_k_enter[] = "__enter__"; static const char __pyx_k_value[] = "value"; static const char __pyx_k_1_1_07[] = "1.1_07"; static const char __pyx_k_chunks[] = "chunks"; static const char __pyx_k_import[] = "__import__"; static const char __pyx_k_locale[] = "locale"; static const char __pyx_k_marker[] = "marker"; static const char __pyx_k_object[] = "object"; static const char __pyx_k_reduce[] = "__reduce__"; static const char __pyx_k_NSIndex[] = "NSIndex"; static const char __pyx_k_default[] = "default"; static const char __pyx_k_KeyError[] = "KeyError"; static const char __pyx_k_capacity[] = "capacity"; static const char __pyx_k_getstate[] = "__getstate__"; static const char __pyx_k_key_size[] = "_key_size"; static const char __pyx_k_setstate[] = "__setstate__"; static const char __pyx_k_MAX_VALUE[] = "MAX_VALUE"; static const char __pyx_k_TypeError[] = "TypeError"; static const char __pyx_k_reduce_ex[] = "__reduce_ex__"; static const char __pyx_k_ChunkIndex[] = "ChunkIndex"; static const char __pyx_k_IndexError[] = "IndexError"; static const char __pyx_k_ValueError[] = "ValueError"; static const char __pyx_k_key_size_2[] = "key_size"; static const char __pyx_k_namedtuple[] 
= "namedtuple"; static const char __pyx_k_pyx_vtable[] = "__pyx_vtable__"; static const char __pyx_k_value_size[] = "value_size"; static const char __pyx_k_API_VERSION[] = "API_VERSION"; static const char __pyx_k_collections[] = "collections"; static const char __pyx_k_NSKeyIterator[] = "NSKeyIterator"; static const char __pyx_k_StopIteration[] = "StopIteration"; static const char __pyx_k_reduce_cython[] = "__reduce_cython__"; static const char __pyx_k_permit_compact[] = "permit_compact"; static const char __pyx_k_ChunkIndexEntry[] = "ChunkIndexEntry"; static const char __pyx_k_MAX_LOAD_FACTOR[] = "MAX_LOAD_FACTOR"; static const char __pyx_k_setstate_cython[] = "__setstate_cython__"; static const char __pyx_k_ChunkKeyIterator[] = "ChunkKeyIterator"; static const char __pyx_k_CacheSynchronizer[] = "CacheSynchronizer"; static const char __pyx_k_FuseVersionsIndex[] = "FuseVersionsIndex"; static const char __pyx_k_cline_in_traceback[] = "cline_in_traceback"; static const char __pyx_k_refcount_size_csize[] = "refcount size csize"; static const char __pyx_k_hashindex_set_failed[] = "hashindex_set failed"; static const char __pyx_k_hashindex_init_failed[] = "hashindex_init failed"; static const char __pyx_k_cache_sync_feed_failed[] = "cache_sync_feed failed: "; static const char __pyx_k_cache_sync_init_failed[] = "cache_sync_init failed"; static const char __pyx_k_hashindex_delete_failed[] = "hashindex_delete failed"; static const char __pyx_k_invalid_reference_count[] = "invalid reference count"; static const char __pyx_k_Expected_bytes_of_length_16_for[] = "Expected bytes of length 16 for second value"; static const char __pyx_k_hashindex_read_returned_NULL_wit[] = "hashindex_read() returned NULL with no exception set"; static const char __pyx_k_maximum_number_of_segments_reach[] = "maximum number of segments reached"; static const char __pyx_k_maximum_number_of_versions_reach[] = "maximum number of versions reached"; static const char __pyx_k_no_default___reduce___due_to_non[] = "no default __reduce__ due to non-trivial __cinit__"; static const char __pyx_k_stats_against_key_contained_in_s[] = "stats_against: key contained in self but not in master_index."; static PyObject *__pyx_kp_u_1_1_07; static PyObject *__pyx_n_s_API_VERSION; static PyObject *__pyx_n_s_CacheSynchronizer; static PyObject *__pyx_n_s_ChunkIndex; static PyObject *__pyx_n_s_ChunkIndexEntry; static PyObject *__pyx_n_u_ChunkIndexEntry; static PyObject *__pyx_n_s_ChunkKeyIterator; static PyObject *__pyx_kp_u_Expected_bytes_of_length_16_for; static PyObject *__pyx_n_s_FuseVersionsIndex; static PyObject *__pyx_n_s_IndexError; static PyObject *__pyx_n_s_KeyError; static PyObject *__pyx_n_s_MAX_LOAD_FACTOR; static PyObject *__pyx_n_s_MAX_VALUE; static PyObject *__pyx_n_s_NSIndex; static PyObject *__pyx_n_s_NSKeyIterator; static PyObject *__pyx_n_s_StopIteration; static PyObject *__pyx_n_s_TypeError; static PyObject *__pyx_n_s_ValueError; static PyObject *__pyx_kp_u_cache_sync_feed_failed; static PyObject *__pyx_kp_u_cache_sync_init_failed; static PyObject *__pyx_n_s_capacity; static PyObject *__pyx_n_s_chunks; static PyObject *__pyx_n_s_cline_in_traceback; static PyObject *__pyx_n_s_collections; static PyObject *__pyx_n_s_csize; static PyObject *__pyx_n_s_default; static PyObject *__pyx_n_s_enter; static PyObject *__pyx_n_s_exit; static PyObject *__pyx_n_s_getstate; static PyObject *__pyx_kp_u_hashindex_delete_failed; static PyObject *__pyx_kp_u_hashindex_init_failed; static PyObject *__pyx_kp_u_hashindex_read_returned_NULL_wit; 
static PyObject *__pyx_kp_u_hashindex_set_failed; static PyObject *__pyx_n_s_import; static PyObject *__pyx_kp_u_invalid_reference_count; static PyObject *__pyx_n_s_key; static PyObject *__pyx_n_s_key_size; static PyObject *__pyx_n_s_key_size_2; static PyObject *__pyx_n_s_locale; static PyObject *__pyx_n_s_main; static PyObject *__pyx_n_s_marker; static PyObject *__pyx_kp_u_maximum_number_of_segments_reach; static PyObject *__pyx_kp_u_maximum_number_of_versions_reach; static PyObject *__pyx_n_s_name; static PyObject *__pyx_n_s_namedtuple; static PyObject *__pyx_kp_s_no_default___reduce___due_to_non; static PyObject *__pyx_n_s_object; static PyObject *__pyx_n_s_open; static PyObject *__pyx_n_s_os; static PyObject *__pyx_n_s_path; static PyObject *__pyx_n_s_permit_compact; static PyObject *__pyx_n_s_pyx_vtable; static PyObject *__pyx_n_u_rb; static PyObject *__pyx_n_s_read; static PyObject *__pyx_n_s_reduce; static PyObject *__pyx_n_s_reduce_cython; static PyObject *__pyx_n_s_reduce_ex; static PyObject *__pyx_kp_u_refcount_size_csize; static PyObject *__pyx_n_s_refs; static PyObject *__pyx_n_s_setstate; static PyObject *__pyx_n_s_setstate_cython; static PyObject *__pyx_n_s_size; static PyObject *__pyx_kp_u_stats_against_key_contained_in_s; static PyObject *__pyx_n_s_test; static PyObject *__pyx_n_s_value; static PyObject *__pyx_n_s_value_size; static PyObject *__pyx_n_u_wb; static int __pyx_pf_4borg_9hashindex_9IndexBase___cinit__(struct __pyx_obj_4borg_9hashindex_IndexBase *__pyx_v_self, PyObject *__pyx_v_capacity, PyObject *__pyx_v_path, PyObject *__pyx_v_permit_compact); /* proto */ static void __pyx_pf_4borg_9hashindex_9IndexBase_2__dealloc__(struct __pyx_obj_4borg_9hashindex_IndexBase *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_4borg_9hashindex_9IndexBase_4read(PyTypeObject *__pyx_v_cls, PyObject *__pyx_v_path, PyObject *__pyx_v_permit_compact); /* proto */ static PyObject *__pyx_pf_4borg_9hashindex_9IndexBase_6write(struct __pyx_obj_4borg_9hashindex_IndexBase *__pyx_v_self, PyObject *__pyx_v_path); /* proto */ static PyObject *__pyx_pf_4borg_9hashindex_9IndexBase_8clear(struct __pyx_obj_4borg_9hashindex_IndexBase *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_4borg_9hashindex_9IndexBase_10setdefault(struct __pyx_obj_4borg_9hashindex_IndexBase *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_value); /* proto */ static int __pyx_pf_4borg_9hashindex_9IndexBase_12__delitem__(struct __pyx_obj_4borg_9hashindex_IndexBase *__pyx_v_self, PyObject *__pyx_v_key); /* proto */ static PyObject *__pyx_pf_4borg_9hashindex_9IndexBase_14get(struct __pyx_obj_4borg_9hashindex_IndexBase *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_default); /* proto */ static PyObject *__pyx_pf_4borg_9hashindex_9IndexBase_16pop(struct __pyx_obj_4borg_9hashindex_IndexBase *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_default); /* proto */ static Py_ssize_t __pyx_pf_4borg_9hashindex_9IndexBase_18__len__(struct __pyx_obj_4borg_9hashindex_IndexBase *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_4borg_9hashindex_9IndexBase_20size(struct __pyx_obj_4borg_9hashindex_IndexBase *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_4borg_9hashindex_9IndexBase_22compact(struct __pyx_obj_4borg_9hashindex_IndexBase *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_4borg_9hashindex_9IndexBase_24__reduce_cython__(CYTHON_UNUSED struct __pyx_obj_4borg_9hashindex_IndexBase *__pyx_v_self); /* proto */ static PyObject 
*__pyx_pf_4borg_9hashindex_9IndexBase_26__setstate_cython__(CYTHON_UNUSED struct __pyx_obj_4borg_9hashindex_IndexBase *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state); /* proto */ static PyObject *__pyx_pf_4borg_9hashindex_17FuseVersionsIndex___getitem__(struct __pyx_obj_4borg_9hashindex_FuseVersionsIndex *__pyx_v_self, PyObject *__pyx_v_key); /* proto */ static int __pyx_pf_4borg_9hashindex_17FuseVersionsIndex_2__setitem__(struct __pyx_obj_4borg_9hashindex_FuseVersionsIndex *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_value); /* proto */ static int __pyx_pf_4borg_9hashindex_17FuseVersionsIndex_4__contains__(struct __pyx_obj_4borg_9hashindex_FuseVersionsIndex *__pyx_v_self, PyObject *__pyx_v_key); /* proto */ static PyObject *__pyx_pf_4borg_9hashindex_17FuseVersionsIndex_6__reduce_cython__(CYTHON_UNUSED struct __pyx_obj_4borg_9hashindex_FuseVersionsIndex *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_4borg_9hashindex_17FuseVersionsIndex_8__setstate_cython__(CYTHON_UNUSED struct __pyx_obj_4borg_9hashindex_FuseVersionsIndex *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state); /* proto */ static PyObject *__pyx_pf_4borg_9hashindex_7NSIndex___getitem__(struct __pyx_obj_4borg_9hashindex_NSIndex *__pyx_v_self, PyObject *__pyx_v_key); /* proto */ static int __pyx_pf_4borg_9hashindex_7NSIndex_2__setitem__(struct __pyx_obj_4borg_9hashindex_NSIndex *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_value); /* proto */ static int __pyx_pf_4borg_9hashindex_7NSIndex_4__contains__(struct __pyx_obj_4borg_9hashindex_NSIndex *__pyx_v_self, PyObject *__pyx_v_key); /* proto */ static PyObject *__pyx_pf_4borg_9hashindex_7NSIndex_6iteritems(struct __pyx_obj_4borg_9hashindex_NSIndex *__pyx_v_self, PyObject *__pyx_v_marker); /* proto */ static PyObject *__pyx_pf_4borg_9hashindex_7NSIndex_8__reduce_cython__(CYTHON_UNUSED struct __pyx_obj_4borg_9hashindex_NSIndex *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_4borg_9hashindex_7NSIndex_10__setstate_cython__(CYTHON_UNUSED struct __pyx_obj_4borg_9hashindex_NSIndex *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state); /* proto */ static int __pyx_pf_4borg_9hashindex_13NSKeyIterator___cinit__(struct __pyx_obj_4borg_9hashindex_NSKeyIterator *__pyx_v_self, PyObject *__pyx_v_key_size); /* proto */ static PyObject *__pyx_pf_4borg_9hashindex_13NSKeyIterator_2__iter__(struct __pyx_obj_4borg_9hashindex_NSKeyIterator *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_4borg_9hashindex_13NSKeyIterator_4__next__(struct __pyx_obj_4borg_9hashindex_NSKeyIterator *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_4borg_9hashindex_13NSKeyIterator_6__reduce_cython__(CYTHON_UNUSED struct __pyx_obj_4borg_9hashindex_NSKeyIterator *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_4borg_9hashindex_13NSKeyIterator_8__setstate_cython__(CYTHON_UNUSED struct __pyx_obj_4borg_9hashindex_NSKeyIterator *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state); /* proto */ static PyObject *__pyx_pf_4borg_9hashindex_10ChunkIndex___getitem__(struct __pyx_obj_4borg_9hashindex_ChunkIndex *__pyx_v_self, PyObject *__pyx_v_key); /* proto */ static int __pyx_pf_4borg_9hashindex_10ChunkIndex_2__setitem__(struct __pyx_obj_4borg_9hashindex_ChunkIndex *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_value); /* proto */ static int __pyx_pf_4borg_9hashindex_10ChunkIndex_4__contains__(struct __pyx_obj_4borg_9hashindex_ChunkIndex *__pyx_v_self, PyObject *__pyx_v_key); /* proto */ static PyObject 
*__pyx_pf_4borg_9hashindex_10ChunkIndex_6incref(struct __pyx_obj_4borg_9hashindex_ChunkIndex *__pyx_v_self, PyObject *__pyx_v_key); /* proto */ static PyObject *__pyx_pf_4borg_9hashindex_10ChunkIndex_8decref(struct __pyx_obj_4borg_9hashindex_ChunkIndex *__pyx_v_self, PyObject *__pyx_v_key); /* proto */ static PyObject *__pyx_pf_4borg_9hashindex_10ChunkIndex_10iteritems(struct __pyx_obj_4borg_9hashindex_ChunkIndex *__pyx_v_self, PyObject *__pyx_v_marker); /* proto */ static PyObject *__pyx_pf_4borg_9hashindex_10ChunkIndex_12summarize(struct __pyx_obj_4borg_9hashindex_ChunkIndex *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_4borg_9hashindex_10ChunkIndex_14stats_against(struct __pyx_obj_4borg_9hashindex_ChunkIndex *__pyx_v_self, struct __pyx_obj_4borg_9hashindex_ChunkIndex *__pyx_v_master_index); /* proto */ static PyObject *__pyx_pf_4borg_9hashindex_10ChunkIndex_16add(struct __pyx_obj_4borg_9hashindex_ChunkIndex *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_refs, PyObject *__pyx_v_size, PyObject *__pyx_v_csize); /* proto */ static PyObject *__pyx_pf_4borg_9hashindex_10ChunkIndex_18merge(struct __pyx_obj_4borg_9hashindex_ChunkIndex *__pyx_v_self, struct __pyx_obj_4borg_9hashindex_ChunkIndex *__pyx_v_other); /* proto */ static PyObject *__pyx_pf_4borg_9hashindex_10ChunkIndex_20zero_csize_ids(struct __pyx_obj_4borg_9hashindex_ChunkIndex *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_4borg_9hashindex_10ChunkIndex_22__reduce_cython__(CYTHON_UNUSED struct __pyx_obj_4borg_9hashindex_ChunkIndex *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_4borg_9hashindex_10ChunkIndex_24__setstate_cython__(CYTHON_UNUSED struct __pyx_obj_4borg_9hashindex_ChunkIndex *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state); /* proto */ static int __pyx_pf_4borg_9hashindex_16ChunkKeyIterator___cinit__(struct __pyx_obj_4borg_9hashindex_ChunkKeyIterator *__pyx_v_self, PyObject *__pyx_v_key_size); /* proto */ static PyObject *__pyx_pf_4borg_9hashindex_16ChunkKeyIterator_2__iter__(struct __pyx_obj_4borg_9hashindex_ChunkKeyIterator *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_4borg_9hashindex_16ChunkKeyIterator_4__next__(struct __pyx_obj_4borg_9hashindex_ChunkKeyIterator *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_4borg_9hashindex_16ChunkKeyIterator_6__reduce_cython__(CYTHON_UNUSED struct __pyx_obj_4borg_9hashindex_ChunkKeyIterator *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_4borg_9hashindex_16ChunkKeyIterator_8__setstate_cython__(CYTHON_UNUSED struct __pyx_obj_4borg_9hashindex_ChunkKeyIterator *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state); /* proto */ static int __pyx_pf_4borg_9hashindex_17CacheSynchronizer___cinit__(struct __pyx_obj_4borg_9hashindex_CacheSynchronizer *__pyx_v_self, PyObject *__pyx_v_chunks); /* proto */ static void __pyx_pf_4borg_9hashindex_17CacheSynchronizer_2__dealloc__(struct __pyx_obj_4borg_9hashindex_CacheSynchronizer *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_4borg_9hashindex_17CacheSynchronizer_4feed(struct __pyx_obj_4borg_9hashindex_CacheSynchronizer *__pyx_v_self, PyObject *__pyx_v_chunk); /* proto */ static PyObject *__pyx_pf_4borg_9hashindex_17CacheSynchronizer_16num_files_totals___get__(struct __pyx_obj_4borg_9hashindex_CacheSynchronizer *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_4borg_9hashindex_17CacheSynchronizer_15num_files_parts___get__(struct __pyx_obj_4borg_9hashindex_CacheSynchronizer *__pyx_v_self); /* proto */ static PyObject 
*__pyx_pf_4borg_9hashindex_17CacheSynchronizer_11size_totals___get__(struct __pyx_obj_4borg_9hashindex_CacheSynchronizer *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_4borg_9hashindex_17CacheSynchronizer_10size_parts___get__(struct __pyx_obj_4borg_9hashindex_CacheSynchronizer *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_4borg_9hashindex_17CacheSynchronizer_12csize_totals___get__(struct __pyx_obj_4borg_9hashindex_CacheSynchronizer *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_4borg_9hashindex_17CacheSynchronizer_11csize_parts___get__(struct __pyx_obj_4borg_9hashindex_CacheSynchronizer *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_4borg_9hashindex_17CacheSynchronizer_6__reduce_cython__(CYTHON_UNUSED struct __pyx_obj_4borg_9hashindex_CacheSynchronizer *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_4borg_9hashindex_17CacheSynchronizer_8__setstate_cython__(CYTHON_UNUSED struct __pyx_obj_4borg_9hashindex_CacheSynchronizer *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state); /* proto */ static PyObject *__pyx_tp_new_4borg_9hashindex_IndexBase(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/ static PyObject *__pyx_tp_new_4borg_9hashindex_FuseVersionsIndex(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/ static PyObject *__pyx_tp_new_4borg_9hashindex_NSIndex(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/ static PyObject *__pyx_tp_new_4borg_9hashindex_NSKeyIterator(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/ static PyObject *__pyx_tp_new_4borg_9hashindex_ChunkIndex(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/ static PyObject *__pyx_tp_new_4borg_9hashindex_ChunkKeyIterator(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/ static PyObject *__pyx_tp_new_4borg_9hashindex_CacheSynchronizer(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/ static PyObject *__pyx_int_0; static PyObject *__pyx_int_8; static PyObject *__pyx_int_12; static PyObject *__pyx_int_16; static PyObject *__pyx_int_20; static PyObject *__pyx_int_32; static PyObject *__pyx_int_4294967295; static PyObject *__pyx_k__4; static PyObject *__pyx_tuple_; static PyObject *__pyx_tuple__2; static PyObject *__pyx_tuple__3; static PyObject *__pyx_tuple__5; static PyObject *__pyx_tuple__6; static PyObject *__pyx_tuple__7; static PyObject *__pyx_tuple__8; static PyObject *__pyx_tuple__9; static PyObject *__pyx_tuple__10; static PyObject *__pyx_tuple__11; static PyObject *__pyx_tuple__12; static PyObject *__pyx_tuple__13; static PyObject *__pyx_tuple__14; static PyObject *__pyx_tuple__15; static PyObject *__pyx_tuple__16; static PyObject *__pyx_tuple__17; static PyObject *__pyx_tuple__18; static PyObject *__pyx_tuple__19; static PyObject *__pyx_tuple__20; static PyObject *__pyx_tuple__21; static PyObject *__pyx_tuple__22; static PyObject *__pyx_tuple__23; /* Late includes */ /* "borg/hashindex.pyx":93 * MAX_VALUE = _MAX_VALUE * * def __cinit__(self, capacity=0, path=None, permit_compact=False): # <<<<<<<<<<<<<< * self.key_size = self._key_size * if path: */ /* Python wrapper */ static int __pyx_pw_4borg_9hashindex_9IndexBase_1__cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static int __pyx_pw_4borg_9hashindex_9IndexBase_1__cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyObject *__pyx_v_capacity = 0; PyObject *__pyx_v_path = 0; PyObject *__pyx_v_permit_compact = 0; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; int __pyx_r; __Pyx_RefNannyDeclarations 
__Pyx_RefNannySetupContext("__cinit__ (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_capacity,&__pyx_n_s_path,&__pyx_n_s_permit_compact,0}; PyObject* values[3] = {0,0,0}; values[0] = ((PyObject *)__pyx_int_0); values[1] = ((PyObject *)Py_None); values[2] = ((PyObject *)Py_False); if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); CYTHON_FALLTHROUGH; case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); CYTHON_FALLTHROUGH; case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (kw_args > 0) { PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_capacity); if (value) { values[0] = value; kw_args--; } } CYTHON_FALLTHROUGH; case 1: if (kw_args > 0) { PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_path); if (value) { values[1] = value; kw_args--; } } CYTHON_FALLTHROUGH; case 2: if (kw_args > 0) { PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_permit_compact); if (value) { values[2] = value; kw_args--; } } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__cinit__") < 0)) __PYX_ERR(0, 93, __pyx_L3_error) } } else { switch (PyTuple_GET_SIZE(__pyx_args)) { case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); CYTHON_FALLTHROUGH; case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); CYTHON_FALLTHROUGH; case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } } __pyx_v_capacity = values[0]; __pyx_v_path = values[1]; __pyx_v_permit_compact = values[2]; } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("__cinit__", 0, 0, 3, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 93, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("borg.hashindex.IndexBase.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return -1; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_4borg_9hashindex_9IndexBase___cinit__(((struct __pyx_obj_4borg_9hashindex_IndexBase *)__pyx_v_self), __pyx_v_capacity, __pyx_v_path, __pyx_v_permit_compact); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static int __pyx_pf_4borg_9hashindex_9IndexBase___cinit__(struct __pyx_obj_4borg_9hashindex_IndexBase *__pyx_v_self, PyObject *__pyx_v_capacity, PyObject *__pyx_v_path, PyObject *__pyx_v_permit_compact) { PyObject *__pyx_v_fd = NULL; int __pyx_r; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_t_2; int __pyx_t_3; int __pyx_t_4; int __pyx_t_5; PyObject *__pyx_t_6 = NULL; PyObject *__pyx_t_7 = NULL; PyObject *__pyx_t_8 = NULL; PyObject *__pyx_t_9 = NULL; PyObject *__pyx_t_10 = NULL; PyObject *__pyx_t_11 = NULL; PyObject *__pyx_t_12 = NULL; HashIndex *__pyx_t_13; PyObject *__pyx_t_14 = NULL; int __pyx_t_15; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__cinit__", 0); /* "borg/hashindex.pyx":94 * * def __cinit__(self, capacity=0, path=None, permit_compact=False): * self.key_size = self._key_size # <<<<<<<<<<<<<< * if path: * if isinstance(path, (str, bytes)): */ __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_key_size); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 94, 
__pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __Pyx_PyInt_As_int(__pyx_t_1); if (unlikely((__pyx_t_2 == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 94, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_v_self->key_size = __pyx_t_2; /* "borg/hashindex.pyx":95 * def __cinit__(self, capacity=0, path=None, permit_compact=False): * self.key_size = self._key_size * if path: # <<<<<<<<<<<<<< * if isinstance(path, (str, bytes)): * with open(path, 'rb') as fd: */ __pyx_t_3 = __Pyx_PyObject_IsTrue(__pyx_v_path); if (unlikely(__pyx_t_3 < 0)) __PYX_ERR(0, 95, __pyx_L1_error) if (__pyx_t_3) { /* "borg/hashindex.pyx":96 * self.key_size = self._key_size * if path: * if isinstance(path, (str, bytes)): # <<<<<<<<<<<<<< * with open(path, 'rb') as fd: * self.index = hashindex_read(fd, permit_compact) */ __pyx_t_4 = PyUnicode_Check(__pyx_v_path); __pyx_t_5 = (__pyx_t_4 != 0); if (!__pyx_t_5) { } else { __pyx_t_3 = __pyx_t_5; goto __pyx_L5_bool_binop_done; } __pyx_t_5 = PyBytes_Check(__pyx_v_path); __pyx_t_4 = (__pyx_t_5 != 0); __pyx_t_3 = __pyx_t_4; __pyx_L5_bool_binop_done:; __pyx_t_4 = (__pyx_t_3 != 0); if (__pyx_t_4) { /* "borg/hashindex.pyx":97 * if path: * if isinstance(path, (str, bytes)): * with open(path, 'rb') as fd: # <<<<<<<<<<<<<< * self.index = hashindex_read(fd, permit_compact) * else: */ /*with:*/ { __pyx_t_1 = PyTuple_New(2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 97, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_INCREF(__pyx_v_path); __Pyx_GIVEREF(__pyx_v_path); PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_v_path); __Pyx_INCREF(__pyx_n_u_rb); __Pyx_GIVEREF(__pyx_n_u_rb); PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_n_u_rb); __pyx_t_6 = __Pyx_PyObject_Call(__pyx_builtin_open, __pyx_t_1, NULL); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 97, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_7 = __Pyx_PyObject_LookupSpecial(__pyx_t_6, __pyx_n_s_exit); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 97, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __pyx_t_8 = __Pyx_PyObject_LookupSpecial(__pyx_t_6, __pyx_n_s_enter); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 97, __pyx_L7_error) __Pyx_GOTREF(__pyx_t_8); __pyx_t_9 = NULL; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_8))) { __pyx_t_9 = PyMethod_GET_SELF(__pyx_t_8); if (likely(__pyx_t_9)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_8); __Pyx_INCREF(__pyx_t_9); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_8, function); } } __pyx_t_1 = (__pyx_t_9) ? 
__Pyx_PyObject_CallOneArg(__pyx_t_8, __pyx_t_9) : __Pyx_PyObject_CallNoArg(__pyx_t_8); __Pyx_XDECREF(__pyx_t_9); __pyx_t_9 = 0; if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 97, __pyx_L7_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; __pyx_t_8 = __pyx_t_1; __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; /*try:*/ { { __Pyx_PyThreadState_declare __Pyx_PyThreadState_assign __Pyx_ExceptionSave(&__pyx_t_10, &__pyx_t_11, &__pyx_t_12); __Pyx_XGOTREF(__pyx_t_10); __Pyx_XGOTREF(__pyx_t_11); __Pyx_XGOTREF(__pyx_t_12); /*try:*/ { __pyx_v_fd = __pyx_t_8; __pyx_t_8 = 0; /* "borg/hashindex.pyx":98 * if isinstance(path, (str, bytes)): * with open(path, 'rb') as fd: * self.index = hashindex_read(fd, permit_compact) # <<<<<<<<<<<<<< * else: * self.index = hashindex_read(path, permit_compact) */ __pyx_t_2 = __Pyx_PyInt_As_int(__pyx_v_permit_compact); if (unlikely((__pyx_t_2 == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 98, __pyx_L11_error) __pyx_t_13 = hashindex_read(__pyx_v_fd, __pyx_t_2); if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 98, __pyx_L11_error) __pyx_v_self->index = __pyx_t_13; /* "borg/hashindex.pyx":97 * if path: * if isinstance(path, (str, bytes)): * with open(path, 'rb') as fd: # <<<<<<<<<<<<<< * self.index = hashindex_read(fd, permit_compact) * else: */ } __Pyx_XDECREF(__pyx_t_10); __pyx_t_10 = 0; __Pyx_XDECREF(__pyx_t_11); __pyx_t_11 = 0; __Pyx_XDECREF(__pyx_t_12); __pyx_t_12 = 0; goto __pyx_L16_try_end; __pyx_L11_error:; __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0; __Pyx_XDECREF(__pyx_t_9); __pyx_t_9 = 0; /*except:*/ { __Pyx_AddTraceback("borg.hashindex.IndexBase.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename); if (__Pyx_GetException(&__pyx_t_8, &__pyx_t_6, &__pyx_t_1) < 0) __PYX_ERR(0, 97, __pyx_L13_except_error) __Pyx_GOTREF(__pyx_t_8); __Pyx_GOTREF(__pyx_t_6); __Pyx_GOTREF(__pyx_t_1); __pyx_t_9 = PyTuple_Pack(3, __pyx_t_8, __pyx_t_6, __pyx_t_1); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 97, __pyx_L13_except_error) __Pyx_GOTREF(__pyx_t_9); __pyx_t_14 = __Pyx_PyObject_Call(__pyx_t_7, __pyx_t_9, NULL); __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; if (unlikely(!__pyx_t_14)) __PYX_ERR(0, 97, __pyx_L13_except_error) __Pyx_GOTREF(__pyx_t_14); __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_14); __Pyx_DECREF(__pyx_t_14); __pyx_t_14 = 0; if (__pyx_t_4 < 0) __PYX_ERR(0, 97, __pyx_L13_except_error) __pyx_t_3 = ((!(__pyx_t_4 != 0)) != 0); if (__pyx_t_3) { __Pyx_GIVEREF(__pyx_t_8); __Pyx_GIVEREF(__pyx_t_6); __Pyx_XGIVEREF(__pyx_t_1); __Pyx_ErrRestoreWithState(__pyx_t_8, __pyx_t_6, __pyx_t_1); __pyx_t_8 = 0; __pyx_t_6 = 0; __pyx_t_1 = 0; __PYX_ERR(0, 97, __pyx_L13_except_error) } __Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0; __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0; goto __pyx_L12_exception_handled; } __pyx_L13_except_error:; __Pyx_XGIVEREF(__pyx_t_10); __Pyx_XGIVEREF(__pyx_t_11); __Pyx_XGIVEREF(__pyx_t_12); __Pyx_ExceptionReset(__pyx_t_10, __pyx_t_11, __pyx_t_12); goto __pyx_L1_error; __pyx_L12_exception_handled:; __Pyx_XGIVEREF(__pyx_t_10); __Pyx_XGIVEREF(__pyx_t_11); __Pyx_XGIVEREF(__pyx_t_12); __Pyx_ExceptionReset(__pyx_t_10, __pyx_t_11, __pyx_t_12); __pyx_L16_try_end:; } } /*finally:*/ { /*normal exit:*/{ if (__pyx_t_7) { __pyx_t_12 = __Pyx_PyObject_Call(__pyx_t_7, __pyx_tuple_, NULL); __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; if (unlikely(!__pyx_t_12)) __PYX_ERR(0, 97, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_12); 
__Pyx_DECREF(__pyx_t_12); __pyx_t_12 = 0; } goto __pyx_L10; } __pyx_L10:; } goto __pyx_L20; __pyx_L7_error:; __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; goto __pyx_L1_error; __pyx_L20:; } /* "borg/hashindex.pyx":96 * self.key_size = self._key_size * if path: * if isinstance(path, (str, bytes)): # <<<<<<<<<<<<<< * with open(path, 'rb') as fd: * self.index = hashindex_read(fd, permit_compact) */ goto __pyx_L4; } /* "borg/hashindex.pyx":100 * self.index = hashindex_read(fd, permit_compact) * else: * self.index = hashindex_read(path, permit_compact) # <<<<<<<<<<<<<< * assert self.index, 'hashindex_read() returned NULL with no exception set' * else: */ /*else*/ { __pyx_t_2 = __Pyx_PyInt_As_int(__pyx_v_permit_compact); if (unlikely((__pyx_t_2 == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 100, __pyx_L1_error) __pyx_t_13 = hashindex_read(__pyx_v_path, __pyx_t_2); if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 100, __pyx_L1_error) __pyx_v_self->index = __pyx_t_13; } __pyx_L4:; /* "borg/hashindex.pyx":101 * else: * self.index = hashindex_read(path, permit_compact) * assert self.index, 'hashindex_read() returned NULL with no exception set' # <<<<<<<<<<<<<< * else: * self.index = hashindex_init(capacity, self.key_size, self.value_size) */ #ifndef CYTHON_WITHOUT_ASSERTIONS if (unlikely(!Py_OptimizeFlag)) { if (unlikely(!(__pyx_v_self->index != 0))) { PyErr_SetObject(PyExc_AssertionError, __pyx_kp_u_hashindex_read_returned_NULL_wit); __PYX_ERR(0, 101, __pyx_L1_error) } } #endif /* "borg/hashindex.pyx":95 * def __cinit__(self, capacity=0, path=None, permit_compact=False): * self.key_size = self._key_size * if path: # <<<<<<<<<<<<<< * if isinstance(path, (str, bytes)): * with open(path, 'rb') as fd: */ goto __pyx_L3; } /* "borg/hashindex.pyx":103 * assert self.index, 'hashindex_read() returned NULL with no exception set' * else: * self.index = hashindex_init(capacity, self.key_size, self.value_size) # <<<<<<<<<<<<<< * if not self.index: * raise Exception('hashindex_init failed') */ /*else*/ { __pyx_t_2 = __Pyx_PyInt_As_int(__pyx_v_capacity); if (unlikely((__pyx_t_2 == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 103, __pyx_L1_error) __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_value_size); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 103, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_15 = __Pyx_PyInt_As_int(__pyx_t_1); if (unlikely((__pyx_t_15 == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 103, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_v_self->index = hashindex_init(__pyx_t_2, __pyx_v_self->key_size, __pyx_t_15); /* "borg/hashindex.pyx":104 * else: * self.index = hashindex_init(capacity, self.key_size, self.value_size) * if not self.index: # <<<<<<<<<<<<<< * raise Exception('hashindex_init failed') * */ __pyx_t_3 = ((!(__pyx_v_self->index != 0)) != 0); if (unlikely(__pyx_t_3)) { /* "borg/hashindex.pyx":105 * self.index = hashindex_init(capacity, self.key_size, self.value_size) * if not self.index: * raise Exception('hashindex_init failed') # <<<<<<<<<<<<<< * * def __dealloc__(self): */ __pyx_t_1 = __Pyx_PyObject_Call(((PyObject *)(&((PyTypeObject*)PyExc_Exception)[0])), __pyx_tuple__2, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 105, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_Raise(__pyx_t_1, 0, 0, 0); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __PYX_ERR(0, 105, __pyx_L1_error) /* "borg/hashindex.pyx":104 * else: * self.index = hashindex_init(capacity, self.key_size, self.value_size) * if not self.index: # <<<<<<<<<<<<<< * raise Exception('hashindex_init 
failed') * */ } } __pyx_L3:; /* "borg/hashindex.pyx":93 * MAX_VALUE = _MAX_VALUE * * def __cinit__(self, capacity=0, path=None, permit_compact=False): # <<<<<<<<<<<<<< * self.key_size = self._key_size * if path: */ /* function exit code */ __pyx_r = 0; goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_6); __Pyx_XDECREF(__pyx_t_8); __Pyx_XDECREF(__pyx_t_9); __Pyx_AddTraceback("borg.hashindex.IndexBase.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; __pyx_L0:; __Pyx_XDECREF(__pyx_v_fd); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "borg/hashindex.pyx":107 * raise Exception('hashindex_init failed') * * def __dealloc__(self): # <<<<<<<<<<<<<< * if self.index: * hashindex_free(self.index) */ /* Python wrapper */ static void __pyx_pw_4borg_9hashindex_9IndexBase_3__dealloc__(PyObject *__pyx_v_self); /*proto*/ static void __pyx_pw_4borg_9hashindex_9IndexBase_3__dealloc__(PyObject *__pyx_v_self) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0); __pyx_pf_4borg_9hashindex_9IndexBase_2__dealloc__(((struct __pyx_obj_4borg_9hashindex_IndexBase *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); } static void __pyx_pf_4borg_9hashindex_9IndexBase_2__dealloc__(struct __pyx_obj_4borg_9hashindex_IndexBase *__pyx_v_self) { __Pyx_RefNannyDeclarations int __pyx_t_1; __Pyx_RefNannySetupContext("__dealloc__", 0); /* "borg/hashindex.pyx":108 * * def __dealloc__(self): * if self.index: # <<<<<<<<<<<<<< * hashindex_free(self.index) * */ __pyx_t_1 = (__pyx_v_self->index != 0); if (__pyx_t_1) { /* "borg/hashindex.pyx":109 * def __dealloc__(self): * if self.index: * hashindex_free(self.index) # <<<<<<<<<<<<<< * * @classmethod */ hashindex_free(__pyx_v_self->index); /* "borg/hashindex.pyx":108 * * def __dealloc__(self): * if self.index: # <<<<<<<<<<<<<< * hashindex_free(self.index) * */ } /* "borg/hashindex.pyx":107 * raise Exception('hashindex_init failed') * * def __dealloc__(self): # <<<<<<<<<<<<<< * if self.index: * hashindex_free(self.index) */ /* function exit code */ __Pyx_RefNannyFinishContext(); } /* "borg/hashindex.pyx":112 * * @classmethod * def read(cls, path, permit_compact=False): # <<<<<<<<<<<<<< * return cls(path=path, permit_compact=permit_compact) * */ /* Python wrapper */ static PyObject *__pyx_pw_4borg_9hashindex_9IndexBase_5read(PyObject *__pyx_v_cls, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static PyObject *__pyx_pw_4borg_9hashindex_9IndexBase_5read(PyObject *__pyx_v_cls, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyObject *__pyx_v_path = 0; PyObject *__pyx_v_permit_compact = 0; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("read (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_path,&__pyx_n_s_permit_compact,0}; PyObject* values[2] = {0,0}; values[1] = ((PyObject *)Py_False); if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); CYTHON_FALLTHROUGH; case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_path)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: if (kw_args > 0) { 
PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_permit_compact); if (value) { values[1] = value; kw_args--; } } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "read") < 0)) __PYX_ERR(0, 112, __pyx_L3_error) } } else { switch (PyTuple_GET_SIZE(__pyx_args)) { case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); CYTHON_FALLTHROUGH; case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); break; default: goto __pyx_L5_argtuple_error; } } __pyx_v_path = values[0]; __pyx_v_permit_compact = values[1]; } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("read", 0, 1, 2, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 112, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("borg.hashindex.IndexBase.read", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_4borg_9hashindex_9IndexBase_4read(((PyTypeObject*)__pyx_v_cls), __pyx_v_path, __pyx_v_permit_compact); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4borg_9hashindex_9IndexBase_4read(PyTypeObject *__pyx_v_cls, PyObject *__pyx_v_path, PyObject *__pyx_v_permit_compact) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("read", 0); /* "borg/hashindex.pyx":113 * @classmethod * def read(cls, path, permit_compact=False): * return cls(path=path, permit_compact=permit_compact) # <<<<<<<<<<<<<< * * def write(self, path): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_PyDict_NewPresized(2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 113, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); if (PyDict_SetItem(__pyx_t_1, __pyx_n_s_path, __pyx_v_path) < 0) __PYX_ERR(0, 113, __pyx_L1_error) if (PyDict_SetItem(__pyx_t_1, __pyx_n_s_permit_compact, __pyx_v_permit_compact) < 0) __PYX_ERR(0, 113, __pyx_L1_error) __pyx_t_2 = __Pyx_PyObject_Call(((PyObject *)__pyx_v_cls), __pyx_empty_tuple, __pyx_t_1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 113, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "borg/hashindex.pyx":112 * * @classmethod * def read(cls, path, permit_compact=False): # <<<<<<<<<<<<<< * return cls(path=path, permit_compact=permit_compact) * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_AddTraceback("borg.hashindex.IndexBase.read", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "borg/hashindex.pyx":115 * return cls(path=path, permit_compact=permit_compact) * * def write(self, path): # <<<<<<<<<<<<<< * if isinstance(path, (str, bytes)): * with open(path, 'wb') as fd: */ /* Python wrapper */ static PyObject *__pyx_pw_4borg_9hashindex_9IndexBase_7write(PyObject *__pyx_v_self, PyObject *__pyx_v_path); /*proto*/ static PyObject *__pyx_pw_4borg_9hashindex_9IndexBase_7write(PyObject *__pyx_v_self, PyObject *__pyx_v_path) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("write (wrapper)", 0); __pyx_r = __pyx_pf_4borg_9hashindex_9IndexBase_6write(((struct __pyx_obj_4borg_9hashindex_IndexBase *)__pyx_v_self), ((PyObject *)__pyx_v_path)); /* function exit code */ 
__Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4borg_9hashindex_9IndexBase_6write(struct __pyx_obj_4borg_9hashindex_IndexBase *__pyx_v_self, PyObject *__pyx_v_path) { PyObject *__pyx_v_fd = NULL; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; int __pyx_t_3; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; PyObject *__pyx_t_6 = NULL; PyObject *__pyx_t_7 = NULL; PyObject *__pyx_t_8 = NULL; PyObject *__pyx_t_9 = NULL; PyObject *__pyx_t_10 = NULL; PyObject *__pyx_t_11 = NULL; PyObject *__pyx_t_12 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("write", 0); /* "borg/hashindex.pyx":116 * * def write(self, path): * if isinstance(path, (str, bytes)): # <<<<<<<<<<<<<< * with open(path, 'wb') as fd: * hashindex_write(self.index, fd) */ __pyx_t_2 = PyUnicode_Check(__pyx_v_path); __pyx_t_3 = (__pyx_t_2 != 0); if (!__pyx_t_3) { } else { __pyx_t_1 = __pyx_t_3; goto __pyx_L4_bool_binop_done; } __pyx_t_3 = PyBytes_Check(__pyx_v_path); __pyx_t_2 = (__pyx_t_3 != 0); __pyx_t_1 = __pyx_t_2; __pyx_L4_bool_binop_done:; __pyx_t_2 = (__pyx_t_1 != 0); if (__pyx_t_2) { /* "borg/hashindex.pyx":117 * def write(self, path): * if isinstance(path, (str, bytes)): * with open(path, 'wb') as fd: # <<<<<<<<<<<<<< * hashindex_write(self.index, fd) * else: */ /*with:*/ { __pyx_t_4 = PyTuple_New(2); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 117, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_INCREF(__pyx_v_path); __Pyx_GIVEREF(__pyx_v_path); PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_v_path); __Pyx_INCREF(__pyx_n_u_wb); __Pyx_GIVEREF(__pyx_n_u_wb); PyTuple_SET_ITEM(__pyx_t_4, 1, __pyx_n_u_wb); __pyx_t_5 = __Pyx_PyObject_Call(__pyx_builtin_open, __pyx_t_4, NULL); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 117, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_6 = __Pyx_PyObject_LookupSpecial(__pyx_t_5, __pyx_n_s_exit); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 117, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __pyx_t_7 = __Pyx_PyObject_LookupSpecial(__pyx_t_5, __pyx_n_s_enter); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 117, __pyx_L6_error) __Pyx_GOTREF(__pyx_t_7); __pyx_t_8 = NULL; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_7))) { __pyx_t_8 = PyMethod_GET_SELF(__pyx_t_7); if (likely(__pyx_t_8)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_7); __Pyx_INCREF(__pyx_t_8); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_7, function); } } __pyx_t_4 = (__pyx_t_8) ? 
__Pyx_PyObject_CallOneArg(__pyx_t_7, __pyx_t_8) : __Pyx_PyObject_CallNoArg(__pyx_t_7); __Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0; if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 117, __pyx_L6_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; __pyx_t_7 = __pyx_t_4; __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; /*try:*/ { { __Pyx_PyThreadState_declare __Pyx_PyThreadState_assign __Pyx_ExceptionSave(&__pyx_t_9, &__pyx_t_10, &__pyx_t_11); __Pyx_XGOTREF(__pyx_t_9); __Pyx_XGOTREF(__pyx_t_10); __Pyx_XGOTREF(__pyx_t_11); /*try:*/ { __pyx_v_fd = __pyx_t_7; __pyx_t_7 = 0; /* "borg/hashindex.pyx":118 * if isinstance(path, (str, bytes)): * with open(path, 'wb') as fd: * hashindex_write(self.index, fd) # <<<<<<<<<<<<<< * else: * hashindex_write(self.index, path) */ hashindex_write(__pyx_v_self->index, __pyx_v_fd); if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 118, __pyx_L10_error) /* "borg/hashindex.pyx":117 * def write(self, path): * if isinstance(path, (str, bytes)): * with open(path, 'wb') as fd: # <<<<<<<<<<<<<< * hashindex_write(self.index, fd) * else: */ } __Pyx_XDECREF(__pyx_t_9); __pyx_t_9 = 0; __Pyx_XDECREF(__pyx_t_10); __pyx_t_10 = 0; __Pyx_XDECREF(__pyx_t_11); __pyx_t_11 = 0; goto __pyx_L15_try_end; __pyx_L10_error:; __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0; /*except:*/ { __Pyx_AddTraceback("borg.hashindex.IndexBase.write", __pyx_clineno, __pyx_lineno, __pyx_filename); if (__Pyx_GetException(&__pyx_t_7, &__pyx_t_5, &__pyx_t_4) < 0) __PYX_ERR(0, 117, __pyx_L12_except_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_GOTREF(__pyx_t_5); __Pyx_GOTREF(__pyx_t_4); __pyx_t_8 = PyTuple_Pack(3, __pyx_t_7, __pyx_t_5, __pyx_t_4); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 117, __pyx_L12_except_error) __Pyx_GOTREF(__pyx_t_8); __pyx_t_12 = __Pyx_PyObject_Call(__pyx_t_6, __pyx_t_8, NULL); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; if (unlikely(!__pyx_t_12)) __PYX_ERR(0, 117, __pyx_L12_except_error) __Pyx_GOTREF(__pyx_t_12); __pyx_t_2 = __Pyx_PyObject_IsTrue(__pyx_t_12); __Pyx_DECREF(__pyx_t_12); __pyx_t_12 = 0; if (__pyx_t_2 < 0) __PYX_ERR(0, 117, __pyx_L12_except_error) __pyx_t_1 = ((!(__pyx_t_2 != 0)) != 0); if (__pyx_t_1) { __Pyx_GIVEREF(__pyx_t_7); __Pyx_GIVEREF(__pyx_t_5); __Pyx_XGIVEREF(__pyx_t_4); __Pyx_ErrRestoreWithState(__pyx_t_7, __pyx_t_5, __pyx_t_4); __pyx_t_7 = 0; __pyx_t_5 = 0; __pyx_t_4 = 0; __PYX_ERR(0, 117, __pyx_L12_except_error) } __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; goto __pyx_L11_exception_handled; } __pyx_L12_except_error:; __Pyx_XGIVEREF(__pyx_t_9); __Pyx_XGIVEREF(__pyx_t_10); __Pyx_XGIVEREF(__pyx_t_11); __Pyx_ExceptionReset(__pyx_t_9, __pyx_t_10, __pyx_t_11); goto __pyx_L1_error; __pyx_L11_exception_handled:; __Pyx_XGIVEREF(__pyx_t_9); __Pyx_XGIVEREF(__pyx_t_10); __Pyx_XGIVEREF(__pyx_t_11); __Pyx_ExceptionReset(__pyx_t_9, __pyx_t_10, __pyx_t_11); __pyx_L15_try_end:; } } /*finally:*/ { /*normal exit:*/{ if (__pyx_t_6) { __pyx_t_11 = __Pyx_PyObject_Call(__pyx_t_6, __pyx_tuple_, NULL); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 117, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_11); __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0; } goto __pyx_L9; } __pyx_L9:; } goto __pyx_L19; __pyx_L6_error:; __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; goto __pyx_L1_error; __pyx_L19:; } /* "borg/hashindex.pyx":116 * * def write(self, 
path): * if isinstance(path, (str, bytes)): # <<<<<<<<<<<<<< * with open(path, 'wb') as fd: * hashindex_write(self.index, fd) */ goto __pyx_L3; } /* "borg/hashindex.pyx":120 * hashindex_write(self.index, fd) * else: * hashindex_write(self.index, path) # <<<<<<<<<<<<<< * * def clear(self): */ /*else*/ { hashindex_write(__pyx_v_self->index, __pyx_v_path); if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 120, __pyx_L1_error) } __pyx_L3:; /* "borg/hashindex.pyx":115 * return cls(path=path, permit_compact=permit_compact) * * def write(self, path): # <<<<<<<<<<<<<< * if isinstance(path, (str, bytes)): * with open(path, 'wb') as fd: */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_XDECREF(__pyx_t_7); __Pyx_XDECREF(__pyx_t_8); __Pyx_AddTraceback("borg.hashindex.IndexBase.write", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_fd); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "borg/hashindex.pyx":122 * hashindex_write(self.index, path) * * def clear(self): # <<<<<<<<<<<<<< * hashindex_free(self.index) * self.index = hashindex_init(0, self.key_size, self.value_size) */ /* Python wrapper */ static PyObject *__pyx_pw_4borg_9hashindex_9IndexBase_9clear(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ static PyObject *__pyx_pw_4borg_9hashindex_9IndexBase_9clear(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("clear (wrapper)", 0); __pyx_r = __pyx_pf_4borg_9hashindex_9IndexBase_8clear(((struct __pyx_obj_4borg_9hashindex_IndexBase *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4borg_9hashindex_9IndexBase_8clear(struct __pyx_obj_4borg_9hashindex_IndexBase *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_t_2; int __pyx_t_3; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("clear", 0); /* "borg/hashindex.pyx":123 * * def clear(self): * hashindex_free(self.index) # <<<<<<<<<<<<<< * self.index = hashindex_init(0, self.key_size, self.value_size) * if not self.index: */ hashindex_free(__pyx_v_self->index); /* "borg/hashindex.pyx":124 * def clear(self): * hashindex_free(self.index) * self.index = hashindex_init(0, self.key_size, self.value_size) # <<<<<<<<<<<<<< * if not self.index: * raise Exception('hashindex_init failed') */ __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_value_size); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 124, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __Pyx_PyInt_As_int(__pyx_t_1); if (unlikely((__pyx_t_2 == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 124, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_v_self->index = hashindex_init(0, __pyx_v_self->key_size, __pyx_t_2); /* "borg/hashindex.pyx":125 * hashindex_free(self.index) * self.index = hashindex_init(0, self.key_size, self.value_size) * if not self.index: # <<<<<<<<<<<<<< * raise Exception('hashindex_init failed') * */ __pyx_t_3 = ((!(__pyx_v_self->index != 0)) != 0); if (unlikely(__pyx_t_3)) { /* "borg/hashindex.pyx":126 * self.index = hashindex_init(0, self.key_size, self.value_size) * if not self.index: * raise Exception('hashindex_init failed') # <<<<<<<<<<<<<< * * def setdefault(self, key, value): */ 
__pyx_t_1 = __Pyx_PyObject_Call(((PyObject *)(&((PyTypeObject*)PyExc_Exception)[0])), __pyx_tuple__2, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 126, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_Raise(__pyx_t_1, 0, 0, 0); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __PYX_ERR(0, 126, __pyx_L1_error) /* "borg/hashindex.pyx":125 * hashindex_free(self.index) * self.index = hashindex_init(0, self.key_size, self.value_size) * if not self.index: # <<<<<<<<<<<<<< * raise Exception('hashindex_init failed') * */ } /* "borg/hashindex.pyx":122 * hashindex_write(self.index, path) * * def clear(self): # <<<<<<<<<<<<<< * hashindex_free(self.index) * self.index = hashindex_init(0, self.key_size, self.value_size) */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("borg.hashindex.IndexBase.clear", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "borg/hashindex.pyx":128 * raise Exception('hashindex_init failed') * * def setdefault(self, key, value): # <<<<<<<<<<<<<< * if not key in self: * self[key] = value */ /* Python wrapper */ static PyObject *__pyx_pw_4borg_9hashindex_9IndexBase_11setdefault(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static PyObject *__pyx_pw_4borg_9hashindex_9IndexBase_11setdefault(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyObject *__pyx_v_key = 0; PyObject *__pyx_v_value = 0; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("setdefault (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_key,&__pyx_n_s_value,0}; PyObject* values[2] = {0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); CYTHON_FALLTHROUGH; case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_key)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_value)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("setdefault", 1, 2, 2, 1); __PYX_ERR(0, 128, __pyx_L3_error) } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "setdefault") < 0)) __PYX_ERR(0, 128, __pyx_L3_error) } } else if (PyTuple_GET_SIZE(__pyx_args) != 2) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); } __pyx_v_key = values[0]; __pyx_v_value = values[1]; } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("setdefault", 1, 2, 2, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 128, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("borg.hashindex.IndexBase.setdefault", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_4borg_9hashindex_9IndexBase_10setdefault(((struct __pyx_obj_4borg_9hashindex_IndexBase *)__pyx_v_self), __pyx_v_key, __pyx_v_value); /* function 
exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4borg_9hashindex_9IndexBase_10setdefault(struct __pyx_obj_4borg_9hashindex_IndexBase *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_value) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("setdefault", 0); /* "borg/hashindex.pyx":129 * * def setdefault(self, key, value): * if not key in self: # <<<<<<<<<<<<<< * self[key] = value * */ __pyx_t_1 = (__Pyx_PySequence_ContainsTF(__pyx_v_key, ((PyObject *)__pyx_v_self), Py_NE)); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(0, 129, __pyx_L1_error) __pyx_t_2 = (__pyx_t_1 != 0); if (__pyx_t_2) { /* "borg/hashindex.pyx":130 * def setdefault(self, key, value): * if not key in self: * self[key] = value # <<<<<<<<<<<<<< * * def __delitem__(self, key): */ if (unlikely(PyObject_SetItem(((PyObject *)__pyx_v_self), __pyx_v_key, __pyx_v_value) < 0)) __PYX_ERR(0, 130, __pyx_L1_error) /* "borg/hashindex.pyx":129 * * def setdefault(self, key, value): * if not key in self: # <<<<<<<<<<<<<< * self[key] = value * */ } /* "borg/hashindex.pyx":128 * raise Exception('hashindex_init failed') * * def setdefault(self, key, value): # <<<<<<<<<<<<<< * if not key in self: * self[key] = value */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_AddTraceback("borg.hashindex.IndexBase.setdefault", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "borg/hashindex.pyx":132 * self[key] = value * * def __delitem__(self, key): # <<<<<<<<<<<<<< * assert len(key) == self.key_size * rc = hashindex_delete(self.index, key) */ /* Python wrapper */ static int __pyx_pw_4borg_9hashindex_9IndexBase_13__delitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_key); /*proto*/ static int __pyx_pw_4borg_9hashindex_9IndexBase_13__delitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_key) { int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__delitem__ (wrapper)", 0); __pyx_r = __pyx_pf_4borg_9hashindex_9IndexBase_12__delitem__(((struct __pyx_obj_4borg_9hashindex_IndexBase *)__pyx_v_self), ((PyObject *)__pyx_v_key)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static int __pyx_pf_4borg_9hashindex_9IndexBase_12__delitem__(struct __pyx_obj_4borg_9hashindex_IndexBase *__pyx_v_self, PyObject *__pyx_v_key) { int __pyx_v_rc; int __pyx_r; __Pyx_RefNannyDeclarations Py_ssize_t __pyx_t_1; char *__pyx_t_2; int __pyx_t_3; PyObject *__pyx_t_4 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__delitem__", 0); /* "borg/hashindex.pyx":133 * * def __delitem__(self, key): * assert len(key) == self.key_size # <<<<<<<<<<<<<< * rc = hashindex_delete(self.index, key) * if rc == 1: */ #ifndef CYTHON_WITHOUT_ASSERTIONS if (unlikely(!Py_OptimizeFlag)) { __pyx_t_1 = PyObject_Length(__pyx_v_key); if (unlikely(__pyx_t_1 == ((Py_ssize_t)-1))) __PYX_ERR(0, 133, __pyx_L1_error) if (unlikely(!((__pyx_t_1 == __pyx_v_self->key_size) != 0))) { PyErr_SetNone(PyExc_AssertionError); __PYX_ERR(0, 133, __pyx_L1_error) } } #endif /* "borg/hashindex.pyx":134 * def __delitem__(self, key): * assert len(key) == self.key_size * rc = hashindex_delete(self.index, key) # <<<<<<<<<<<<<< * if rc == 1: * return # success */ __pyx_t_2 = 
__Pyx_PyObject_AsWritableString(__pyx_v_key); if (unlikely((!__pyx_t_2) && PyErr_Occurred())) __PYX_ERR(0, 134, __pyx_L1_error) __pyx_v_rc = hashindex_delete(__pyx_v_self->index, ((char *)__pyx_t_2)); /* "borg/hashindex.pyx":135 * assert len(key) == self.key_size * rc = hashindex_delete(self.index, key) * if rc == 1: # <<<<<<<<<<<<<< * return # success * if rc == -1: */ __pyx_t_3 = ((__pyx_v_rc == 1) != 0); if (__pyx_t_3) { /* "borg/hashindex.pyx":136 * rc = hashindex_delete(self.index, key) * if rc == 1: * return # success # <<<<<<<<<<<<<< * if rc == -1: * raise KeyError(key) */ __pyx_r = 0; goto __pyx_L0; /* "borg/hashindex.pyx":135 * assert len(key) == self.key_size * rc = hashindex_delete(self.index, key) * if rc == 1: # <<<<<<<<<<<<<< * return # success * if rc == -1: */ } /* "borg/hashindex.pyx":137 * if rc == 1: * return # success * if rc == -1: # <<<<<<<<<<<<<< * raise KeyError(key) * if rc == 0: */ __pyx_t_3 = ((__pyx_v_rc == -1L) != 0); if (unlikely(__pyx_t_3)) { /* "borg/hashindex.pyx":138 * return # success * if rc == -1: * raise KeyError(key) # <<<<<<<<<<<<<< * if rc == 0: * raise Exception('hashindex_delete failed') */ __pyx_t_4 = __Pyx_PyObject_CallOneArg(__pyx_builtin_KeyError, __pyx_v_key); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 138, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_Raise(__pyx_t_4, 0, 0, 0); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __PYX_ERR(0, 138, __pyx_L1_error) /* "borg/hashindex.pyx":137 * if rc == 1: * return # success * if rc == -1: # <<<<<<<<<<<<<< * raise KeyError(key) * if rc == 0: */ } /* "borg/hashindex.pyx":139 * if rc == -1: * raise KeyError(key) * if rc == 0: # <<<<<<<<<<<<<< * raise Exception('hashindex_delete failed') * */ __pyx_t_3 = ((__pyx_v_rc == 0) != 0); if (unlikely(__pyx_t_3)) { /* "borg/hashindex.pyx":140 * raise KeyError(key) * if rc == 0: * raise Exception('hashindex_delete failed') # <<<<<<<<<<<<<< * * def get(self, key, default=None): */ __pyx_t_4 = __Pyx_PyObject_Call(((PyObject *)(&((PyTypeObject*)PyExc_Exception)[0])), __pyx_tuple__3, NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 140, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_Raise(__pyx_t_4, 0, 0, 0); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __PYX_ERR(0, 140, __pyx_L1_error) /* "borg/hashindex.pyx":139 * if rc == -1: * raise KeyError(key) * if rc == 0: # <<<<<<<<<<<<<< * raise Exception('hashindex_delete failed') * */ } /* "borg/hashindex.pyx":132 * self[key] = value * * def __delitem__(self, key): # <<<<<<<<<<<<<< * assert len(key) == self.key_size * rc = hashindex_delete(self.index, key) */ /* function exit code */ __pyx_r = 0; goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_4); __Pyx_AddTraceback("borg.hashindex.IndexBase.__delitem__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "borg/hashindex.pyx":142 * raise Exception('hashindex_delete failed') * * def get(self, key, default=None): # <<<<<<<<<<<<<< * try: * return self[key] */ /* Python wrapper */ static PyObject *__pyx_pw_4borg_9hashindex_9IndexBase_15get(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static PyObject *__pyx_pw_4borg_9hashindex_9IndexBase_15get(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyObject *__pyx_v_key = 0; PyObject *__pyx_v_default = 0; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("get (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = 
{&__pyx_n_s_key,&__pyx_n_s_default,0}; PyObject* values[2] = {0,0}; values[1] = ((PyObject *)Py_None); if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); CYTHON_FALLTHROUGH; case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_key)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: if (kw_args > 0) { PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_default); if (value) { values[1] = value; kw_args--; } } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "get") < 0)) __PYX_ERR(0, 142, __pyx_L3_error) } } else { switch (PyTuple_GET_SIZE(__pyx_args)) { case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); CYTHON_FALLTHROUGH; case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); break; default: goto __pyx_L5_argtuple_error; } } __pyx_v_key = values[0]; __pyx_v_default = values[1]; } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("get", 0, 1, 2, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 142, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("borg.hashindex.IndexBase.get", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_4borg_9hashindex_9IndexBase_14get(((struct __pyx_obj_4borg_9hashindex_IndexBase *)__pyx_v_self), __pyx_v_key, __pyx_v_default); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4borg_9hashindex_9IndexBase_14get(struct __pyx_obj_4borg_9hashindex_IndexBase *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_default) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; int __pyx_t_5; PyObject *__pyx_t_6 = NULL; PyObject *__pyx_t_7 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("get", 0); /* "borg/hashindex.pyx":143 * * def get(self, key, default=None): * try: # <<<<<<<<<<<<<< * return self[key] * except KeyError: */ { __Pyx_PyThreadState_declare __Pyx_PyThreadState_assign __Pyx_ExceptionSave(&__pyx_t_1, &__pyx_t_2, &__pyx_t_3); __Pyx_XGOTREF(__pyx_t_1); __Pyx_XGOTREF(__pyx_t_2); __Pyx_XGOTREF(__pyx_t_3); /*try:*/ { /* "borg/hashindex.pyx":144 * def get(self, key, default=None): * try: * return self[key] # <<<<<<<<<<<<<< * except KeyError: * return default */ __Pyx_XDECREF(__pyx_r); __pyx_t_4 = __Pyx_PyObject_GetItem(((PyObject *)__pyx_v_self), __pyx_v_key); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 144, __pyx_L3_error) __Pyx_GOTREF(__pyx_t_4); __pyx_r = __pyx_t_4; __pyx_t_4 = 0; goto __pyx_L7_try_return; /* "borg/hashindex.pyx":143 * * def get(self, key, default=None): * try: # <<<<<<<<<<<<<< * return self[key] * except KeyError: */ } __pyx_L3_error:; __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; /* "borg/hashindex.pyx":145 * try: * return self[key] * except KeyError: # <<<<<<<<<<<<<< * return default * */ __pyx_t_5 = __Pyx_PyErr_ExceptionMatches(__pyx_builtin_KeyError); if (__pyx_t_5) { __Pyx_AddTraceback("borg.hashindex.IndexBase.get", __pyx_clineno, __pyx_lineno, 
__pyx_filename); if (__Pyx_GetException(&__pyx_t_4, &__pyx_t_6, &__pyx_t_7) < 0) __PYX_ERR(0, 145, __pyx_L5_except_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_GOTREF(__pyx_t_6); __Pyx_GOTREF(__pyx_t_7); /* "borg/hashindex.pyx":146 * return self[key] * except KeyError: * return default # <<<<<<<<<<<<<< * * def pop(self, key, default=_NoDefault): */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(__pyx_v_default); __pyx_r = __pyx_v_default; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; goto __pyx_L6_except_return; } goto __pyx_L5_except_error; __pyx_L5_except_error:; /* "borg/hashindex.pyx":143 * * def get(self, key, default=None): * try: # <<<<<<<<<<<<<< * return self[key] * except KeyError: */ __Pyx_XGIVEREF(__pyx_t_1); __Pyx_XGIVEREF(__pyx_t_2); __Pyx_XGIVEREF(__pyx_t_3); __Pyx_ExceptionReset(__pyx_t_1, __pyx_t_2, __pyx_t_3); goto __pyx_L1_error; __pyx_L7_try_return:; __Pyx_XGIVEREF(__pyx_t_1); __Pyx_XGIVEREF(__pyx_t_2); __Pyx_XGIVEREF(__pyx_t_3); __Pyx_ExceptionReset(__pyx_t_1, __pyx_t_2, __pyx_t_3); goto __pyx_L0; __pyx_L6_except_return:; __Pyx_XGIVEREF(__pyx_t_1); __Pyx_XGIVEREF(__pyx_t_2); __Pyx_XGIVEREF(__pyx_t_3); __Pyx_ExceptionReset(__pyx_t_1, __pyx_t_2, __pyx_t_3); goto __pyx_L0; } /* "borg/hashindex.pyx":142 * raise Exception('hashindex_delete failed') * * def get(self, key, default=None): # <<<<<<<<<<<<<< * try: * return self[key] */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_6); __Pyx_XDECREF(__pyx_t_7); __Pyx_AddTraceback("borg.hashindex.IndexBase.get", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "borg/hashindex.pyx":148 * return default * * def pop(self, key, default=_NoDefault): # <<<<<<<<<<<<<< * try: * value = self[key] */ /* Python wrapper */ static PyObject *__pyx_pw_4borg_9hashindex_9IndexBase_17pop(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static PyObject *__pyx_pw_4borg_9hashindex_9IndexBase_17pop(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyObject *__pyx_v_key = 0; PyObject *__pyx_v_default = 0; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("pop (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_key,&__pyx_n_s_default,0}; PyObject* values[2] = {0,0}; values[1] = __pyx_k__4; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); CYTHON_FALLTHROUGH; case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_key)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: if (kw_args > 0) { PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_default); if (value) { values[1] = value; kw_args--; } } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "pop") < 0)) __PYX_ERR(0, 148, __pyx_L3_error) } } else { switch (PyTuple_GET_SIZE(__pyx_args)) { case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); CYTHON_FALLTHROUGH; case 1: values[0] = 
PyTuple_GET_ITEM(__pyx_args, 0); break; default: goto __pyx_L5_argtuple_error; } } __pyx_v_key = values[0]; __pyx_v_default = values[1]; } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("pop", 0, 1, 2, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 148, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("borg.hashindex.IndexBase.pop", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_4borg_9hashindex_9IndexBase_16pop(((struct __pyx_obj_4borg_9hashindex_IndexBase *)__pyx_v_self), __pyx_v_key, __pyx_v_default); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4borg_9hashindex_9IndexBase_16pop(struct __pyx_obj_4borg_9hashindex_IndexBase *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_default) { PyObject *__pyx_v_value = NULL; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; int __pyx_t_5; PyObject *__pyx_t_6 = NULL; PyObject *__pyx_t_7 = NULL; PyObject *__pyx_t_8 = NULL; int __pyx_t_9; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("pop", 0); /* "borg/hashindex.pyx":149 * * def pop(self, key, default=_NoDefault): * try: # <<<<<<<<<<<<<< * value = self[key] * del self[key] */ { __Pyx_PyThreadState_declare __Pyx_PyThreadState_assign __Pyx_ExceptionSave(&__pyx_t_1, &__pyx_t_2, &__pyx_t_3); __Pyx_XGOTREF(__pyx_t_1); __Pyx_XGOTREF(__pyx_t_2); __Pyx_XGOTREF(__pyx_t_3); /*try:*/ { /* "borg/hashindex.pyx":150 * def pop(self, key, default=_NoDefault): * try: * value = self[key] # <<<<<<<<<<<<<< * del self[key] * return value */ __pyx_t_4 = __Pyx_PyObject_GetItem(((PyObject *)__pyx_v_self), __pyx_v_key); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 150, __pyx_L3_error) __Pyx_GOTREF(__pyx_t_4); __pyx_v_value = __pyx_t_4; __pyx_t_4 = 0; /* "borg/hashindex.pyx":151 * try: * value = self[key] * del self[key] # <<<<<<<<<<<<<< * return value * except KeyError: */ if (unlikely(PyObject_DelItem(((PyObject *)__pyx_v_self), __pyx_v_key) < 0)) __PYX_ERR(0, 151, __pyx_L3_error) /* "borg/hashindex.pyx":152 * value = self[key] * del self[key] * return value # <<<<<<<<<<<<<< * except KeyError: * if default != _NoDefault: */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(__pyx_v_value); __pyx_r = __pyx_v_value; goto __pyx_L7_try_return; /* "borg/hashindex.pyx":149 * * def pop(self, key, default=_NoDefault): * try: # <<<<<<<<<<<<<< * value = self[key] * del self[key] */ } __pyx_L3_error:; __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; /* "borg/hashindex.pyx":153 * del self[key] * return value * except KeyError: # <<<<<<<<<<<<<< * if default != _NoDefault: * return default */ __pyx_t_5 = __Pyx_PyErr_ExceptionMatches(__pyx_builtin_KeyError); if (__pyx_t_5) { __Pyx_AddTraceback("borg.hashindex.IndexBase.pop", __pyx_clineno, __pyx_lineno, __pyx_filename); if (__Pyx_GetException(&__pyx_t_4, &__pyx_t_6, &__pyx_t_7) < 0) __PYX_ERR(0, 153, __pyx_L5_except_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_GOTREF(__pyx_t_6); __Pyx_GOTREF(__pyx_t_7); /* "borg/hashindex.pyx":154 * return value * except KeyError: * if default != _NoDefault: # <<<<<<<<<<<<<< * return default * raise */ __pyx_t_8 = PyObject_RichCompare(__pyx_v_default, __pyx_v_4borg_9hashindex__NoDefault, Py_NE); __Pyx_XGOTREF(__pyx_t_8); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 154, __pyx_L5_except_error) __pyx_t_9 = 
__Pyx_PyObject_IsTrue(__pyx_t_8); if (unlikely(__pyx_t_9 < 0)) __PYX_ERR(0, 154, __pyx_L5_except_error) __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; if (__pyx_t_9) { /* "borg/hashindex.pyx":155 * except KeyError: * if default != _NoDefault: * return default # <<<<<<<<<<<<<< * raise * */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(__pyx_v_default); __pyx_r = __pyx_v_default; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; goto __pyx_L6_except_return; /* "borg/hashindex.pyx":154 * return value * except KeyError: * if default != _NoDefault: # <<<<<<<<<<<<<< * return default * raise */ } /* "borg/hashindex.pyx":156 * if default != _NoDefault: * return default * raise # <<<<<<<<<<<<<< * * def __len__(self): */ __Pyx_GIVEREF(__pyx_t_4); __Pyx_GIVEREF(__pyx_t_6); __Pyx_XGIVEREF(__pyx_t_7); __Pyx_ErrRestoreWithState(__pyx_t_4, __pyx_t_6, __pyx_t_7); __pyx_t_4 = 0; __pyx_t_6 = 0; __pyx_t_7 = 0; __PYX_ERR(0, 156, __pyx_L5_except_error) } goto __pyx_L5_except_error; __pyx_L5_except_error:; /* "borg/hashindex.pyx":149 * * def pop(self, key, default=_NoDefault): * try: # <<<<<<<<<<<<<< * value = self[key] * del self[key] */ __Pyx_XGIVEREF(__pyx_t_1); __Pyx_XGIVEREF(__pyx_t_2); __Pyx_XGIVEREF(__pyx_t_3); __Pyx_ExceptionReset(__pyx_t_1, __pyx_t_2, __pyx_t_3); goto __pyx_L1_error; __pyx_L7_try_return:; __Pyx_XGIVEREF(__pyx_t_1); __Pyx_XGIVEREF(__pyx_t_2); __Pyx_XGIVEREF(__pyx_t_3); __Pyx_ExceptionReset(__pyx_t_1, __pyx_t_2, __pyx_t_3); goto __pyx_L0; __pyx_L6_except_return:; __Pyx_XGIVEREF(__pyx_t_1); __Pyx_XGIVEREF(__pyx_t_2); __Pyx_XGIVEREF(__pyx_t_3); __Pyx_ExceptionReset(__pyx_t_1, __pyx_t_2, __pyx_t_3); goto __pyx_L0; } /* "borg/hashindex.pyx":148 * return default * * def pop(self, key, default=_NoDefault): # <<<<<<<<<<<<<< * try: * value = self[key] */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_6); __Pyx_XDECREF(__pyx_t_7); __Pyx_XDECREF(__pyx_t_8); __Pyx_AddTraceback("borg.hashindex.IndexBase.pop", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_value); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "borg/hashindex.pyx":158 * raise * * def __len__(self): # <<<<<<<<<<<<<< * return hashindex_len(self.index) * */ /* Python wrapper */ static Py_ssize_t __pyx_pw_4borg_9hashindex_9IndexBase_19__len__(PyObject *__pyx_v_self); /*proto*/ static Py_ssize_t __pyx_pw_4borg_9hashindex_9IndexBase_19__len__(PyObject *__pyx_v_self) { Py_ssize_t __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__len__ (wrapper)", 0); __pyx_r = __pyx_pf_4borg_9hashindex_9IndexBase_18__len__(((struct __pyx_obj_4borg_9hashindex_IndexBase *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static Py_ssize_t __pyx_pf_4borg_9hashindex_9IndexBase_18__len__(struct __pyx_obj_4borg_9hashindex_IndexBase *__pyx_v_self) { Py_ssize_t __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__len__", 0); /* "borg/hashindex.pyx":159 * * def __len__(self): * return hashindex_len(self.index) # <<<<<<<<<<<<<< * * def size(self): */ __pyx_r = hashindex_len(__pyx_v_self->index); goto __pyx_L0; /* "borg/hashindex.pyx":158 * raise * * def __len__(self): # <<<<<<<<<<<<<< * return hashindex_len(self.index) * */ /* function exit code */ __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "borg/hashindex.pyx":161 * return hashindex_len(self.index) * * def size(self): # <<<<<<<<<<<<<< * """Return size 
(bytes) of hash table.""" * return hashindex_size(self.index) */ /* Python wrapper */ static PyObject *__pyx_pw_4borg_9hashindex_9IndexBase_21size(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ static char __pyx_doc_4borg_9hashindex_9IndexBase_20size[] = "Return size (bytes) of hash table."; static PyObject *__pyx_pw_4borg_9hashindex_9IndexBase_21size(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("size (wrapper)", 0); __pyx_r = __pyx_pf_4borg_9hashindex_9IndexBase_20size(((struct __pyx_obj_4borg_9hashindex_IndexBase *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4borg_9hashindex_9IndexBase_20size(struct __pyx_obj_4borg_9hashindex_IndexBase *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("size", 0); /* "borg/hashindex.pyx":163 * def size(self): * """Return size (bytes) of hash table.""" * return hashindex_size(self.index) # <<<<<<<<<<<<<< * * def compact(self): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_PyInt_From_int(hashindex_size(__pyx_v_self->index)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 163, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "borg/hashindex.pyx":161 * return hashindex_len(self.index) * * def size(self): # <<<<<<<<<<<<<< * """Return size (bytes) of hash table.""" * return hashindex_size(self.index) */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("borg.hashindex.IndexBase.size", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "borg/hashindex.pyx":165 * return hashindex_size(self.index) * * def compact(self): # <<<<<<<<<<<<<< * return hashindex_compact(self.index) * */ /* Python wrapper */ static PyObject *__pyx_pw_4borg_9hashindex_9IndexBase_23compact(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ static PyObject *__pyx_pw_4borg_9hashindex_9IndexBase_23compact(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("compact (wrapper)", 0); __pyx_r = __pyx_pf_4borg_9hashindex_9IndexBase_22compact(((struct __pyx_obj_4borg_9hashindex_IndexBase *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4borg_9hashindex_9IndexBase_22compact(struct __pyx_obj_4borg_9hashindex_IndexBase *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("compact", 0); /* "borg/hashindex.pyx":166 * * def compact(self): * return hashindex_compact(self.index) # <<<<<<<<<<<<<< * * */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_PyInt_From_uint64_t(hashindex_compact(__pyx_v_self->index)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 166, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "borg/hashindex.pyx":165 * return hashindex_size(self.index) * * def compact(self): # <<<<<<<<<<<<<< * return hashindex_compact(self.index) * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); 
__Pyx_AddTraceback("borg.hashindex.IndexBase.compact", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "(tree fragment)":1 * def __reduce_cython__(self): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): */ /* Python wrapper */ static PyObject *__pyx_pw_4borg_9hashindex_9IndexBase_25__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ static PyObject *__pyx_pw_4borg_9hashindex_9IndexBase_25__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0); __pyx_r = __pyx_pf_4borg_9hashindex_9IndexBase_24__reduce_cython__(((struct __pyx_obj_4borg_9hashindex_IndexBase *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4borg_9hashindex_9IndexBase_24__reduce_cython__(CYTHON_UNUSED struct __pyx_obj_4borg_9hashindex_IndexBase *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__reduce_cython__", 0); /* "(tree fragment)":2 * def __reduce_cython__(self): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< * def __setstate_cython__(self, __pyx_state): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") */ __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__5, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 2, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_Raise(__pyx_t_1, 0, 0, 0); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __PYX_ERR(1, 2, __pyx_L1_error) /* "(tree fragment)":1 * def __reduce_cython__(self): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("borg.hashindex.IndexBase.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "(tree fragment)":3 * def __reduce_cython__(self): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ due to non-trivial __cinit__") */ /* Python wrapper */ static PyObject *__pyx_pw_4borg_9hashindex_9IndexBase_27__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state); /*proto*/ static PyObject *__pyx_pw_4borg_9hashindex_9IndexBase_27__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0); __pyx_r = __pyx_pf_4borg_9hashindex_9IndexBase_26__setstate_cython__(((struct __pyx_obj_4borg_9hashindex_IndexBase *)__pyx_v_self), ((PyObject *)__pyx_v___pyx_state)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4borg_9hashindex_9IndexBase_26__setstate_cython__(CYTHON_UNUSED struct __pyx_obj_4borg_9hashindex_IndexBase *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; 
int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__setstate_cython__", 0); /* "(tree fragment)":4 * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< */ __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__6, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 4, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_Raise(__pyx_t_1, 0, 0, 0); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __PYX_ERR(1, 4, __pyx_L1_error) /* "(tree fragment)":3 * def __reduce_cython__(self): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ due to non-trivial __cinit__") */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("borg.hashindex.IndexBase.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "borg/hashindex.pyx":174 * _key_size = 16 * * def __getitem__(self, key): # <<<<<<<<<<<<<< * cdef FuseVersionsElement *data * assert len(key) == self.key_size */ /* Python wrapper */ static PyObject *__pyx_pw_4borg_9hashindex_17FuseVersionsIndex_1__getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_key); /*proto*/ static PyObject *__pyx_pw_4borg_9hashindex_17FuseVersionsIndex_1__getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_key) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__getitem__ (wrapper)", 0); __pyx_r = __pyx_pf_4borg_9hashindex_17FuseVersionsIndex___getitem__(((struct __pyx_obj_4borg_9hashindex_FuseVersionsIndex *)__pyx_v_self), ((PyObject *)__pyx_v_key)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4borg_9hashindex_17FuseVersionsIndex___getitem__(struct __pyx_obj_4borg_9hashindex_FuseVersionsIndex *__pyx_v_self, PyObject *__pyx_v_key) { FuseVersionsElement *__pyx_v_data; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations Py_ssize_t __pyx_t_1; char *__pyx_t_2; int __pyx_t_3; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; PyObject *__pyx_t_6 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__getitem__", 0); /* "borg/hashindex.pyx":176 * def __getitem__(self, key): * cdef FuseVersionsElement *data * assert len(key) == self.key_size # <<<<<<<<<<<<<< * data = hashindex_get(self.index, key) * if data == NULL: */ #ifndef CYTHON_WITHOUT_ASSERTIONS if (unlikely(!Py_OptimizeFlag)) { __pyx_t_1 = PyObject_Length(__pyx_v_key); if (unlikely(__pyx_t_1 == ((Py_ssize_t)-1))) __PYX_ERR(0, 176, __pyx_L1_error) if (unlikely(!((__pyx_t_1 == __pyx_v_self->__pyx_base.key_size) != 0))) { PyErr_SetNone(PyExc_AssertionError); __PYX_ERR(0, 176, __pyx_L1_error) } } #endif /* "borg/hashindex.pyx":177 * cdef FuseVersionsElement *data * assert len(key) == self.key_size * data = hashindex_get(self.index, key) # <<<<<<<<<<<<<< * if data == NULL: * raise KeyError(key) */ __pyx_t_2 = __Pyx_PyObject_AsWritableString(__pyx_v_key); if (unlikely((!__pyx_t_2) && PyErr_Occurred())) __PYX_ERR(0, 177, __pyx_L1_error) __pyx_v_data = ((FuseVersionsElement *)hashindex_get(__pyx_v_self->__pyx_base.index, ((char *)__pyx_t_2))); /* "borg/hashindex.pyx":178 * assert len(key) == self.key_size * data = 
hashindex_get(self.index, key) * if data == NULL: # <<<<<<<<<<<<<< * raise KeyError(key) * return _le32toh(data.version), PyBytes_FromStringAndSize(data.hash, 16) */ __pyx_t_3 = ((__pyx_v_data == NULL) != 0); if (unlikely(__pyx_t_3)) { /* "borg/hashindex.pyx":179 * data = hashindex_get(self.index, key) * if data == NULL: * raise KeyError(key) # <<<<<<<<<<<<<< * return _le32toh(data.version), PyBytes_FromStringAndSize(data.hash, 16) * */ __pyx_t_4 = __Pyx_PyObject_CallOneArg(__pyx_builtin_KeyError, __pyx_v_key); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 179, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_Raise(__pyx_t_4, 0, 0, 0); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __PYX_ERR(0, 179, __pyx_L1_error) /* "borg/hashindex.pyx":178 * assert len(key) == self.key_size * data = hashindex_get(self.index, key) * if data == NULL: # <<<<<<<<<<<<<< * raise KeyError(key) * return _le32toh(data.version), PyBytes_FromStringAndSize(data.hash, 16) */ } /* "borg/hashindex.pyx":180 * if data == NULL: * raise KeyError(key) * return _le32toh(data.version), PyBytes_FromStringAndSize(data.hash, 16) # <<<<<<<<<<<<<< * * def __setitem__(self, key, value): */ __Pyx_XDECREF(__pyx_r); __pyx_t_4 = __Pyx_PyInt_From_uint32_t(_le32toh(__pyx_v_data->version)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 180, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_5 = PyBytes_FromStringAndSize(__pyx_v_data->hash, 16); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 180, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_6 = PyTuple_New(2); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 180, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_6, 0, __pyx_t_4); __Pyx_GIVEREF(__pyx_t_5); PyTuple_SET_ITEM(__pyx_t_6, 1, __pyx_t_5); __pyx_t_4 = 0; __pyx_t_5 = 0; __pyx_r = __pyx_t_6; __pyx_t_6 = 0; goto __pyx_L0; /* "borg/hashindex.pyx":174 * _key_size = 16 * * def __getitem__(self, key): # <<<<<<<<<<<<<< * cdef FuseVersionsElement *data * assert len(key) == self.key_size */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_XDECREF(__pyx_t_6); __Pyx_AddTraceback("borg.hashindex.FuseVersionsIndex.__getitem__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "borg/hashindex.pyx":182 * return _le32toh(data.version), PyBytes_FromStringAndSize(data.hash, 16) * * def __setitem__(self, key, value): # <<<<<<<<<<<<<< * cdef FuseVersionsElement data * assert len(key) == self.key_size */ /* Python wrapper */ static int __pyx_pw_4borg_9hashindex_17FuseVersionsIndex_3__setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_value); /*proto*/ static int __pyx_pw_4borg_9hashindex_17FuseVersionsIndex_3__setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_value) { int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__setitem__ (wrapper)", 0); __pyx_r = __pyx_pf_4borg_9hashindex_17FuseVersionsIndex_2__setitem__(((struct __pyx_obj_4borg_9hashindex_FuseVersionsIndex *)__pyx_v_self), ((PyObject *)__pyx_v_key), ((PyObject *)__pyx_v_value)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static int __pyx_pf_4borg_9hashindex_17FuseVersionsIndex_2__setitem__(struct __pyx_obj_4borg_9hashindex_FuseVersionsIndex *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_value) { FuseVersionsElement __pyx_v_data; int __pyx_r; __Pyx_RefNannyDeclarations Py_ssize_t __pyx_t_1; PyObject *__pyx_t_2 = NULL; uint32_t __pyx_t_3; 
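/* Illustrative usage sketch (hand-written, not Cython output): FuseVersionsIndex maps a
 * 16-byte key to a (version, hash) pair; the version is stored little-endian and must not
 * exceed _MAX_VALUE, and the hash must be a bytes object of exactly 16 bytes. Roughly, at
 * the Python level:
 *
 *     idx[key] = (1, hash16)        # hash16: bytes of length 16, anything else -> TypeError
 *     version, hash16 = idx[key]    # KeyError if the key is missing
 *     key in idx                    # membership test via hashindex_get()
 */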
int __pyx_t_4; int __pyx_t_5; char *__pyx_t_6; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__setitem__", 0); /* "borg/hashindex.pyx":184 * def __setitem__(self, key, value): * cdef FuseVersionsElement data * assert len(key) == self.key_size # <<<<<<<<<<<<<< * data.version = value[0] * assert data.version <= _MAX_VALUE, "maximum number of versions reached" */ #ifndef CYTHON_WITHOUT_ASSERTIONS if (unlikely(!Py_OptimizeFlag)) { __pyx_t_1 = PyObject_Length(__pyx_v_key); if (unlikely(__pyx_t_1 == ((Py_ssize_t)-1))) __PYX_ERR(0, 184, __pyx_L1_error) if (unlikely(!((__pyx_t_1 == __pyx_v_self->__pyx_base.key_size) != 0))) { PyErr_SetNone(PyExc_AssertionError); __PYX_ERR(0, 184, __pyx_L1_error) } } #endif /* "borg/hashindex.pyx":185 * cdef FuseVersionsElement data * assert len(key) == self.key_size * data.version = value[0] # <<<<<<<<<<<<<< * assert data.version <= _MAX_VALUE, "maximum number of versions reached" * if not PyBytes_CheckExact(value[1]) or PyBytes_GET_SIZE(value[1]) != 16: */ __pyx_t_2 = __Pyx_GetItemInt(__pyx_v_value, 0, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 185, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = __Pyx_PyInt_As_uint32_t(__pyx_t_2); if (unlikely((__pyx_t_3 == ((uint32_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 185, __pyx_L1_error) __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_v_data.version = __pyx_t_3; /* "borg/hashindex.pyx":186 * assert len(key) == self.key_size * data.version = value[0] * assert data.version <= _MAX_VALUE, "maximum number of versions reached" # <<<<<<<<<<<<<< * if not PyBytes_CheckExact(value[1]) or PyBytes_GET_SIZE(value[1]) != 16: * raise TypeError("Expected bytes of length 16 for second value") */ #ifndef CYTHON_WITHOUT_ASSERTIONS if (unlikely(!Py_OptimizeFlag)) { if (unlikely(!((__pyx_v_data.version <= _MAX_VALUE) != 0))) { PyErr_SetObject(PyExc_AssertionError, __pyx_kp_u_maximum_number_of_versions_reach); __PYX_ERR(0, 186, __pyx_L1_error) } } #endif /* "borg/hashindex.pyx":187 * data.version = value[0] * assert data.version <= _MAX_VALUE, "maximum number of versions reached" * if not PyBytes_CheckExact(value[1]) or PyBytes_GET_SIZE(value[1]) != 16: # <<<<<<<<<<<<<< * raise TypeError("Expected bytes of length 16 for second value") * memcpy(data.hash, PyBytes_AS_STRING(value[1]), 16) */ __pyx_t_2 = __Pyx_GetItemInt(__pyx_v_value, 1, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 187, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_5 = ((!(PyBytes_CheckExact(__pyx_t_2) != 0)) != 0); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; if (!__pyx_t_5) { } else { __pyx_t_4 = __pyx_t_5; goto __pyx_L4_bool_binop_done; } __pyx_t_2 = __Pyx_GetItemInt(__pyx_v_value, 1, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 187, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_5 = ((PyBytes_GET_SIZE(__pyx_t_2) != 16) != 0); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_4 = __pyx_t_5; __pyx_L4_bool_binop_done:; if (unlikely(__pyx_t_4)) { /* "borg/hashindex.pyx":188 * assert data.version <= _MAX_VALUE, "maximum number of versions reached" * if not PyBytes_CheckExact(value[1]) or PyBytes_GET_SIZE(value[1]) != 16: * raise TypeError("Expected bytes of length 16 for second value") # <<<<<<<<<<<<<< * memcpy(data.hash, PyBytes_AS_STRING(value[1]), 16) * data.version = _htole32(data.version) */ __pyx_t_2 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__7, NULL); if (unlikely(!__pyx_t_2)) 
__PYX_ERR(0, 188, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_Raise(__pyx_t_2, 0, 0, 0); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __PYX_ERR(0, 188, __pyx_L1_error) /* "borg/hashindex.pyx":187 * data.version = value[0] * assert data.version <= _MAX_VALUE, "maximum number of versions reached" * if not PyBytes_CheckExact(value[1]) or PyBytes_GET_SIZE(value[1]) != 16: # <<<<<<<<<<<<<< * raise TypeError("Expected bytes of length 16 for second value") * memcpy(data.hash, PyBytes_AS_STRING(value[1]), 16) */ } /* "borg/hashindex.pyx":189 * if not PyBytes_CheckExact(value[1]) or PyBytes_GET_SIZE(value[1]) != 16: * raise TypeError("Expected bytes of length 16 for second value") * memcpy(data.hash, PyBytes_AS_STRING(value[1]), 16) # <<<<<<<<<<<<<< * data.version = _htole32(data.version) * if not hashindex_set(self.index, key, &data): */ __pyx_t_2 = __Pyx_GetItemInt(__pyx_v_value, 1, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 189, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); (void)(memcpy(__pyx_v_data.hash, PyBytes_AS_STRING(__pyx_t_2), 16)); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "borg/hashindex.pyx":190 * raise TypeError("Expected bytes of length 16 for second value") * memcpy(data.hash, PyBytes_AS_STRING(value[1]), 16) * data.version = _htole32(data.version) # <<<<<<<<<<<<<< * if not hashindex_set(self.index, key, &data): * raise Exception('hashindex_set failed') */ __pyx_v_data.version = _htole32(__pyx_v_data.version); /* "borg/hashindex.pyx":191 * memcpy(data.hash, PyBytes_AS_STRING(value[1]), 16) * data.version = _htole32(data.version) * if not hashindex_set(self.index, key, &data): # <<<<<<<<<<<<<< * raise Exception('hashindex_set failed') * */ __pyx_t_6 = __Pyx_PyObject_AsWritableString(__pyx_v_key); if (unlikely((!__pyx_t_6) && PyErr_Occurred())) __PYX_ERR(0, 191, __pyx_L1_error) __pyx_t_4 = ((!(hashindex_set(__pyx_v_self->__pyx_base.index, ((char *)__pyx_t_6), ((void *)(&__pyx_v_data))) != 0)) != 0); if (unlikely(__pyx_t_4)) { /* "borg/hashindex.pyx":192 * data.version = _htole32(data.version) * if not hashindex_set(self.index, key, &data): * raise Exception('hashindex_set failed') # <<<<<<<<<<<<<< * * def __contains__(self, key): */ __pyx_t_2 = __Pyx_PyObject_Call(((PyObject *)(&((PyTypeObject*)PyExc_Exception)[0])), __pyx_tuple__8, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 192, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_Raise(__pyx_t_2, 0, 0, 0); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __PYX_ERR(0, 192, __pyx_L1_error) /* "borg/hashindex.pyx":191 * memcpy(data.hash, PyBytes_AS_STRING(value[1]), 16) * data.version = _htole32(data.version) * if not hashindex_set(self.index, key, &data): # <<<<<<<<<<<<<< * raise Exception('hashindex_set failed') * */ } /* "borg/hashindex.pyx":182 * return _le32toh(data.version), PyBytes_FromStringAndSize(data.hash, 16) * * def __setitem__(self, key, value): # <<<<<<<<<<<<<< * cdef FuseVersionsElement data * assert len(key) == self.key_size */ /* function exit code */ __pyx_r = 0; goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_AddTraceback("borg.hashindex.FuseVersionsIndex.__setitem__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "borg/hashindex.pyx":194 * raise Exception('hashindex_set failed') * * def __contains__(self, key): # <<<<<<<<<<<<<< * assert len(key) == self.key_size * return hashindex_get(self.index, key) != NULL */ /* Python wrapper */ static int 
__pyx_pw_4borg_9hashindex_17FuseVersionsIndex_5__contains__(PyObject *__pyx_v_self, PyObject *__pyx_v_key); /*proto*/ static int __pyx_pw_4borg_9hashindex_17FuseVersionsIndex_5__contains__(PyObject *__pyx_v_self, PyObject *__pyx_v_key) { int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__contains__ (wrapper)", 0); __pyx_r = __pyx_pf_4borg_9hashindex_17FuseVersionsIndex_4__contains__(((struct __pyx_obj_4borg_9hashindex_FuseVersionsIndex *)__pyx_v_self), ((PyObject *)__pyx_v_key)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static int __pyx_pf_4borg_9hashindex_17FuseVersionsIndex_4__contains__(struct __pyx_obj_4borg_9hashindex_FuseVersionsIndex *__pyx_v_self, PyObject *__pyx_v_key) { int __pyx_r; __Pyx_RefNannyDeclarations Py_ssize_t __pyx_t_1; char *__pyx_t_2; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__contains__", 0); /* "borg/hashindex.pyx":195 * * def __contains__(self, key): * assert len(key) == self.key_size # <<<<<<<<<<<<<< * return hashindex_get(self.index, key) != NULL * */ #ifndef CYTHON_WITHOUT_ASSERTIONS if (unlikely(!Py_OptimizeFlag)) { __pyx_t_1 = PyObject_Length(__pyx_v_key); if (unlikely(__pyx_t_1 == ((Py_ssize_t)-1))) __PYX_ERR(0, 195, __pyx_L1_error) if (unlikely(!((__pyx_t_1 == __pyx_v_self->__pyx_base.key_size) != 0))) { PyErr_SetNone(PyExc_AssertionError); __PYX_ERR(0, 195, __pyx_L1_error) } } #endif /* "borg/hashindex.pyx":196 * def __contains__(self, key): * assert len(key) == self.key_size * return hashindex_get(self.index, key) != NULL # <<<<<<<<<<<<<< * * */ __pyx_t_2 = __Pyx_PyObject_AsWritableString(__pyx_v_key); if (unlikely((!__pyx_t_2) && PyErr_Occurred())) __PYX_ERR(0, 196, __pyx_L1_error) __pyx_r = (hashindex_get(__pyx_v_self->__pyx_base.index, ((char *)__pyx_t_2)) != NULL); goto __pyx_L0; /* "borg/hashindex.pyx":194 * raise Exception('hashindex_set failed') * * def __contains__(self, key): # <<<<<<<<<<<<<< * assert len(key) == self.key_size * return hashindex_get(self.index, key) != NULL */ /* function exit code */ __pyx_L1_error:; __Pyx_AddTraceback("borg.hashindex.FuseVersionsIndex.__contains__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "(tree fragment)":1 * def __reduce_cython__(self): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): */ /* Python wrapper */ static PyObject *__pyx_pw_4borg_9hashindex_17FuseVersionsIndex_7__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ static PyObject *__pyx_pw_4borg_9hashindex_17FuseVersionsIndex_7__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0); __pyx_r = __pyx_pf_4borg_9hashindex_17FuseVersionsIndex_6__reduce_cython__(((struct __pyx_obj_4borg_9hashindex_FuseVersionsIndex *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4borg_9hashindex_17FuseVersionsIndex_6__reduce_cython__(CYTHON_UNUSED struct __pyx_obj_4borg_9hashindex_FuseVersionsIndex *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__reduce_cython__", 0); /* "(tree fragment)":2 * def 
__reduce_cython__(self): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< * def __setstate_cython__(self, __pyx_state): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") */ __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__9, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 2, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_Raise(__pyx_t_1, 0, 0, 0); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __PYX_ERR(1, 2, __pyx_L1_error) /* "(tree fragment)":1 * def __reduce_cython__(self): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("borg.hashindex.FuseVersionsIndex.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "(tree fragment)":3 * def __reduce_cython__(self): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ due to non-trivial __cinit__") */ /* Python wrapper */ static PyObject *__pyx_pw_4borg_9hashindex_17FuseVersionsIndex_9__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state); /*proto*/ static PyObject *__pyx_pw_4borg_9hashindex_17FuseVersionsIndex_9__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0); __pyx_r = __pyx_pf_4borg_9hashindex_17FuseVersionsIndex_8__setstate_cython__(((struct __pyx_obj_4borg_9hashindex_FuseVersionsIndex *)__pyx_v_self), ((PyObject *)__pyx_v___pyx_state)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4borg_9hashindex_17FuseVersionsIndex_8__setstate_cython__(CYTHON_UNUSED struct __pyx_obj_4borg_9hashindex_FuseVersionsIndex *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__setstate_cython__", 0); /* "(tree fragment)":4 * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< */ __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__10, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 4, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_Raise(__pyx_t_1, 0, 0, 0); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __PYX_ERR(1, 4, __pyx_L1_error) /* "(tree fragment)":3 * def __reduce_cython__(self): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ due to non-trivial __cinit__") */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("borg.hashindex.FuseVersionsIndex.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "borg/hashindex.pyx":203 * value_size = 8 * * def __getitem__(self, key): # <<<<<<<<<<<<<< * assert len(key) == self.key_size * data = 
hashindex_get(self.index, key) */ /* Python wrapper */ static PyObject *__pyx_pw_4borg_9hashindex_7NSIndex_1__getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_key); /*proto*/ static PyObject *__pyx_pw_4borg_9hashindex_7NSIndex_1__getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_key) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__getitem__ (wrapper)", 0); __pyx_r = __pyx_pf_4borg_9hashindex_7NSIndex___getitem__(((struct __pyx_obj_4borg_9hashindex_NSIndex *)__pyx_v_self), ((PyObject *)__pyx_v_key)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4borg_9hashindex_7NSIndex___getitem__(struct __pyx_obj_4borg_9hashindex_NSIndex *__pyx_v_self, PyObject *__pyx_v_key) { uint32_t *__pyx_v_data; uint32_t __pyx_v_segment; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations Py_ssize_t __pyx_t_1; char *__pyx_t_2; int __pyx_t_3; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; PyObject *__pyx_t_6 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__getitem__", 0); /* "borg/hashindex.pyx":204 * * def __getitem__(self, key): * assert len(key) == self.key_size # <<<<<<<<<<<<<< * data = hashindex_get(self.index, key) * if not data: */ #ifndef CYTHON_WITHOUT_ASSERTIONS if (unlikely(!Py_OptimizeFlag)) { __pyx_t_1 = PyObject_Length(__pyx_v_key); if (unlikely(__pyx_t_1 == ((Py_ssize_t)-1))) __PYX_ERR(0, 204, __pyx_L1_error) if (unlikely(!((__pyx_t_1 == __pyx_v_self->__pyx_base.key_size) != 0))) { PyErr_SetNone(PyExc_AssertionError); __PYX_ERR(0, 204, __pyx_L1_error) } } #endif /* "borg/hashindex.pyx":205 * def __getitem__(self, key): * assert len(key) == self.key_size * data = hashindex_get(self.index, key) # <<<<<<<<<<<<<< * if not data: * raise KeyError(key) */ __pyx_t_2 = __Pyx_PyObject_AsWritableString(__pyx_v_key); if (unlikely((!__pyx_t_2) && PyErr_Occurred())) __PYX_ERR(0, 205, __pyx_L1_error) __pyx_v_data = ((uint32_t *)hashindex_get(__pyx_v_self->__pyx_base.index, ((char *)__pyx_t_2))); /* "borg/hashindex.pyx":206 * assert len(key) == self.key_size * data = hashindex_get(self.index, key) * if not data: # <<<<<<<<<<<<<< * raise KeyError(key) * cdef uint32_t segment = _le32toh(data[0]) */ __pyx_t_3 = ((!(__pyx_v_data != 0)) != 0); if (unlikely(__pyx_t_3)) { /* "borg/hashindex.pyx":207 * data = hashindex_get(self.index, key) * if not data: * raise KeyError(key) # <<<<<<<<<<<<<< * cdef uint32_t segment = _le32toh(data[0]) * assert segment <= _MAX_VALUE, "maximum number of segments reached" */ __pyx_t_4 = __Pyx_PyObject_CallOneArg(__pyx_builtin_KeyError, __pyx_v_key); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 207, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_Raise(__pyx_t_4, 0, 0, 0); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __PYX_ERR(0, 207, __pyx_L1_error) /* "borg/hashindex.pyx":206 * assert len(key) == self.key_size * data = hashindex_get(self.index, key) * if not data: # <<<<<<<<<<<<<< * raise KeyError(key) * cdef uint32_t segment = _le32toh(data[0]) */ } /* "borg/hashindex.pyx":208 * if not data: * raise KeyError(key) * cdef uint32_t segment = _le32toh(data[0]) # <<<<<<<<<<<<<< * assert segment <= _MAX_VALUE, "maximum number of segments reached" * return segment, _le32toh(data[1]) */ __pyx_v_segment = _le32toh((__pyx_v_data[0])); /* "borg/hashindex.pyx":209 * raise KeyError(key) * cdef uint32_t segment = _le32toh(data[0]) * assert segment <= _MAX_VALUE, "maximum number of segments reached" # <<<<<<<<<<<<<< * return segment, 
_le32toh(data[1]) * */ #ifndef CYTHON_WITHOUT_ASSERTIONS if (unlikely(!Py_OptimizeFlag)) { if (unlikely(!((__pyx_v_segment <= _MAX_VALUE) != 0))) { PyErr_SetObject(PyExc_AssertionError, __pyx_kp_u_maximum_number_of_segments_reach); __PYX_ERR(0, 209, __pyx_L1_error) } } #endif /* "borg/hashindex.pyx":210 * cdef uint32_t segment = _le32toh(data[0]) * assert segment <= _MAX_VALUE, "maximum number of segments reached" * return segment, _le32toh(data[1]) # <<<<<<<<<<<<<< * * def __setitem__(self, key, value): */ __Pyx_XDECREF(__pyx_r); __pyx_t_4 = __Pyx_PyInt_From_uint32_t(__pyx_v_segment); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 210, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_5 = __Pyx_PyInt_From_uint32_t(_le32toh((__pyx_v_data[1]))); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 210, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_6 = PyTuple_New(2); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 210, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_6, 0, __pyx_t_4); __Pyx_GIVEREF(__pyx_t_5); PyTuple_SET_ITEM(__pyx_t_6, 1, __pyx_t_5); __pyx_t_4 = 0; __pyx_t_5 = 0; __pyx_r = __pyx_t_6; __pyx_t_6 = 0; goto __pyx_L0; /* "borg/hashindex.pyx":203 * value_size = 8 * * def __getitem__(self, key): # <<<<<<<<<<<<<< * assert len(key) == self.key_size * data = hashindex_get(self.index, key) */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_XDECREF(__pyx_t_6); __Pyx_AddTraceback("borg.hashindex.NSIndex.__getitem__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "borg/hashindex.pyx":212 * return segment, _le32toh(data[1]) * * def __setitem__(self, key, value): # <<<<<<<<<<<<<< * assert len(key) == self.key_size * cdef uint32_t[2] data */ /* Python wrapper */ static int __pyx_pw_4borg_9hashindex_7NSIndex_3__setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_value); /*proto*/ static int __pyx_pw_4borg_9hashindex_7NSIndex_3__setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_value) { int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__setitem__ (wrapper)", 0); __pyx_r = __pyx_pf_4borg_9hashindex_7NSIndex_2__setitem__(((struct __pyx_obj_4borg_9hashindex_NSIndex *)__pyx_v_self), ((PyObject *)__pyx_v_key), ((PyObject *)__pyx_v_value)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static int __pyx_pf_4borg_9hashindex_7NSIndex_2__setitem__(struct __pyx_obj_4borg_9hashindex_NSIndex *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_value) { uint32_t __pyx_v_data[2]; uint32_t __pyx_v_segment; int __pyx_r; __Pyx_RefNannyDeclarations Py_ssize_t __pyx_t_1; PyObject *__pyx_t_2 = NULL; uint32_t __pyx_t_3; char *__pyx_t_4; int __pyx_t_5; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__setitem__", 0); /* "borg/hashindex.pyx":213 * * def __setitem__(self, key, value): * assert len(key) == self.key_size # <<<<<<<<<<<<<< * cdef uint32_t[2] data * cdef uint32_t segment = value[0] */ #ifndef CYTHON_WITHOUT_ASSERTIONS if (unlikely(!Py_OptimizeFlag)) { __pyx_t_1 = PyObject_Length(__pyx_v_key); if (unlikely(__pyx_t_1 == ((Py_ssize_t)-1))) __PYX_ERR(0, 213, __pyx_L1_error) if (unlikely(!((__pyx_t_1 == __pyx_v_self->__pyx_base.key_size) != 0))) { PyErr_SetNone(PyExc_AssertionError); __PYX_ERR(0, 213, __pyx_L1_error) } } #endif /* "borg/hashindex.pyx":215 * assert len(key) == 
self.key_size * cdef uint32_t[2] data * cdef uint32_t segment = value[0] # <<<<<<<<<<<<<< * assert segment <= _MAX_VALUE, "maximum number of segments reached" * data[0] = _htole32(segment) */ __pyx_t_2 = __Pyx_GetItemInt(__pyx_v_value, 0, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 215, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = __Pyx_PyInt_As_uint32_t(__pyx_t_2); if (unlikely((__pyx_t_3 == ((uint32_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 215, __pyx_L1_error) __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_v_segment = __pyx_t_3; /* "borg/hashindex.pyx":216 * cdef uint32_t[2] data * cdef uint32_t segment = value[0] * assert segment <= _MAX_VALUE, "maximum number of segments reached" # <<<<<<<<<<<<<< * data[0] = _htole32(segment) * data[1] = _htole32(value[1]) */ #ifndef CYTHON_WITHOUT_ASSERTIONS if (unlikely(!Py_OptimizeFlag)) { if (unlikely(!((__pyx_v_segment <= _MAX_VALUE) != 0))) { PyErr_SetObject(PyExc_AssertionError, __pyx_kp_u_maximum_number_of_segments_reach); __PYX_ERR(0, 216, __pyx_L1_error) } } #endif /* "borg/hashindex.pyx":217 * cdef uint32_t segment = value[0] * assert segment <= _MAX_VALUE, "maximum number of segments reached" * data[0] = _htole32(segment) # <<<<<<<<<<<<<< * data[1] = _htole32(value[1]) * if not hashindex_set(self.index, key, data): */ (__pyx_v_data[0]) = _htole32(__pyx_v_segment); /* "borg/hashindex.pyx":218 * assert segment <= _MAX_VALUE, "maximum number of segments reached" * data[0] = _htole32(segment) * data[1] = _htole32(value[1]) # <<<<<<<<<<<<<< * if not hashindex_set(self.index, key, data): * raise Exception('hashindex_set failed') */ __pyx_t_2 = __Pyx_GetItemInt(__pyx_v_value, 1, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 218, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = __Pyx_PyInt_As_uint32_t(__pyx_t_2); if (unlikely((__pyx_t_3 == ((uint32_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 218, __pyx_L1_error) __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; (__pyx_v_data[1]) = _htole32(__pyx_t_3); /* "borg/hashindex.pyx":219 * data[0] = _htole32(segment) * data[1] = _htole32(value[1]) * if not hashindex_set(self.index, key, data): # <<<<<<<<<<<<<< * raise Exception('hashindex_set failed') * */ __pyx_t_4 = __Pyx_PyObject_AsWritableString(__pyx_v_key); if (unlikely((!__pyx_t_4) && PyErr_Occurred())) __PYX_ERR(0, 219, __pyx_L1_error) __pyx_t_5 = ((!(hashindex_set(__pyx_v_self->__pyx_base.index, ((char *)__pyx_t_4), __pyx_v_data) != 0)) != 0); if (unlikely(__pyx_t_5)) { /* "borg/hashindex.pyx":220 * data[1] = _htole32(value[1]) * if not hashindex_set(self.index, key, data): * raise Exception('hashindex_set failed') # <<<<<<<<<<<<<< * * def __contains__(self, key): */ __pyx_t_2 = __Pyx_PyObject_Call(((PyObject *)(&((PyTypeObject*)PyExc_Exception)[0])), __pyx_tuple__8, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 220, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_Raise(__pyx_t_2, 0, 0, 0); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __PYX_ERR(0, 220, __pyx_L1_error) /* "borg/hashindex.pyx":219 * data[0] = _htole32(segment) * data[1] = _htole32(value[1]) * if not hashindex_set(self.index, key, data): # <<<<<<<<<<<<<< * raise Exception('hashindex_set failed') * */ } /* "borg/hashindex.pyx":212 * return segment, _le32toh(data[1]) * * def __setitem__(self, key, value): # <<<<<<<<<<<<<< * assert len(key) == self.key_size * cdef uint32_t[2] data */ /* function exit code */ __pyx_r = 0; goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); 
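/* Illustrative usage sketch (hand-written, not Cython output): NSIndex stores two
 * little-endian uint32 values per key (value_size = 8); only the first field ("segment")
 * is range-checked against _MAX_VALUE here, the second is not named in this code (in borg
 * it is the offset within that segment). Roughly, at the Python level:
 *
 *     idx[key] = (segment, offset)        # key must be exactly key_size bytes
 *     segment, offset = idx[key]          # KeyError if the key is missing
 *     for key, value in idx.iteritems():  # also accepts an optional marker= keyword
 *         ...
 */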
__Pyx_AddTraceback("borg.hashindex.NSIndex.__setitem__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "borg/hashindex.pyx":222 * raise Exception('hashindex_set failed') * * def __contains__(self, key): # <<<<<<<<<<<<<< * cdef uint32_t segment * assert len(key) == self.key_size */ /* Python wrapper */ static int __pyx_pw_4borg_9hashindex_7NSIndex_5__contains__(PyObject *__pyx_v_self, PyObject *__pyx_v_key); /*proto*/ static int __pyx_pw_4borg_9hashindex_7NSIndex_5__contains__(PyObject *__pyx_v_self, PyObject *__pyx_v_key) { int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__contains__ (wrapper)", 0); __pyx_r = __pyx_pf_4borg_9hashindex_7NSIndex_4__contains__(((struct __pyx_obj_4borg_9hashindex_NSIndex *)__pyx_v_self), ((PyObject *)__pyx_v_key)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static int __pyx_pf_4borg_9hashindex_7NSIndex_4__contains__(struct __pyx_obj_4borg_9hashindex_NSIndex *__pyx_v_self, PyObject *__pyx_v_key) { uint32_t __pyx_v_segment; uint32_t *__pyx_v_data; int __pyx_r; __Pyx_RefNannyDeclarations Py_ssize_t __pyx_t_1; char *__pyx_t_2; int __pyx_t_3; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__contains__", 0); /* "borg/hashindex.pyx":224 * def __contains__(self, key): * cdef uint32_t segment * assert len(key) == self.key_size # <<<<<<<<<<<<<< * data = hashindex_get(self.index, key) * if data != NULL: */ #ifndef CYTHON_WITHOUT_ASSERTIONS if (unlikely(!Py_OptimizeFlag)) { __pyx_t_1 = PyObject_Length(__pyx_v_key); if (unlikely(__pyx_t_1 == ((Py_ssize_t)-1))) __PYX_ERR(0, 224, __pyx_L1_error) if (unlikely(!((__pyx_t_1 == __pyx_v_self->__pyx_base.key_size) != 0))) { PyErr_SetNone(PyExc_AssertionError); __PYX_ERR(0, 224, __pyx_L1_error) } } #endif /* "borg/hashindex.pyx":225 * cdef uint32_t segment * assert len(key) == self.key_size * data = hashindex_get(self.index, key) # <<<<<<<<<<<<<< * if data != NULL: * segment = _le32toh(data[0]) */ __pyx_t_2 = __Pyx_PyObject_AsWritableString(__pyx_v_key); if (unlikely((!__pyx_t_2) && PyErr_Occurred())) __PYX_ERR(0, 225, __pyx_L1_error) __pyx_v_data = ((uint32_t *)hashindex_get(__pyx_v_self->__pyx_base.index, ((char *)__pyx_t_2))); /* "borg/hashindex.pyx":226 * assert len(key) == self.key_size * data = hashindex_get(self.index, key) * if data != NULL: # <<<<<<<<<<<<<< * segment = _le32toh(data[0]) * assert segment <= _MAX_VALUE, "maximum number of segments reached" */ __pyx_t_3 = ((__pyx_v_data != NULL) != 0); if (__pyx_t_3) { /* "borg/hashindex.pyx":227 * data = hashindex_get(self.index, key) * if data != NULL: * segment = _le32toh(data[0]) # <<<<<<<<<<<<<< * assert segment <= _MAX_VALUE, "maximum number of segments reached" * return data != NULL */ __pyx_v_segment = _le32toh((__pyx_v_data[0])); /* "borg/hashindex.pyx":228 * if data != NULL: * segment = _le32toh(data[0]) * assert segment <= _MAX_VALUE, "maximum number of segments reached" # <<<<<<<<<<<<<< * return data != NULL * */ #ifndef CYTHON_WITHOUT_ASSERTIONS if (unlikely(!Py_OptimizeFlag)) { if (unlikely(!((__pyx_v_segment <= _MAX_VALUE) != 0))) { PyErr_SetObject(PyExc_AssertionError, __pyx_kp_u_maximum_number_of_segments_reach); __PYX_ERR(0, 228, __pyx_L1_error) } } #endif /* "borg/hashindex.pyx":226 * assert len(key) == self.key_size * data = hashindex_get(self.index, key) * if data != NULL: # <<<<<<<<<<<<<< * segment = _le32toh(data[0]) * assert segment <= _MAX_VALUE, "maximum number of 
segments reached" */ } /* "borg/hashindex.pyx":229 * segment = _le32toh(data[0]) * assert segment <= _MAX_VALUE, "maximum number of segments reached" * return data != NULL # <<<<<<<<<<<<<< * * def iteritems(self, marker=None): */ __pyx_r = (__pyx_v_data != NULL); goto __pyx_L0; /* "borg/hashindex.pyx":222 * raise Exception('hashindex_set failed') * * def __contains__(self, key): # <<<<<<<<<<<<<< * cdef uint32_t segment * assert len(key) == self.key_size */ /* function exit code */ __pyx_L1_error:; __Pyx_AddTraceback("borg.hashindex.NSIndex.__contains__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "borg/hashindex.pyx":231 * return data != NULL * * def iteritems(self, marker=None): # <<<<<<<<<<<<<< * cdef const void *key * iter = NSKeyIterator(self.key_size) */ /* Python wrapper */ static PyObject *__pyx_pw_4borg_9hashindex_7NSIndex_7iteritems(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static PyObject *__pyx_pw_4borg_9hashindex_7NSIndex_7iteritems(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyObject *__pyx_v_marker = 0; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("iteritems (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_marker,0}; PyObject* values[1] = {0}; values[0] = ((PyObject *)Py_None); if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (kw_args > 0) { PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_marker); if (value) { values[0] = value; kw_args--; } } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "iteritems") < 0)) __PYX_ERR(0, 231, __pyx_L3_error) } } else { switch (PyTuple_GET_SIZE(__pyx_args)) { case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } } __pyx_v_marker = values[0]; } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("iteritems", 0, 0, 1, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 231, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("borg.hashindex.NSIndex.iteritems", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_4borg_9hashindex_7NSIndex_6iteritems(((struct __pyx_obj_4borg_9hashindex_NSIndex *)__pyx_v_self), __pyx_v_marker); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4borg_9hashindex_7NSIndex_6iteritems(struct __pyx_obj_4borg_9hashindex_NSIndex *__pyx_v_self, PyObject *__pyx_v_marker) { void const *__pyx_v_key; struct __pyx_obj_4borg_9hashindex_NSKeyIterator *__pyx_v_iter = NULL; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; HashIndex *__pyx_t_3; int __pyx_t_4; char *__pyx_t_5; int __pyx_t_6; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("iteritems", 0); /* "borg/hashindex.pyx":233 * def iteritems(self, marker=None): * cdef const void *key * iter 
= NSKeyIterator(self.key_size) # <<<<<<<<<<<<<< * iter.idx = self * iter.index = self.index */ __pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_self->__pyx_base.key_size); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 233, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __Pyx_PyObject_CallOneArg(((PyObject *)__pyx_ptype_4borg_9hashindex_NSKeyIterator), __pyx_t_1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 233, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_v_iter = ((struct __pyx_obj_4borg_9hashindex_NSKeyIterator *)__pyx_t_2); __pyx_t_2 = 0; /* "borg/hashindex.pyx":234 * cdef const void *key * iter = NSKeyIterator(self.key_size) * iter.idx = self # <<<<<<<<<<<<<< * iter.index = self.index * if marker: */ __Pyx_INCREF(((PyObject *)__pyx_v_self)); __Pyx_GIVEREF(((PyObject *)__pyx_v_self)); __Pyx_GOTREF(__pyx_v_iter->idx); __Pyx_DECREF(((PyObject *)__pyx_v_iter->idx)); __pyx_v_iter->idx = __pyx_v_self; /* "borg/hashindex.pyx":235 * iter = NSKeyIterator(self.key_size) * iter.idx = self * iter.index = self.index # <<<<<<<<<<<<<< * if marker: * key = hashindex_get(self.index, marker) */ __pyx_t_3 = __pyx_v_self->__pyx_base.index; __pyx_v_iter->index = __pyx_t_3; /* "borg/hashindex.pyx":236 * iter.idx = self * iter.index = self.index * if marker: # <<<<<<<<<<<<<< * key = hashindex_get(self.index, marker) * if marker is None: */ __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_v_marker); if (unlikely(__pyx_t_4 < 0)) __PYX_ERR(0, 236, __pyx_L1_error) if (__pyx_t_4) { /* "borg/hashindex.pyx":237 * iter.index = self.index * if marker: * key = hashindex_get(self.index, marker) # <<<<<<<<<<<<<< * if marker is None: * raise IndexError */ __pyx_t_5 = __Pyx_PyObject_AsWritableString(__pyx_v_marker); if (unlikely((!__pyx_t_5) && PyErr_Occurred())) __PYX_ERR(0, 237, __pyx_L1_error) __pyx_v_key = hashindex_get(__pyx_v_self->__pyx_base.index, ((char *)__pyx_t_5)); /* "borg/hashindex.pyx":238 * if marker: * key = hashindex_get(self.index, marker) * if marker is None: # <<<<<<<<<<<<<< * raise IndexError * iter.key = key - self.key_size */ __pyx_t_4 = (__pyx_v_marker == Py_None); __pyx_t_6 = (__pyx_t_4 != 0); if (unlikely(__pyx_t_6)) { /* "borg/hashindex.pyx":239 * key = hashindex_get(self.index, marker) * if marker is None: * raise IndexError # <<<<<<<<<<<<<< * iter.key = key - self.key_size * return iter */ __Pyx_Raise(__pyx_builtin_IndexError, 0, 0, 0); __PYX_ERR(0, 239, __pyx_L1_error) /* "borg/hashindex.pyx":238 * if marker: * key = hashindex_get(self.index, marker) * if marker is None: # <<<<<<<<<<<<<< * raise IndexError * iter.key = key - self.key_size */ } /* "borg/hashindex.pyx":240 * if marker is None: * raise IndexError * iter.key = key - self.key_size # <<<<<<<<<<<<<< * return iter * */ __pyx_v_iter->key = (__pyx_v_key - __pyx_v_self->__pyx_base.key_size); /* "borg/hashindex.pyx":236 * iter.idx = self * iter.index = self.index * if marker: # <<<<<<<<<<<<<< * key = hashindex_get(self.index, marker) * if marker is None: */ } /* "borg/hashindex.pyx":241 * raise IndexError * iter.key = key - self.key_size * return iter # <<<<<<<<<<<<<< * * */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(((PyObject *)__pyx_v_iter)); __pyx_r = ((PyObject *)__pyx_v_iter); goto __pyx_L0; /* "borg/hashindex.pyx":231 * return data != NULL * * def iteritems(self, marker=None): # <<<<<<<<<<<<<< * cdef const void *key * iter = NSKeyIterator(self.key_size) */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_AddTraceback("borg.hashindex.NSIndex.iteritems", __pyx_clineno, 
__pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_iter); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "(tree fragment)":1 * def __reduce_cython__(self): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): */ /* Python wrapper */ static PyObject *__pyx_pw_4borg_9hashindex_7NSIndex_9__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ static PyObject *__pyx_pw_4borg_9hashindex_7NSIndex_9__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0); __pyx_r = __pyx_pf_4borg_9hashindex_7NSIndex_8__reduce_cython__(((struct __pyx_obj_4borg_9hashindex_NSIndex *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4borg_9hashindex_7NSIndex_8__reduce_cython__(CYTHON_UNUSED struct __pyx_obj_4borg_9hashindex_NSIndex *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__reduce_cython__", 0); /* "(tree fragment)":2 * def __reduce_cython__(self): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< * def __setstate_cython__(self, __pyx_state): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") */ __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__11, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 2, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_Raise(__pyx_t_1, 0, 0, 0); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __PYX_ERR(1, 2, __pyx_L1_error) /* "(tree fragment)":1 * def __reduce_cython__(self): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("borg.hashindex.NSIndex.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "(tree fragment)":3 * def __reduce_cython__(self): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ due to non-trivial __cinit__") */ /* Python wrapper */ static PyObject *__pyx_pw_4borg_9hashindex_7NSIndex_11__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state); /*proto*/ static PyObject *__pyx_pw_4borg_9hashindex_7NSIndex_11__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0); __pyx_r = __pyx_pf_4borg_9hashindex_7NSIndex_10__setstate_cython__(((struct __pyx_obj_4borg_9hashindex_NSIndex *)__pyx_v_self), ((PyObject *)__pyx_v___pyx_state)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4borg_9hashindex_7NSIndex_10__setstate_cython__(CYTHON_UNUSED struct __pyx_obj_4borg_9hashindex_NSIndex *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; 
int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__setstate_cython__", 0); /* "(tree fragment)":4 * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< */ __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__12, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 4, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_Raise(__pyx_t_1, 0, 0, 0); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __PYX_ERR(1, 4, __pyx_L1_error) /* "(tree fragment)":3 * def __reduce_cython__(self): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ due to non-trivial __cinit__") */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("borg.hashindex.NSIndex.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "borg/hashindex.pyx":251 * cdef int exhausted * * def __cinit__(self, key_size): # <<<<<<<<<<<<<< * self.key = NULL * self.key_size = key_size */ /* Python wrapper */ static int __pyx_pw_4borg_9hashindex_13NSKeyIterator_1__cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static int __pyx_pw_4borg_9hashindex_13NSKeyIterator_1__cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyObject *__pyx_v_key_size = 0; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__cinit__ (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_key_size_2,0}; PyObject* values[1] = {0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_key_size_2)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__cinit__") < 0)) __PYX_ERR(0, 251, __pyx_L3_error) } } else if (PyTuple_GET_SIZE(__pyx_args) != 1) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); } __pyx_v_key_size = values[0]; } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("__cinit__", 1, 1, 1, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 251, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("borg.hashindex.NSKeyIterator.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return -1; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_4borg_9hashindex_13NSKeyIterator___cinit__(((struct __pyx_obj_4borg_9hashindex_NSKeyIterator *)__pyx_v_self), __pyx_v_key_size); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static int __pyx_pf_4borg_9hashindex_13NSKeyIterator___cinit__(struct __pyx_obj_4borg_9hashindex_NSKeyIterator *__pyx_v_self, PyObject *__pyx_v_key_size) { int __pyx_r; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; 
__Pyx_RefNannySetupContext("__cinit__", 0); /* "borg/hashindex.pyx":252 * * def __cinit__(self, key_size): * self.key = NULL # <<<<<<<<<<<<<< * self.key_size = key_size * self.exhausted = 0 */ __pyx_v_self->key = NULL; /* "borg/hashindex.pyx":253 * def __cinit__(self, key_size): * self.key = NULL * self.key_size = key_size # <<<<<<<<<<<<<< * self.exhausted = 0 * */ __pyx_t_1 = __Pyx_PyInt_As_int(__pyx_v_key_size); if (unlikely((__pyx_t_1 == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 253, __pyx_L1_error) __pyx_v_self->key_size = __pyx_t_1; /* "borg/hashindex.pyx":254 * self.key = NULL * self.key_size = key_size * self.exhausted = 0 # <<<<<<<<<<<<<< * * def __iter__(self): */ __pyx_v_self->exhausted = 0; /* "borg/hashindex.pyx":251 * cdef int exhausted * * def __cinit__(self, key_size): # <<<<<<<<<<<<<< * self.key = NULL * self.key_size = key_size */ /* function exit code */ __pyx_r = 0; goto __pyx_L0; __pyx_L1_error:; __Pyx_AddTraceback("borg.hashindex.NSKeyIterator.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "borg/hashindex.pyx":256 * self.exhausted = 0 * * def __iter__(self): # <<<<<<<<<<<<<< * return self * */ /* Python wrapper */ static PyObject *__pyx_pw_4borg_9hashindex_13NSKeyIterator_3__iter__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_4borg_9hashindex_13NSKeyIterator_3__iter__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__iter__ (wrapper)", 0); __pyx_r = __pyx_pf_4borg_9hashindex_13NSKeyIterator_2__iter__(((struct __pyx_obj_4borg_9hashindex_NSKeyIterator *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4borg_9hashindex_13NSKeyIterator_2__iter__(struct __pyx_obj_4borg_9hashindex_NSKeyIterator *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__iter__", 0); /* "borg/hashindex.pyx":257 * * def __iter__(self): * return self # <<<<<<<<<<<<<< * * def __next__(self): */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(((PyObject *)__pyx_v_self)); __pyx_r = ((PyObject *)__pyx_v_self); goto __pyx_L0; /* "borg/hashindex.pyx":256 * self.exhausted = 0 * * def __iter__(self): # <<<<<<<<<<<<<< * return self * */ /* function exit code */ __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "borg/hashindex.pyx":259 * return self * * def __next__(self): # <<<<<<<<<<<<<< * if self.exhausted: * raise StopIteration */ /* Python wrapper */ static PyObject *__pyx_pw_4borg_9hashindex_13NSKeyIterator_5__next__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_4borg_9hashindex_13NSKeyIterator_5__next__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__next__ (wrapper)", 0); __pyx_r = __pyx_pf_4borg_9hashindex_13NSKeyIterator_4__next__(((struct __pyx_obj_4borg_9hashindex_NSKeyIterator *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4borg_9hashindex_13NSKeyIterator_4__next__(struct __pyx_obj_4borg_9hashindex_NSKeyIterator *__pyx_v_self) { uint32_t *__pyx_v_value; uint32_t __pyx_v_segment; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; 
__Pyx_RefNannySetupContext("__next__", 0); /* "borg/hashindex.pyx":260 * * def __next__(self): * if self.exhausted: # <<<<<<<<<<<<<< * raise StopIteration * self.key = hashindex_next_key(self.index, self.key) */ __pyx_t_1 = (__pyx_v_self->exhausted != 0); if (unlikely(__pyx_t_1)) { /* "borg/hashindex.pyx":261 * def __next__(self): * if self.exhausted: * raise StopIteration # <<<<<<<<<<<<<< * self.key = hashindex_next_key(self.index, self.key) * if not self.key: */ __Pyx_Raise(__pyx_builtin_StopIteration, 0, 0, 0); __PYX_ERR(0, 261, __pyx_L1_error) /* "borg/hashindex.pyx":260 * * def __next__(self): * if self.exhausted: # <<<<<<<<<<<<<< * raise StopIteration * self.key = hashindex_next_key(self.index, self.key) */ } /* "borg/hashindex.pyx":262 * if self.exhausted: * raise StopIteration * self.key = hashindex_next_key(self.index, self.key) # <<<<<<<<<<<<<< * if not self.key: * self.exhausted = 1 */ __pyx_v_self->key = hashindex_next_key(__pyx_v_self->index, ((char *)__pyx_v_self->key)); /* "borg/hashindex.pyx":263 * raise StopIteration * self.key = hashindex_next_key(self.index, self.key) * if not self.key: # <<<<<<<<<<<<<< * self.exhausted = 1 * raise StopIteration */ __pyx_t_1 = ((!(__pyx_v_self->key != 0)) != 0); if (unlikely(__pyx_t_1)) { /* "borg/hashindex.pyx":264 * self.key = hashindex_next_key(self.index, self.key) * if not self.key: * self.exhausted = 1 # <<<<<<<<<<<<<< * raise StopIteration * cdef uint32_t *value = (self.key + self.key_size) */ __pyx_v_self->exhausted = 1; /* "borg/hashindex.pyx":265 * if not self.key: * self.exhausted = 1 * raise StopIteration # <<<<<<<<<<<<<< * cdef uint32_t *value = (self.key + self.key_size) * cdef uint32_t segment = _le32toh(value[0]) */ __Pyx_Raise(__pyx_builtin_StopIteration, 0, 0, 0); __PYX_ERR(0, 265, __pyx_L1_error) /* "borg/hashindex.pyx":263 * raise StopIteration * self.key = hashindex_next_key(self.index, self.key) * if not self.key: # <<<<<<<<<<<<<< * self.exhausted = 1 * raise StopIteration */ } /* "borg/hashindex.pyx":266 * self.exhausted = 1 * raise StopIteration * cdef uint32_t *value = (self.key + self.key_size) # <<<<<<<<<<<<<< * cdef uint32_t segment = _le32toh(value[0]) * assert segment <= _MAX_VALUE, "maximum number of segments reached" */ __pyx_v_value = ((uint32_t *)(__pyx_v_self->key + __pyx_v_self->key_size)); /* "borg/hashindex.pyx":267 * raise StopIteration * cdef uint32_t *value = (self.key + self.key_size) * cdef uint32_t segment = _le32toh(value[0]) # <<<<<<<<<<<<<< * assert segment <= _MAX_VALUE, "maximum number of segments reached" * return (self.key)[:self.key_size], (segment, _le32toh(value[1])) */ __pyx_v_segment = _le32toh((__pyx_v_value[0])); /* "borg/hashindex.pyx":268 * cdef uint32_t *value = (self.key + self.key_size) * cdef uint32_t segment = _le32toh(value[0]) * assert segment <= _MAX_VALUE, "maximum number of segments reached" # <<<<<<<<<<<<<< * return (self.key)[:self.key_size], (segment, _le32toh(value[1])) * */ #ifndef CYTHON_WITHOUT_ASSERTIONS if (unlikely(!Py_OptimizeFlag)) { if (unlikely(!((__pyx_v_segment <= _MAX_VALUE) != 0))) { PyErr_SetObject(PyExc_AssertionError, __pyx_kp_u_maximum_number_of_segments_reach); __PYX_ERR(0, 268, __pyx_L1_error) } } #endif /* "borg/hashindex.pyx":269 * cdef uint32_t segment = _le32toh(value[0]) * assert segment <= _MAX_VALUE, "maximum number of segments reached" * return (self.key)[:self.key_size], (segment, _le32toh(value[1])) # <<<<<<<<<<<<<< * * */ __Pyx_XDECREF(__pyx_r); __pyx_t_2 = __Pyx_PyBytes_FromStringAndSize(((char *)__pyx_v_self->key) + 0, 
__pyx_v_self->key_size - 0); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 269, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = __Pyx_PyInt_From_uint32_t(__pyx_v_segment); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 269, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = __Pyx_PyInt_From_uint32_t(_le32toh((__pyx_v_value[1]))); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 269, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_5 = PyTuple_New(2); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 269, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_3); __Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_t_4); __pyx_t_3 = 0; __pyx_t_4 = 0; __pyx_t_4 = PyTuple_New(2); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 269, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_2); __Pyx_GIVEREF(__pyx_t_5); PyTuple_SET_ITEM(__pyx_t_4, 1, __pyx_t_5); __pyx_t_2 = 0; __pyx_t_5 = 0; __pyx_r = __pyx_t_4; __pyx_t_4 = 0; goto __pyx_L0; /* "borg/hashindex.pyx":259 * return self * * def __next__(self): # <<<<<<<<<<<<<< * if self.exhausted: * raise StopIteration */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("borg.hashindex.NSKeyIterator.__next__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "(tree fragment)":1 * def __reduce_cython__(self): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): */ /* Python wrapper */ static PyObject *__pyx_pw_4borg_9hashindex_13NSKeyIterator_7__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ static PyObject *__pyx_pw_4borg_9hashindex_13NSKeyIterator_7__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0); __pyx_r = __pyx_pf_4borg_9hashindex_13NSKeyIterator_6__reduce_cython__(((struct __pyx_obj_4borg_9hashindex_NSKeyIterator *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4borg_9hashindex_13NSKeyIterator_6__reduce_cython__(CYTHON_UNUSED struct __pyx_obj_4borg_9hashindex_NSKeyIterator *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__reduce_cython__", 0); /* "(tree fragment)":2 * def __reduce_cython__(self): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< * def __setstate_cython__(self, __pyx_state): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") */ __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__13, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 2, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_Raise(__pyx_t_1, 0, 0, 0); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __PYX_ERR(1, 2, __pyx_L1_error) /* "(tree fragment)":1 * def __reduce_cython__(self): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("borg.hashindex.NSKeyIterator.__reduce_cython__", 
__pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "(tree fragment)":3 * def __reduce_cython__(self): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ due to non-trivial __cinit__") */ /* Python wrapper */ static PyObject *__pyx_pw_4borg_9hashindex_13NSKeyIterator_9__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state); /*proto*/ static PyObject *__pyx_pw_4borg_9hashindex_13NSKeyIterator_9__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0); __pyx_r = __pyx_pf_4borg_9hashindex_13NSKeyIterator_8__setstate_cython__(((struct __pyx_obj_4borg_9hashindex_NSKeyIterator *)__pyx_v_self), ((PyObject *)__pyx_v___pyx_state)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4borg_9hashindex_13NSKeyIterator_8__setstate_cython__(CYTHON_UNUSED struct __pyx_obj_4borg_9hashindex_NSKeyIterator *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__setstate_cython__", 0); /* "(tree fragment)":4 * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< */ __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__14, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 4, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_Raise(__pyx_t_1, 0, 0, 0); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __PYX_ERR(1, 4, __pyx_L1_error) /* "(tree fragment)":3 * def __reduce_cython__(self): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ due to non-trivial __cinit__") */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("borg.hashindex.NSKeyIterator.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "borg/hashindex.pyx":294 * value_size = 12 * * def __getitem__(self, key): # <<<<<<<<<<<<<< * assert len(key) == self.key_size * data = hashindex_get(self.index, key) */ /* Python wrapper */ static PyObject *__pyx_pw_4borg_9hashindex_10ChunkIndex_1__getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_key); /*proto*/ static PyObject *__pyx_pw_4borg_9hashindex_10ChunkIndex_1__getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_key) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__getitem__ (wrapper)", 0); __pyx_r = __pyx_pf_4borg_9hashindex_10ChunkIndex___getitem__(((struct __pyx_obj_4borg_9hashindex_ChunkIndex *)__pyx_v_self), ((PyObject *)__pyx_v_key)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4borg_9hashindex_10ChunkIndex___getitem__(struct __pyx_obj_4borg_9hashindex_ChunkIndex *__pyx_v_self, PyObject *__pyx_v_key) { uint32_t *__pyx_v_data; uint32_t __pyx_v_refcount; PyObject *__pyx_r = NULL; 
__Pyx_RefNannyDeclarations Py_ssize_t __pyx_t_1; char *__pyx_t_2; int __pyx_t_3; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; PyObject *__pyx_t_6 = NULL; PyObject *__pyx_t_7 = NULL; PyObject *__pyx_t_8 = NULL; PyObject *__pyx_t_9 = NULL; int __pyx_t_10; PyObject *__pyx_t_11 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__getitem__", 0); /* "borg/hashindex.pyx":295 * * def __getitem__(self, key): * assert len(key) == self.key_size # <<<<<<<<<<<<<< * data = hashindex_get(self.index, key) * if not data: */ #ifndef CYTHON_WITHOUT_ASSERTIONS if (unlikely(!Py_OptimizeFlag)) { __pyx_t_1 = PyObject_Length(__pyx_v_key); if (unlikely(__pyx_t_1 == ((Py_ssize_t)-1))) __PYX_ERR(0, 295, __pyx_L1_error) if (unlikely(!((__pyx_t_1 == __pyx_v_self->__pyx_base.key_size) != 0))) { PyErr_SetNone(PyExc_AssertionError); __PYX_ERR(0, 295, __pyx_L1_error) } } #endif /* "borg/hashindex.pyx":296 * def __getitem__(self, key): * assert len(key) == self.key_size * data = hashindex_get(self.index, key) # <<<<<<<<<<<<<< * if not data: * raise KeyError(key) */ __pyx_t_2 = __Pyx_PyObject_AsWritableString(__pyx_v_key); if (unlikely((!__pyx_t_2) && PyErr_Occurred())) __PYX_ERR(0, 296, __pyx_L1_error) __pyx_v_data = ((uint32_t *)hashindex_get(__pyx_v_self->__pyx_base.index, ((char *)__pyx_t_2))); /* "borg/hashindex.pyx":297 * assert len(key) == self.key_size * data = hashindex_get(self.index, key) * if not data: # <<<<<<<<<<<<<< * raise KeyError(key) * cdef uint32_t refcount = _le32toh(data[0]) */ __pyx_t_3 = ((!(__pyx_v_data != 0)) != 0); if (unlikely(__pyx_t_3)) { /* "borg/hashindex.pyx":298 * data = hashindex_get(self.index, key) * if not data: * raise KeyError(key) # <<<<<<<<<<<<<< * cdef uint32_t refcount = _le32toh(data[0]) * assert refcount <= _MAX_VALUE, "invalid reference count" */ __pyx_t_4 = __Pyx_PyObject_CallOneArg(__pyx_builtin_KeyError, __pyx_v_key); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 298, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_Raise(__pyx_t_4, 0, 0, 0); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __PYX_ERR(0, 298, __pyx_L1_error) /* "borg/hashindex.pyx":297 * assert len(key) == self.key_size * data = hashindex_get(self.index, key) * if not data: # <<<<<<<<<<<<<< * raise KeyError(key) * cdef uint32_t refcount = _le32toh(data[0]) */ } /* "borg/hashindex.pyx":299 * if not data: * raise KeyError(key) * cdef uint32_t refcount = _le32toh(data[0]) # <<<<<<<<<<<<<< * assert refcount <= _MAX_VALUE, "invalid reference count" * return ChunkIndexEntry(refcount, _le32toh(data[1]), _le32toh(data[2])) */ __pyx_v_refcount = _le32toh((__pyx_v_data[0])); /* "borg/hashindex.pyx":300 * raise KeyError(key) * cdef uint32_t refcount = _le32toh(data[0]) * assert refcount <= _MAX_VALUE, "invalid reference count" # <<<<<<<<<<<<<< * return ChunkIndexEntry(refcount, _le32toh(data[1]), _le32toh(data[2])) * */ #ifndef CYTHON_WITHOUT_ASSERTIONS if (unlikely(!Py_OptimizeFlag)) { if (unlikely(!((__pyx_v_refcount <= _MAX_VALUE) != 0))) { PyErr_SetObject(PyExc_AssertionError, __pyx_kp_u_invalid_reference_count); __PYX_ERR(0, 300, __pyx_L1_error) } } #endif /* "borg/hashindex.pyx":301 * cdef uint32_t refcount = _le32toh(data[0]) * assert refcount <= _MAX_VALUE, "invalid reference count" * return ChunkIndexEntry(refcount, _le32toh(data[1]), _le32toh(data[2])) # <<<<<<<<<<<<<< * * def __setitem__(self, key, value): */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_n_s_ChunkIndexEntry); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 301, 
__pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_6 = __Pyx_PyInt_From_uint32_t(__pyx_v_refcount); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 301, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __pyx_t_7 = __Pyx_PyInt_From_uint32_t(_le32toh((__pyx_v_data[1]))); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 301, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __pyx_t_8 = __Pyx_PyInt_From_uint32_t(_le32toh((__pyx_v_data[2]))); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 301, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_8); __pyx_t_9 = NULL; __pyx_t_10 = 0; if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_5))) { __pyx_t_9 = PyMethod_GET_SELF(__pyx_t_5); if (likely(__pyx_t_9)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_5); __Pyx_INCREF(__pyx_t_9); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_5, function); __pyx_t_10 = 1; } } #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_5)) { PyObject *__pyx_temp[4] = {__pyx_t_9, __pyx_t_6, __pyx_t_7, __pyx_t_8}; __pyx_t_4 = __Pyx_PyFunction_FastCall(__pyx_t_5, __pyx_temp+1-__pyx_t_10, 3+__pyx_t_10); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 301, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_9); __pyx_t_9 = 0; __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; } else #endif #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_5)) { PyObject *__pyx_temp[4] = {__pyx_t_9, __pyx_t_6, __pyx_t_7, __pyx_t_8}; __pyx_t_4 = __Pyx_PyCFunction_FastCall(__pyx_t_5, __pyx_temp+1-__pyx_t_10, 3+__pyx_t_10); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 301, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_9); __pyx_t_9 = 0; __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; } else #endif { __pyx_t_11 = PyTuple_New(3+__pyx_t_10); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 301, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_11); if (__pyx_t_9) { __Pyx_GIVEREF(__pyx_t_9); PyTuple_SET_ITEM(__pyx_t_11, 0, __pyx_t_9); __pyx_t_9 = NULL; } __Pyx_GIVEREF(__pyx_t_6); PyTuple_SET_ITEM(__pyx_t_11, 0+__pyx_t_10, __pyx_t_6); __Pyx_GIVEREF(__pyx_t_7); PyTuple_SET_ITEM(__pyx_t_11, 1+__pyx_t_10, __pyx_t_7); __Pyx_GIVEREF(__pyx_t_8); PyTuple_SET_ITEM(__pyx_t_11, 2+__pyx_t_10, __pyx_t_8); __pyx_t_6 = 0; __pyx_t_7 = 0; __pyx_t_8 = 0; __pyx_t_4 = __Pyx_PyObject_Call(__pyx_t_5, __pyx_t_11, NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 301, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0; } __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_r = __pyx_t_4; __pyx_t_4 = 0; goto __pyx_L0; /* "borg/hashindex.pyx":294 * value_size = 12 * * def __getitem__(self, key): # <<<<<<<<<<<<<< * assert len(key) == self.key_size * data = hashindex_get(self.index, key) */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_XDECREF(__pyx_t_6); __Pyx_XDECREF(__pyx_t_7); __Pyx_XDECREF(__pyx_t_8); __Pyx_XDECREF(__pyx_t_9); __Pyx_XDECREF(__pyx_t_11); __Pyx_AddTraceback("borg.hashindex.ChunkIndex.__getitem__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "borg/hashindex.pyx":303 * return ChunkIndexEntry(refcount, _le32toh(data[1]), _le32toh(data[2])) * * def __setitem__(self, key, value): # <<<<<<<<<<<<<< * assert len(key) == self.key_size * cdef uint32_t[3] data */ /* Python wrapper */ static int __pyx_pw_4borg_9hashindex_10ChunkIndex_3__setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_key, 
PyObject *__pyx_v_value); /*proto*/ static int __pyx_pw_4borg_9hashindex_10ChunkIndex_3__setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_value) { int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__setitem__ (wrapper)", 0); __pyx_r = __pyx_pf_4borg_9hashindex_10ChunkIndex_2__setitem__(((struct __pyx_obj_4borg_9hashindex_ChunkIndex *)__pyx_v_self), ((PyObject *)__pyx_v_key), ((PyObject *)__pyx_v_value)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static int __pyx_pf_4borg_9hashindex_10ChunkIndex_2__setitem__(struct __pyx_obj_4borg_9hashindex_ChunkIndex *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_value) { uint32_t __pyx_v_data[3]; uint32_t __pyx_v_refcount; int __pyx_r; __Pyx_RefNannyDeclarations Py_ssize_t __pyx_t_1; PyObject *__pyx_t_2 = NULL; uint32_t __pyx_t_3; char *__pyx_t_4; int __pyx_t_5; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__setitem__", 0); /* "borg/hashindex.pyx":304 * * def __setitem__(self, key, value): * assert len(key) == self.key_size # <<<<<<<<<<<<<< * cdef uint32_t[3] data * cdef uint32_t refcount = value[0] */ #ifndef CYTHON_WITHOUT_ASSERTIONS if (unlikely(!Py_OptimizeFlag)) { __pyx_t_1 = PyObject_Length(__pyx_v_key); if (unlikely(__pyx_t_1 == ((Py_ssize_t)-1))) __PYX_ERR(0, 304, __pyx_L1_error) if (unlikely(!((__pyx_t_1 == __pyx_v_self->__pyx_base.key_size) != 0))) { PyErr_SetNone(PyExc_AssertionError); __PYX_ERR(0, 304, __pyx_L1_error) } } #endif /* "borg/hashindex.pyx":306 * assert len(key) == self.key_size * cdef uint32_t[3] data * cdef uint32_t refcount = value[0] # <<<<<<<<<<<<<< * assert refcount <= _MAX_VALUE, "invalid reference count" * data[0] = _htole32(refcount) */ __pyx_t_2 = __Pyx_GetItemInt(__pyx_v_value, 0, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 306, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = __Pyx_PyInt_As_uint32_t(__pyx_t_2); if (unlikely((__pyx_t_3 == ((uint32_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 306, __pyx_L1_error) __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_v_refcount = __pyx_t_3; /* "borg/hashindex.pyx":307 * cdef uint32_t[3] data * cdef uint32_t refcount = value[0] * assert refcount <= _MAX_VALUE, "invalid reference count" # <<<<<<<<<<<<<< * data[0] = _htole32(refcount) * data[1] = _htole32(value[1]) */ #ifndef CYTHON_WITHOUT_ASSERTIONS if (unlikely(!Py_OptimizeFlag)) { if (unlikely(!((__pyx_v_refcount <= _MAX_VALUE) != 0))) { PyErr_SetObject(PyExc_AssertionError, __pyx_kp_u_invalid_reference_count); __PYX_ERR(0, 307, __pyx_L1_error) } } #endif /* "borg/hashindex.pyx":308 * cdef uint32_t refcount = value[0] * assert refcount <= _MAX_VALUE, "invalid reference count" * data[0] = _htole32(refcount) # <<<<<<<<<<<<<< * data[1] = _htole32(value[1]) * data[2] = _htole32(value[2]) */ (__pyx_v_data[0]) = _htole32(__pyx_v_refcount); /* "borg/hashindex.pyx":309 * assert refcount <= _MAX_VALUE, "invalid reference count" * data[0] = _htole32(refcount) * data[1] = _htole32(value[1]) # <<<<<<<<<<<<<< * data[2] = _htole32(value[2]) * if not hashindex_set(self.index, key, data): */ __pyx_t_2 = __Pyx_GetItemInt(__pyx_v_value, 1, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 309, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = __Pyx_PyInt_As_uint32_t(__pyx_t_2); if (unlikely((__pyx_t_3 == ((uint32_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 309, __pyx_L1_error) __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; 
(__pyx_v_data[1]) = _htole32(__pyx_t_3); /* "borg/hashindex.pyx":310 * data[0] = _htole32(refcount) * data[1] = _htole32(value[1]) * data[2] = _htole32(value[2]) # <<<<<<<<<<<<<< * if not hashindex_set(self.index, key, data): * raise Exception('hashindex_set failed') */ __pyx_t_2 = __Pyx_GetItemInt(__pyx_v_value, 2, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 310, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = __Pyx_PyInt_As_uint32_t(__pyx_t_2); if (unlikely((__pyx_t_3 == ((uint32_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 310, __pyx_L1_error) __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; (__pyx_v_data[2]) = _htole32(__pyx_t_3); /* "borg/hashindex.pyx":311 * data[1] = _htole32(value[1]) * data[2] = _htole32(value[2]) * if not hashindex_set(self.index, key, data): # <<<<<<<<<<<<<< * raise Exception('hashindex_set failed') * */ __pyx_t_4 = __Pyx_PyObject_AsWritableString(__pyx_v_key); if (unlikely((!__pyx_t_4) && PyErr_Occurred())) __PYX_ERR(0, 311, __pyx_L1_error) __pyx_t_5 = ((!(hashindex_set(__pyx_v_self->__pyx_base.index, ((char *)__pyx_t_4), __pyx_v_data) != 0)) != 0); if (unlikely(__pyx_t_5)) { /* "borg/hashindex.pyx":312 * data[2] = _htole32(value[2]) * if not hashindex_set(self.index, key, data): * raise Exception('hashindex_set failed') # <<<<<<<<<<<<<< * * def __contains__(self, key): */ __pyx_t_2 = __Pyx_PyObject_Call(((PyObject *)(&((PyTypeObject*)PyExc_Exception)[0])), __pyx_tuple__8, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 312, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_Raise(__pyx_t_2, 0, 0, 0); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __PYX_ERR(0, 312, __pyx_L1_error) /* "borg/hashindex.pyx":311 * data[1] = _htole32(value[1]) * data[2] = _htole32(value[2]) * if not hashindex_set(self.index, key, data): # <<<<<<<<<<<<<< * raise Exception('hashindex_set failed') * */ } /* "borg/hashindex.pyx":303 * return ChunkIndexEntry(refcount, _le32toh(data[1]), _le32toh(data[2])) * * def __setitem__(self, key, value): # <<<<<<<<<<<<<< * assert len(key) == self.key_size * cdef uint32_t[3] data */ /* function exit code */ __pyx_r = 0; goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_AddTraceback("borg.hashindex.ChunkIndex.__setitem__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "borg/hashindex.pyx":314 * raise Exception('hashindex_set failed') * * def __contains__(self, key): # <<<<<<<<<<<<<< * assert len(key) == self.key_size * data = hashindex_get(self.index, key) */ /* Python wrapper */ static int __pyx_pw_4borg_9hashindex_10ChunkIndex_5__contains__(PyObject *__pyx_v_self, PyObject *__pyx_v_key); /*proto*/ static int __pyx_pw_4borg_9hashindex_10ChunkIndex_5__contains__(PyObject *__pyx_v_self, PyObject *__pyx_v_key) { int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__contains__ (wrapper)", 0); __pyx_r = __pyx_pf_4borg_9hashindex_10ChunkIndex_4__contains__(((struct __pyx_obj_4borg_9hashindex_ChunkIndex *)__pyx_v_self), ((PyObject *)__pyx_v_key)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static int __pyx_pf_4borg_9hashindex_10ChunkIndex_4__contains__(struct __pyx_obj_4borg_9hashindex_ChunkIndex *__pyx_v_self, PyObject *__pyx_v_key) { uint32_t *__pyx_v_data; int __pyx_r; __Pyx_RefNannyDeclarations Py_ssize_t __pyx_t_1; char *__pyx_t_2; int __pyx_t_3; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__contains__", 0); /* "borg/hashindex.pyx":315 
* * def __contains__(self, key): * assert len(key) == self.key_size # <<<<<<<<<<<<<< * data = hashindex_get(self.index, key) * if data != NULL: */ #ifndef CYTHON_WITHOUT_ASSERTIONS if (unlikely(!Py_OptimizeFlag)) { __pyx_t_1 = PyObject_Length(__pyx_v_key); if (unlikely(__pyx_t_1 == ((Py_ssize_t)-1))) __PYX_ERR(0, 315, __pyx_L1_error) if (unlikely(!((__pyx_t_1 == __pyx_v_self->__pyx_base.key_size) != 0))) { PyErr_SetNone(PyExc_AssertionError); __PYX_ERR(0, 315, __pyx_L1_error) } } #endif /* "borg/hashindex.pyx":316 * def __contains__(self, key): * assert len(key) == self.key_size * data = hashindex_get(self.index, key) # <<<<<<<<<<<<<< * if data != NULL: * assert _le32toh(data[0]) <= _MAX_VALUE, "invalid reference count" */ __pyx_t_2 = __Pyx_PyObject_AsWritableString(__pyx_v_key); if (unlikely((!__pyx_t_2) && PyErr_Occurred())) __PYX_ERR(0, 316, __pyx_L1_error) __pyx_v_data = ((uint32_t *)hashindex_get(__pyx_v_self->__pyx_base.index, ((char *)__pyx_t_2))); /* "borg/hashindex.pyx":317 * assert len(key) == self.key_size * data = hashindex_get(self.index, key) * if data != NULL: # <<<<<<<<<<<<<< * assert _le32toh(data[0]) <= _MAX_VALUE, "invalid reference count" * return data != NULL */ __pyx_t_3 = ((__pyx_v_data != NULL) != 0); if (__pyx_t_3) { /* "borg/hashindex.pyx":318 * data = hashindex_get(self.index, key) * if data != NULL: * assert _le32toh(data[0]) <= _MAX_VALUE, "invalid reference count" # <<<<<<<<<<<<<< * return data != NULL * */ #ifndef CYTHON_WITHOUT_ASSERTIONS if (unlikely(!Py_OptimizeFlag)) { if (unlikely(!((_le32toh((__pyx_v_data[0])) <= _MAX_VALUE) != 0))) { PyErr_SetObject(PyExc_AssertionError, __pyx_kp_u_invalid_reference_count); __PYX_ERR(0, 318, __pyx_L1_error) } } #endif /* "borg/hashindex.pyx":317 * assert len(key) == self.key_size * data = hashindex_get(self.index, key) * if data != NULL: # <<<<<<<<<<<<<< * assert _le32toh(data[0]) <= _MAX_VALUE, "invalid reference count" * return data != NULL */ } /* "borg/hashindex.pyx":319 * if data != NULL: * assert _le32toh(data[0]) <= _MAX_VALUE, "invalid reference count" * return data != NULL # <<<<<<<<<<<<<< * * def incref(self, key): */ __pyx_r = (__pyx_v_data != NULL); goto __pyx_L0; /* "borg/hashindex.pyx":314 * raise Exception('hashindex_set failed') * * def __contains__(self, key): # <<<<<<<<<<<<<< * assert len(key) == self.key_size * data = hashindex_get(self.index, key) */ /* function exit code */ __pyx_L1_error:; __Pyx_AddTraceback("borg.hashindex.ChunkIndex.__contains__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "borg/hashindex.pyx":321 * return data != NULL * * def incref(self, key): # <<<<<<<<<<<<<< * """Increase refcount for 'key', return (refcount, size, csize)""" * assert len(key) == self.key_size */ /* Python wrapper */ static PyObject *__pyx_pw_4borg_9hashindex_10ChunkIndex_7incref(PyObject *__pyx_v_self, PyObject *__pyx_v_key); /*proto*/ static char __pyx_doc_4borg_9hashindex_10ChunkIndex_6incref[] = "Increase refcount for 'key', return (refcount, size, csize)"; static PyObject *__pyx_pw_4borg_9hashindex_10ChunkIndex_7incref(PyObject *__pyx_v_self, PyObject *__pyx_v_key) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("incref (wrapper)", 0); __pyx_r = __pyx_pf_4borg_9hashindex_10ChunkIndex_6incref(((struct __pyx_obj_4borg_9hashindex_ChunkIndex *)__pyx_v_self), ((PyObject *)__pyx_v_key)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject 
*__pyx_pf_4borg_9hashindex_10ChunkIndex_6incref(struct __pyx_obj_4borg_9hashindex_ChunkIndex *__pyx_v_self, PyObject *__pyx_v_key) { uint32_t *__pyx_v_data; uint32_t __pyx_v_refcount; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations Py_ssize_t __pyx_t_1; char *__pyx_t_2; int __pyx_t_3; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; PyObject *__pyx_t_6 = NULL; PyObject *__pyx_t_7 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("incref", 0); /* "borg/hashindex.pyx":323 * def incref(self, key): * """Increase refcount for 'key', return (refcount, size, csize)""" * assert len(key) == self.key_size # <<<<<<<<<<<<<< * data = hashindex_get(self.index, key) * if not data: */ #ifndef CYTHON_WITHOUT_ASSERTIONS if (unlikely(!Py_OptimizeFlag)) { __pyx_t_1 = PyObject_Length(__pyx_v_key); if (unlikely(__pyx_t_1 == ((Py_ssize_t)-1))) __PYX_ERR(0, 323, __pyx_L1_error) if (unlikely(!((__pyx_t_1 == __pyx_v_self->__pyx_base.key_size) != 0))) { PyErr_SetNone(PyExc_AssertionError); __PYX_ERR(0, 323, __pyx_L1_error) } } #endif /* "borg/hashindex.pyx":324 * """Increase refcount for 'key', return (refcount, size, csize)""" * assert len(key) == self.key_size * data = hashindex_get(self.index, key) # <<<<<<<<<<<<<< * if not data: * raise KeyError(key) */ __pyx_t_2 = __Pyx_PyObject_AsWritableString(__pyx_v_key); if (unlikely((!__pyx_t_2) && PyErr_Occurred())) __PYX_ERR(0, 324, __pyx_L1_error) __pyx_v_data = ((uint32_t *)hashindex_get(__pyx_v_self->__pyx_base.index, ((char *)__pyx_t_2))); /* "borg/hashindex.pyx":325 * assert len(key) == self.key_size * data = hashindex_get(self.index, key) * if not data: # <<<<<<<<<<<<<< * raise KeyError(key) * cdef uint32_t refcount = _le32toh(data[0]) */ __pyx_t_3 = ((!(__pyx_v_data != 0)) != 0); if (unlikely(__pyx_t_3)) { /* "borg/hashindex.pyx":326 * data = hashindex_get(self.index, key) * if not data: * raise KeyError(key) # <<<<<<<<<<<<<< * cdef uint32_t refcount = _le32toh(data[0]) * assert refcount <= _MAX_VALUE, "invalid reference count" */ __pyx_t_4 = __Pyx_PyObject_CallOneArg(__pyx_builtin_KeyError, __pyx_v_key); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 326, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_Raise(__pyx_t_4, 0, 0, 0); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __PYX_ERR(0, 326, __pyx_L1_error) /* "borg/hashindex.pyx":325 * assert len(key) == self.key_size * data = hashindex_get(self.index, key) * if not data: # <<<<<<<<<<<<<< * raise KeyError(key) * cdef uint32_t refcount = _le32toh(data[0]) */ } /* "borg/hashindex.pyx":327 * if not data: * raise KeyError(key) * cdef uint32_t refcount = _le32toh(data[0]) # <<<<<<<<<<<<<< * assert refcount <= _MAX_VALUE, "invalid reference count" * if refcount != _MAX_VALUE: */ __pyx_v_refcount = _le32toh((__pyx_v_data[0])); /* "borg/hashindex.pyx":328 * raise KeyError(key) * cdef uint32_t refcount = _le32toh(data[0]) * assert refcount <= _MAX_VALUE, "invalid reference count" # <<<<<<<<<<<<<< * if refcount != _MAX_VALUE: * refcount += 1 */ #ifndef CYTHON_WITHOUT_ASSERTIONS if (unlikely(!Py_OptimizeFlag)) { if (unlikely(!((__pyx_v_refcount <= _MAX_VALUE) != 0))) { PyErr_SetObject(PyExc_AssertionError, __pyx_kp_u_invalid_reference_count); __PYX_ERR(0, 328, __pyx_L1_error) } } #endif /* "borg/hashindex.pyx":329 * cdef uint32_t refcount = _le32toh(data[0]) * assert refcount <= _MAX_VALUE, "invalid reference count" * if refcount != _MAX_VALUE: # <<<<<<<<<<<<<< * refcount += 1 * data[0] = _htole32(refcount) */ __pyx_t_3 = ((__pyx_v_refcount != _MAX_VALUE) != 
0); if (__pyx_t_3) { /* "borg/hashindex.pyx":330 * assert refcount <= _MAX_VALUE, "invalid reference count" * if refcount != _MAX_VALUE: * refcount += 1 # <<<<<<<<<<<<<< * data[0] = _htole32(refcount) * return refcount, _le32toh(data[1]), _le32toh(data[2]) */ __pyx_v_refcount = (__pyx_v_refcount + 1); /* "borg/hashindex.pyx":329 * cdef uint32_t refcount = _le32toh(data[0]) * assert refcount <= _MAX_VALUE, "invalid reference count" * if refcount != _MAX_VALUE: # <<<<<<<<<<<<<< * refcount += 1 * data[0] = _htole32(refcount) */ } /* "borg/hashindex.pyx":331 * if refcount != _MAX_VALUE: * refcount += 1 * data[0] = _htole32(refcount) # <<<<<<<<<<<<<< * return refcount, _le32toh(data[1]), _le32toh(data[2]) * */ (__pyx_v_data[0]) = _htole32(__pyx_v_refcount); /* "borg/hashindex.pyx":332 * refcount += 1 * data[0] = _htole32(refcount) * return refcount, _le32toh(data[1]), _le32toh(data[2]) # <<<<<<<<<<<<<< * * def decref(self, key): */ __Pyx_XDECREF(__pyx_r); __pyx_t_4 = __Pyx_PyInt_From_uint32_t(__pyx_v_refcount); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 332, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_5 = __Pyx_PyInt_From_uint32_t(_le32toh((__pyx_v_data[1]))); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 332, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_6 = __Pyx_PyInt_From_uint32_t(_le32toh((__pyx_v_data[2]))); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 332, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __pyx_t_7 = PyTuple_New(3); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 332, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_7, 0, __pyx_t_4); __Pyx_GIVEREF(__pyx_t_5); PyTuple_SET_ITEM(__pyx_t_7, 1, __pyx_t_5); __Pyx_GIVEREF(__pyx_t_6); PyTuple_SET_ITEM(__pyx_t_7, 2, __pyx_t_6); __pyx_t_4 = 0; __pyx_t_5 = 0; __pyx_t_6 = 0; __pyx_r = __pyx_t_7; __pyx_t_7 = 0; goto __pyx_L0; /* "borg/hashindex.pyx":321 * return data != NULL * * def incref(self, key): # <<<<<<<<<<<<<< * """Increase refcount for 'key', return (refcount, size, csize)""" * assert len(key) == self.key_size */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_XDECREF(__pyx_t_6); __Pyx_XDECREF(__pyx_t_7); __Pyx_AddTraceback("borg.hashindex.ChunkIndex.incref", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "borg/hashindex.pyx":334 * return refcount, _le32toh(data[1]), _le32toh(data[2]) * * def decref(self, key): # <<<<<<<<<<<<<< * """Decrease refcount for 'key', return (refcount, size, csize)""" * assert len(key) == self.key_size */ /* Python wrapper */ static PyObject *__pyx_pw_4borg_9hashindex_10ChunkIndex_9decref(PyObject *__pyx_v_self, PyObject *__pyx_v_key); /*proto*/ static char __pyx_doc_4borg_9hashindex_10ChunkIndex_8decref[] = "Decrease refcount for 'key', return (refcount, size, csize)"; static PyObject *__pyx_pw_4borg_9hashindex_10ChunkIndex_9decref(PyObject *__pyx_v_self, PyObject *__pyx_v_key) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("decref (wrapper)", 0); __pyx_r = __pyx_pf_4borg_9hashindex_10ChunkIndex_8decref(((struct __pyx_obj_4borg_9hashindex_ChunkIndex *)__pyx_v_self), ((PyObject *)__pyx_v_key)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4borg_9hashindex_10ChunkIndex_8decref(struct __pyx_obj_4borg_9hashindex_ChunkIndex *__pyx_v_self, PyObject *__pyx_v_key) { uint32_t *__pyx_v_data; uint32_t __pyx_v_refcount; PyObject *__pyx_r = NULL; 
__Pyx_RefNannyDeclarations Py_ssize_t __pyx_t_1; char *__pyx_t_2; int __pyx_t_3; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; PyObject *__pyx_t_6 = NULL; PyObject *__pyx_t_7 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("decref", 0); /* "borg/hashindex.pyx":336 * def decref(self, key): * """Decrease refcount for 'key', return (refcount, size, csize)""" * assert len(key) == self.key_size # <<<<<<<<<<<<<< * data = hashindex_get(self.index, key) * if not data: */ #ifndef CYTHON_WITHOUT_ASSERTIONS if (unlikely(!Py_OptimizeFlag)) { __pyx_t_1 = PyObject_Length(__pyx_v_key); if (unlikely(__pyx_t_1 == ((Py_ssize_t)-1))) __PYX_ERR(0, 336, __pyx_L1_error) if (unlikely(!((__pyx_t_1 == __pyx_v_self->__pyx_base.key_size) != 0))) { PyErr_SetNone(PyExc_AssertionError); __PYX_ERR(0, 336, __pyx_L1_error) } } #endif /* "borg/hashindex.pyx":337 * """Decrease refcount for 'key', return (refcount, size, csize)""" * assert len(key) == self.key_size * data = hashindex_get(self.index, key) # <<<<<<<<<<<<<< * if not data: * raise KeyError(key) */ __pyx_t_2 = __Pyx_PyObject_AsWritableString(__pyx_v_key); if (unlikely((!__pyx_t_2) && PyErr_Occurred())) __PYX_ERR(0, 337, __pyx_L1_error) __pyx_v_data = ((uint32_t *)hashindex_get(__pyx_v_self->__pyx_base.index, ((char *)__pyx_t_2))); /* "borg/hashindex.pyx":338 * assert len(key) == self.key_size * data = hashindex_get(self.index, key) * if not data: # <<<<<<<<<<<<<< * raise KeyError(key) * cdef uint32_t refcount = _le32toh(data[0]) */ __pyx_t_3 = ((!(__pyx_v_data != 0)) != 0); if (unlikely(__pyx_t_3)) { /* "borg/hashindex.pyx":339 * data = hashindex_get(self.index, key) * if not data: * raise KeyError(key) # <<<<<<<<<<<<<< * cdef uint32_t refcount = _le32toh(data[0]) * # Never decrease a reference count of zero */ __pyx_t_4 = __Pyx_PyObject_CallOneArg(__pyx_builtin_KeyError, __pyx_v_key); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 339, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_Raise(__pyx_t_4, 0, 0, 0); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __PYX_ERR(0, 339, __pyx_L1_error) /* "borg/hashindex.pyx":338 * assert len(key) == self.key_size * data = hashindex_get(self.index, key) * if not data: # <<<<<<<<<<<<<< * raise KeyError(key) * cdef uint32_t refcount = _le32toh(data[0]) */ } /* "borg/hashindex.pyx":340 * if not data: * raise KeyError(key) * cdef uint32_t refcount = _le32toh(data[0]) # <<<<<<<<<<<<<< * # Never decrease a reference count of zero * assert 0 < refcount <= _MAX_VALUE, "invalid reference count" */ __pyx_v_refcount = _le32toh((__pyx_v_data[0])); /* "borg/hashindex.pyx":342 * cdef uint32_t refcount = _le32toh(data[0]) * # Never decrease a reference count of zero * assert 0 < refcount <= _MAX_VALUE, "invalid reference count" # <<<<<<<<<<<<<< * if refcount != _MAX_VALUE: * refcount -= 1 */ #ifndef CYTHON_WITHOUT_ASSERTIONS if (unlikely(!Py_OptimizeFlag)) { __pyx_t_3 = (0 < __pyx_v_refcount); if (__pyx_t_3) { __pyx_t_3 = (__pyx_v_refcount <= _MAX_VALUE); } if (unlikely(!(__pyx_t_3 != 0))) { PyErr_SetObject(PyExc_AssertionError, __pyx_kp_u_invalid_reference_count); __PYX_ERR(0, 342, __pyx_L1_error) } } #endif /* "borg/hashindex.pyx":343 * # Never decrease a reference count of zero * assert 0 < refcount <= _MAX_VALUE, "invalid reference count" * if refcount != _MAX_VALUE: # <<<<<<<<<<<<<< * refcount -= 1 * data[0] = _htole32(refcount) */ __pyx_t_3 = ((__pyx_v_refcount != _MAX_VALUE) != 0); if (__pyx_t_3) { /* "borg/hashindex.pyx":344 * assert 0 < refcount <= _MAX_VALUE, "invalid 
reference count" * if refcount != _MAX_VALUE: * refcount -= 1 # <<<<<<<<<<<<<< * data[0] = _htole32(refcount) * return refcount, _le32toh(data[1]), _le32toh(data[2]) */ __pyx_v_refcount = (__pyx_v_refcount - 1); /* "borg/hashindex.pyx":343 * # Never decrease a reference count of zero * assert 0 < refcount <= _MAX_VALUE, "invalid reference count" * if refcount != _MAX_VALUE: # <<<<<<<<<<<<<< * refcount -= 1 * data[0] = _htole32(refcount) */ } /* "borg/hashindex.pyx":345 * if refcount != _MAX_VALUE: * refcount -= 1 * data[0] = _htole32(refcount) # <<<<<<<<<<<<<< * return refcount, _le32toh(data[1]), _le32toh(data[2]) * */ (__pyx_v_data[0]) = _htole32(__pyx_v_refcount); /* "borg/hashindex.pyx":346 * refcount -= 1 * data[0] = _htole32(refcount) * return refcount, _le32toh(data[1]), _le32toh(data[2]) # <<<<<<<<<<<<<< * * def iteritems(self, marker=None): */ __Pyx_XDECREF(__pyx_r); __pyx_t_4 = __Pyx_PyInt_From_uint32_t(__pyx_v_refcount); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 346, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_5 = __Pyx_PyInt_From_uint32_t(_le32toh((__pyx_v_data[1]))); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 346, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_6 = __Pyx_PyInt_From_uint32_t(_le32toh((__pyx_v_data[2]))); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 346, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __pyx_t_7 = PyTuple_New(3); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 346, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_7, 0, __pyx_t_4); __Pyx_GIVEREF(__pyx_t_5); PyTuple_SET_ITEM(__pyx_t_7, 1, __pyx_t_5); __Pyx_GIVEREF(__pyx_t_6); PyTuple_SET_ITEM(__pyx_t_7, 2, __pyx_t_6); __pyx_t_4 = 0; __pyx_t_5 = 0; __pyx_t_6 = 0; __pyx_r = __pyx_t_7; __pyx_t_7 = 0; goto __pyx_L0; /* "borg/hashindex.pyx":334 * return refcount, _le32toh(data[1]), _le32toh(data[2]) * * def decref(self, key): # <<<<<<<<<<<<<< * """Decrease refcount for 'key', return (refcount, size, csize)""" * assert len(key) == self.key_size */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_XDECREF(__pyx_t_6); __Pyx_XDECREF(__pyx_t_7); __Pyx_AddTraceback("borg.hashindex.ChunkIndex.decref", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "borg/hashindex.pyx":348 * return refcount, _le32toh(data[1]), _le32toh(data[2]) * * def iteritems(self, marker=None): # <<<<<<<<<<<<<< * cdef const void *key * iter = ChunkKeyIterator(self.key_size) */ /* Python wrapper */ static PyObject *__pyx_pw_4borg_9hashindex_10ChunkIndex_11iteritems(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static PyObject *__pyx_pw_4borg_9hashindex_10ChunkIndex_11iteritems(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyObject *__pyx_v_marker = 0; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("iteritems (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_marker,0}; PyObject* values[1] = {0}; values[0] = ((PyObject *)Py_None); if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (kw_args > 0) { PyObject* value = 
__Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_marker); if (value) { values[0] = value; kw_args--; } } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "iteritems") < 0)) __PYX_ERR(0, 348, __pyx_L3_error) } } else { switch (PyTuple_GET_SIZE(__pyx_args)) { case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } } __pyx_v_marker = values[0]; } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("iteritems", 0, 0, 1, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 348, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("borg.hashindex.ChunkIndex.iteritems", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_4borg_9hashindex_10ChunkIndex_10iteritems(((struct __pyx_obj_4borg_9hashindex_ChunkIndex *)__pyx_v_self), __pyx_v_marker); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4borg_9hashindex_10ChunkIndex_10iteritems(struct __pyx_obj_4borg_9hashindex_ChunkIndex *__pyx_v_self, PyObject *__pyx_v_marker) { void const *__pyx_v_key; struct __pyx_obj_4borg_9hashindex_ChunkKeyIterator *__pyx_v_iter = NULL; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; HashIndex *__pyx_t_3; int __pyx_t_4; char *__pyx_t_5; int __pyx_t_6; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("iteritems", 0); /* "borg/hashindex.pyx":350 * def iteritems(self, marker=None): * cdef const void *key * iter = ChunkKeyIterator(self.key_size) # <<<<<<<<<<<<<< * iter.idx = self * iter.index = self.index */ __pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_self->__pyx_base.key_size); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 350, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __Pyx_PyObject_CallOneArg(((PyObject *)__pyx_ptype_4borg_9hashindex_ChunkKeyIterator), __pyx_t_1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 350, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_v_iter = ((struct __pyx_obj_4borg_9hashindex_ChunkKeyIterator *)__pyx_t_2); __pyx_t_2 = 0; /* "borg/hashindex.pyx":351 * cdef const void *key * iter = ChunkKeyIterator(self.key_size) * iter.idx = self # <<<<<<<<<<<<<< * iter.index = self.index * if marker: */ __Pyx_INCREF(((PyObject *)__pyx_v_self)); __Pyx_GIVEREF(((PyObject *)__pyx_v_self)); __Pyx_GOTREF(__pyx_v_iter->idx); __Pyx_DECREF(((PyObject *)__pyx_v_iter->idx)); __pyx_v_iter->idx = __pyx_v_self; /* "borg/hashindex.pyx":352 * iter = ChunkKeyIterator(self.key_size) * iter.idx = self * iter.index = self.index # <<<<<<<<<<<<<< * if marker: * key = hashindex_get(self.index, marker) */ __pyx_t_3 = __pyx_v_self->__pyx_base.index; __pyx_v_iter->index = __pyx_t_3; /* "borg/hashindex.pyx":353 * iter.idx = self * iter.index = self.index * if marker: # <<<<<<<<<<<<<< * key = hashindex_get(self.index, marker) * if marker is None: */ __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_v_marker); if (unlikely(__pyx_t_4 < 0)) __PYX_ERR(0, 353, __pyx_L1_error) if (__pyx_t_4) { /* "borg/hashindex.pyx":354 * iter.index = self.index * if marker: * key = hashindex_get(self.index, marker) # <<<<<<<<<<<<<< * if marker is None: * raise IndexError */ __pyx_t_5 = __Pyx_PyObject_AsWritableString(__pyx_v_marker); if (unlikely((!__pyx_t_5) && PyErr_Occurred())) __PYX_ERR(0, 354, 
__pyx_L1_error) __pyx_v_key = hashindex_get(__pyx_v_self->__pyx_base.index, ((char *)__pyx_t_5)); /* "borg/hashindex.pyx":355 * if marker: * key = hashindex_get(self.index, marker) * if marker is None: # <<<<<<<<<<<<<< * raise IndexError * iter.key = key - self.key_size */ __pyx_t_4 = (__pyx_v_marker == Py_None); __pyx_t_6 = (__pyx_t_4 != 0); if (unlikely(__pyx_t_6)) { /* "borg/hashindex.pyx":356 * key = hashindex_get(self.index, marker) * if marker is None: * raise IndexError # <<<<<<<<<<<<<< * iter.key = key - self.key_size * return iter */ __Pyx_Raise(__pyx_builtin_IndexError, 0, 0, 0); __PYX_ERR(0, 356, __pyx_L1_error) /* "borg/hashindex.pyx":355 * if marker: * key = hashindex_get(self.index, marker) * if marker is None: # <<<<<<<<<<<<<< * raise IndexError * iter.key = key - self.key_size */ } /* "borg/hashindex.pyx":357 * if marker is None: * raise IndexError * iter.key = key - self.key_size # <<<<<<<<<<<<<< * return iter * */ __pyx_v_iter->key = (__pyx_v_key - __pyx_v_self->__pyx_base.key_size); /* "borg/hashindex.pyx":353 * iter.idx = self * iter.index = self.index * if marker: # <<<<<<<<<<<<<< * key = hashindex_get(self.index, marker) * if marker is None: */ } /* "borg/hashindex.pyx":358 * raise IndexError * iter.key = key - self.key_size * return iter # <<<<<<<<<<<<<< * * def summarize(self): */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(((PyObject *)__pyx_v_iter)); __pyx_r = ((PyObject *)__pyx_v_iter); goto __pyx_L0; /* "borg/hashindex.pyx":348 * return refcount, _le32toh(data[1]), _le32toh(data[2]) * * def iteritems(self, marker=None): # <<<<<<<<<<<<<< * cdef const void *key * iter = ChunkKeyIterator(self.key_size) */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_AddTraceback("borg.hashindex.ChunkIndex.iteritems", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_iter); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "borg/hashindex.pyx":360 * return iter * * def summarize(self): # <<<<<<<<<<<<<< * cdef uint64_t size = 0, csize = 0, unique_size = 0, unique_csize = 0, chunks = 0, unique_chunks = 0 * cdef uint32_t *values */ /* Python wrapper */ static PyObject *__pyx_pw_4borg_9hashindex_10ChunkIndex_13summarize(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ static PyObject *__pyx_pw_4borg_9hashindex_10ChunkIndex_13summarize(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("summarize (wrapper)", 0); __pyx_r = __pyx_pf_4borg_9hashindex_10ChunkIndex_12summarize(((struct __pyx_obj_4borg_9hashindex_ChunkIndex *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4borg_9hashindex_10ChunkIndex_12summarize(struct __pyx_obj_4borg_9hashindex_ChunkIndex *__pyx_v_self) { uint64_t __pyx_v_size; uint64_t __pyx_v_csize; uint64_t __pyx_v_unique_size; uint64_t __pyx_v_unique_csize; uint64_t __pyx_v_chunks; uint64_t __pyx_v_unique_chunks; uint32_t *__pyx_v_values; uint32_t __pyx_v_refcount; void *__pyx_v_key; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; PyObject *__pyx_t_6 = NULL; PyObject *__pyx_t_7 = NULL; PyObject *__pyx_t_8 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("summarize", 0); /* 
"borg/hashindex.pyx":361 * * def summarize(self): * cdef uint64_t size = 0, csize = 0, unique_size = 0, unique_csize = 0, chunks = 0, unique_chunks = 0 # <<<<<<<<<<<<<< * cdef uint32_t *values * cdef uint32_t refcount */ __pyx_v_size = 0; __pyx_v_csize = 0; __pyx_v_unique_size = 0; __pyx_v_unique_csize = 0; __pyx_v_chunks = 0; __pyx_v_unique_chunks = 0; /* "borg/hashindex.pyx":364 * cdef uint32_t *values * cdef uint32_t refcount * cdef void *key = NULL # <<<<<<<<<<<<<< * * while True: */ __pyx_v_key = NULL; /* "borg/hashindex.pyx":366 * cdef void *key = NULL * * while True: # <<<<<<<<<<<<<< * key = hashindex_next_key(self.index, key) * if not key: */ while (1) { /* "borg/hashindex.pyx":367 * * while True: * key = hashindex_next_key(self.index, key) # <<<<<<<<<<<<<< * if not key: * break */ __pyx_v_key = hashindex_next_key(__pyx_v_self->__pyx_base.index, __pyx_v_key); /* "borg/hashindex.pyx":368 * while True: * key = hashindex_next_key(self.index, key) * if not key: # <<<<<<<<<<<<<< * break * unique_chunks += 1 */ __pyx_t_1 = ((!(__pyx_v_key != 0)) != 0); if (__pyx_t_1) { /* "borg/hashindex.pyx":369 * key = hashindex_next_key(self.index, key) * if not key: * break # <<<<<<<<<<<<<< * unique_chunks += 1 * values = (key + self.key_size) */ goto __pyx_L4_break; /* "borg/hashindex.pyx":368 * while True: * key = hashindex_next_key(self.index, key) * if not key: # <<<<<<<<<<<<<< * break * unique_chunks += 1 */ } /* "borg/hashindex.pyx":370 * if not key: * break * unique_chunks += 1 # <<<<<<<<<<<<<< * values = (key + self.key_size) * refcount = _le32toh(values[0]) */ __pyx_v_unique_chunks = (__pyx_v_unique_chunks + 1); /* "borg/hashindex.pyx":371 * break * unique_chunks += 1 * values = (key + self.key_size) # <<<<<<<<<<<<<< * refcount = _le32toh(values[0]) * assert refcount <= _MAX_VALUE, "invalid reference count" */ __pyx_v_values = ((uint32_t *)(__pyx_v_key + __pyx_v_self->__pyx_base.key_size)); /* "borg/hashindex.pyx":372 * unique_chunks += 1 * values = (key + self.key_size) * refcount = _le32toh(values[0]) # <<<<<<<<<<<<<< * assert refcount <= _MAX_VALUE, "invalid reference count" * chunks += refcount */ __pyx_v_refcount = _le32toh((__pyx_v_values[0])); /* "borg/hashindex.pyx":373 * values = (key + self.key_size) * refcount = _le32toh(values[0]) * assert refcount <= _MAX_VALUE, "invalid reference count" # <<<<<<<<<<<<<< * chunks += refcount * unique_size += _le32toh(values[1]) */ #ifndef CYTHON_WITHOUT_ASSERTIONS if (unlikely(!Py_OptimizeFlag)) { if (unlikely(!((__pyx_v_refcount <= _MAX_VALUE) != 0))) { PyErr_SetObject(PyExc_AssertionError, __pyx_kp_u_invalid_reference_count); __PYX_ERR(0, 373, __pyx_L1_error) } } #endif /* "borg/hashindex.pyx":374 * refcount = _le32toh(values[0]) * assert refcount <= _MAX_VALUE, "invalid reference count" * chunks += refcount # <<<<<<<<<<<<<< * unique_size += _le32toh(values[1]) * unique_csize += _le32toh(values[2]) */ __pyx_v_chunks = (__pyx_v_chunks + __pyx_v_refcount); /* "borg/hashindex.pyx":375 * assert refcount <= _MAX_VALUE, "invalid reference count" * chunks += refcount * unique_size += _le32toh(values[1]) # <<<<<<<<<<<<<< * unique_csize += _le32toh(values[2]) * size += _le32toh(values[1]) * _le32toh(values[0]) */ __pyx_v_unique_size = (__pyx_v_unique_size + _le32toh((__pyx_v_values[1]))); /* "borg/hashindex.pyx":376 * chunks += refcount * unique_size += _le32toh(values[1]) * unique_csize += _le32toh(values[2]) # <<<<<<<<<<<<<< * size += _le32toh(values[1]) * _le32toh(values[0]) * csize += _le32toh(values[2]) * _le32toh(values[0]) */ 
__pyx_v_unique_csize = (__pyx_v_unique_csize + _le32toh((__pyx_v_values[2]))); /* "borg/hashindex.pyx":377 * unique_size += _le32toh(values[1]) * unique_csize += _le32toh(values[2]) * size += _le32toh(values[1]) * _le32toh(values[0]) # <<<<<<<<<<<<<< * csize += _le32toh(values[2]) * _le32toh(values[0]) * */ __pyx_v_size = (__pyx_v_size + (((uint64_t)_le32toh((__pyx_v_values[1]))) * _le32toh((__pyx_v_values[0])))); /* "borg/hashindex.pyx":378 * unique_csize += _le32toh(values[2]) * size += _le32toh(values[1]) * _le32toh(values[0]) * csize += _le32toh(values[2]) * _le32toh(values[0]) # <<<<<<<<<<<<<< * * return size, csize, unique_size, unique_csize, unique_chunks, chunks */ __pyx_v_csize = (__pyx_v_csize + (((uint64_t)_le32toh((__pyx_v_values[2]))) * _le32toh((__pyx_v_values[0])))); } __pyx_L4_break:; /* "borg/hashindex.pyx":380 * csize += _le32toh(values[2]) * _le32toh(values[0]) * * return size, csize, unique_size, unique_csize, unique_chunks, chunks # <<<<<<<<<<<<<< * * def stats_against(self, ChunkIndex master_index): */ __Pyx_XDECREF(__pyx_r); __pyx_t_2 = __Pyx_PyInt_From_uint64_t(__pyx_v_size); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 380, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = __Pyx_PyInt_From_uint64_t(__pyx_v_csize); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 380, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = __Pyx_PyInt_From_uint64_t(__pyx_v_unique_size); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 380, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_5 = __Pyx_PyInt_From_uint64_t(__pyx_v_unique_csize); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 380, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_6 = __Pyx_PyInt_From_uint64_t(__pyx_v_unique_chunks); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 380, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __pyx_t_7 = __Pyx_PyInt_From_uint64_t(__pyx_v_chunks); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 380, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __pyx_t_8 = PyTuple_New(6); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 380, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_8); __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_8, 0, __pyx_t_2); __Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_8, 1, __pyx_t_3); __Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_8, 2, __pyx_t_4); __Pyx_GIVEREF(__pyx_t_5); PyTuple_SET_ITEM(__pyx_t_8, 3, __pyx_t_5); __Pyx_GIVEREF(__pyx_t_6); PyTuple_SET_ITEM(__pyx_t_8, 4, __pyx_t_6); __Pyx_GIVEREF(__pyx_t_7); PyTuple_SET_ITEM(__pyx_t_8, 5, __pyx_t_7); __pyx_t_2 = 0; __pyx_t_3 = 0; __pyx_t_4 = 0; __pyx_t_5 = 0; __pyx_t_6 = 0; __pyx_t_7 = 0; __pyx_r = __pyx_t_8; __pyx_t_8 = 0; goto __pyx_L0; /* "borg/hashindex.pyx":360 * return iter * * def summarize(self): # <<<<<<<<<<<<<< * cdef uint64_t size = 0, csize = 0, unique_size = 0, unique_csize = 0, chunks = 0, unique_chunks = 0 * cdef uint32_t *values */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_XDECREF(__pyx_t_6); __Pyx_XDECREF(__pyx_t_7); __Pyx_XDECREF(__pyx_t_8); __Pyx_AddTraceback("borg.hashindex.ChunkIndex.summarize", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "borg/hashindex.pyx":382 * return size, csize, unique_size, unique_csize, unique_chunks, chunks * * def stats_against(self, ChunkIndex master_index): # <<<<<<<<<<<<<< * """ * Calculate chunk statistics of this index against *master_index*. 
*/ /* Python wrapper */ static PyObject *__pyx_pw_4borg_9hashindex_10ChunkIndex_15stats_against(PyObject *__pyx_v_self, PyObject *__pyx_v_master_index); /*proto*/ static char __pyx_doc_4borg_9hashindex_10ChunkIndex_14stats_against[] = "\n Calculate chunk statistics of this index against *master_index*.\n\n A chunk is counted as unique if the number of references\n in this index matches the number of references in *master_index*.\n\n This index must be a subset of *master_index*.\n\n Return the same statistics tuple as summarize:\n size, csize, unique_size, unique_csize, unique_chunks, chunks.\n "; static PyObject *__pyx_pw_4borg_9hashindex_10ChunkIndex_15stats_against(PyObject *__pyx_v_self, PyObject *__pyx_v_master_index) { int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("stats_against (wrapper)", 0); if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_master_index), __pyx_ptype_4borg_9hashindex_ChunkIndex, 1, "master_index", 0))) __PYX_ERR(0, 382, __pyx_L1_error) __pyx_r = __pyx_pf_4borg_9hashindex_10ChunkIndex_14stats_against(((struct __pyx_obj_4borg_9hashindex_ChunkIndex *)__pyx_v_self), ((struct __pyx_obj_4borg_9hashindex_ChunkIndex *)__pyx_v_master_index)); /* function exit code */ goto __pyx_L0; __pyx_L1_error:; __pyx_r = NULL; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4borg_9hashindex_10ChunkIndex_14stats_against(struct __pyx_obj_4borg_9hashindex_ChunkIndex *__pyx_v_self, struct __pyx_obj_4borg_9hashindex_ChunkIndex *__pyx_v_master_index) { uint64_t __pyx_v_size; uint64_t __pyx_v_csize; uint64_t __pyx_v_unique_size; uint64_t __pyx_v_unique_csize; uint64_t __pyx_v_chunks; uint64_t __pyx_v_unique_chunks; uint32_t __pyx_v_our_refcount; uint32_t __pyx_v_chunk_size; uint32_t __pyx_v_chunk_csize; uint32_t const *__pyx_v_our_values; uint32_t const *__pyx_v_master_values; void const *__pyx_v_key; HashIndex *__pyx_v_master; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations HashIndex *__pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; PyObject *__pyx_t_6 = NULL; PyObject *__pyx_t_7 = NULL; PyObject *__pyx_t_8 = NULL; PyObject *__pyx_t_9 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("stats_against", 0); /* "borg/hashindex.pyx":394 * size, csize, unique_size, unique_csize, unique_chunks, chunks. 
* """ * cdef uint64_t size = 0, csize = 0, unique_size = 0, unique_csize = 0, chunks = 0, unique_chunks = 0 # <<<<<<<<<<<<<< * cdef uint32_t our_refcount, chunk_size, chunk_csize * cdef const uint32_t *our_values */ __pyx_v_size = 0; __pyx_v_csize = 0; __pyx_v_unique_size = 0; __pyx_v_unique_csize = 0; __pyx_v_chunks = 0; __pyx_v_unique_chunks = 0; /* "borg/hashindex.pyx":398 * cdef const uint32_t *our_values * cdef const uint32_t *master_values * cdef const void *key = NULL # <<<<<<<<<<<<<< * cdef HashIndex *master = master_index.index * */ __pyx_v_key = NULL; /* "borg/hashindex.pyx":399 * cdef const uint32_t *master_values * cdef const void *key = NULL * cdef HashIndex *master = master_index.index # <<<<<<<<<<<<<< * * while True: */ __pyx_t_1 = __pyx_v_master_index->__pyx_base.index; __pyx_v_master = __pyx_t_1; /* "borg/hashindex.pyx":401 * cdef HashIndex *master = master_index.index * * while True: # <<<<<<<<<<<<<< * key = hashindex_next_key(self.index, key) * if not key: */ while (1) { /* "borg/hashindex.pyx":402 * * while True: * key = hashindex_next_key(self.index, key) # <<<<<<<<<<<<<< * if not key: * break */ __pyx_v_key = hashindex_next_key(__pyx_v_self->__pyx_base.index, __pyx_v_key); /* "borg/hashindex.pyx":403 * while True: * key = hashindex_next_key(self.index, key) * if not key: # <<<<<<<<<<<<<< * break * our_values = (key + self.key_size) */ __pyx_t_2 = ((!(__pyx_v_key != 0)) != 0); if (__pyx_t_2) { /* "borg/hashindex.pyx":404 * key = hashindex_next_key(self.index, key) * if not key: * break # <<<<<<<<<<<<<< * our_values = (key + self.key_size) * master_values = hashindex_get(master, key) */ goto __pyx_L4_break; /* "borg/hashindex.pyx":403 * while True: * key = hashindex_next_key(self.index, key) * if not key: # <<<<<<<<<<<<<< * break * our_values = (key + self.key_size) */ } /* "borg/hashindex.pyx":405 * if not key: * break * our_values = (key + self.key_size) # <<<<<<<<<<<<<< * master_values = hashindex_get(master, key) * if not master_values: */ __pyx_v_our_values = ((uint32_t const *)(__pyx_v_key + __pyx_v_self->__pyx_base.key_size)); /* "borg/hashindex.pyx":406 * break * our_values = (key + self.key_size) * master_values = hashindex_get(master, key) # <<<<<<<<<<<<<< * if not master_values: * raise ValueError('stats_against: key contained in self but not in master_index.') */ __pyx_v_master_values = ((uint32_t const *)hashindex_get(__pyx_v_master, __pyx_v_key)); /* "borg/hashindex.pyx":407 * our_values = (key + self.key_size) * master_values = hashindex_get(master, key) * if not master_values: # <<<<<<<<<<<<<< * raise ValueError('stats_against: key contained in self but not in master_index.') * our_refcount = _le32toh(our_values[0]) */ __pyx_t_2 = ((!(__pyx_v_master_values != 0)) != 0); if (unlikely(__pyx_t_2)) { /* "borg/hashindex.pyx":408 * master_values = hashindex_get(master, key) * if not master_values: * raise ValueError('stats_against: key contained in self but not in master_index.') # <<<<<<<<<<<<<< * our_refcount = _le32toh(our_values[0]) * chunk_size = _le32toh(master_values[1]) */ __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__15, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 408, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(0, 408, __pyx_L1_error) /* "borg/hashindex.pyx":407 * our_values = (key + self.key_size) * master_values = hashindex_get(master, key) * if not master_values: # <<<<<<<<<<<<<< * raise ValueError('stats_against: key contained in self but 
not in master_index.') * our_refcount = _le32toh(our_values[0]) */ } /* "borg/hashindex.pyx":409 * if not master_values: * raise ValueError('stats_against: key contained in self but not in master_index.') * our_refcount = _le32toh(our_values[0]) # <<<<<<<<<<<<<< * chunk_size = _le32toh(master_values[1]) * chunk_csize = _le32toh(master_values[2]) */ __pyx_v_our_refcount = _le32toh((__pyx_v_our_values[0])); /* "borg/hashindex.pyx":410 * raise ValueError('stats_against: key contained in self but not in master_index.') * our_refcount = _le32toh(our_values[0]) * chunk_size = _le32toh(master_values[1]) # <<<<<<<<<<<<<< * chunk_csize = _le32toh(master_values[2]) * */ __pyx_v_chunk_size = _le32toh((__pyx_v_master_values[1])); /* "borg/hashindex.pyx":411 * our_refcount = _le32toh(our_values[0]) * chunk_size = _le32toh(master_values[1]) * chunk_csize = _le32toh(master_values[2]) # <<<<<<<<<<<<<< * * chunks += our_refcount */ __pyx_v_chunk_csize = _le32toh((__pyx_v_master_values[2])); /* "borg/hashindex.pyx":413 * chunk_csize = _le32toh(master_values[2]) * * chunks += our_refcount # <<<<<<<<<<<<<< * size += chunk_size * our_refcount * csize += chunk_csize * our_refcount */ __pyx_v_chunks = (__pyx_v_chunks + __pyx_v_our_refcount); /* "borg/hashindex.pyx":414 * * chunks += our_refcount * size += chunk_size * our_refcount # <<<<<<<<<<<<<< * csize += chunk_csize * our_refcount * if our_values[0] == master_values[0]: */ __pyx_v_size = (__pyx_v_size + (((uint64_t)__pyx_v_chunk_size) * __pyx_v_our_refcount)); /* "borg/hashindex.pyx":415 * chunks += our_refcount * size += chunk_size * our_refcount * csize += chunk_csize * our_refcount # <<<<<<<<<<<<<< * if our_values[0] == master_values[0]: * # our refcount equals the master's refcount, so this chunk is unique to us */ __pyx_v_csize = (__pyx_v_csize + (((uint64_t)__pyx_v_chunk_csize) * __pyx_v_our_refcount)); /* "borg/hashindex.pyx":416 * size += chunk_size * our_refcount * csize += chunk_csize * our_refcount * if our_values[0] == master_values[0]: # <<<<<<<<<<<<<< * # our refcount equals the master's refcount, so this chunk is unique to us * unique_chunks += 1 */ __pyx_t_2 = (((__pyx_v_our_values[0]) == (__pyx_v_master_values[0])) != 0); if (__pyx_t_2) { /* "borg/hashindex.pyx":418 * if our_values[0] == master_values[0]: * # our refcount equals the master's refcount, so this chunk is unique to us * unique_chunks += 1 # <<<<<<<<<<<<<< * unique_size += chunk_size * unique_csize += chunk_csize */ __pyx_v_unique_chunks = (__pyx_v_unique_chunks + 1); /* "borg/hashindex.pyx":419 * # our refcount equals the master's refcount, so this chunk is unique to us * unique_chunks += 1 * unique_size += chunk_size # <<<<<<<<<<<<<< * unique_csize += chunk_csize * */ __pyx_v_unique_size = (__pyx_v_unique_size + __pyx_v_chunk_size); /* "borg/hashindex.pyx":420 * unique_chunks += 1 * unique_size += chunk_size * unique_csize += chunk_csize # <<<<<<<<<<<<<< * * return size, csize, unique_size, unique_csize, unique_chunks, chunks */ __pyx_v_unique_csize = (__pyx_v_unique_csize + __pyx_v_chunk_csize); /* "borg/hashindex.pyx":416 * size += chunk_size * our_refcount * csize += chunk_csize * our_refcount * if our_values[0] == master_values[0]: # <<<<<<<<<<<<<< * # our refcount equals the master's refcount, so this chunk is unique to us * unique_chunks += 1 */ } } __pyx_L4_break:; /* "borg/hashindex.pyx":422 * unique_csize += chunk_csize * * return size, csize, unique_size, unique_csize, unique_chunks, chunks # <<<<<<<<<<<<<< * * def add(self, key, refs, size, csize): */ 
__Pyx_XDECREF(__pyx_r); __pyx_t_3 = __Pyx_PyInt_From_uint64_t(__pyx_v_size); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 422, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = __Pyx_PyInt_From_uint64_t(__pyx_v_csize); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 422, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_5 = __Pyx_PyInt_From_uint64_t(__pyx_v_unique_size); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 422, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_6 = __Pyx_PyInt_From_uint64_t(__pyx_v_unique_csize); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 422, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __pyx_t_7 = __Pyx_PyInt_From_uint64_t(__pyx_v_unique_chunks); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 422, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __pyx_t_8 = __Pyx_PyInt_From_uint64_t(__pyx_v_chunks); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 422, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_8); __pyx_t_9 = PyTuple_New(6); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 422, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_9, 0, __pyx_t_3); __Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_9, 1, __pyx_t_4); __Pyx_GIVEREF(__pyx_t_5); PyTuple_SET_ITEM(__pyx_t_9, 2, __pyx_t_5); __Pyx_GIVEREF(__pyx_t_6); PyTuple_SET_ITEM(__pyx_t_9, 3, __pyx_t_6); __Pyx_GIVEREF(__pyx_t_7); PyTuple_SET_ITEM(__pyx_t_9, 4, __pyx_t_7); __Pyx_GIVEREF(__pyx_t_8); PyTuple_SET_ITEM(__pyx_t_9, 5, __pyx_t_8); __pyx_t_3 = 0; __pyx_t_4 = 0; __pyx_t_5 = 0; __pyx_t_6 = 0; __pyx_t_7 = 0; __pyx_t_8 = 0; __pyx_r = __pyx_t_9; __pyx_t_9 = 0; goto __pyx_L0; /* "borg/hashindex.pyx":382 * return size, csize, unique_size, unique_csize, unique_chunks, chunks * * def stats_against(self, ChunkIndex master_index): # <<<<<<<<<<<<<< * """ * Calculate chunk statistics of this index against *master_index*. 
*/ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_XDECREF(__pyx_t_6); __Pyx_XDECREF(__pyx_t_7); __Pyx_XDECREF(__pyx_t_8); __Pyx_XDECREF(__pyx_t_9); __Pyx_AddTraceback("borg.hashindex.ChunkIndex.stats_against", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "borg/hashindex.pyx":424 * return size, csize, unique_size, unique_csize, unique_chunks, chunks * * def add(self, key, refs, size, csize): # <<<<<<<<<<<<<< * assert len(key) == self.key_size * cdef uint32_t[3] data */ /* Python wrapper */ static PyObject *__pyx_pw_4borg_9hashindex_10ChunkIndex_17add(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static PyObject *__pyx_pw_4borg_9hashindex_10ChunkIndex_17add(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyObject *__pyx_v_key = 0; PyObject *__pyx_v_refs = 0; PyObject *__pyx_v_size = 0; PyObject *__pyx_v_csize = 0; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("add (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_key,&__pyx_n_s_refs,&__pyx_n_s_size,&__pyx_n_s_csize,0}; PyObject* values[4] = {0,0,0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); CYTHON_FALLTHROUGH; case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); CYTHON_FALLTHROUGH; case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); CYTHON_FALLTHROUGH; case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_key)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_refs)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("add", 1, 4, 4, 1); __PYX_ERR(0, 424, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 2: if (likely((values[2] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_size)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("add", 1, 4, 4, 2); __PYX_ERR(0, 424, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 3: if (likely((values[3] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_csize)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("add", 1, 4, 4, 3); __PYX_ERR(0, 424, __pyx_L3_error) } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "add") < 0)) __PYX_ERR(0, 424, __pyx_L3_error) } } else if (PyTuple_GET_SIZE(__pyx_args) != 4) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[2] = PyTuple_GET_ITEM(__pyx_args, 2); values[3] = PyTuple_GET_ITEM(__pyx_args, 3); } __pyx_v_key = values[0]; __pyx_v_refs = values[1]; __pyx_v_size = values[2]; __pyx_v_csize = values[3]; } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("add", 1, 4, 4, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 424, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("borg.hashindex.ChunkIndex.add", __pyx_clineno, __pyx_lineno, __pyx_filename); 
__Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_4borg_9hashindex_10ChunkIndex_16add(((struct __pyx_obj_4borg_9hashindex_ChunkIndex *)__pyx_v_self), __pyx_v_key, __pyx_v_refs, __pyx_v_size, __pyx_v_csize); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4borg_9hashindex_10ChunkIndex_16add(struct __pyx_obj_4borg_9hashindex_ChunkIndex *__pyx_v_self, PyObject *__pyx_v_key, PyObject *__pyx_v_refs, PyObject *__pyx_v_size, PyObject *__pyx_v_csize) { uint32_t __pyx_v_data[3]; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations Py_ssize_t __pyx_t_1; uint32_t __pyx_t_2; char *__pyx_t_3; PyObject *__pyx_t_4 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("add", 0); /* "borg/hashindex.pyx":425 * * def add(self, key, refs, size, csize): * assert len(key) == self.key_size # <<<<<<<<<<<<<< * cdef uint32_t[3] data * data[0] = _htole32(refs) */ #ifndef CYTHON_WITHOUT_ASSERTIONS if (unlikely(!Py_OptimizeFlag)) { __pyx_t_1 = PyObject_Length(__pyx_v_key); if (unlikely(__pyx_t_1 == ((Py_ssize_t)-1))) __PYX_ERR(0, 425, __pyx_L1_error) if (unlikely(!((__pyx_t_1 == __pyx_v_self->__pyx_base.key_size) != 0))) { PyErr_SetNone(PyExc_AssertionError); __PYX_ERR(0, 425, __pyx_L1_error) } } #endif /* "borg/hashindex.pyx":427 * assert len(key) == self.key_size * cdef uint32_t[3] data * data[0] = _htole32(refs) # <<<<<<<<<<<<<< * data[1] = _htole32(size) * data[2] = _htole32(csize) */ __pyx_t_2 = __Pyx_PyInt_As_uint32_t(__pyx_v_refs); if (unlikely((__pyx_t_2 == ((uint32_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 427, __pyx_L1_error) (__pyx_v_data[0]) = _htole32(__pyx_t_2); /* "borg/hashindex.pyx":428 * cdef uint32_t[3] data * data[0] = _htole32(refs) * data[1] = _htole32(size) # <<<<<<<<<<<<<< * data[2] = _htole32(csize) * self._add( key, data) */ __pyx_t_2 = __Pyx_PyInt_As_uint32_t(__pyx_v_size); if (unlikely((__pyx_t_2 == ((uint32_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 428, __pyx_L1_error) (__pyx_v_data[1]) = _htole32(__pyx_t_2); /* "borg/hashindex.pyx":429 * data[0] = _htole32(refs) * data[1] = _htole32(size) * data[2] = _htole32(csize) # <<<<<<<<<<<<<< * self._add( key, data) * */ __pyx_t_2 = __Pyx_PyInt_As_uint32_t(__pyx_v_csize); if (unlikely((__pyx_t_2 == ((uint32_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 429, __pyx_L1_error) (__pyx_v_data[2]) = _htole32(__pyx_t_2); /* "borg/hashindex.pyx":430 * data[1] = _htole32(size) * data[2] = _htole32(csize) * self._add( key, data) # <<<<<<<<<<<<<< * * cdef _add(self, void *key, uint32_t *data): */ __pyx_t_3 = __Pyx_PyObject_AsWritableString(__pyx_v_key); if (unlikely((!__pyx_t_3) && PyErr_Occurred())) __PYX_ERR(0, 430, __pyx_L1_error) __pyx_t_4 = ((struct __pyx_vtabstruct_4borg_9hashindex_ChunkIndex *)__pyx_v_self->__pyx_vtab)->_add(__pyx_v_self, ((char *)__pyx_t_3), __pyx_v_data); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 430, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; /* "borg/hashindex.pyx":424 * return size, csize, unique_size, unique_csize, unique_chunks, chunks * * def add(self, key, refs, size, csize): # <<<<<<<<<<<<<< * assert len(key) == self.key_size * cdef uint32_t[3] data */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_4); __Pyx_AddTraceback("borg.hashindex.ChunkIndex.add", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); 
__Pyx_RefNannyFinishContext(); return __pyx_r; } /* "borg/hashindex.pyx":432 * self._add( key, data) * * cdef _add(self, void *key, uint32_t *data): # <<<<<<<<<<<<<< * cdef uint64_t refcount1, refcount2, result64 * values = hashindex_get(self.index, key) */ static PyObject *__pyx_f_4borg_9hashindex_10ChunkIndex__add(struct __pyx_obj_4borg_9hashindex_ChunkIndex *__pyx_v_self, void *__pyx_v_key, uint32_t *__pyx_v_data) { uint64_t __pyx_v_refcount1; uint64_t __pyx_v_refcount2; uint64_t __pyx_v_result64; uint32_t *__pyx_v_values; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; uint32_t __pyx_t_2; uint64_t __pyx_t_3; uint64_t __pyx_t_4; PyObject *__pyx_t_5 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("_add", 0); /* "borg/hashindex.pyx":434 * cdef _add(self, void *key, uint32_t *data): * cdef uint64_t refcount1, refcount2, result64 * values = hashindex_get(self.index, key) # <<<<<<<<<<<<<< * if values: * refcount1 = _le32toh(values[0]) */ __pyx_v_values = ((uint32_t *)hashindex_get(__pyx_v_self->__pyx_base.index, __pyx_v_key)); /* "borg/hashindex.pyx":435 * cdef uint64_t refcount1, refcount2, result64 * values = hashindex_get(self.index, key) * if values: # <<<<<<<<<<<<<< * refcount1 = _le32toh(values[0]) * refcount2 = _le32toh(data[0]) */ __pyx_t_1 = (__pyx_v_values != 0); if (__pyx_t_1) { /* "borg/hashindex.pyx":436 * values = hashindex_get(self.index, key) * if values: * refcount1 = _le32toh(values[0]) # <<<<<<<<<<<<<< * refcount2 = _le32toh(data[0]) * assert refcount1 <= _MAX_VALUE, "invalid reference count" */ __pyx_v_refcount1 = _le32toh((__pyx_v_values[0])); /* "borg/hashindex.pyx":437 * if values: * refcount1 = _le32toh(values[0]) * refcount2 = _le32toh(data[0]) # <<<<<<<<<<<<<< * assert refcount1 <= _MAX_VALUE, "invalid reference count" * assert refcount2 <= _MAX_VALUE, "invalid reference count" */ __pyx_v_refcount2 = _le32toh((__pyx_v_data[0])); /* "borg/hashindex.pyx":438 * refcount1 = _le32toh(values[0]) * refcount2 = _le32toh(data[0]) * assert refcount1 <= _MAX_VALUE, "invalid reference count" # <<<<<<<<<<<<<< * assert refcount2 <= _MAX_VALUE, "invalid reference count" * result64 = refcount1 + refcount2 */ #ifndef CYTHON_WITHOUT_ASSERTIONS if (unlikely(!Py_OptimizeFlag)) { if (unlikely(!((__pyx_v_refcount1 <= _MAX_VALUE) != 0))) { PyErr_SetObject(PyExc_AssertionError, __pyx_kp_u_invalid_reference_count); __PYX_ERR(0, 438, __pyx_L1_error) } } #endif /* "borg/hashindex.pyx":439 * refcount2 = _le32toh(data[0]) * assert refcount1 <= _MAX_VALUE, "invalid reference count" * assert refcount2 <= _MAX_VALUE, "invalid reference count" # <<<<<<<<<<<<<< * result64 = refcount1 + refcount2 * values[0] = _htole32(min(result64, _MAX_VALUE)) */ #ifndef CYTHON_WITHOUT_ASSERTIONS if (unlikely(!Py_OptimizeFlag)) { if (unlikely(!((__pyx_v_refcount2 <= _MAX_VALUE) != 0))) { PyErr_SetObject(PyExc_AssertionError, __pyx_kp_u_invalid_reference_count); __PYX_ERR(0, 439, __pyx_L1_error) } } #endif /* "borg/hashindex.pyx":440 * assert refcount1 <= _MAX_VALUE, "invalid reference count" * assert refcount2 <= _MAX_VALUE, "invalid reference count" * result64 = refcount1 + refcount2 # <<<<<<<<<<<<<< * values[0] = _htole32(min(result64, _MAX_VALUE)) * values[1] = data[1] */ __pyx_v_result64 = (__pyx_v_refcount1 + __pyx_v_refcount2); /* "borg/hashindex.pyx":441 * assert refcount2 <= _MAX_VALUE, "invalid reference count" * result64 = refcount1 + refcount2 * values[0] = _htole32(min(result64, _MAX_VALUE)) # <<<<<<<<<<<<<< * 
values[1] = data[1] * values[2] = data[2] */ __pyx_t_2 = _MAX_VALUE; __pyx_t_3 = __pyx_v_result64; if (((__pyx_t_2 < __pyx_t_3) != 0)) { __pyx_t_4 = __pyx_t_2; } else { __pyx_t_4 = __pyx_t_3; } (__pyx_v_values[0]) = _htole32(__pyx_t_4); /* "borg/hashindex.pyx":442 * result64 = refcount1 + refcount2 * values[0] = _htole32(min(result64, _MAX_VALUE)) * values[1] = data[1] # <<<<<<<<<<<<<< * values[2] = data[2] * else: */ (__pyx_v_values[1]) = (__pyx_v_data[1]); /* "borg/hashindex.pyx":443 * values[0] = _htole32(min(result64, _MAX_VALUE)) * values[1] = data[1] * values[2] = data[2] # <<<<<<<<<<<<<< * else: * if not hashindex_set(self.index, key, data): */ (__pyx_v_values[2]) = (__pyx_v_data[2]); /* "borg/hashindex.pyx":435 * cdef uint64_t refcount1, refcount2, result64 * values = hashindex_get(self.index, key) * if values: # <<<<<<<<<<<<<< * refcount1 = _le32toh(values[0]) * refcount2 = _le32toh(data[0]) */ goto __pyx_L3; } /* "borg/hashindex.pyx":445 * values[2] = data[2] * else: * if not hashindex_set(self.index, key, data): # <<<<<<<<<<<<<< * raise Exception('hashindex_set failed') * */ /*else*/ { __pyx_t_1 = ((!(hashindex_set(__pyx_v_self->__pyx_base.index, __pyx_v_key, __pyx_v_data) != 0)) != 0); if (unlikely(__pyx_t_1)) { /* "borg/hashindex.pyx":446 * else: * if not hashindex_set(self.index, key, data): * raise Exception('hashindex_set failed') # <<<<<<<<<<<<<< * * def merge(self, ChunkIndex other): */ __pyx_t_5 = __Pyx_PyObject_Call(((PyObject *)(&((PyTypeObject*)PyExc_Exception)[0])), __pyx_tuple__8, NULL); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 446, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_Raise(__pyx_t_5, 0, 0, 0); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __PYX_ERR(0, 446, __pyx_L1_error) /* "borg/hashindex.pyx":445 * values[2] = data[2] * else: * if not hashindex_set(self.index, key, data): # <<<<<<<<<<<<<< * raise Exception('hashindex_set failed') * */ } } __pyx_L3:; /* "borg/hashindex.pyx":432 * self._add( key, data) * * cdef _add(self, void *key, uint32_t *data): # <<<<<<<<<<<<<< * cdef uint64_t refcount1, refcount2, result64 * values = hashindex_get(self.index, key) */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("borg.hashindex.ChunkIndex._add", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "borg/hashindex.pyx":448 * raise Exception('hashindex_set failed') * * def merge(self, ChunkIndex other): # <<<<<<<<<<<<<< * cdef void *key = NULL * */ /* Python wrapper */ static PyObject *__pyx_pw_4borg_9hashindex_10ChunkIndex_19merge(PyObject *__pyx_v_self, PyObject *__pyx_v_other); /*proto*/ static PyObject *__pyx_pw_4borg_9hashindex_10ChunkIndex_19merge(PyObject *__pyx_v_self, PyObject *__pyx_v_other) { int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("merge (wrapper)", 0); if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_other), __pyx_ptype_4borg_9hashindex_ChunkIndex, 1, "other", 0))) __PYX_ERR(0, 448, __pyx_L1_error) __pyx_r = __pyx_pf_4borg_9hashindex_10ChunkIndex_18merge(((struct __pyx_obj_4borg_9hashindex_ChunkIndex *)__pyx_v_self), ((struct __pyx_obj_4borg_9hashindex_ChunkIndex *)__pyx_v_other)); /* function exit code */ goto __pyx_L0; __pyx_L1_error:; __pyx_r = NULL; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject 
*__pyx_pf_4borg_9hashindex_10ChunkIndex_18merge(struct __pyx_obj_4borg_9hashindex_ChunkIndex *__pyx_v_self, struct __pyx_obj_4borg_9hashindex_ChunkIndex *__pyx_v_other) { void *__pyx_v_key; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; PyObject *__pyx_t_2 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("merge", 0); /* "borg/hashindex.pyx":449 * * def merge(self, ChunkIndex other): * cdef void *key = NULL # <<<<<<<<<<<<<< * * while True: */ __pyx_v_key = NULL; /* "borg/hashindex.pyx":451 * cdef void *key = NULL * * while True: # <<<<<<<<<<<<<< * key = hashindex_next_key(other.index, key) * if not key: */ while (1) { /* "borg/hashindex.pyx":452 * * while True: * key = hashindex_next_key(other.index, key) # <<<<<<<<<<<<<< * if not key: * break */ __pyx_v_key = hashindex_next_key(__pyx_v_other->__pyx_base.index, __pyx_v_key); /* "borg/hashindex.pyx":453 * while True: * key = hashindex_next_key(other.index, key) * if not key: # <<<<<<<<<<<<<< * break * self._add(key, (key + self.key_size)) */ __pyx_t_1 = ((!(__pyx_v_key != 0)) != 0); if (__pyx_t_1) { /* "borg/hashindex.pyx":454 * key = hashindex_next_key(other.index, key) * if not key: * break # <<<<<<<<<<<<<< * self._add(key, (key + self.key_size)) * */ goto __pyx_L4_break; /* "borg/hashindex.pyx":453 * while True: * key = hashindex_next_key(other.index, key) * if not key: # <<<<<<<<<<<<<< * break * self._add(key, (key + self.key_size)) */ } /* "borg/hashindex.pyx":455 * if not key: * break * self._add(key, (key + self.key_size)) # <<<<<<<<<<<<<< * * def zero_csize_ids(self): */ __pyx_t_2 = ((struct __pyx_vtabstruct_4borg_9hashindex_ChunkIndex *)__pyx_v_self->__pyx_vtab)->_add(__pyx_v_self, __pyx_v_key, ((uint32_t *)(__pyx_v_key + __pyx_v_self->__pyx_base.key_size))); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 455, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; } __pyx_L4_break:; /* "borg/hashindex.pyx":448 * raise Exception('hashindex_set failed') * * def merge(self, ChunkIndex other): # <<<<<<<<<<<<<< * cdef void *key = NULL * */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_AddTraceback("borg.hashindex.ChunkIndex.merge", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "borg/hashindex.pyx":457 * self._add(key, (key + self.key_size)) * * def zero_csize_ids(self): # <<<<<<<<<<<<<< * cdef void *key = NULL * cdef uint32_t *values */ /* Python wrapper */ static PyObject *__pyx_pw_4borg_9hashindex_10ChunkIndex_21zero_csize_ids(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ static PyObject *__pyx_pw_4borg_9hashindex_10ChunkIndex_21zero_csize_ids(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("zero_csize_ids (wrapper)", 0); __pyx_r = __pyx_pf_4borg_9hashindex_10ChunkIndex_20zero_csize_ids(((struct __pyx_obj_4borg_9hashindex_ChunkIndex *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4borg_9hashindex_10ChunkIndex_20zero_csize_ids(struct __pyx_obj_4borg_9hashindex_ChunkIndex *__pyx_v_self) { void *__pyx_v_key; uint32_t *__pyx_v_values; PyObject *__pyx_v_entries = NULL; uint32_t __pyx_v_refcount; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; 
int __pyx_t_2; int __pyx_t_3; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("zero_csize_ids", 0); /* "borg/hashindex.pyx":458 * * def zero_csize_ids(self): * cdef void *key = NULL # <<<<<<<<<<<<<< * cdef uint32_t *values * entries = [] */ __pyx_v_key = NULL; /* "borg/hashindex.pyx":460 * cdef void *key = NULL * cdef uint32_t *values * entries = [] # <<<<<<<<<<<<<< * while True: * key = hashindex_next_key(self.index, key) */ __pyx_t_1 = PyList_New(0); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 460, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_v_entries = ((PyObject*)__pyx_t_1); __pyx_t_1 = 0; /* "borg/hashindex.pyx":461 * cdef uint32_t *values * entries = [] * while True: # <<<<<<<<<<<<<< * key = hashindex_next_key(self.index, key) * if not key: */ while (1) { /* "borg/hashindex.pyx":462 * entries = [] * while True: * key = hashindex_next_key(self.index, key) # <<<<<<<<<<<<<< * if not key: * break */ __pyx_v_key = hashindex_next_key(__pyx_v_self->__pyx_base.index, __pyx_v_key); /* "borg/hashindex.pyx":463 * while True: * key = hashindex_next_key(self.index, key) * if not key: # <<<<<<<<<<<<<< * break * values = (key + self.key_size) */ __pyx_t_2 = ((!(__pyx_v_key != 0)) != 0); if (__pyx_t_2) { /* "borg/hashindex.pyx":464 * key = hashindex_next_key(self.index, key) * if not key: * break # <<<<<<<<<<<<<< * values = (key + self.key_size) * refcount = _le32toh(values[0]) */ goto __pyx_L4_break; /* "borg/hashindex.pyx":463 * while True: * key = hashindex_next_key(self.index, key) * if not key: # <<<<<<<<<<<<<< * break * values = (key + self.key_size) */ } /* "borg/hashindex.pyx":465 * if not key: * break * values = (key + self.key_size) # <<<<<<<<<<<<<< * refcount = _le32toh(values[0]) * assert refcount <= _MAX_VALUE, "invalid reference count" */ __pyx_v_values = ((uint32_t *)(__pyx_v_key + __pyx_v_self->__pyx_base.key_size)); /* "borg/hashindex.pyx":466 * break * values = (key + self.key_size) * refcount = _le32toh(values[0]) # <<<<<<<<<<<<<< * assert refcount <= _MAX_VALUE, "invalid reference count" * if _le32toh(values[2]) == 0: */ __pyx_v_refcount = _le32toh((__pyx_v_values[0])); /* "borg/hashindex.pyx":467 * values = (key + self.key_size) * refcount = _le32toh(values[0]) * assert refcount <= _MAX_VALUE, "invalid reference count" # <<<<<<<<<<<<<< * if _le32toh(values[2]) == 0: * # csize == 0 */ #ifndef CYTHON_WITHOUT_ASSERTIONS if (unlikely(!Py_OptimizeFlag)) { if (unlikely(!((__pyx_v_refcount <= _MAX_VALUE) != 0))) { PyErr_SetObject(PyExc_AssertionError, __pyx_kp_u_invalid_reference_count); __PYX_ERR(0, 467, __pyx_L1_error) } } #endif /* "borg/hashindex.pyx":468 * refcount = _le32toh(values[0]) * assert refcount <= _MAX_VALUE, "invalid reference count" * if _le32toh(values[2]) == 0: # <<<<<<<<<<<<<< * # csize == 0 * entries.append(PyBytes_FromStringAndSize( key, self.key_size)) */ __pyx_t_2 = ((_le32toh((__pyx_v_values[2])) == 0) != 0); if (__pyx_t_2) { /* "borg/hashindex.pyx":470 * if _le32toh(values[2]) == 0: * # csize == 0 * entries.append(PyBytes_FromStringAndSize( key, self.key_size)) # <<<<<<<<<<<<<< * return entries * */ __pyx_t_1 = PyBytes_FromStringAndSize(((char *)__pyx_v_key), __pyx_v_self->__pyx_base.key_size); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 470, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_3 = __Pyx_PyList_Append(__pyx_v_entries, __pyx_t_1); if (unlikely(__pyx_t_3 == ((int)-1))) __PYX_ERR(0, 470, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "borg/hashindex.pyx":468 * refcount = 
_le32toh(values[0]) * assert refcount <= _MAX_VALUE, "invalid reference count" * if _le32toh(values[2]) == 0: # <<<<<<<<<<<<<< * # csize == 0 * entries.append(PyBytes_FromStringAndSize( key, self.key_size)) */ } } __pyx_L4_break:; /* "borg/hashindex.pyx":471 * # csize == 0 * entries.append(PyBytes_FromStringAndSize( key, self.key_size)) * return entries # <<<<<<<<<<<<<< * * */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(__pyx_v_entries); __pyx_r = __pyx_v_entries; goto __pyx_L0; /* "borg/hashindex.pyx":457 * self._add(key, (key + self.key_size)) * * def zero_csize_ids(self): # <<<<<<<<<<<<<< * cdef void *key = NULL * cdef uint32_t *values */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("borg.hashindex.ChunkIndex.zero_csize_ids", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_entries); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "(tree fragment)":1 * def __reduce_cython__(self): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): */ /* Python wrapper */ static PyObject *__pyx_pw_4borg_9hashindex_10ChunkIndex_23__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ static PyObject *__pyx_pw_4borg_9hashindex_10ChunkIndex_23__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0); __pyx_r = __pyx_pf_4borg_9hashindex_10ChunkIndex_22__reduce_cython__(((struct __pyx_obj_4borg_9hashindex_ChunkIndex *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4borg_9hashindex_10ChunkIndex_22__reduce_cython__(CYTHON_UNUSED struct __pyx_obj_4borg_9hashindex_ChunkIndex *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__reduce_cython__", 0); /* "(tree fragment)":2 * def __reduce_cython__(self): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< * def __setstate_cython__(self, __pyx_state): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") */ __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__16, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 2, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_Raise(__pyx_t_1, 0, 0, 0); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __PYX_ERR(1, 2, __pyx_L1_error) /* "(tree fragment)":1 * def __reduce_cython__(self): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("borg.hashindex.ChunkIndex.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "(tree fragment)":3 * def __reduce_cython__(self): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ due to non-trivial __cinit__") */ /* Python wrapper */ static PyObject *__pyx_pw_4borg_9hashindex_10ChunkIndex_25__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state); /*proto*/ 
static PyObject *__pyx_pw_4borg_9hashindex_10ChunkIndex_25__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0); __pyx_r = __pyx_pf_4borg_9hashindex_10ChunkIndex_24__setstate_cython__(((struct __pyx_obj_4borg_9hashindex_ChunkIndex *)__pyx_v_self), ((PyObject *)__pyx_v___pyx_state)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4borg_9hashindex_10ChunkIndex_24__setstate_cython__(CYTHON_UNUSED struct __pyx_obj_4borg_9hashindex_ChunkIndex *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__setstate_cython__", 0); /* "(tree fragment)":4 * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< */ __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__17, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 4, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_Raise(__pyx_t_1, 0, 0, 0); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __PYX_ERR(1, 4, __pyx_L1_error) /* "(tree fragment)":3 * def __reduce_cython__(self): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ due to non-trivial __cinit__") */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("borg.hashindex.ChunkIndex.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "borg/hashindex.pyx":481 * cdef int exhausted * * def __cinit__(self, key_size): # <<<<<<<<<<<<<< * self.key = NULL * self.key_size = key_size */ /* Python wrapper */ static int __pyx_pw_4borg_9hashindex_16ChunkKeyIterator_1__cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static int __pyx_pw_4borg_9hashindex_16ChunkKeyIterator_1__cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyObject *__pyx_v_key_size = 0; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__cinit__ (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_key_size_2,0}; PyObject* values[1] = {0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_key_size_2)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__cinit__") < 0)) __PYX_ERR(0, 481, __pyx_L3_error) } } else if (PyTuple_GET_SIZE(__pyx_args) != 1) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); } __pyx_v_key_size = values[0]; } goto __pyx_L4_argument_unpacking_done; 
__pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("__cinit__", 1, 1, 1, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 481, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("borg.hashindex.ChunkKeyIterator.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return -1; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_4borg_9hashindex_16ChunkKeyIterator___cinit__(((struct __pyx_obj_4borg_9hashindex_ChunkKeyIterator *)__pyx_v_self), __pyx_v_key_size); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static int __pyx_pf_4borg_9hashindex_16ChunkKeyIterator___cinit__(struct __pyx_obj_4borg_9hashindex_ChunkKeyIterator *__pyx_v_self, PyObject *__pyx_v_key_size) { int __pyx_r; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__cinit__", 0); /* "borg/hashindex.pyx":482 * * def __cinit__(self, key_size): * self.key = NULL # <<<<<<<<<<<<<< * self.key_size = key_size * self.exhausted = 0 */ __pyx_v_self->key = NULL; /* "borg/hashindex.pyx":483 * def __cinit__(self, key_size): * self.key = NULL * self.key_size = key_size # <<<<<<<<<<<<<< * self.exhausted = 0 * */ __pyx_t_1 = __Pyx_PyInt_As_int(__pyx_v_key_size); if (unlikely((__pyx_t_1 == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 483, __pyx_L1_error) __pyx_v_self->key_size = __pyx_t_1; /* "borg/hashindex.pyx":484 * self.key = NULL * self.key_size = key_size * self.exhausted = 0 # <<<<<<<<<<<<<< * * def __iter__(self): */ __pyx_v_self->exhausted = 0; /* "borg/hashindex.pyx":481 * cdef int exhausted * * def __cinit__(self, key_size): # <<<<<<<<<<<<<< * self.key = NULL * self.key_size = key_size */ /* function exit code */ __pyx_r = 0; goto __pyx_L0; __pyx_L1_error:; __Pyx_AddTraceback("borg.hashindex.ChunkKeyIterator.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "borg/hashindex.pyx":486 * self.exhausted = 0 * * def __iter__(self): # <<<<<<<<<<<<<< * return self * */ /* Python wrapper */ static PyObject *__pyx_pw_4borg_9hashindex_16ChunkKeyIterator_3__iter__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_4borg_9hashindex_16ChunkKeyIterator_3__iter__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__iter__ (wrapper)", 0); __pyx_r = __pyx_pf_4borg_9hashindex_16ChunkKeyIterator_2__iter__(((struct __pyx_obj_4borg_9hashindex_ChunkKeyIterator *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4borg_9hashindex_16ChunkKeyIterator_2__iter__(struct __pyx_obj_4borg_9hashindex_ChunkKeyIterator *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__iter__", 0); /* "borg/hashindex.pyx":487 * * def __iter__(self): * return self # <<<<<<<<<<<<<< * * def __next__(self): */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(((PyObject *)__pyx_v_self)); __pyx_r = ((PyObject *)__pyx_v_self); goto __pyx_L0; /* "borg/hashindex.pyx":486 * self.exhausted = 0 * * def __iter__(self): # <<<<<<<<<<<<<< * return self * */ /* function exit code */ __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "borg/hashindex.pyx":489 * return self * * def __next__(self): # <<<<<<<<<<<<<< * if self.exhausted: * raise StopIteration */ /* Python wrapper */ static PyObject *__pyx_pw_4borg_9hashindex_16ChunkKeyIterator_5__next__(PyObject 
*__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_4borg_9hashindex_16ChunkKeyIterator_5__next__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__next__ (wrapper)", 0); __pyx_r = __pyx_pf_4borg_9hashindex_16ChunkKeyIterator_4__next__(((struct __pyx_obj_4borg_9hashindex_ChunkKeyIterator *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4borg_9hashindex_16ChunkKeyIterator_4__next__(struct __pyx_obj_4borg_9hashindex_ChunkKeyIterator *__pyx_v_self) { uint32_t *__pyx_v_value; uint32_t __pyx_v_refcount; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; PyObject *__pyx_t_6 = NULL; PyObject *__pyx_t_7 = NULL; PyObject *__pyx_t_8 = NULL; int __pyx_t_9; PyObject *__pyx_t_10 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__next__", 0); /* "borg/hashindex.pyx":490 * * def __next__(self): * if self.exhausted: # <<<<<<<<<<<<<< * raise StopIteration * self.key = hashindex_next_key(self.index, self.key) */ __pyx_t_1 = (__pyx_v_self->exhausted != 0); if (unlikely(__pyx_t_1)) { /* "borg/hashindex.pyx":491 * def __next__(self): * if self.exhausted: * raise StopIteration # <<<<<<<<<<<<<< * self.key = hashindex_next_key(self.index, self.key) * if not self.key: */ __Pyx_Raise(__pyx_builtin_StopIteration, 0, 0, 0); __PYX_ERR(0, 491, __pyx_L1_error) /* "borg/hashindex.pyx":490 * * def __next__(self): * if self.exhausted: # <<<<<<<<<<<<<< * raise StopIteration * self.key = hashindex_next_key(self.index, self.key) */ } /* "borg/hashindex.pyx":492 * if self.exhausted: * raise StopIteration * self.key = hashindex_next_key(self.index, self.key) # <<<<<<<<<<<<<< * if not self.key: * self.exhausted = 1 */ __pyx_v_self->key = hashindex_next_key(__pyx_v_self->index, ((char *)__pyx_v_self->key)); /* "borg/hashindex.pyx":493 * raise StopIteration * self.key = hashindex_next_key(self.index, self.key) * if not self.key: # <<<<<<<<<<<<<< * self.exhausted = 1 * raise StopIteration */ __pyx_t_1 = ((!(__pyx_v_self->key != 0)) != 0); if (unlikely(__pyx_t_1)) { /* "borg/hashindex.pyx":494 * self.key = hashindex_next_key(self.index, self.key) * if not self.key: * self.exhausted = 1 # <<<<<<<<<<<<<< * raise StopIteration * cdef uint32_t *value = (self.key + self.key_size) */ __pyx_v_self->exhausted = 1; /* "borg/hashindex.pyx":495 * if not self.key: * self.exhausted = 1 * raise StopIteration # <<<<<<<<<<<<<< * cdef uint32_t *value = (self.key + self.key_size) * cdef uint32_t refcount = _le32toh(value[0]) */ __Pyx_Raise(__pyx_builtin_StopIteration, 0, 0, 0); __PYX_ERR(0, 495, __pyx_L1_error) /* "borg/hashindex.pyx":493 * raise StopIteration * self.key = hashindex_next_key(self.index, self.key) * if not self.key: # <<<<<<<<<<<<<< * self.exhausted = 1 * raise StopIteration */ } /* "borg/hashindex.pyx":496 * self.exhausted = 1 * raise StopIteration * cdef uint32_t *value = (self.key + self.key_size) # <<<<<<<<<<<<<< * cdef uint32_t refcount = _le32toh(value[0]) * assert refcount <= _MAX_VALUE, "invalid reference count" */ __pyx_v_value = ((uint32_t *)(__pyx_v_self->key + __pyx_v_self->key_size)); /* "borg/hashindex.pyx":497 * raise StopIteration * cdef uint32_t *value = (self.key + self.key_size) * cdef uint32_t refcount = _le32toh(value[0]) # <<<<<<<<<<<<<< * assert refcount <= _MAX_VALUE, "invalid reference count" * 
return (self.key)[:self.key_size], ChunkIndexEntry(refcount, _le32toh(value[1]), _le32toh(value[2])) */ __pyx_v_refcount = _le32toh((__pyx_v_value[0])); /* "borg/hashindex.pyx":498 * cdef uint32_t *value = (self.key + self.key_size) * cdef uint32_t refcount = _le32toh(value[0]) * assert refcount <= _MAX_VALUE, "invalid reference count" # <<<<<<<<<<<<<< * return (self.key)[:self.key_size], ChunkIndexEntry(refcount, _le32toh(value[1]), _le32toh(value[2])) * */ #ifndef CYTHON_WITHOUT_ASSERTIONS if (unlikely(!Py_OptimizeFlag)) { if (unlikely(!((__pyx_v_refcount <= _MAX_VALUE) != 0))) { PyErr_SetObject(PyExc_AssertionError, __pyx_kp_u_invalid_reference_count); __PYX_ERR(0, 498, __pyx_L1_error) } } #endif /* "borg/hashindex.pyx":499 * cdef uint32_t refcount = _le32toh(value[0]) * assert refcount <= _MAX_VALUE, "invalid reference count" * return (self.key)[:self.key_size], ChunkIndexEntry(refcount, _le32toh(value[1]), _le32toh(value[2])) # <<<<<<<<<<<<<< * * */ __Pyx_XDECREF(__pyx_r); __pyx_t_2 = __Pyx_PyBytes_FromStringAndSize(((char *)__pyx_v_self->key) + 0, __pyx_v_self->key_size - 0); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 499, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_n_s_ChunkIndexEntry); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 499, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_5 = __Pyx_PyInt_From_uint32_t(__pyx_v_refcount); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 499, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_6 = __Pyx_PyInt_From_uint32_t(_le32toh((__pyx_v_value[1]))); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 499, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __pyx_t_7 = __Pyx_PyInt_From_uint32_t(_le32toh((__pyx_v_value[2]))); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 499, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __pyx_t_8 = NULL; __pyx_t_9 = 0; if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_4))) { __pyx_t_8 = PyMethod_GET_SELF(__pyx_t_4); if (likely(__pyx_t_8)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_4); __Pyx_INCREF(__pyx_t_8); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_4, function); __pyx_t_9 = 1; } } #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_4)) { PyObject *__pyx_temp[4] = {__pyx_t_8, __pyx_t_5, __pyx_t_6, __pyx_t_7}; __pyx_t_3 = __Pyx_PyFunction_FastCall(__pyx_t_4, __pyx_temp+1-__pyx_t_9, 3+__pyx_t_9); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 499, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0; __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; } else #endif #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_4)) { PyObject *__pyx_temp[4] = {__pyx_t_8, __pyx_t_5, __pyx_t_6, __pyx_t_7}; __pyx_t_3 = __Pyx_PyCFunction_FastCall(__pyx_t_4, __pyx_temp+1-__pyx_t_9, 3+__pyx_t_9); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 499, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0; __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; } else #endif { __pyx_t_10 = PyTuple_New(3+__pyx_t_9); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 499, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_10); if (__pyx_t_8) { __Pyx_GIVEREF(__pyx_t_8); PyTuple_SET_ITEM(__pyx_t_10, 0, __pyx_t_8); __pyx_t_8 = NULL; } __Pyx_GIVEREF(__pyx_t_5); PyTuple_SET_ITEM(__pyx_t_10, 0+__pyx_t_9, __pyx_t_5); __Pyx_GIVEREF(__pyx_t_6); PyTuple_SET_ITEM(__pyx_t_10, 1+__pyx_t_9, __pyx_t_6); __Pyx_GIVEREF(__pyx_t_7); PyTuple_SET_ITEM(__pyx_t_10, 2+__pyx_t_9, 
__pyx_t_7); __pyx_t_5 = 0; __pyx_t_6 = 0; __pyx_t_7 = 0; __pyx_t_3 = __Pyx_PyObject_Call(__pyx_t_4, __pyx_t_10, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 499, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; } __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = PyTuple_New(2); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 499, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_2); __Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_4, 1, __pyx_t_3); __pyx_t_2 = 0; __pyx_t_3 = 0; __pyx_r = __pyx_t_4; __pyx_t_4 = 0; goto __pyx_L0; /* "borg/hashindex.pyx":489 * return self * * def __next__(self): # <<<<<<<<<<<<<< * if self.exhausted: * raise StopIteration */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_XDECREF(__pyx_t_6); __Pyx_XDECREF(__pyx_t_7); __Pyx_XDECREF(__pyx_t_8); __Pyx_XDECREF(__pyx_t_10); __Pyx_AddTraceback("borg.hashindex.ChunkKeyIterator.__next__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "(tree fragment)":1 * def __reduce_cython__(self): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): */ /* Python wrapper */ static PyObject *__pyx_pw_4borg_9hashindex_16ChunkKeyIterator_7__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ static PyObject *__pyx_pw_4borg_9hashindex_16ChunkKeyIterator_7__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0); __pyx_r = __pyx_pf_4borg_9hashindex_16ChunkKeyIterator_6__reduce_cython__(((struct __pyx_obj_4borg_9hashindex_ChunkKeyIterator *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4borg_9hashindex_16ChunkKeyIterator_6__reduce_cython__(CYTHON_UNUSED struct __pyx_obj_4borg_9hashindex_ChunkKeyIterator *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__reduce_cython__", 0); /* "(tree fragment)":2 * def __reduce_cython__(self): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< * def __setstate_cython__(self, __pyx_state): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") */ __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__18, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 2, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_Raise(__pyx_t_1, 0, 0, 0); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __PYX_ERR(1, 2, __pyx_L1_error) /* "(tree fragment)":1 * def __reduce_cython__(self): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("borg.hashindex.ChunkKeyIterator.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "(tree fragment)":3 * def __reduce_cython__(self): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def 
__setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ due to non-trivial __cinit__") */ /* Python wrapper */ static PyObject *__pyx_pw_4borg_9hashindex_16ChunkKeyIterator_9__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state); /*proto*/ static PyObject *__pyx_pw_4borg_9hashindex_16ChunkKeyIterator_9__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0); __pyx_r = __pyx_pf_4borg_9hashindex_16ChunkKeyIterator_8__setstate_cython__(((struct __pyx_obj_4borg_9hashindex_ChunkKeyIterator *)__pyx_v_self), ((PyObject *)__pyx_v___pyx_state)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4borg_9hashindex_16ChunkKeyIterator_8__setstate_cython__(CYTHON_UNUSED struct __pyx_obj_4borg_9hashindex_ChunkKeyIterator *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__setstate_cython__", 0); /* "(tree fragment)":4 * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< */ __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__19, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 4, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_Raise(__pyx_t_1, 0, 0, 0); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __PYX_ERR(1, 4, __pyx_L1_error) /* "(tree fragment)":3 * def __reduce_cython__(self): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ due to non-trivial __cinit__") */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("borg.hashindex.ChunkKeyIterator.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "borg/hashindex.pyx":502 * * * cdef Py_buffer ro_buffer(object data) except *: # <<<<<<<<<<<<<< * cdef Py_buffer view * PyObject_GetBuffer(data, &view, PyBUF_SIMPLE) */ static Py_buffer __pyx_f_4borg_9hashindex_ro_buffer(PyObject *__pyx_v_data) { Py_buffer __pyx_v_view; Py_buffer __pyx_r; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("ro_buffer", 0); /* "borg/hashindex.pyx":504 * cdef Py_buffer ro_buffer(object data) except *: * cdef Py_buffer view * PyObject_GetBuffer(data, &view, PyBUF_SIMPLE) # <<<<<<<<<<<<<< * return view * */ __pyx_t_1 = PyObject_GetBuffer(__pyx_v_data, (&__pyx_v_view), PyBUF_SIMPLE); if (unlikely(__pyx_t_1 == ((int)-1))) __PYX_ERR(0, 504, __pyx_L1_error) /* "borg/hashindex.pyx":505 * cdef Py_buffer view * PyObject_GetBuffer(data, &view, PyBUF_SIMPLE) * return view # <<<<<<<<<<<<<< * * */ __pyx_r = __pyx_v_view; goto __pyx_L0; /* "borg/hashindex.pyx":502 * * * cdef Py_buffer ro_buffer(object data) except *: # <<<<<<<<<<<<<< * cdef Py_buffer view * PyObject_GetBuffer(data, &view, PyBUF_SIMPLE) */ /* function exit code */ __pyx_L1_error:; __Pyx_AddTraceback("borg.hashindex.ro_buffer", __pyx_clineno, __pyx_lineno, __pyx_filename); 
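 /* ro_buffer() wraps PyObject_GetBuffer(..., PyBUF_SIMPLE) to obtain a simple, byte-oriented
    view of a buffer-supporting object; the caller owns the returned Py_buffer and is expected
    to release it again with PyBuffer_Release(), as CacheSynchronizer.feed() below does. */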
__Pyx_pretend_to_initialize(&__pyx_r); __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "borg/hashindex.pyx":512 * cdef CacheSyncCtx *sync * * def __cinit__(self, chunks): # <<<<<<<<<<<<<< * self.chunks = chunks * self.sync = cache_sync_init(self.chunks.index) */ /* Python wrapper */ static int __pyx_pw_4borg_9hashindex_17CacheSynchronizer_1__cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static int __pyx_pw_4borg_9hashindex_17CacheSynchronizer_1__cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyObject *__pyx_v_chunks = 0; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__cinit__ (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_chunks,0}; PyObject* values[1] = {0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_chunks)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__cinit__") < 0)) __PYX_ERR(0, 512, __pyx_L3_error) } } else if (PyTuple_GET_SIZE(__pyx_args) != 1) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); } __pyx_v_chunks = values[0]; } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("__cinit__", 1, 1, 1, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 512, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("borg.hashindex.CacheSynchronizer.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return -1; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_4borg_9hashindex_17CacheSynchronizer___cinit__(((struct __pyx_obj_4borg_9hashindex_CacheSynchronizer *)__pyx_v_self), __pyx_v_chunks); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static int __pyx_pf_4borg_9hashindex_17CacheSynchronizer___cinit__(struct __pyx_obj_4borg_9hashindex_CacheSynchronizer *__pyx_v_self, PyObject *__pyx_v_chunks) { int __pyx_r; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_t_2; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__cinit__", 0); /* "borg/hashindex.pyx":513 * * def __cinit__(self, chunks): * self.chunks = chunks # <<<<<<<<<<<<<< * self.sync = cache_sync_init(self.chunks.index) * if not self.sync: */ if (!(likely(((__pyx_v_chunks) == Py_None) || likely(__Pyx_TypeTest(__pyx_v_chunks, __pyx_ptype_4borg_9hashindex_ChunkIndex))))) __PYX_ERR(0, 513, __pyx_L1_error) __pyx_t_1 = __pyx_v_chunks; __Pyx_INCREF(__pyx_t_1); __Pyx_GIVEREF(__pyx_t_1); __Pyx_GOTREF(__pyx_v_self->chunks); __Pyx_DECREF(((PyObject *)__pyx_v_self->chunks)); __pyx_v_self->chunks = ((struct __pyx_obj_4borg_9hashindex_ChunkIndex *)__pyx_t_1); __pyx_t_1 = 0; /* "borg/hashindex.pyx":514 * def __cinit__(self, chunks): * self.chunks = chunks * self.sync = cache_sync_init(self.chunks.index) # <<<<<<<<<<<<<< * if not self.sync: * raise Exception('cache_sync_init failed') */ __pyx_v_self->sync = cache_sync_init(__pyx_v_self->chunks->__pyx_base.index); /* 
"borg/hashindex.pyx":515 * self.chunks = chunks * self.sync = cache_sync_init(self.chunks.index) * if not self.sync: # <<<<<<<<<<<<<< * raise Exception('cache_sync_init failed') * */ __pyx_t_2 = ((!(__pyx_v_self->sync != 0)) != 0); if (unlikely(__pyx_t_2)) { /* "borg/hashindex.pyx":516 * self.sync = cache_sync_init(self.chunks.index) * if not self.sync: * raise Exception('cache_sync_init failed') # <<<<<<<<<<<<<< * * def __dealloc__(self): */ __pyx_t_1 = __Pyx_PyObject_Call(((PyObject *)(&((PyTypeObject*)PyExc_Exception)[0])), __pyx_tuple__20, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 516, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_Raise(__pyx_t_1, 0, 0, 0); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __PYX_ERR(0, 516, __pyx_L1_error) /* "borg/hashindex.pyx":515 * self.chunks = chunks * self.sync = cache_sync_init(self.chunks.index) * if not self.sync: # <<<<<<<<<<<<<< * raise Exception('cache_sync_init failed') * */ } /* "borg/hashindex.pyx":512 * cdef CacheSyncCtx *sync * * def __cinit__(self, chunks): # <<<<<<<<<<<<<< * self.chunks = chunks * self.sync = cache_sync_init(self.chunks.index) */ /* function exit code */ __pyx_r = 0; goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("borg.hashindex.CacheSynchronizer.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "borg/hashindex.pyx":518 * raise Exception('cache_sync_init failed') * * def __dealloc__(self): # <<<<<<<<<<<<<< * if self.sync: * cache_sync_free(self.sync) */ /* Python wrapper */ static void __pyx_pw_4borg_9hashindex_17CacheSynchronizer_3__dealloc__(PyObject *__pyx_v_self); /*proto*/ static void __pyx_pw_4borg_9hashindex_17CacheSynchronizer_3__dealloc__(PyObject *__pyx_v_self) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0); __pyx_pf_4borg_9hashindex_17CacheSynchronizer_2__dealloc__(((struct __pyx_obj_4borg_9hashindex_CacheSynchronizer *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); } static void __pyx_pf_4borg_9hashindex_17CacheSynchronizer_2__dealloc__(struct __pyx_obj_4borg_9hashindex_CacheSynchronizer *__pyx_v_self) { __Pyx_RefNannyDeclarations int __pyx_t_1; __Pyx_RefNannySetupContext("__dealloc__", 0); /* "borg/hashindex.pyx":519 * * def __dealloc__(self): * if self.sync: # <<<<<<<<<<<<<< * cache_sync_free(self.sync) * */ __pyx_t_1 = (__pyx_v_self->sync != 0); if (__pyx_t_1) { /* "borg/hashindex.pyx":520 * def __dealloc__(self): * if self.sync: * cache_sync_free(self.sync) # <<<<<<<<<<<<<< * * def feed(self, chunk): */ cache_sync_free(__pyx_v_self->sync); /* "borg/hashindex.pyx":519 * * def __dealloc__(self): * if self.sync: # <<<<<<<<<<<<<< * cache_sync_free(self.sync) * */ } /* "borg/hashindex.pyx":518 * raise Exception('cache_sync_init failed') * * def __dealloc__(self): # <<<<<<<<<<<<<< * if self.sync: * cache_sync_free(self.sync) */ /* function exit code */ __Pyx_RefNannyFinishContext(); } /* "borg/hashindex.pyx":522 * cache_sync_free(self.sync) * * def feed(self, chunk): # <<<<<<<<<<<<<< * cdef Py_buffer chunk_buf = ro_buffer(chunk) * cdef int rc */ /* Python wrapper */ static PyObject *__pyx_pw_4borg_9hashindex_17CacheSynchronizer_5feed(PyObject *__pyx_v_self, PyObject *__pyx_v_chunk); /*proto*/ static PyObject *__pyx_pw_4borg_9hashindex_17CacheSynchronizer_5feed(PyObject *__pyx_v_self, PyObject *__pyx_v_chunk) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("feed (wrapper)", 0); __pyx_r = 
__pyx_pf_4borg_9hashindex_17CacheSynchronizer_4feed(((struct __pyx_obj_4borg_9hashindex_CacheSynchronizer *)__pyx_v_self), ((PyObject *)__pyx_v_chunk)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4borg_9hashindex_17CacheSynchronizer_4feed(struct __pyx_obj_4borg_9hashindex_CacheSynchronizer *__pyx_v_self, PyObject *__pyx_v_chunk) { Py_buffer __pyx_v_chunk_buf; int __pyx_v_rc; char const *__pyx_v_error; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations Py_buffer __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("feed", 0); /* "borg/hashindex.pyx":523 * * def feed(self, chunk): * cdef Py_buffer chunk_buf = ro_buffer(chunk) # <<<<<<<<<<<<<< * cdef int rc * rc = cache_sync_feed(self.sync, chunk_buf.buf, chunk_buf.len) */ __pyx_t_1 = __pyx_f_4borg_9hashindex_ro_buffer(__pyx_v_chunk); if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 523, __pyx_L1_error) __pyx_v_chunk_buf = __pyx_t_1; /* "borg/hashindex.pyx":525 * cdef Py_buffer chunk_buf = ro_buffer(chunk) * cdef int rc * rc = cache_sync_feed(self.sync, chunk_buf.buf, chunk_buf.len) # <<<<<<<<<<<<<< * PyBuffer_Release(&chunk_buf) * if not rc: */ __pyx_v_rc = cache_sync_feed(__pyx_v_self->sync, __pyx_v_chunk_buf.buf, __pyx_v_chunk_buf.len); /* "borg/hashindex.pyx":526 * cdef int rc * rc = cache_sync_feed(self.sync, chunk_buf.buf, chunk_buf.len) * PyBuffer_Release(&chunk_buf) # <<<<<<<<<<<<<< * if not rc: * error = cache_sync_error(self.sync) */ PyBuffer_Release((&__pyx_v_chunk_buf)); /* "borg/hashindex.pyx":527 * rc = cache_sync_feed(self.sync, chunk_buf.buf, chunk_buf.len) * PyBuffer_Release(&chunk_buf) * if not rc: # <<<<<<<<<<<<<< * error = cache_sync_error(self.sync) * if error != NULL: */ __pyx_t_2 = ((!(__pyx_v_rc != 0)) != 0); if (__pyx_t_2) { /* "borg/hashindex.pyx":528 * PyBuffer_Release(&chunk_buf) * if not rc: * error = cache_sync_error(self.sync) # <<<<<<<<<<<<<< * if error != NULL: * raise ValueError('cache_sync_feed failed: ' + error.decode('ascii')) */ __pyx_v_error = cache_sync_error(__pyx_v_self->sync); /* "borg/hashindex.pyx":529 * if not rc: * error = cache_sync_error(self.sync) * if error != NULL: # <<<<<<<<<<<<<< * raise ValueError('cache_sync_feed failed: ' + error.decode('ascii')) * */ __pyx_t_2 = ((__pyx_v_error != NULL) != 0); if (unlikely(__pyx_t_2)) { /* "borg/hashindex.pyx":530 * error = cache_sync_error(self.sync) * if error != NULL: * raise ValueError('cache_sync_feed failed: ' + error.decode('ascii')) # <<<<<<<<<<<<<< * * @property */ __pyx_t_3 = __Pyx_decode_c_string(__pyx_v_error, 0, strlen(__pyx_v_error), NULL, NULL, PyUnicode_DecodeASCII); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 530, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = __Pyx_PyUnicode_Concat(__pyx_kp_u_cache_sync_feed_failed, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 530, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = __Pyx_PyObject_CallOneArg(__pyx_builtin_ValueError, __pyx_t_4); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 530, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(0, 530, __pyx_L1_error) /* "borg/hashindex.pyx":529 * if not rc: * error = cache_sync_error(self.sync) * if error != NULL: # <<<<<<<<<<<<<< * raise ValueError('cache_sync_feed failed: ' + error.decode('ascii')) * */ } /* 
"borg/hashindex.pyx":527 * rc = cache_sync_feed(self.sync, chunk_buf.buf, chunk_buf.len) * PyBuffer_Release(&chunk_buf) * if not rc: # <<<<<<<<<<<<<< * error = cache_sync_error(self.sync) * if error != NULL: */ } /* "borg/hashindex.pyx":522 * cache_sync_free(self.sync) * * def feed(self, chunk): # <<<<<<<<<<<<<< * cdef Py_buffer chunk_buf = ro_buffer(chunk) * cdef int rc */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_AddTraceback("borg.hashindex.CacheSynchronizer.feed", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "borg/hashindex.pyx":533 * * @property * def num_files_totals(self): # <<<<<<<<<<<<<< * return cache_sync_num_files_totals(self.sync) * */ /* Python wrapper */ static PyObject *__pyx_pw_4borg_9hashindex_17CacheSynchronizer_16num_files_totals_1__get__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_4borg_9hashindex_17CacheSynchronizer_16num_files_totals_1__get__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_r = __pyx_pf_4borg_9hashindex_17CacheSynchronizer_16num_files_totals___get__(((struct __pyx_obj_4borg_9hashindex_CacheSynchronizer *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4borg_9hashindex_17CacheSynchronizer_16num_files_totals___get__(struct __pyx_obj_4borg_9hashindex_CacheSynchronizer *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__get__", 0); /* "borg/hashindex.pyx":534 * @property * def num_files_totals(self): * return cache_sync_num_files_totals(self.sync) # <<<<<<<<<<<<<< * * @property */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_PyInt_From_uint64_t(cache_sync_num_files_totals(__pyx_v_self->sync)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 534, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "borg/hashindex.pyx":533 * * @property * def num_files_totals(self): # <<<<<<<<<<<<<< * return cache_sync_num_files_totals(self.sync) * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("borg.hashindex.CacheSynchronizer.num_files_totals.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "borg/hashindex.pyx":537 * * @property * def num_files_parts(self): # <<<<<<<<<<<<<< * return cache_sync_num_files_parts(self.sync) * */ /* Python wrapper */ static PyObject *__pyx_pw_4borg_9hashindex_17CacheSynchronizer_15num_files_parts_1__get__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_4borg_9hashindex_17CacheSynchronizer_15num_files_parts_1__get__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_r = __pyx_pf_4borg_9hashindex_17CacheSynchronizer_15num_files_parts___get__(((struct __pyx_obj_4borg_9hashindex_CacheSynchronizer *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4borg_9hashindex_17CacheSynchronizer_15num_files_parts___get__(struct __pyx_obj_4borg_9hashindex_CacheSynchronizer 
*__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__get__", 0); /* "borg/hashindex.pyx":538 * @property * def num_files_parts(self): * return cache_sync_num_files_parts(self.sync) # <<<<<<<<<<<<<< * * @property */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_PyInt_From_uint64_t(cache_sync_num_files_parts(__pyx_v_self->sync)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 538, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "borg/hashindex.pyx":537 * * @property * def num_files_parts(self): # <<<<<<<<<<<<<< * return cache_sync_num_files_parts(self.sync) * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("borg.hashindex.CacheSynchronizer.num_files_parts.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "borg/hashindex.pyx":541 * * @property * def size_totals(self): # <<<<<<<<<<<<<< * return cache_sync_size_totals(self.sync) * */ /* Python wrapper */ static PyObject *__pyx_pw_4borg_9hashindex_17CacheSynchronizer_11size_totals_1__get__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_4borg_9hashindex_17CacheSynchronizer_11size_totals_1__get__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_r = __pyx_pf_4borg_9hashindex_17CacheSynchronizer_11size_totals___get__(((struct __pyx_obj_4borg_9hashindex_CacheSynchronizer *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4borg_9hashindex_17CacheSynchronizer_11size_totals___get__(struct __pyx_obj_4borg_9hashindex_CacheSynchronizer *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__get__", 0); /* "borg/hashindex.pyx":542 * @property * def size_totals(self): * return cache_sync_size_totals(self.sync) # <<<<<<<<<<<<<< * * @property */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_PyInt_From_uint64_t(cache_sync_size_totals(__pyx_v_self->sync)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 542, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "borg/hashindex.pyx":541 * * @property * def size_totals(self): # <<<<<<<<<<<<<< * return cache_sync_size_totals(self.sync) * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("borg.hashindex.CacheSynchronizer.size_totals.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "borg/hashindex.pyx":545 * * @property * def size_parts(self): # <<<<<<<<<<<<<< * return cache_sync_size_parts(self.sync) * */ /* Python wrapper */ static PyObject *__pyx_pw_4borg_9hashindex_17CacheSynchronizer_10size_parts_1__get__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_4borg_9hashindex_17CacheSynchronizer_10size_parts_1__get__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_r = __pyx_pf_4borg_9hashindex_17CacheSynchronizer_10size_parts___get__(((struct __pyx_obj_4borg_9hashindex_CacheSynchronizer 
*)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4borg_9hashindex_17CacheSynchronizer_10size_parts___get__(struct __pyx_obj_4borg_9hashindex_CacheSynchronizer *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__get__", 0); /* "borg/hashindex.pyx":546 * @property * def size_parts(self): * return cache_sync_size_parts(self.sync) # <<<<<<<<<<<<<< * * @property */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_PyInt_From_uint64_t(cache_sync_size_parts(__pyx_v_self->sync)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 546, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "borg/hashindex.pyx":545 * * @property * def size_parts(self): # <<<<<<<<<<<<<< * return cache_sync_size_parts(self.sync) * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("borg.hashindex.CacheSynchronizer.size_parts.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "borg/hashindex.pyx":549 * * @property * def csize_totals(self): # <<<<<<<<<<<<<< * return cache_sync_csize_totals(self.sync) * */ /* Python wrapper */ static PyObject *__pyx_pw_4borg_9hashindex_17CacheSynchronizer_12csize_totals_1__get__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_4borg_9hashindex_17CacheSynchronizer_12csize_totals_1__get__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_r = __pyx_pf_4borg_9hashindex_17CacheSynchronizer_12csize_totals___get__(((struct __pyx_obj_4borg_9hashindex_CacheSynchronizer *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4borg_9hashindex_17CacheSynchronizer_12csize_totals___get__(struct __pyx_obj_4borg_9hashindex_CacheSynchronizer *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__get__", 0); /* "borg/hashindex.pyx":550 * @property * def csize_totals(self): * return cache_sync_csize_totals(self.sync) # <<<<<<<<<<<<<< * * @property */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_PyInt_From_uint64_t(cache_sync_csize_totals(__pyx_v_self->sync)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 550, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "borg/hashindex.pyx":549 * * @property * def csize_totals(self): # <<<<<<<<<<<<<< * return cache_sync_csize_totals(self.sync) * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("borg.hashindex.CacheSynchronizer.csize_totals.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "borg/hashindex.pyx":553 * * @property * def csize_parts(self): # <<<<<<<<<<<<<< * return cache_sync_csize_parts(self.sync) */ /* Python wrapper */ static PyObject *__pyx_pw_4borg_9hashindex_17CacheSynchronizer_11csize_parts_1__get__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_4borg_9hashindex_17CacheSynchronizer_11csize_parts_1__get__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; 
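 /* num_files_totals, num_files_parts, size_totals, size_parts, csize_totals and csize_parts are
    read-only properties; each getter converts the uint64_t counter reported by the corresponding
    C-level cache_sync_*() helper into a Python int. */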
__Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_r = __pyx_pf_4borg_9hashindex_17CacheSynchronizer_11csize_parts___get__(((struct __pyx_obj_4borg_9hashindex_CacheSynchronizer *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4borg_9hashindex_17CacheSynchronizer_11csize_parts___get__(struct __pyx_obj_4borg_9hashindex_CacheSynchronizer *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__get__", 0); /* "borg/hashindex.pyx":554 * @property * def csize_parts(self): * return cache_sync_csize_parts(self.sync) # <<<<<<<<<<<<<< */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_PyInt_From_uint64_t(cache_sync_csize_parts(__pyx_v_self->sync)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 554, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "borg/hashindex.pyx":553 * * @property * def csize_parts(self): # <<<<<<<<<<<<<< * return cache_sync_csize_parts(self.sync) */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("borg.hashindex.CacheSynchronizer.csize_parts.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "(tree fragment)":1 * def __reduce_cython__(self): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): */ /* Python wrapper */ static PyObject *__pyx_pw_4borg_9hashindex_17CacheSynchronizer_7__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ static PyObject *__pyx_pw_4borg_9hashindex_17CacheSynchronizer_7__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0); __pyx_r = __pyx_pf_4borg_9hashindex_17CacheSynchronizer_6__reduce_cython__(((struct __pyx_obj_4borg_9hashindex_CacheSynchronizer *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4borg_9hashindex_17CacheSynchronizer_6__reduce_cython__(CYTHON_UNUSED struct __pyx_obj_4borg_9hashindex_CacheSynchronizer *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__reduce_cython__", 0); /* "(tree fragment)":2 * def __reduce_cython__(self): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< * def __setstate_cython__(self, __pyx_state): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") */ __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__21, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 2, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_Raise(__pyx_t_1, 0, 0, 0); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __PYX_ERR(1, 2, __pyx_L1_error) /* "(tree fragment)":1 * def __reduce_cython__(self): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("borg.hashindex.CacheSynchronizer.__reduce_cython__", __pyx_clineno, 
__pyx_lineno, __pyx_filename); __pyx_r = NULL; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "(tree fragment)":3 * def __reduce_cython__(self): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ due to non-trivial __cinit__") */ /* Python wrapper */ static PyObject *__pyx_pw_4borg_9hashindex_17CacheSynchronizer_9__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state); /*proto*/ static PyObject *__pyx_pw_4borg_9hashindex_17CacheSynchronizer_9__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0); __pyx_r = __pyx_pf_4borg_9hashindex_17CacheSynchronizer_8__setstate_cython__(((struct __pyx_obj_4borg_9hashindex_CacheSynchronizer *)__pyx_v_self), ((PyObject *)__pyx_v___pyx_state)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4borg_9hashindex_17CacheSynchronizer_8__setstate_cython__(CYTHON_UNUSED struct __pyx_obj_4borg_9hashindex_CacheSynchronizer *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__setstate_cython__", 0); /* "(tree fragment)":4 * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< */ __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__22, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 4, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_Raise(__pyx_t_1, 0, 0, 0); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __PYX_ERR(1, 4, __pyx_L1_error) /* "(tree fragment)":3 * def __reduce_cython__(self): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ due to non-trivial __cinit__") */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("borg.hashindex.CacheSynchronizer.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_tp_new_4borg_9hashindex_IndexBase(PyTypeObject *t, PyObject *a, PyObject *k) { PyObject *o; if (likely((t->tp_flags & Py_TPFLAGS_IS_ABSTRACT) == 0)) { o = (*t->tp_alloc)(t, 0); } else { o = (PyObject *) PyBaseObject_Type.tp_new(t, __pyx_empty_tuple, 0); } if (unlikely(!o)) return 0; if (unlikely(__pyx_pw_4borg_9hashindex_9IndexBase_1__cinit__(o, a, k) < 0)) goto bad; return o; bad: Py_DECREF(o); o = 0; return NULL; } static void __pyx_tp_dealloc_4borg_9hashindex_IndexBase(PyObject *o) { #if CYTHON_USE_TP_FINALIZE if (unlikely(PyType_HasFeature(Py_TYPE(o), Py_TPFLAGS_HAVE_FINALIZE) && Py_TYPE(o)->tp_finalize) && (!PyType_IS_GC(Py_TYPE(o)) || !_PyGC_FINALIZED(o))) { if (PyObject_CallFinalizerFromDealloc(o)) return; } #endif { PyObject *etype, *eval, *etb; PyErr_Fetch(&etype, &eval, &etb); __Pyx_SET_REFCNT(o, Py_REFCNT(o) + 1); __pyx_pw_4borg_9hashindex_9IndexBase_3__dealloc__(o); __Pyx_SET_REFCNT(o, Py_REFCNT(o) - 1); PyErr_Restore(etype, eval, etb); } 
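 /* Cython's generated tp_dealloc protocol for IndexBase: any pending exception state is saved
    with PyErr_Fetch(), the refcount is temporarily bumped so the user-level __dealloc__ cannot
    trigger a recursive deallocation, __dealloc__ is called, the refcount and exception state are
    restored, and the object memory is finally released via tp_free below. */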
(*Py_TYPE(o)->tp_free)(o); } static int __pyx_mp_ass_subscript_4borg_9hashindex_IndexBase(PyObject *o, PyObject *i, PyObject *v) { if (v) { PyErr_Format(PyExc_NotImplementedError, "Subscript assignment not supported by %.200s", Py_TYPE(o)->tp_name); return -1; } else { return __pyx_pw_4borg_9hashindex_9IndexBase_13__delitem__(o, i); } } static PyMethodDef __pyx_methods_4borg_9hashindex_IndexBase[] = { {"read", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_4borg_9hashindex_9IndexBase_5read, METH_VARARGS|METH_KEYWORDS, 0}, {"write", (PyCFunction)__pyx_pw_4borg_9hashindex_9IndexBase_7write, METH_O, 0}, {"clear", (PyCFunction)__pyx_pw_4borg_9hashindex_9IndexBase_9clear, METH_NOARGS, 0}, {"setdefault", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_4borg_9hashindex_9IndexBase_11setdefault, METH_VARARGS|METH_KEYWORDS, 0}, {"get", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_4borg_9hashindex_9IndexBase_15get, METH_VARARGS|METH_KEYWORDS, 0}, {"pop", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_4borg_9hashindex_9IndexBase_17pop, METH_VARARGS|METH_KEYWORDS, 0}, {"size", (PyCFunction)__pyx_pw_4borg_9hashindex_9IndexBase_21size, METH_NOARGS, __pyx_doc_4borg_9hashindex_9IndexBase_20size}, {"compact", (PyCFunction)__pyx_pw_4borg_9hashindex_9IndexBase_23compact, METH_NOARGS, 0}, {"__reduce_cython__", (PyCFunction)__pyx_pw_4borg_9hashindex_9IndexBase_25__reduce_cython__, METH_NOARGS, 0}, {"__setstate_cython__", (PyCFunction)__pyx_pw_4borg_9hashindex_9IndexBase_27__setstate_cython__, METH_O, 0}, {0, 0, 0, 0} }; static PySequenceMethods __pyx_tp_as_sequence_IndexBase = { __pyx_pw_4borg_9hashindex_9IndexBase_19__len__, /*sq_length*/ 0, /*sq_concat*/ 0, /*sq_repeat*/ 0, /*sq_item*/ 0, /*sq_slice*/ 0, /*sq_ass_item*/ 0, /*sq_ass_slice*/ 0, /*sq_contains*/ 0, /*sq_inplace_concat*/ 0, /*sq_inplace_repeat*/ }; static PyMappingMethods __pyx_tp_as_mapping_IndexBase = { __pyx_pw_4borg_9hashindex_9IndexBase_19__len__, /*mp_length*/ 0, /*mp_subscript*/ __pyx_mp_ass_subscript_4borg_9hashindex_IndexBase, /*mp_ass_subscript*/ }; static PyTypeObject __pyx_type_4borg_9hashindex_IndexBase = { PyVarObject_HEAD_INIT(0, 0) "borg.hashindex.IndexBase", /*tp_name*/ sizeof(struct __pyx_obj_4borg_9hashindex_IndexBase), /*tp_basicsize*/ 0, /*tp_itemsize*/ __pyx_tp_dealloc_4borg_9hashindex_IndexBase, /*tp_dealloc*/ #if PY_VERSION_HEX < 0x030800b4 0, /*tp_print*/ #endif #if PY_VERSION_HEX >= 0x030800b4 0, /*tp_vectorcall_offset*/ #endif 0, /*tp_getattr*/ 0, /*tp_setattr*/ #if PY_MAJOR_VERSION < 3 0, /*tp_compare*/ #endif #if PY_MAJOR_VERSION >= 3 0, /*tp_as_async*/ #endif 0, /*tp_repr*/ 0, /*tp_as_number*/ &__pyx_tp_as_sequence_IndexBase, /*tp_as_sequence*/ &__pyx_tp_as_mapping_IndexBase, /*tp_as_mapping*/ 0, /*tp_hash*/ 0, /*tp_call*/ 0, /*tp_str*/ 0, /*tp_getattro*/ 0, /*tp_setattro*/ 0, /*tp_as_buffer*/ Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE, /*tp_flags*/ 0, /*tp_doc*/ 0, /*tp_traverse*/ 0, /*tp_clear*/ 0, /*tp_richcompare*/ 0, /*tp_weaklistoffset*/ 0, /*tp_iter*/ 0, /*tp_iternext*/ __pyx_methods_4borg_9hashindex_IndexBase, /*tp_methods*/ 0, /*tp_members*/ 0, /*tp_getset*/ 0, /*tp_base*/ 0, /*tp_dict*/ 0, /*tp_descr_get*/ 0, /*tp_descr_set*/ 0, /*tp_dictoffset*/ 0, /*tp_init*/ 0, /*tp_alloc*/ __pyx_tp_new_4borg_9hashindex_IndexBase, /*tp_new*/ 0, /*tp_free*/ 0, /*tp_is_gc*/ 0, /*tp_bases*/ 0, /*tp_mro*/ 0, /*tp_cache*/ 0, /*tp_subclasses*/ 0, /*tp_weaklist*/ 0, /*tp_del*/ 0, /*tp_version_tag*/ #if PY_VERSION_HEX >= 
0x030400a1 0, /*tp_finalize*/ #endif #if PY_VERSION_HEX >= 0x030800b1 0, /*tp_vectorcall*/ #endif #if PY_VERSION_HEX >= 0x030800b4 && PY_VERSION_HEX < 0x03090000 0, /*tp_print*/ #endif }; static PyObject *__pyx_tp_new_4borg_9hashindex_FuseVersionsIndex(PyTypeObject *t, PyObject *a, PyObject *k) { PyObject *o = __pyx_tp_new_4borg_9hashindex_IndexBase(t, a, k); if (unlikely(!o)) return 0; return o; } static PyObject *__pyx_sq_item_4borg_9hashindex_FuseVersionsIndex(PyObject *o, Py_ssize_t i) { PyObject *r; PyObject *x = PyInt_FromSsize_t(i); if(!x) return 0; r = Py_TYPE(o)->tp_as_mapping->mp_subscript(o, x); Py_DECREF(x); return r; } static int __pyx_mp_ass_subscript_4borg_9hashindex_FuseVersionsIndex(PyObject *o, PyObject *i, PyObject *v) { if (v) { return __pyx_pw_4borg_9hashindex_17FuseVersionsIndex_3__setitem__(o, i, v); } else { if (__pyx_ptype_4borg_9hashindex_IndexBase->tp_as_mapping && __pyx_ptype_4borg_9hashindex_IndexBase->tp_as_mapping->mp_ass_subscript) return __pyx_ptype_4borg_9hashindex_IndexBase->tp_as_mapping->mp_ass_subscript(o, i, v); PyErr_Format(PyExc_NotImplementedError, "Subscript deletion not supported by %.200s", Py_TYPE(o)->tp_name); return -1; } } static PyMethodDef __pyx_methods_4borg_9hashindex_FuseVersionsIndex[] = { {"__reduce_cython__", (PyCFunction)__pyx_pw_4borg_9hashindex_17FuseVersionsIndex_7__reduce_cython__, METH_NOARGS, 0}, {"__setstate_cython__", (PyCFunction)__pyx_pw_4borg_9hashindex_17FuseVersionsIndex_9__setstate_cython__, METH_O, 0}, {0, 0, 0, 0} }; static PySequenceMethods __pyx_tp_as_sequence_FuseVersionsIndex = { #if CYTHON_COMPILING_IN_PYPY __pyx_pw_4borg_9hashindex_9IndexBase_19__len__, /*sq_length*/ #else 0, /*sq_length*/ #endif 0, /*sq_concat*/ 0, /*sq_repeat*/ __pyx_sq_item_4borg_9hashindex_FuseVersionsIndex, /*sq_item*/ 0, /*sq_slice*/ 0, /*sq_ass_item*/ 0, /*sq_ass_slice*/ __pyx_pw_4borg_9hashindex_17FuseVersionsIndex_5__contains__, /*sq_contains*/ 0, /*sq_inplace_concat*/ 0, /*sq_inplace_repeat*/ }; static PyMappingMethods __pyx_tp_as_mapping_FuseVersionsIndex = { #if CYTHON_COMPILING_IN_PYPY __pyx_pw_4borg_9hashindex_9IndexBase_19__len__, /*mp_length*/ #else 0, /*mp_length*/ #endif __pyx_pw_4borg_9hashindex_17FuseVersionsIndex_1__getitem__, /*mp_subscript*/ __pyx_mp_ass_subscript_4borg_9hashindex_FuseVersionsIndex, /*mp_ass_subscript*/ }; static PyTypeObject __pyx_type_4borg_9hashindex_FuseVersionsIndex = { PyVarObject_HEAD_INIT(0, 0) "borg.hashindex.FuseVersionsIndex", /*tp_name*/ sizeof(struct __pyx_obj_4borg_9hashindex_FuseVersionsIndex), /*tp_basicsize*/ 0, /*tp_itemsize*/ __pyx_tp_dealloc_4borg_9hashindex_IndexBase, /*tp_dealloc*/ #if PY_VERSION_HEX < 0x030800b4 0, /*tp_print*/ #endif #if PY_VERSION_HEX >= 0x030800b4 0, /*tp_vectorcall_offset*/ #endif 0, /*tp_getattr*/ 0, /*tp_setattr*/ #if PY_MAJOR_VERSION < 3 0, /*tp_compare*/ #endif #if PY_MAJOR_VERSION >= 3 0, /*tp_as_async*/ #endif 0, /*tp_repr*/ 0, /*tp_as_number*/ &__pyx_tp_as_sequence_FuseVersionsIndex, /*tp_as_sequence*/ &__pyx_tp_as_mapping_FuseVersionsIndex, /*tp_as_mapping*/ 0, /*tp_hash*/ 0, /*tp_call*/ 0, /*tp_str*/ 0, /*tp_getattro*/ 0, /*tp_setattro*/ 0, /*tp_as_buffer*/ Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE, /*tp_flags*/ 0, /*tp_doc*/ 0, /*tp_traverse*/ 0, /*tp_clear*/ 0, /*tp_richcompare*/ 0, /*tp_weaklistoffset*/ 0, /*tp_iter*/ 0, /*tp_iternext*/ __pyx_methods_4borg_9hashindex_FuseVersionsIndex, /*tp_methods*/ 0, /*tp_members*/ 0, /*tp_getset*/ 0, /*tp_base*/ 0, /*tp_dict*/ 0, 
/*tp_descr_get*/ 0, /*tp_descr_set*/ 0, /*tp_dictoffset*/ 0, /*tp_init*/ 0, /*tp_alloc*/ __pyx_tp_new_4borg_9hashindex_FuseVersionsIndex, /*tp_new*/ 0, /*tp_free*/ 0, /*tp_is_gc*/ 0, /*tp_bases*/ 0, /*tp_mro*/ 0, /*tp_cache*/ 0, /*tp_subclasses*/ 0, /*tp_weaklist*/ 0, /*tp_del*/ 0, /*tp_version_tag*/ #if PY_VERSION_HEX >= 0x030400a1 0, /*tp_finalize*/ #endif #if PY_VERSION_HEX >= 0x030800b1 0, /*tp_vectorcall*/ #endif #if PY_VERSION_HEX >= 0x030800b4 && PY_VERSION_HEX < 0x03090000 0, /*tp_print*/ #endif }; static PyObject *__pyx_tp_new_4borg_9hashindex_NSIndex(PyTypeObject *t, PyObject *a, PyObject *k) { PyObject *o = __pyx_tp_new_4borg_9hashindex_IndexBase(t, a, k); if (unlikely(!o)) return 0; return o; } static PyObject *__pyx_sq_item_4borg_9hashindex_NSIndex(PyObject *o, Py_ssize_t i) { PyObject *r; PyObject *x = PyInt_FromSsize_t(i); if(!x) return 0; r = Py_TYPE(o)->tp_as_mapping->mp_subscript(o, x); Py_DECREF(x); return r; } static int __pyx_mp_ass_subscript_4borg_9hashindex_NSIndex(PyObject *o, PyObject *i, PyObject *v) { if (v) { return __pyx_pw_4borg_9hashindex_7NSIndex_3__setitem__(o, i, v); } else { if (__pyx_ptype_4borg_9hashindex_IndexBase->tp_as_mapping && __pyx_ptype_4borg_9hashindex_IndexBase->tp_as_mapping->mp_ass_subscript) return __pyx_ptype_4borg_9hashindex_IndexBase->tp_as_mapping->mp_ass_subscript(o, i, v); PyErr_Format(PyExc_NotImplementedError, "Subscript deletion not supported by %.200s", Py_TYPE(o)->tp_name); return -1; } } static PyMethodDef __pyx_methods_4borg_9hashindex_NSIndex[] = { {"iteritems", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_4borg_9hashindex_7NSIndex_7iteritems, METH_VARARGS|METH_KEYWORDS, 0}, {"__reduce_cython__", (PyCFunction)__pyx_pw_4borg_9hashindex_7NSIndex_9__reduce_cython__, METH_NOARGS, 0}, {"__setstate_cython__", (PyCFunction)__pyx_pw_4borg_9hashindex_7NSIndex_11__setstate_cython__, METH_O, 0}, {0, 0, 0, 0} }; static PySequenceMethods __pyx_tp_as_sequence_NSIndex = { #if CYTHON_COMPILING_IN_PYPY __pyx_pw_4borg_9hashindex_9IndexBase_19__len__, /*sq_length*/ #else 0, /*sq_length*/ #endif 0, /*sq_concat*/ 0, /*sq_repeat*/ __pyx_sq_item_4borg_9hashindex_NSIndex, /*sq_item*/ 0, /*sq_slice*/ 0, /*sq_ass_item*/ 0, /*sq_ass_slice*/ __pyx_pw_4borg_9hashindex_7NSIndex_5__contains__, /*sq_contains*/ 0, /*sq_inplace_concat*/ 0, /*sq_inplace_repeat*/ }; static PyMappingMethods __pyx_tp_as_mapping_NSIndex = { #if CYTHON_COMPILING_IN_PYPY __pyx_pw_4borg_9hashindex_9IndexBase_19__len__, /*mp_length*/ #else 0, /*mp_length*/ #endif __pyx_pw_4borg_9hashindex_7NSIndex_1__getitem__, /*mp_subscript*/ __pyx_mp_ass_subscript_4borg_9hashindex_NSIndex, /*mp_ass_subscript*/ }; static PyTypeObject __pyx_type_4borg_9hashindex_NSIndex = { PyVarObject_HEAD_INIT(0, 0) "borg.hashindex.NSIndex", /*tp_name*/ sizeof(struct __pyx_obj_4borg_9hashindex_NSIndex), /*tp_basicsize*/ 0, /*tp_itemsize*/ __pyx_tp_dealloc_4borg_9hashindex_IndexBase, /*tp_dealloc*/ #if PY_VERSION_HEX < 0x030800b4 0, /*tp_print*/ #endif #if PY_VERSION_HEX >= 0x030800b4 0, /*tp_vectorcall_offset*/ #endif 0, /*tp_getattr*/ 0, /*tp_setattr*/ #if PY_MAJOR_VERSION < 3 0, /*tp_compare*/ #endif #if PY_MAJOR_VERSION >= 3 0, /*tp_as_async*/ #endif 0, /*tp_repr*/ 0, /*tp_as_number*/ &__pyx_tp_as_sequence_NSIndex, /*tp_as_sequence*/ &__pyx_tp_as_mapping_NSIndex, /*tp_as_mapping*/ 0, /*tp_hash*/ 0, /*tp_call*/ 0, /*tp_str*/ 0, /*tp_getattro*/ 0, /*tp_setattro*/ 0, /*tp_as_buffer*/ Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE, 
/*tp_flags*/ 0, /*tp_doc*/ 0, /*tp_traverse*/ 0, /*tp_clear*/ 0, /*tp_richcompare*/ 0, /*tp_weaklistoffset*/ 0, /*tp_iter*/ 0, /*tp_iternext*/ __pyx_methods_4borg_9hashindex_NSIndex, /*tp_methods*/ 0, /*tp_members*/ 0, /*tp_getset*/ 0, /*tp_base*/ 0, /*tp_dict*/ 0, /*tp_descr_get*/ 0, /*tp_descr_set*/ 0, /*tp_dictoffset*/ 0, /*tp_init*/ 0, /*tp_alloc*/ __pyx_tp_new_4borg_9hashindex_NSIndex, /*tp_new*/ 0, /*tp_free*/ 0, /*tp_is_gc*/ 0, /*tp_bases*/ 0, /*tp_mro*/ 0, /*tp_cache*/ 0, /*tp_subclasses*/ 0, /*tp_weaklist*/ 0, /*tp_del*/ 0, /*tp_version_tag*/ #if PY_VERSION_HEX >= 0x030400a1 0, /*tp_finalize*/ #endif #if PY_VERSION_HEX >= 0x030800b1 0, /*tp_vectorcall*/ #endif #if PY_VERSION_HEX >= 0x030800b4 && PY_VERSION_HEX < 0x03090000 0, /*tp_print*/ #endif }; static PyObject *__pyx_tp_new_4borg_9hashindex_NSKeyIterator(PyTypeObject *t, PyObject *a, PyObject *k) { struct __pyx_obj_4borg_9hashindex_NSKeyIterator *p; PyObject *o; if (likely((t->tp_flags & Py_TPFLAGS_IS_ABSTRACT) == 0)) { o = (*t->tp_alloc)(t, 0); } else { o = (PyObject *) PyBaseObject_Type.tp_new(t, __pyx_empty_tuple, 0); } if (unlikely(!o)) return 0; p = ((struct __pyx_obj_4borg_9hashindex_NSKeyIterator *)o); p->idx = ((struct __pyx_obj_4borg_9hashindex_NSIndex *)Py_None); Py_INCREF(Py_None); if (unlikely(__pyx_pw_4borg_9hashindex_13NSKeyIterator_1__cinit__(o, a, k) < 0)) goto bad; return o; bad: Py_DECREF(o); o = 0; return NULL; } static void __pyx_tp_dealloc_4borg_9hashindex_NSKeyIterator(PyObject *o) { struct __pyx_obj_4borg_9hashindex_NSKeyIterator *p = (struct __pyx_obj_4borg_9hashindex_NSKeyIterator *)o; #if CYTHON_USE_TP_FINALIZE if (unlikely(PyType_HasFeature(Py_TYPE(o), Py_TPFLAGS_HAVE_FINALIZE) && Py_TYPE(o)->tp_finalize) && !_PyGC_FINALIZED(o)) { if (PyObject_CallFinalizerFromDealloc(o)) return; } #endif PyObject_GC_UnTrack(o); Py_CLEAR(p->idx); (*Py_TYPE(o)->tp_free)(o); } static int __pyx_tp_traverse_4borg_9hashindex_NSKeyIterator(PyObject *o, visitproc v, void *a) { int e; struct __pyx_obj_4borg_9hashindex_NSKeyIterator *p = (struct __pyx_obj_4borg_9hashindex_NSKeyIterator *)o; if (p->idx) { e = (*v)(((PyObject *)p->idx), a); if (e) return e; } return 0; } static int __pyx_tp_clear_4borg_9hashindex_NSKeyIterator(PyObject *o) { PyObject* tmp; struct __pyx_obj_4borg_9hashindex_NSKeyIterator *p = (struct __pyx_obj_4borg_9hashindex_NSKeyIterator *)o; tmp = ((PyObject*)p->idx); p->idx = ((struct __pyx_obj_4borg_9hashindex_NSIndex *)Py_None); Py_INCREF(Py_None); Py_XDECREF(tmp); return 0; } static PyObject *__pyx_specialmethod___pyx_pw_4borg_9hashindex_13NSKeyIterator_5__next__(PyObject *self, CYTHON_UNUSED PyObject *arg) {return __pyx_pw_4borg_9hashindex_13NSKeyIterator_5__next__(self);} static PyMethodDef __pyx_methods_4borg_9hashindex_NSKeyIterator[] = { {"__next__", (PyCFunction)__pyx_specialmethod___pyx_pw_4borg_9hashindex_13NSKeyIterator_5__next__, METH_NOARGS|METH_COEXIST, 0}, {"__reduce_cython__", (PyCFunction)__pyx_pw_4borg_9hashindex_13NSKeyIterator_7__reduce_cython__, METH_NOARGS, 0}, {"__setstate_cython__", (PyCFunction)__pyx_pw_4borg_9hashindex_13NSKeyIterator_9__setstate_cython__, METH_O, 0}, {0, 0, 0, 0} }; static PyTypeObject __pyx_type_4borg_9hashindex_NSKeyIterator = { PyVarObject_HEAD_INIT(0, 0) "borg.hashindex.NSKeyIterator", /*tp_name*/ sizeof(struct __pyx_obj_4borg_9hashindex_NSKeyIterator), /*tp_basicsize*/ 0, /*tp_itemsize*/ __pyx_tp_dealloc_4borg_9hashindex_NSKeyIterator, /*tp_dealloc*/ #if PY_VERSION_HEX < 0x030800b4 0, /*tp_print*/ #endif #if PY_VERSION_HEX >= 0x030800b4 0, 
/*tp_vectorcall_offset*/ #endif 0, /*tp_getattr*/ 0, /*tp_setattr*/ #if PY_MAJOR_VERSION < 3 0, /*tp_compare*/ #endif #if PY_MAJOR_VERSION >= 3 0, /*tp_as_async*/ #endif 0, /*tp_repr*/ 0, /*tp_as_number*/ 0, /*tp_as_sequence*/ 0, /*tp_as_mapping*/ 0, /*tp_hash*/ 0, /*tp_call*/ 0, /*tp_str*/ 0, /*tp_getattro*/ 0, /*tp_setattro*/ 0, /*tp_as_buffer*/ Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/ 0, /*tp_doc*/ __pyx_tp_traverse_4borg_9hashindex_NSKeyIterator, /*tp_traverse*/ __pyx_tp_clear_4borg_9hashindex_NSKeyIterator, /*tp_clear*/ 0, /*tp_richcompare*/ 0, /*tp_weaklistoffset*/ __pyx_pw_4borg_9hashindex_13NSKeyIterator_3__iter__, /*tp_iter*/ __pyx_pw_4borg_9hashindex_13NSKeyIterator_5__next__, /*tp_iternext*/ __pyx_methods_4borg_9hashindex_NSKeyIterator, /*tp_methods*/ 0, /*tp_members*/ 0, /*tp_getset*/ 0, /*tp_base*/ 0, /*tp_dict*/ 0, /*tp_descr_get*/ 0, /*tp_descr_set*/ 0, /*tp_dictoffset*/ 0, /*tp_init*/ 0, /*tp_alloc*/ __pyx_tp_new_4borg_9hashindex_NSKeyIterator, /*tp_new*/ 0, /*tp_free*/ 0, /*tp_is_gc*/ 0, /*tp_bases*/ 0, /*tp_mro*/ 0, /*tp_cache*/ 0, /*tp_subclasses*/ 0, /*tp_weaklist*/ 0, /*tp_del*/ 0, /*tp_version_tag*/ #if PY_VERSION_HEX >= 0x030400a1 0, /*tp_finalize*/ #endif #if PY_VERSION_HEX >= 0x030800b1 0, /*tp_vectorcall*/ #endif #if PY_VERSION_HEX >= 0x030800b4 && PY_VERSION_HEX < 0x03090000 0, /*tp_print*/ #endif }; static struct __pyx_vtabstruct_4borg_9hashindex_ChunkIndex __pyx_vtable_4borg_9hashindex_ChunkIndex; static PyObject *__pyx_tp_new_4borg_9hashindex_ChunkIndex(PyTypeObject *t, PyObject *a, PyObject *k) { struct __pyx_obj_4borg_9hashindex_ChunkIndex *p; PyObject *o = __pyx_tp_new_4borg_9hashindex_IndexBase(t, a, k); if (unlikely(!o)) return 0; p = ((struct __pyx_obj_4borg_9hashindex_ChunkIndex *)o); p->__pyx_vtab = __pyx_vtabptr_4borg_9hashindex_ChunkIndex; return o; } static PyObject *__pyx_sq_item_4borg_9hashindex_ChunkIndex(PyObject *o, Py_ssize_t i) { PyObject *r; PyObject *x = PyInt_FromSsize_t(i); if(!x) return 0; r = Py_TYPE(o)->tp_as_mapping->mp_subscript(o, x); Py_DECREF(x); return r; } static int __pyx_mp_ass_subscript_4borg_9hashindex_ChunkIndex(PyObject *o, PyObject *i, PyObject *v) { if (v) { return __pyx_pw_4borg_9hashindex_10ChunkIndex_3__setitem__(o, i, v); } else { if (__pyx_ptype_4borg_9hashindex_IndexBase->tp_as_mapping && __pyx_ptype_4borg_9hashindex_IndexBase->tp_as_mapping->mp_ass_subscript) return __pyx_ptype_4borg_9hashindex_IndexBase->tp_as_mapping->mp_ass_subscript(o, i, v); PyErr_Format(PyExc_NotImplementedError, "Subscript deletion not supported by %.200s", Py_TYPE(o)->tp_name); return -1; } } static PyMethodDef __pyx_methods_4borg_9hashindex_ChunkIndex[] = { {"incref", (PyCFunction)__pyx_pw_4borg_9hashindex_10ChunkIndex_7incref, METH_O, __pyx_doc_4borg_9hashindex_10ChunkIndex_6incref}, {"decref", (PyCFunction)__pyx_pw_4borg_9hashindex_10ChunkIndex_9decref, METH_O, __pyx_doc_4borg_9hashindex_10ChunkIndex_8decref}, {"iteritems", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_4borg_9hashindex_10ChunkIndex_11iteritems, METH_VARARGS|METH_KEYWORDS, 0}, {"summarize", (PyCFunction)__pyx_pw_4borg_9hashindex_10ChunkIndex_13summarize, METH_NOARGS, 0}, {"stats_against", (PyCFunction)__pyx_pw_4borg_9hashindex_10ChunkIndex_15stats_against, METH_O, __pyx_doc_4borg_9hashindex_10ChunkIndex_14stats_against}, {"add", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_4borg_9hashindex_10ChunkIndex_17add, METH_VARARGS|METH_KEYWORDS, 0}, 
{"merge", (PyCFunction)__pyx_pw_4borg_9hashindex_10ChunkIndex_19merge, METH_O, 0}, {"zero_csize_ids", (PyCFunction)__pyx_pw_4borg_9hashindex_10ChunkIndex_21zero_csize_ids, METH_NOARGS, 0}, {"__reduce_cython__", (PyCFunction)__pyx_pw_4borg_9hashindex_10ChunkIndex_23__reduce_cython__, METH_NOARGS, 0}, {"__setstate_cython__", (PyCFunction)__pyx_pw_4borg_9hashindex_10ChunkIndex_25__setstate_cython__, METH_O, 0}, {0, 0, 0, 0} }; static PySequenceMethods __pyx_tp_as_sequence_ChunkIndex = { #if CYTHON_COMPILING_IN_PYPY __pyx_pw_4borg_9hashindex_9IndexBase_19__len__, /*sq_length*/ #else 0, /*sq_length*/ #endif 0, /*sq_concat*/ 0, /*sq_repeat*/ __pyx_sq_item_4borg_9hashindex_ChunkIndex, /*sq_item*/ 0, /*sq_slice*/ 0, /*sq_ass_item*/ 0, /*sq_ass_slice*/ __pyx_pw_4borg_9hashindex_10ChunkIndex_5__contains__, /*sq_contains*/ 0, /*sq_inplace_concat*/ 0, /*sq_inplace_repeat*/ }; static PyMappingMethods __pyx_tp_as_mapping_ChunkIndex = { #if CYTHON_COMPILING_IN_PYPY __pyx_pw_4borg_9hashindex_9IndexBase_19__len__, /*mp_length*/ #else 0, /*mp_length*/ #endif __pyx_pw_4borg_9hashindex_10ChunkIndex_1__getitem__, /*mp_subscript*/ __pyx_mp_ass_subscript_4borg_9hashindex_ChunkIndex, /*mp_ass_subscript*/ }; static PyTypeObject __pyx_type_4borg_9hashindex_ChunkIndex = { PyVarObject_HEAD_INIT(0, 0) "borg.hashindex.ChunkIndex", /*tp_name*/ sizeof(struct __pyx_obj_4borg_9hashindex_ChunkIndex), /*tp_basicsize*/ 0, /*tp_itemsize*/ __pyx_tp_dealloc_4borg_9hashindex_IndexBase, /*tp_dealloc*/ #if PY_VERSION_HEX < 0x030800b4 0, /*tp_print*/ #endif #if PY_VERSION_HEX >= 0x030800b4 0, /*tp_vectorcall_offset*/ #endif 0, /*tp_getattr*/ 0, /*tp_setattr*/ #if PY_MAJOR_VERSION < 3 0, /*tp_compare*/ #endif #if PY_MAJOR_VERSION >= 3 0, /*tp_as_async*/ #endif 0, /*tp_repr*/ 0, /*tp_as_number*/ &__pyx_tp_as_sequence_ChunkIndex, /*tp_as_sequence*/ &__pyx_tp_as_mapping_ChunkIndex, /*tp_as_mapping*/ 0, /*tp_hash*/ 0, /*tp_call*/ 0, /*tp_str*/ 0, /*tp_getattro*/ 0, /*tp_setattro*/ 0, /*tp_as_buffer*/ Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE, /*tp_flags*/ "\n Mapping of 32 byte keys to (refcount, size, csize), which are all 32-bit unsigned.\n\n The reference count cannot overflow. If an overflow would occur, the refcount\n is fixed to MAX_VALUE and will neither increase nor decrease by incref(), decref()\n or add().\n\n Prior signed 32-bit overflow is handled correctly for most cases: All values\n from UINT32_MAX (2**32-1, inclusive) to MAX_VALUE (exclusive) are reserved and either\n cause silent data loss (-1, -2) or will raise an AssertionError when accessed.\n Other values are handled correctly. 
Note that previously the refcount could also reach\n 0 by *increasing* it.\n\n Assigning refcounts in this reserved range is an invalid operation and raises AssertionError.\n ", /*tp_doc*/ 0, /*tp_traverse*/ 0, /*tp_clear*/ 0, /*tp_richcompare*/ 0, /*tp_weaklistoffset*/ 0, /*tp_iter*/ 0, /*tp_iternext*/ __pyx_methods_4borg_9hashindex_ChunkIndex, /*tp_methods*/ 0, /*tp_members*/ 0, /*tp_getset*/ 0, /*tp_base*/ 0, /*tp_dict*/ 0, /*tp_descr_get*/ 0, /*tp_descr_set*/ 0, /*tp_dictoffset*/ 0, /*tp_init*/ 0, /*tp_alloc*/ __pyx_tp_new_4borg_9hashindex_ChunkIndex, /*tp_new*/ 0, /*tp_free*/ 0, /*tp_is_gc*/ 0, /*tp_bases*/ 0, /*tp_mro*/ 0, /*tp_cache*/ 0, /*tp_subclasses*/ 0, /*tp_weaklist*/ 0, /*tp_del*/ 0, /*tp_version_tag*/ #if PY_VERSION_HEX >= 0x030400a1 0, /*tp_finalize*/ #endif #if PY_VERSION_HEX >= 0x030800b1 0, /*tp_vectorcall*/ #endif #if PY_VERSION_HEX >= 0x030800b4 && PY_VERSION_HEX < 0x03090000 0, /*tp_print*/ #endif }; static PyObject *__pyx_tp_new_4borg_9hashindex_ChunkKeyIterator(PyTypeObject *t, PyObject *a, PyObject *k) { struct __pyx_obj_4borg_9hashindex_ChunkKeyIterator *p; PyObject *o; if (likely((t->tp_flags & Py_TPFLAGS_IS_ABSTRACT) == 0)) { o = (*t->tp_alloc)(t, 0); } else { o = (PyObject *) PyBaseObject_Type.tp_new(t, __pyx_empty_tuple, 0); } if (unlikely(!o)) return 0; p = ((struct __pyx_obj_4borg_9hashindex_ChunkKeyIterator *)o); p->idx = ((struct __pyx_obj_4borg_9hashindex_ChunkIndex *)Py_None); Py_INCREF(Py_None); if (unlikely(__pyx_pw_4borg_9hashindex_16ChunkKeyIterator_1__cinit__(o, a, k) < 0)) goto bad; return o; bad: Py_DECREF(o); o = 0; return NULL; } static void __pyx_tp_dealloc_4borg_9hashindex_ChunkKeyIterator(PyObject *o) { struct __pyx_obj_4borg_9hashindex_ChunkKeyIterator *p = (struct __pyx_obj_4borg_9hashindex_ChunkKeyIterator *)o; #if CYTHON_USE_TP_FINALIZE if (unlikely(PyType_HasFeature(Py_TYPE(o), Py_TPFLAGS_HAVE_FINALIZE) && Py_TYPE(o)->tp_finalize) && !_PyGC_FINALIZED(o)) { if (PyObject_CallFinalizerFromDealloc(o)) return; } #endif PyObject_GC_UnTrack(o); Py_CLEAR(p->idx); (*Py_TYPE(o)->tp_free)(o); } static int __pyx_tp_traverse_4borg_9hashindex_ChunkKeyIterator(PyObject *o, visitproc v, void *a) { int e; struct __pyx_obj_4borg_9hashindex_ChunkKeyIterator *p = (struct __pyx_obj_4borg_9hashindex_ChunkKeyIterator *)o; if (p->idx) { e = (*v)(((PyObject *)p->idx), a); if (e) return e; } return 0; } static int __pyx_tp_clear_4borg_9hashindex_ChunkKeyIterator(PyObject *o) { PyObject* tmp; struct __pyx_obj_4borg_9hashindex_ChunkKeyIterator *p = (struct __pyx_obj_4borg_9hashindex_ChunkKeyIterator *)o; tmp = ((PyObject*)p->idx); p->idx = ((struct __pyx_obj_4borg_9hashindex_ChunkIndex *)Py_None); Py_INCREF(Py_None); Py_XDECREF(tmp); return 0; } static PyObject *__pyx_specialmethod___pyx_pw_4borg_9hashindex_16ChunkKeyIterator_5__next__(PyObject *self, CYTHON_UNUSED PyObject *arg) {return __pyx_pw_4borg_9hashindex_16ChunkKeyIterator_5__next__(self);} static PyMethodDef __pyx_methods_4borg_9hashindex_ChunkKeyIterator[] = { {"__next__", (PyCFunction)__pyx_specialmethod___pyx_pw_4borg_9hashindex_16ChunkKeyIterator_5__next__, METH_NOARGS|METH_COEXIST, 0}, {"__reduce_cython__", (PyCFunction)__pyx_pw_4borg_9hashindex_16ChunkKeyIterator_7__reduce_cython__, METH_NOARGS, 0}, {"__setstate_cython__", (PyCFunction)__pyx_pw_4borg_9hashindex_16ChunkKeyIterator_9__setstate_cython__, METH_O, 0}, {0, 0, 0, 0} }; static PyTypeObject __pyx_type_4borg_9hashindex_ChunkKeyIterator = { PyVarObject_HEAD_INIT(0, 0) "borg.hashindex.ChunkKeyIterator", /*tp_name*/ sizeof(struct 
__pyx_obj_4borg_9hashindex_ChunkKeyIterator), /*tp_basicsize*/ 0, /*tp_itemsize*/ __pyx_tp_dealloc_4borg_9hashindex_ChunkKeyIterator, /*tp_dealloc*/ #if PY_VERSION_HEX < 0x030800b4 0, /*tp_print*/ #endif #if PY_VERSION_HEX >= 0x030800b4 0, /*tp_vectorcall_offset*/ #endif 0, /*tp_getattr*/ 0, /*tp_setattr*/ #if PY_MAJOR_VERSION < 3 0, /*tp_compare*/ #endif #if PY_MAJOR_VERSION >= 3 0, /*tp_as_async*/ #endif 0, /*tp_repr*/ 0, /*tp_as_number*/ 0, /*tp_as_sequence*/ 0, /*tp_as_mapping*/ 0, /*tp_hash*/ 0, /*tp_call*/ 0, /*tp_str*/ 0, /*tp_getattro*/ 0, /*tp_setattro*/ 0, /*tp_as_buffer*/ Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/ 0, /*tp_doc*/ __pyx_tp_traverse_4borg_9hashindex_ChunkKeyIterator, /*tp_traverse*/ __pyx_tp_clear_4borg_9hashindex_ChunkKeyIterator, /*tp_clear*/ 0, /*tp_richcompare*/ 0, /*tp_weaklistoffset*/ __pyx_pw_4borg_9hashindex_16ChunkKeyIterator_3__iter__, /*tp_iter*/ __pyx_pw_4borg_9hashindex_16ChunkKeyIterator_5__next__, /*tp_iternext*/ __pyx_methods_4borg_9hashindex_ChunkKeyIterator, /*tp_methods*/ 0, /*tp_members*/ 0, /*tp_getset*/ 0, /*tp_base*/ 0, /*tp_dict*/ 0, /*tp_descr_get*/ 0, /*tp_descr_set*/ 0, /*tp_dictoffset*/ 0, /*tp_init*/ 0, /*tp_alloc*/ __pyx_tp_new_4borg_9hashindex_ChunkKeyIterator, /*tp_new*/ 0, /*tp_free*/ 0, /*tp_is_gc*/ 0, /*tp_bases*/ 0, /*tp_mro*/ 0, /*tp_cache*/ 0, /*tp_subclasses*/ 0, /*tp_weaklist*/ 0, /*tp_del*/ 0, /*tp_version_tag*/ #if PY_VERSION_HEX >= 0x030400a1 0, /*tp_finalize*/ #endif #if PY_VERSION_HEX >= 0x030800b1 0, /*tp_vectorcall*/ #endif #if PY_VERSION_HEX >= 0x030800b4 && PY_VERSION_HEX < 0x03090000 0, /*tp_print*/ #endif }; static PyObject *__pyx_tp_new_4borg_9hashindex_CacheSynchronizer(PyTypeObject *t, PyObject *a, PyObject *k) { struct __pyx_obj_4borg_9hashindex_CacheSynchronizer *p; PyObject *o; if (likely((t->tp_flags & Py_TPFLAGS_IS_ABSTRACT) == 0)) { o = (*t->tp_alloc)(t, 0); } else { o = (PyObject *) PyBaseObject_Type.tp_new(t, __pyx_empty_tuple, 0); } if (unlikely(!o)) return 0; p = ((struct __pyx_obj_4borg_9hashindex_CacheSynchronizer *)o); p->chunks = ((struct __pyx_obj_4borg_9hashindex_ChunkIndex *)Py_None); Py_INCREF(Py_None); if (unlikely(__pyx_pw_4borg_9hashindex_17CacheSynchronizer_1__cinit__(o, a, k) < 0)) goto bad; return o; bad: Py_DECREF(o); o = 0; return NULL; } static void __pyx_tp_dealloc_4borg_9hashindex_CacheSynchronizer(PyObject *o) { struct __pyx_obj_4borg_9hashindex_CacheSynchronizer *p = (struct __pyx_obj_4borg_9hashindex_CacheSynchronizer *)o; #if CYTHON_USE_TP_FINALIZE if (unlikely(PyType_HasFeature(Py_TYPE(o), Py_TPFLAGS_HAVE_FINALIZE) && Py_TYPE(o)->tp_finalize) && !_PyGC_FINALIZED(o)) { if (PyObject_CallFinalizerFromDealloc(o)) return; } #endif PyObject_GC_UnTrack(o); { PyObject *etype, *eval, *etb; PyErr_Fetch(&etype, &eval, &etb); __Pyx_SET_REFCNT(o, Py_REFCNT(o) + 1); __pyx_pw_4borg_9hashindex_17CacheSynchronizer_3__dealloc__(o); __Pyx_SET_REFCNT(o, Py_REFCNT(o) - 1); PyErr_Restore(etype, eval, etb); } Py_CLEAR(p->chunks); (*Py_TYPE(o)->tp_free)(o); } static int __pyx_tp_traverse_4borg_9hashindex_CacheSynchronizer(PyObject *o, visitproc v, void *a) { int e; struct __pyx_obj_4borg_9hashindex_CacheSynchronizer *p = (struct __pyx_obj_4borg_9hashindex_CacheSynchronizer *)o; if (p->chunks) { e = (*v)(((PyObject *)p->chunks), a); if (e) return e; } return 0; } static int __pyx_tp_clear_4borg_9hashindex_CacheSynchronizer(PyObject *o) { PyObject* tmp; struct 
__pyx_obj_4borg_9hashindex_CacheSynchronizer *p = (struct __pyx_obj_4borg_9hashindex_CacheSynchronizer *)o; tmp = ((PyObject*)p->chunks); p->chunks = ((struct __pyx_obj_4borg_9hashindex_ChunkIndex *)Py_None); Py_INCREF(Py_None); Py_XDECREF(tmp); return 0; } static PyObject *__pyx_getprop_4borg_9hashindex_17CacheSynchronizer_num_files_totals(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_pw_4borg_9hashindex_17CacheSynchronizer_16num_files_totals_1__get__(o); } static PyObject *__pyx_getprop_4borg_9hashindex_17CacheSynchronizer_num_files_parts(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_pw_4borg_9hashindex_17CacheSynchronizer_15num_files_parts_1__get__(o); } static PyObject *__pyx_getprop_4borg_9hashindex_17CacheSynchronizer_size_totals(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_pw_4borg_9hashindex_17CacheSynchronizer_11size_totals_1__get__(o); } static PyObject *__pyx_getprop_4borg_9hashindex_17CacheSynchronizer_size_parts(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_pw_4borg_9hashindex_17CacheSynchronizer_10size_parts_1__get__(o); } static PyObject *__pyx_getprop_4borg_9hashindex_17CacheSynchronizer_csize_totals(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_pw_4borg_9hashindex_17CacheSynchronizer_12csize_totals_1__get__(o); } static PyObject *__pyx_getprop_4borg_9hashindex_17CacheSynchronizer_csize_parts(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_pw_4borg_9hashindex_17CacheSynchronizer_11csize_parts_1__get__(o); } static PyMethodDef __pyx_methods_4borg_9hashindex_CacheSynchronizer[] = { {"feed", (PyCFunction)__pyx_pw_4borg_9hashindex_17CacheSynchronizer_5feed, METH_O, 0}, {"__reduce_cython__", (PyCFunction)__pyx_pw_4borg_9hashindex_17CacheSynchronizer_7__reduce_cython__, METH_NOARGS, 0}, {"__setstate_cython__", (PyCFunction)__pyx_pw_4borg_9hashindex_17CacheSynchronizer_9__setstate_cython__, METH_O, 0}, {0, 0, 0, 0} }; static struct PyGetSetDef __pyx_getsets_4borg_9hashindex_CacheSynchronizer[] = { {(char *)"num_files_totals", __pyx_getprop_4borg_9hashindex_17CacheSynchronizer_num_files_totals, 0, (char *)0, 0}, {(char *)"num_files_parts", __pyx_getprop_4borg_9hashindex_17CacheSynchronizer_num_files_parts, 0, (char *)0, 0}, {(char *)"size_totals", __pyx_getprop_4borg_9hashindex_17CacheSynchronizer_size_totals, 0, (char *)0, 0}, {(char *)"size_parts", __pyx_getprop_4borg_9hashindex_17CacheSynchronizer_size_parts, 0, (char *)0, 0}, {(char *)"csize_totals", __pyx_getprop_4borg_9hashindex_17CacheSynchronizer_csize_totals, 0, (char *)0, 0}, {(char *)"csize_parts", __pyx_getprop_4borg_9hashindex_17CacheSynchronizer_csize_parts, 0, (char *)0, 0}, {0, 0, 0, 0, 0} }; static PyTypeObject __pyx_type_4borg_9hashindex_CacheSynchronizer = { PyVarObject_HEAD_INIT(0, 0) "borg.hashindex.CacheSynchronizer", /*tp_name*/ sizeof(struct __pyx_obj_4borg_9hashindex_CacheSynchronizer), /*tp_basicsize*/ 0, /*tp_itemsize*/ __pyx_tp_dealloc_4borg_9hashindex_CacheSynchronizer, /*tp_dealloc*/ #if PY_VERSION_HEX < 0x030800b4 0, /*tp_print*/ #endif #if PY_VERSION_HEX >= 0x030800b4 0, /*tp_vectorcall_offset*/ #endif 0, /*tp_getattr*/ 0, /*tp_setattr*/ #if PY_MAJOR_VERSION < 3 0, /*tp_compare*/ #endif #if PY_MAJOR_VERSION >= 3 0, /*tp_as_async*/ #endif 0, /*tp_repr*/ 0, /*tp_as_number*/ 0, /*tp_as_sequence*/ 0, /*tp_as_mapping*/ 0, /*tp_hash*/ 0, /*tp_call*/ 0, /*tp_str*/ 0, /*tp_getattro*/ 0, /*tp_setattro*/ 0, /*tp_as_buffer*/ Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/ 0, 
/*tp_doc*/ __pyx_tp_traverse_4borg_9hashindex_CacheSynchronizer, /*tp_traverse*/ __pyx_tp_clear_4borg_9hashindex_CacheSynchronizer, /*tp_clear*/ 0, /*tp_richcompare*/ 0, /*tp_weaklistoffset*/ 0, /*tp_iter*/ 0, /*tp_iternext*/ __pyx_methods_4borg_9hashindex_CacheSynchronizer, /*tp_methods*/ 0, /*tp_members*/ __pyx_getsets_4borg_9hashindex_CacheSynchronizer, /*tp_getset*/ 0, /*tp_base*/ 0, /*tp_dict*/ 0, /*tp_descr_get*/ 0, /*tp_descr_set*/ 0, /*tp_dictoffset*/ 0, /*tp_init*/ 0, /*tp_alloc*/ __pyx_tp_new_4borg_9hashindex_CacheSynchronizer, /*tp_new*/ 0, /*tp_free*/ 0, /*tp_is_gc*/ 0, /*tp_bases*/ 0, /*tp_mro*/ 0, /*tp_cache*/ 0, /*tp_subclasses*/ 0, /*tp_weaklist*/ 0, /*tp_del*/ 0, /*tp_version_tag*/ #if PY_VERSION_HEX >= 0x030400a1 0, /*tp_finalize*/ #endif #if PY_VERSION_HEX >= 0x030800b1 0, /*tp_vectorcall*/ #endif #if PY_VERSION_HEX >= 0x030800b4 && PY_VERSION_HEX < 0x03090000 0, /*tp_print*/ #endif }; static PyMethodDef __pyx_methods[] = { {0, 0, 0, 0} }; #if PY_MAJOR_VERSION >= 3 #if CYTHON_PEP489_MULTI_PHASE_INIT static PyObject* __pyx_pymod_create(PyObject *spec, PyModuleDef *def); /*proto*/ static int __pyx_pymod_exec_hashindex(PyObject* module); /*proto*/ static PyModuleDef_Slot __pyx_moduledef_slots[] = { {Py_mod_create, (void*)__pyx_pymod_create}, {Py_mod_exec, (void*)__pyx_pymod_exec_hashindex}, {0, NULL} }; #endif static struct PyModuleDef __pyx_moduledef = { PyModuleDef_HEAD_INIT, "hashindex", 0, /* m_doc */ #if CYTHON_PEP489_MULTI_PHASE_INIT 0, /* m_size */ #else -1, /* m_size */ #endif __pyx_methods /* m_methods */, #if CYTHON_PEP489_MULTI_PHASE_INIT __pyx_moduledef_slots, /* m_slots */ #else NULL, /* m_reload */ #endif NULL, /* m_traverse */ NULL, /* m_clear */ NULL /* m_free */ }; #endif #ifndef CYTHON_SMALL_CODE #if defined(__clang__) #define CYTHON_SMALL_CODE #elif defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 3)) #define CYTHON_SMALL_CODE __attribute__((cold)) #else #define CYTHON_SMALL_CODE #endif #endif static __Pyx_StringTabEntry __pyx_string_tab[] = { {&__pyx_kp_u_1_1_07, __pyx_k_1_1_07, sizeof(__pyx_k_1_1_07), 0, 1, 0, 0}, {&__pyx_n_s_API_VERSION, __pyx_k_API_VERSION, sizeof(__pyx_k_API_VERSION), 0, 0, 1, 1}, {&__pyx_n_s_CacheSynchronizer, __pyx_k_CacheSynchronizer, sizeof(__pyx_k_CacheSynchronizer), 0, 0, 1, 1}, {&__pyx_n_s_ChunkIndex, __pyx_k_ChunkIndex, sizeof(__pyx_k_ChunkIndex), 0, 0, 1, 1}, {&__pyx_n_s_ChunkIndexEntry, __pyx_k_ChunkIndexEntry, sizeof(__pyx_k_ChunkIndexEntry), 0, 0, 1, 1}, {&__pyx_n_u_ChunkIndexEntry, __pyx_k_ChunkIndexEntry, sizeof(__pyx_k_ChunkIndexEntry), 0, 1, 0, 1}, {&__pyx_n_s_ChunkKeyIterator, __pyx_k_ChunkKeyIterator, sizeof(__pyx_k_ChunkKeyIterator), 0, 0, 1, 1}, {&__pyx_kp_u_Expected_bytes_of_length_16_for, __pyx_k_Expected_bytes_of_length_16_for, sizeof(__pyx_k_Expected_bytes_of_length_16_for), 0, 1, 0, 0}, {&__pyx_n_s_FuseVersionsIndex, __pyx_k_FuseVersionsIndex, sizeof(__pyx_k_FuseVersionsIndex), 0, 0, 1, 1}, {&__pyx_n_s_IndexError, __pyx_k_IndexError, sizeof(__pyx_k_IndexError), 0, 0, 1, 1}, {&__pyx_n_s_KeyError, __pyx_k_KeyError, sizeof(__pyx_k_KeyError), 0, 0, 1, 1}, {&__pyx_n_s_MAX_LOAD_FACTOR, __pyx_k_MAX_LOAD_FACTOR, sizeof(__pyx_k_MAX_LOAD_FACTOR), 0, 0, 1, 1}, {&__pyx_n_s_MAX_VALUE, __pyx_k_MAX_VALUE, sizeof(__pyx_k_MAX_VALUE), 0, 0, 1, 1}, {&__pyx_n_s_NSIndex, __pyx_k_NSIndex, sizeof(__pyx_k_NSIndex), 0, 0, 1, 1}, {&__pyx_n_s_NSKeyIterator, __pyx_k_NSKeyIterator, sizeof(__pyx_k_NSKeyIterator), 0, 0, 1, 1}, {&__pyx_n_s_StopIteration, __pyx_k_StopIteration, sizeof(__pyx_k_StopIteration), 0, 0, 1, 
1}, {&__pyx_n_s_TypeError, __pyx_k_TypeError, sizeof(__pyx_k_TypeError), 0, 0, 1, 1}, {&__pyx_n_s_ValueError, __pyx_k_ValueError, sizeof(__pyx_k_ValueError), 0, 0, 1, 1}, {&__pyx_kp_u_cache_sync_feed_failed, __pyx_k_cache_sync_feed_failed, sizeof(__pyx_k_cache_sync_feed_failed), 0, 1, 0, 0}, {&__pyx_kp_u_cache_sync_init_failed, __pyx_k_cache_sync_init_failed, sizeof(__pyx_k_cache_sync_init_failed), 0, 1, 0, 0}, {&__pyx_n_s_capacity, __pyx_k_capacity, sizeof(__pyx_k_capacity), 0, 0, 1, 1}, {&__pyx_n_s_chunks, __pyx_k_chunks, sizeof(__pyx_k_chunks), 0, 0, 1, 1}, {&__pyx_n_s_cline_in_traceback, __pyx_k_cline_in_traceback, sizeof(__pyx_k_cline_in_traceback), 0, 0, 1, 1}, {&__pyx_n_s_collections, __pyx_k_collections, sizeof(__pyx_k_collections), 0, 0, 1, 1}, {&__pyx_n_s_csize, __pyx_k_csize, sizeof(__pyx_k_csize), 0, 0, 1, 1}, {&__pyx_n_s_default, __pyx_k_default, sizeof(__pyx_k_default), 0, 0, 1, 1}, {&__pyx_n_s_enter, __pyx_k_enter, sizeof(__pyx_k_enter), 0, 0, 1, 1}, {&__pyx_n_s_exit, __pyx_k_exit, sizeof(__pyx_k_exit), 0, 0, 1, 1}, {&__pyx_n_s_getstate, __pyx_k_getstate, sizeof(__pyx_k_getstate), 0, 0, 1, 1}, {&__pyx_kp_u_hashindex_delete_failed, __pyx_k_hashindex_delete_failed, sizeof(__pyx_k_hashindex_delete_failed), 0, 1, 0, 0}, {&__pyx_kp_u_hashindex_init_failed, __pyx_k_hashindex_init_failed, sizeof(__pyx_k_hashindex_init_failed), 0, 1, 0, 0}, {&__pyx_kp_u_hashindex_read_returned_NULL_wit, __pyx_k_hashindex_read_returned_NULL_wit, sizeof(__pyx_k_hashindex_read_returned_NULL_wit), 0, 1, 0, 0}, {&__pyx_kp_u_hashindex_set_failed, __pyx_k_hashindex_set_failed, sizeof(__pyx_k_hashindex_set_failed), 0, 1, 0, 0}, {&__pyx_n_s_import, __pyx_k_import, sizeof(__pyx_k_import), 0, 0, 1, 1}, {&__pyx_kp_u_invalid_reference_count, __pyx_k_invalid_reference_count, sizeof(__pyx_k_invalid_reference_count), 0, 1, 0, 0}, {&__pyx_n_s_key, __pyx_k_key, sizeof(__pyx_k_key), 0, 0, 1, 1}, {&__pyx_n_s_key_size, __pyx_k_key_size, sizeof(__pyx_k_key_size), 0, 0, 1, 1}, {&__pyx_n_s_key_size_2, __pyx_k_key_size_2, sizeof(__pyx_k_key_size_2), 0, 0, 1, 1}, {&__pyx_n_s_locale, __pyx_k_locale, sizeof(__pyx_k_locale), 0, 0, 1, 1}, {&__pyx_n_s_main, __pyx_k_main, sizeof(__pyx_k_main), 0, 0, 1, 1}, {&__pyx_n_s_marker, __pyx_k_marker, sizeof(__pyx_k_marker), 0, 0, 1, 1}, {&__pyx_kp_u_maximum_number_of_segments_reach, __pyx_k_maximum_number_of_segments_reach, sizeof(__pyx_k_maximum_number_of_segments_reach), 0, 1, 0, 0}, {&__pyx_kp_u_maximum_number_of_versions_reach, __pyx_k_maximum_number_of_versions_reach, sizeof(__pyx_k_maximum_number_of_versions_reach), 0, 1, 0, 0}, {&__pyx_n_s_name, __pyx_k_name, sizeof(__pyx_k_name), 0, 0, 1, 1}, {&__pyx_n_s_namedtuple, __pyx_k_namedtuple, sizeof(__pyx_k_namedtuple), 0, 0, 1, 1}, {&__pyx_kp_s_no_default___reduce___due_to_non, __pyx_k_no_default___reduce___due_to_non, sizeof(__pyx_k_no_default___reduce___due_to_non), 0, 0, 1, 0}, {&__pyx_n_s_object, __pyx_k_object, sizeof(__pyx_k_object), 0, 0, 1, 1}, {&__pyx_n_s_open, __pyx_k_open, sizeof(__pyx_k_open), 0, 0, 1, 1}, {&__pyx_n_s_os, __pyx_k_os, sizeof(__pyx_k_os), 0, 0, 1, 1}, {&__pyx_n_s_path, __pyx_k_path, sizeof(__pyx_k_path), 0, 0, 1, 1}, {&__pyx_n_s_permit_compact, __pyx_k_permit_compact, sizeof(__pyx_k_permit_compact), 0, 0, 1, 1}, {&__pyx_n_s_pyx_vtable, __pyx_k_pyx_vtable, sizeof(__pyx_k_pyx_vtable), 0, 0, 1, 1}, {&__pyx_n_u_rb, __pyx_k_rb, sizeof(__pyx_k_rb), 0, 1, 0, 1}, {&__pyx_n_s_read, __pyx_k_read, sizeof(__pyx_k_read), 0, 0, 1, 1}, {&__pyx_n_s_reduce, __pyx_k_reduce, sizeof(__pyx_k_reduce), 0, 0, 1, 1}, 
{&__pyx_n_s_reduce_cython, __pyx_k_reduce_cython, sizeof(__pyx_k_reduce_cython), 0, 0, 1, 1}, {&__pyx_n_s_reduce_ex, __pyx_k_reduce_ex, sizeof(__pyx_k_reduce_ex), 0, 0, 1, 1}, {&__pyx_kp_u_refcount_size_csize, __pyx_k_refcount_size_csize, sizeof(__pyx_k_refcount_size_csize), 0, 1, 0, 0}, {&__pyx_n_s_refs, __pyx_k_refs, sizeof(__pyx_k_refs), 0, 0, 1, 1}, {&__pyx_n_s_setstate, __pyx_k_setstate, sizeof(__pyx_k_setstate), 0, 0, 1, 1}, {&__pyx_n_s_setstate_cython, __pyx_k_setstate_cython, sizeof(__pyx_k_setstate_cython), 0, 0, 1, 1}, {&__pyx_n_s_size, __pyx_k_size, sizeof(__pyx_k_size), 0, 0, 1, 1}, {&__pyx_kp_u_stats_against_key_contained_in_s, __pyx_k_stats_against_key_contained_in_s, sizeof(__pyx_k_stats_against_key_contained_in_s), 0, 1, 0, 0}, {&__pyx_n_s_test, __pyx_k_test, sizeof(__pyx_k_test), 0, 0, 1, 1}, {&__pyx_n_s_value, __pyx_k_value, sizeof(__pyx_k_value), 0, 0, 1, 1}, {&__pyx_n_s_value_size, __pyx_k_value_size, sizeof(__pyx_k_value_size), 0, 0, 1, 1}, {&__pyx_n_u_wb, __pyx_k_wb, sizeof(__pyx_k_wb), 0, 1, 0, 1}, {0, 0, 0, 0, 0, 0, 0} }; static CYTHON_SMALL_CODE int __Pyx_InitCachedBuiltins(void) { __pyx_builtin_object = __Pyx_GetBuiltinName(__pyx_n_s_object); if (!__pyx_builtin_object) __PYX_ERR(0, 61, __pyx_L1_error) __pyx_builtin_open = __Pyx_GetBuiltinName(__pyx_n_s_open); if (!__pyx_builtin_open) __PYX_ERR(0, 97, __pyx_L1_error) __pyx_builtin_KeyError = __Pyx_GetBuiltinName(__pyx_n_s_KeyError); if (!__pyx_builtin_KeyError) __PYX_ERR(0, 138, __pyx_L1_error) __pyx_builtin_TypeError = __Pyx_GetBuiltinName(__pyx_n_s_TypeError); if (!__pyx_builtin_TypeError) __PYX_ERR(1, 2, __pyx_L1_error) __pyx_builtin_IndexError = __Pyx_GetBuiltinName(__pyx_n_s_IndexError); if (!__pyx_builtin_IndexError) __PYX_ERR(0, 239, __pyx_L1_error) __pyx_builtin_StopIteration = __Pyx_GetBuiltinName(__pyx_n_s_StopIteration); if (!__pyx_builtin_StopIteration) __PYX_ERR(0, 261, __pyx_L1_error) __pyx_builtin_ValueError = __Pyx_GetBuiltinName(__pyx_n_s_ValueError); if (!__pyx_builtin_ValueError) __PYX_ERR(0, 408, __pyx_L1_error) return 0; __pyx_L1_error:; return -1; } static CYTHON_SMALL_CODE int __Pyx_InitCachedConstants(void) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__Pyx_InitCachedConstants", 0); /* "borg/hashindex.pyx":97 * if path: * if isinstance(path, (str, bytes)): * with open(path, 'rb') as fd: # <<<<<<<<<<<<<< * self.index = hashindex_read(fd, permit_compact) * else: */ __pyx_tuple_ = PyTuple_Pack(3, Py_None, Py_None, Py_None); if (unlikely(!__pyx_tuple_)) __PYX_ERR(0, 97, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple_); __Pyx_GIVEREF(__pyx_tuple_); /* "borg/hashindex.pyx":105 * self.index = hashindex_init(capacity, self.key_size, self.value_size) * if not self.index: * raise Exception('hashindex_init failed') # <<<<<<<<<<<<<< * * def __dealloc__(self): */ __pyx_tuple__2 = PyTuple_Pack(1, __pyx_kp_u_hashindex_init_failed); if (unlikely(!__pyx_tuple__2)) __PYX_ERR(0, 105, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__2); __Pyx_GIVEREF(__pyx_tuple__2); /* "borg/hashindex.pyx":140 * raise KeyError(key) * if rc == 0: * raise Exception('hashindex_delete failed') # <<<<<<<<<<<<<< * * def get(self, key, default=None): */ __pyx_tuple__3 = PyTuple_Pack(1, __pyx_kp_u_hashindex_delete_failed); if (unlikely(!__pyx_tuple__3)) __PYX_ERR(0, 140, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__3); __Pyx_GIVEREF(__pyx_tuple__3); /* "(tree fragment)":2 * def __reduce_cython__(self): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< * def __setstate_cython__(self, __pyx_state): * 
raise TypeError("no default __reduce__ due to non-trivial __cinit__") */ __pyx_tuple__5 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__5)) __PYX_ERR(1, 2, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__5); __Pyx_GIVEREF(__pyx_tuple__5); /* "(tree fragment)":4 * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< */ __pyx_tuple__6 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__6)) __PYX_ERR(1, 4, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__6); __Pyx_GIVEREF(__pyx_tuple__6); /* "borg/hashindex.pyx":188 * assert data.version <= _MAX_VALUE, "maximum number of versions reached" * if not PyBytes_CheckExact(value[1]) or PyBytes_GET_SIZE(value[1]) != 16: * raise TypeError("Expected bytes of length 16 for second value") # <<<<<<<<<<<<<< * memcpy(data.hash, PyBytes_AS_STRING(value[1]), 16) * data.version = _htole32(data.version) */ __pyx_tuple__7 = PyTuple_Pack(1, __pyx_kp_u_Expected_bytes_of_length_16_for); if (unlikely(!__pyx_tuple__7)) __PYX_ERR(0, 188, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__7); __Pyx_GIVEREF(__pyx_tuple__7); /* "borg/hashindex.pyx":192 * data.version = _htole32(data.version) * if not hashindex_set(self.index, key, &data): * raise Exception('hashindex_set failed') # <<<<<<<<<<<<<< * * def __contains__(self, key): */ __pyx_tuple__8 = PyTuple_Pack(1, __pyx_kp_u_hashindex_set_failed); if (unlikely(!__pyx_tuple__8)) __PYX_ERR(0, 192, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__8); __Pyx_GIVEREF(__pyx_tuple__8); /* "(tree fragment)":2 * def __reduce_cython__(self): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< * def __setstate_cython__(self, __pyx_state): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") */ __pyx_tuple__9 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__9)) __PYX_ERR(1, 2, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__9); __Pyx_GIVEREF(__pyx_tuple__9); /* "(tree fragment)":4 * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< */ __pyx_tuple__10 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__10)) __PYX_ERR(1, 4, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__10); __Pyx_GIVEREF(__pyx_tuple__10); /* "(tree fragment)":2 * def __reduce_cython__(self): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< * def __setstate_cython__(self, __pyx_state): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") */ __pyx_tuple__11 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__11)) __PYX_ERR(1, 2, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__11); __Pyx_GIVEREF(__pyx_tuple__11); /* "(tree fragment)":4 * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< */ __pyx_tuple__12 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__12)) __PYX_ERR(1, 4, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__12); __Pyx_GIVEREF(__pyx_tuple__12); /* "(tree fragment)":2 * def __reduce_cython__(self): * 
raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< * def __setstate_cython__(self, __pyx_state): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") */ __pyx_tuple__13 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__13)) __PYX_ERR(1, 2, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__13); __Pyx_GIVEREF(__pyx_tuple__13); /* "(tree fragment)":4 * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< */ __pyx_tuple__14 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__14)) __PYX_ERR(1, 4, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__14); __Pyx_GIVEREF(__pyx_tuple__14); /* "borg/hashindex.pyx":408 * master_values = hashindex_get(master, key) * if not master_values: * raise ValueError('stats_against: key contained in self but not in master_index.') # <<<<<<<<<<<<<< * our_refcount = _le32toh(our_values[0]) * chunk_size = _le32toh(master_values[1]) */ __pyx_tuple__15 = PyTuple_Pack(1, __pyx_kp_u_stats_against_key_contained_in_s); if (unlikely(!__pyx_tuple__15)) __PYX_ERR(0, 408, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__15); __Pyx_GIVEREF(__pyx_tuple__15); /* "(tree fragment)":2 * def __reduce_cython__(self): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< * def __setstate_cython__(self, __pyx_state): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") */ __pyx_tuple__16 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__16)) __PYX_ERR(1, 2, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__16); __Pyx_GIVEREF(__pyx_tuple__16); /* "(tree fragment)":4 * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< */ __pyx_tuple__17 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__17)) __PYX_ERR(1, 4, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__17); __Pyx_GIVEREF(__pyx_tuple__17); /* "(tree fragment)":2 * def __reduce_cython__(self): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< * def __setstate_cython__(self, __pyx_state): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") */ __pyx_tuple__18 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__18)) __PYX_ERR(1, 2, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__18); __Pyx_GIVEREF(__pyx_tuple__18); /* "(tree fragment)":4 * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< */ __pyx_tuple__19 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__19)) __PYX_ERR(1, 4, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__19); __Pyx_GIVEREF(__pyx_tuple__19); /* "borg/hashindex.pyx":516 * self.sync = cache_sync_init(self.chunks.index) * if not self.sync: * raise Exception('cache_sync_init failed') # <<<<<<<<<<<<<< * * def __dealloc__(self): */ __pyx_tuple__20 = PyTuple_Pack(1, __pyx_kp_u_cache_sync_init_failed); if (unlikely(!__pyx_tuple__20)) __PYX_ERR(0, 516, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__20); 
__Pyx_GIVEREF(__pyx_tuple__20); /* "(tree fragment)":2 * def __reduce_cython__(self): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< * def __setstate_cython__(self, __pyx_state): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") */ __pyx_tuple__21 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__21)) __PYX_ERR(1, 2, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__21); __Pyx_GIVEREF(__pyx_tuple__21); /* "(tree fragment)":4 * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< */ __pyx_tuple__22 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__22)) __PYX_ERR(1, 4, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__22); __Pyx_GIVEREF(__pyx_tuple__22); /* "borg/hashindex.pyx":272 * * * ChunkIndexEntry = namedtuple('ChunkIndexEntry', 'refcount size csize') # <<<<<<<<<<<<<< * * */ __pyx_tuple__23 = PyTuple_Pack(2, __pyx_n_u_ChunkIndexEntry, __pyx_kp_u_refcount_size_csize); if (unlikely(!__pyx_tuple__23)) __PYX_ERR(0, 272, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__23); __Pyx_GIVEREF(__pyx_tuple__23); __Pyx_RefNannyFinishContext(); return 0; __pyx_L1_error:; __Pyx_RefNannyFinishContext(); return -1; } static CYTHON_SMALL_CODE int __Pyx_InitGlobals(void) { if (__Pyx_InitStrings(__pyx_string_tab) < 0) __PYX_ERR(0, 1, __pyx_L1_error); __pyx_int_0 = PyInt_FromLong(0); if (unlikely(!__pyx_int_0)) __PYX_ERR(0, 1, __pyx_L1_error) __pyx_int_8 = PyInt_FromLong(8); if (unlikely(!__pyx_int_8)) __PYX_ERR(0, 1, __pyx_L1_error) __pyx_int_12 = PyInt_FromLong(12); if (unlikely(!__pyx_int_12)) __PYX_ERR(0, 1, __pyx_L1_error) __pyx_int_16 = PyInt_FromLong(16); if (unlikely(!__pyx_int_16)) __PYX_ERR(0, 1, __pyx_L1_error) __pyx_int_20 = PyInt_FromLong(20); if (unlikely(!__pyx_int_20)) __PYX_ERR(0, 1, __pyx_L1_error) __pyx_int_32 = PyInt_FromLong(32); if (unlikely(!__pyx_int_32)) __PYX_ERR(0, 1, __pyx_L1_error) __pyx_int_4294967295 = PyInt_FromString((char *)"4294967295", 0, 0); if (unlikely(!__pyx_int_4294967295)) __PYX_ERR(0, 1, __pyx_L1_error) return 0; __pyx_L1_error:; return -1; } static CYTHON_SMALL_CODE int __Pyx_modinit_global_init_code(void); /*proto*/ static CYTHON_SMALL_CODE int __Pyx_modinit_variable_export_code(void); /*proto*/ static CYTHON_SMALL_CODE int __Pyx_modinit_function_export_code(void); /*proto*/ static CYTHON_SMALL_CODE int __Pyx_modinit_type_init_code(void); /*proto*/ static CYTHON_SMALL_CODE int __Pyx_modinit_type_import_code(void); /*proto*/ static CYTHON_SMALL_CODE int __Pyx_modinit_variable_import_code(void); /*proto*/ static CYTHON_SMALL_CODE int __Pyx_modinit_function_import_code(void); /*proto*/ static int __Pyx_modinit_global_init_code(void) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__Pyx_modinit_global_init_code", 0); /*--- Global init code ---*/ __pyx_v_4borg_9hashindex__NoDefault = Py_None; Py_INCREF(Py_None); __Pyx_RefNannyFinishContext(); return 0; } static int __Pyx_modinit_variable_export_code(void) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__Pyx_modinit_variable_export_code", 0); /*--- Variable export code ---*/ __Pyx_RefNannyFinishContext(); return 0; } static int __Pyx_modinit_function_export_code(void) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__Pyx_modinit_function_export_code", 0); /*--- Function export code ---*/ __Pyx_RefNannyFinishContext(); 
return 0; } static int __Pyx_modinit_type_init_code(void) { __Pyx_RefNannyDeclarations int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__Pyx_modinit_type_init_code", 0); /*--- Type init code ---*/ if (PyType_Ready(&__pyx_type_4borg_9hashindex_IndexBase) < 0) __PYX_ERR(0, 84, __pyx_L1_error) #if PY_VERSION_HEX < 0x030800B1 __pyx_type_4borg_9hashindex_IndexBase.tp_print = 0; #endif if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_type_4borg_9hashindex_IndexBase.tp_dictoffset && __pyx_type_4borg_9hashindex_IndexBase.tp_getattro == PyObject_GenericGetAttr)) { __pyx_type_4borg_9hashindex_IndexBase.tp_getattro = __Pyx_PyObject_GenericGetAttr; } if (__Pyx_setup_reduce((PyObject*)&__pyx_type_4borg_9hashindex_IndexBase) < 0) __PYX_ERR(0, 84, __pyx_L1_error) __pyx_ptype_4borg_9hashindex_IndexBase = &__pyx_type_4borg_9hashindex_IndexBase; __pyx_type_4borg_9hashindex_FuseVersionsIndex.tp_base = __pyx_ptype_4borg_9hashindex_IndexBase; if (PyType_Ready(&__pyx_type_4borg_9hashindex_FuseVersionsIndex) < 0) __PYX_ERR(0, 169, __pyx_L1_error) #if PY_VERSION_HEX < 0x030800B1 __pyx_type_4borg_9hashindex_FuseVersionsIndex.tp_print = 0; #endif if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_type_4borg_9hashindex_FuseVersionsIndex.tp_dictoffset && __pyx_type_4borg_9hashindex_FuseVersionsIndex.tp_getattro == PyObject_GenericGetAttr)) { __pyx_type_4borg_9hashindex_FuseVersionsIndex.tp_getattro = __Pyx_PyObject_GenericGetAttr; } if (PyObject_SetAttr(__pyx_m, __pyx_n_s_FuseVersionsIndex, (PyObject *)&__pyx_type_4borg_9hashindex_FuseVersionsIndex) < 0) __PYX_ERR(0, 169, __pyx_L1_error) if (__Pyx_setup_reduce((PyObject*)&__pyx_type_4borg_9hashindex_FuseVersionsIndex) < 0) __PYX_ERR(0, 169, __pyx_L1_error) __pyx_ptype_4borg_9hashindex_FuseVersionsIndex = &__pyx_type_4borg_9hashindex_FuseVersionsIndex; __pyx_type_4borg_9hashindex_NSIndex.tp_base = __pyx_ptype_4borg_9hashindex_IndexBase; if (PyType_Ready(&__pyx_type_4borg_9hashindex_NSIndex) < 0) __PYX_ERR(0, 199, __pyx_L1_error) #if PY_VERSION_HEX < 0x030800B1 __pyx_type_4borg_9hashindex_NSIndex.tp_print = 0; #endif if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_type_4borg_9hashindex_NSIndex.tp_dictoffset && __pyx_type_4borg_9hashindex_NSIndex.tp_getattro == PyObject_GenericGetAttr)) { __pyx_type_4borg_9hashindex_NSIndex.tp_getattro = __Pyx_PyObject_GenericGetAttr; } if (PyObject_SetAttr(__pyx_m, __pyx_n_s_NSIndex, (PyObject *)&__pyx_type_4borg_9hashindex_NSIndex) < 0) __PYX_ERR(0, 199, __pyx_L1_error) if (__Pyx_setup_reduce((PyObject*)&__pyx_type_4borg_9hashindex_NSIndex) < 0) __PYX_ERR(0, 199, __pyx_L1_error) __pyx_ptype_4borg_9hashindex_NSIndex = &__pyx_type_4borg_9hashindex_NSIndex; if (PyType_Ready(&__pyx_type_4borg_9hashindex_NSKeyIterator) < 0) __PYX_ERR(0, 244, __pyx_L1_error) #if PY_VERSION_HEX < 0x030800B1 __pyx_type_4borg_9hashindex_NSKeyIterator.tp_print = 0; #endif if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_type_4borg_9hashindex_NSKeyIterator.tp_dictoffset && __pyx_type_4borg_9hashindex_NSKeyIterator.tp_getattro == PyObject_GenericGetAttr)) { __pyx_type_4borg_9hashindex_NSKeyIterator.tp_getattro = __Pyx_PyObject_GenericGetAttr; } if (PyObject_SetAttr(__pyx_m, __pyx_n_s_NSKeyIterator, (PyObject *)&__pyx_type_4borg_9hashindex_NSKeyIterator) < 0) __PYX_ERR(0, 244, __pyx_L1_error) if (__Pyx_setup_reduce((PyObject*)&__pyx_type_4borg_9hashindex_NSKeyIterator) < 0) __PYX_ERR(0, 244, __pyx_L1_error) 
__pyx_ptype_4borg_9hashindex_NSKeyIterator = &__pyx_type_4borg_9hashindex_NSKeyIterator; __pyx_vtabptr_4borg_9hashindex_ChunkIndex = &__pyx_vtable_4borg_9hashindex_ChunkIndex; __pyx_vtable_4borg_9hashindex_ChunkIndex._add = (PyObject *(*)(struct __pyx_obj_4borg_9hashindex_ChunkIndex *, void *, uint32_t *))__pyx_f_4borg_9hashindex_10ChunkIndex__add; __pyx_type_4borg_9hashindex_ChunkIndex.tp_base = __pyx_ptype_4borg_9hashindex_IndexBase; if (PyType_Ready(&__pyx_type_4borg_9hashindex_ChunkIndex) < 0) __PYX_ERR(0, 275, __pyx_L1_error) #if PY_VERSION_HEX < 0x030800B1 __pyx_type_4borg_9hashindex_ChunkIndex.tp_print = 0; #endif if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_type_4borg_9hashindex_ChunkIndex.tp_dictoffset && __pyx_type_4borg_9hashindex_ChunkIndex.tp_getattro == PyObject_GenericGetAttr)) { __pyx_type_4borg_9hashindex_ChunkIndex.tp_getattro = __Pyx_PyObject_GenericGetAttr; } if (__Pyx_SetVtable(__pyx_type_4borg_9hashindex_ChunkIndex.tp_dict, __pyx_vtabptr_4borg_9hashindex_ChunkIndex) < 0) __PYX_ERR(0, 275, __pyx_L1_error) if (PyObject_SetAttr(__pyx_m, __pyx_n_s_ChunkIndex, (PyObject *)&__pyx_type_4borg_9hashindex_ChunkIndex) < 0) __PYX_ERR(0, 275, __pyx_L1_error) if (__Pyx_setup_reduce((PyObject*)&__pyx_type_4borg_9hashindex_ChunkIndex) < 0) __PYX_ERR(0, 275, __pyx_L1_error) __pyx_ptype_4borg_9hashindex_ChunkIndex = &__pyx_type_4borg_9hashindex_ChunkIndex; if (PyType_Ready(&__pyx_type_4borg_9hashindex_ChunkKeyIterator) < 0) __PYX_ERR(0, 474, __pyx_L1_error) #if PY_VERSION_HEX < 0x030800B1 __pyx_type_4borg_9hashindex_ChunkKeyIterator.tp_print = 0; #endif if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_type_4borg_9hashindex_ChunkKeyIterator.tp_dictoffset && __pyx_type_4borg_9hashindex_ChunkKeyIterator.tp_getattro == PyObject_GenericGetAttr)) { __pyx_type_4borg_9hashindex_ChunkKeyIterator.tp_getattro = __Pyx_PyObject_GenericGetAttr; } if (PyObject_SetAttr(__pyx_m, __pyx_n_s_ChunkKeyIterator, (PyObject *)&__pyx_type_4borg_9hashindex_ChunkKeyIterator) < 0) __PYX_ERR(0, 474, __pyx_L1_error) if (__Pyx_setup_reduce((PyObject*)&__pyx_type_4borg_9hashindex_ChunkKeyIterator) < 0) __PYX_ERR(0, 474, __pyx_L1_error) __pyx_ptype_4borg_9hashindex_ChunkKeyIterator = &__pyx_type_4borg_9hashindex_ChunkKeyIterator; if (PyType_Ready(&__pyx_type_4borg_9hashindex_CacheSynchronizer) < 0) __PYX_ERR(0, 508, __pyx_L1_error) #if PY_VERSION_HEX < 0x030800B1 __pyx_type_4borg_9hashindex_CacheSynchronizer.tp_print = 0; #endif if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_type_4borg_9hashindex_CacheSynchronizer.tp_dictoffset && __pyx_type_4borg_9hashindex_CacheSynchronizer.tp_getattro == PyObject_GenericGetAttr)) { __pyx_type_4borg_9hashindex_CacheSynchronizer.tp_getattro = __Pyx_PyObject_GenericGetAttr; } if (PyObject_SetAttr(__pyx_m, __pyx_n_s_CacheSynchronizer, (PyObject *)&__pyx_type_4borg_9hashindex_CacheSynchronizer) < 0) __PYX_ERR(0, 508, __pyx_L1_error) if (__Pyx_setup_reduce((PyObject*)&__pyx_type_4borg_9hashindex_CacheSynchronizer) < 0) __PYX_ERR(0, 508, __pyx_L1_error) __pyx_ptype_4borg_9hashindex_CacheSynchronizer = &__pyx_type_4borg_9hashindex_CacheSynchronizer; __Pyx_RefNannyFinishContext(); return 0; __pyx_L1_error:; __Pyx_RefNannyFinishContext(); return -1; } static int __Pyx_modinit_type_import_code(void) { __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__Pyx_modinit_type_import_code", 0); /*--- Type import 
code ---*/ __pyx_t_1 = PyImport_ImportModule(__Pyx_BUILTIN_MODULE_NAME); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 9, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_ptype_7cpython_4type_type = __Pyx_ImportType(__pyx_t_1, __Pyx_BUILTIN_MODULE_NAME, "type", #if defined(PYPY_VERSION_NUM) && PYPY_VERSION_NUM < 0x050B0000 sizeof(PyTypeObject), #else sizeof(PyHeapTypeObject), #endif __Pyx_ImportType_CheckSize_Warn); if (!__pyx_ptype_7cpython_4type_type) __PYX_ERR(2, 9, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_RefNannyFinishContext(); return 0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_RefNannyFinishContext(); return -1; } static int __Pyx_modinit_variable_import_code(void) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__Pyx_modinit_variable_import_code", 0); /*--- Variable import code ---*/ __Pyx_RefNannyFinishContext(); return 0; } static int __Pyx_modinit_function_import_code(void) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__Pyx_modinit_function_import_code", 0); /*--- Function import code ---*/ __Pyx_RefNannyFinishContext(); return 0; } #ifndef CYTHON_NO_PYINIT_EXPORT #define __Pyx_PyMODINIT_FUNC PyMODINIT_FUNC #elif PY_MAJOR_VERSION < 3 #ifdef __cplusplus #define __Pyx_PyMODINIT_FUNC extern "C" void #else #define __Pyx_PyMODINIT_FUNC void #endif #else #ifdef __cplusplus #define __Pyx_PyMODINIT_FUNC extern "C" PyObject * #else #define __Pyx_PyMODINIT_FUNC PyObject * #endif #endif #if PY_MAJOR_VERSION < 3 __Pyx_PyMODINIT_FUNC inithashindex(void) CYTHON_SMALL_CODE; /*proto*/ __Pyx_PyMODINIT_FUNC inithashindex(void) #else __Pyx_PyMODINIT_FUNC PyInit_hashindex(void) CYTHON_SMALL_CODE; /*proto*/ __Pyx_PyMODINIT_FUNC PyInit_hashindex(void) #if CYTHON_PEP489_MULTI_PHASE_INIT { return PyModuleDef_Init(&__pyx_moduledef); } static CYTHON_SMALL_CODE int __Pyx_check_single_interpreter(void) { #if PY_VERSION_HEX >= 0x030700A1 static PY_INT64_T main_interpreter_id = -1; PY_INT64_T current_id = PyInterpreterState_GetID(PyThreadState_Get()->interp); if (main_interpreter_id == -1) { main_interpreter_id = current_id; return (unlikely(current_id == -1)) ? 
-1 : 0; } else if (unlikely(main_interpreter_id != current_id)) #else static PyInterpreterState *main_interpreter = NULL; PyInterpreterState *current_interpreter = PyThreadState_Get()->interp; if (!main_interpreter) { main_interpreter = current_interpreter; } else if (unlikely(main_interpreter != current_interpreter)) #endif { PyErr_SetString( PyExc_ImportError, "Interpreter change detected - this module can only be loaded into one interpreter per process."); return -1; } return 0; } static CYTHON_SMALL_CODE int __Pyx_copy_spec_to_module(PyObject *spec, PyObject *moddict, const char* from_name, const char* to_name, int allow_none) { PyObject *value = PyObject_GetAttrString(spec, from_name); int result = 0; if (likely(value)) { if (allow_none || value != Py_None) { result = PyDict_SetItemString(moddict, to_name, value); } Py_DECREF(value); } else if (PyErr_ExceptionMatches(PyExc_AttributeError)) { PyErr_Clear(); } else { result = -1; } return result; } static CYTHON_SMALL_CODE PyObject* __pyx_pymod_create(PyObject *spec, CYTHON_UNUSED PyModuleDef *def) { PyObject *module = NULL, *moddict, *modname; if (__Pyx_check_single_interpreter()) return NULL; if (__pyx_m) return __Pyx_NewRef(__pyx_m); modname = PyObject_GetAttrString(spec, "name"); if (unlikely(!modname)) goto bad; module = PyModule_NewObject(modname); Py_DECREF(modname); if (unlikely(!module)) goto bad; moddict = PyModule_GetDict(module); if (unlikely(!moddict)) goto bad; if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "loader", "__loader__", 1) < 0)) goto bad; if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "origin", "__file__", 1) < 0)) goto bad; if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "parent", "__package__", 1) < 0)) goto bad; if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "submodule_search_locations", "__path__", 0) < 0)) goto bad; return module; bad: Py_XDECREF(module); return NULL; } static CYTHON_SMALL_CODE int __pyx_pymod_exec_hashindex(PyObject *__pyx_pyinit_module) #endif #endif { PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; int __pyx_t_3; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannyDeclarations #if CYTHON_PEP489_MULTI_PHASE_INIT if (__pyx_m) { if (__pyx_m == __pyx_pyinit_module) return 0; PyErr_SetString(PyExc_RuntimeError, "Module 'hashindex' has already been imported. 
Re-initialisation is not supported."); return -1; } #elif PY_MAJOR_VERSION >= 3 if (__pyx_m) return __Pyx_NewRef(__pyx_m); #endif #if CYTHON_REFNANNY __Pyx_RefNanny = __Pyx_RefNannyImportAPI("refnanny"); if (!__Pyx_RefNanny) { PyErr_Clear(); __Pyx_RefNanny = __Pyx_RefNannyImportAPI("Cython.Runtime.refnanny"); if (!__Pyx_RefNanny) Py_FatalError("failed to import 'refnanny' module"); } #endif __Pyx_RefNannySetupContext("__Pyx_PyMODINIT_FUNC PyInit_hashindex(void)", 0); if (__Pyx_check_binary_version() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #ifdef __Pxy_PyFrame_Initialize_Offsets __Pxy_PyFrame_Initialize_Offsets(); #endif __pyx_empty_tuple = PyTuple_New(0); if (unlikely(!__pyx_empty_tuple)) __PYX_ERR(0, 1, __pyx_L1_error) __pyx_empty_bytes = PyBytes_FromStringAndSize("", 0); if (unlikely(!__pyx_empty_bytes)) __PYX_ERR(0, 1, __pyx_L1_error) __pyx_empty_unicode = PyUnicode_FromStringAndSize("", 0); if (unlikely(!__pyx_empty_unicode)) __PYX_ERR(0, 1, __pyx_L1_error) #ifdef __Pyx_CyFunction_USED if (__pyx_CyFunction_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif #ifdef __Pyx_FusedFunction_USED if (__pyx_FusedFunction_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif #ifdef __Pyx_Coroutine_USED if (__pyx_Coroutine_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif #ifdef __Pyx_Generator_USED if (__pyx_Generator_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif #ifdef __Pyx_AsyncGen_USED if (__pyx_AsyncGen_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif #ifdef __Pyx_StopAsyncIteration_USED if (__pyx_StopAsyncIteration_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif /*--- Library function declarations ---*/ /*--- Threads initialization code ---*/ #if defined(__PYX_FORCE_INIT_THREADS) && __PYX_FORCE_INIT_THREADS #ifdef WITH_THREAD /* Python build with threading support? */ PyEval_InitThreads(); #endif #endif /*--- Module creation code ---*/ #if CYTHON_PEP489_MULTI_PHASE_INIT __pyx_m = __pyx_pyinit_module; Py_INCREF(__pyx_m); #else #if PY_MAJOR_VERSION < 3 __pyx_m = Py_InitModule4("hashindex", __pyx_methods, 0, 0, PYTHON_API_VERSION); Py_XINCREF(__pyx_m); #else __pyx_m = PyModule_Create(&__pyx_moduledef); #endif if (unlikely(!__pyx_m)) __PYX_ERR(0, 1, __pyx_L1_error) #endif __pyx_d = PyModule_GetDict(__pyx_m); if (unlikely(!__pyx_d)) __PYX_ERR(0, 1, __pyx_L1_error) Py_INCREF(__pyx_d); __pyx_b = PyImport_AddModule(__Pyx_BUILTIN_MODULE_NAME); if (unlikely(!__pyx_b)) __PYX_ERR(0, 1, __pyx_L1_error) Py_INCREF(__pyx_b); __pyx_cython_runtime = PyImport_AddModule((char *) "cython_runtime"); if (unlikely(!__pyx_cython_runtime)) __PYX_ERR(0, 1, __pyx_L1_error) Py_INCREF(__pyx_cython_runtime); if (PyObject_SetAttrString(__pyx_m, "__builtins__", __pyx_b) < 0) __PYX_ERR(0, 1, __pyx_L1_error); /*--- Initialize various global constants etc. 
---*/ if (__Pyx_InitGlobals() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #if PY_MAJOR_VERSION < 3 && (__PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT) if (__Pyx_init_sys_getdefaultencoding_params() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif if (__pyx_module_is_main_borg__hashindex) { if (PyObject_SetAttr(__pyx_m, __pyx_n_s_name, __pyx_n_s_main) < 0) __PYX_ERR(0, 1, __pyx_L1_error) } #if PY_MAJOR_VERSION >= 3 { PyObject *modules = PyImport_GetModuleDict(); if (unlikely(!modules)) __PYX_ERR(0, 1, __pyx_L1_error) if (!PyDict_GetItemString(modules, "borg.hashindex")) { if (unlikely(PyDict_SetItemString(modules, "borg.hashindex", __pyx_m) < 0)) __PYX_ERR(0, 1, __pyx_L1_error) } } #endif /*--- Builtin init code ---*/ if (__Pyx_InitCachedBuiltins() < 0) __PYX_ERR(0, 1, __pyx_L1_error) /*--- Constants init code ---*/ if (__Pyx_InitCachedConstants() < 0) __PYX_ERR(0, 1, __pyx_L1_error) /*--- Global type/function init code ---*/ (void)__Pyx_modinit_global_init_code(); (void)__Pyx_modinit_variable_export_code(); (void)__Pyx_modinit_function_export_code(); if (unlikely(__Pyx_modinit_type_init_code() < 0)) __PYX_ERR(0, 1, __pyx_L1_error) if (unlikely(__Pyx_modinit_type_import_code() < 0)) __PYX_ERR(0, 1, __pyx_L1_error) (void)__Pyx_modinit_variable_import_code(); (void)__Pyx_modinit_function_import_code(); /*--- Execution code ---*/ #if defined(__Pyx_Generator_USED) || defined(__Pyx_Coroutine_USED) if (__Pyx_patch_abc() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif /* "borg/hashindex.pyx":3 * # cython: language_level=3 * * from collections import namedtuple # <<<<<<<<<<<<<< * import locale * import os */ __pyx_t_1 = PyList_New(1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_INCREF(__pyx_n_s_namedtuple); __Pyx_GIVEREF(__pyx_n_s_namedtuple); PyList_SET_ITEM(__pyx_t_1, 0, __pyx_n_s_namedtuple); __pyx_t_2 = __Pyx_Import(__pyx_n_s_collections, __pyx_t_1, 0); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 3, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = __Pyx_ImportFrom(__pyx_t_2, __pyx_n_s_namedtuple); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); if (PyDict_SetItem(__pyx_d, __pyx_n_s_namedtuple, __pyx_t_1) < 0) __PYX_ERR(0, 3, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "borg/hashindex.pyx":4 * * from collections import namedtuple * import locale # <<<<<<<<<<<<<< * import os * */ __pyx_t_2 = __Pyx_Import(__pyx_n_s_locale, 0, 0); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 4, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); if (PyDict_SetItem(__pyx_d, __pyx_n_s_locale, __pyx_t_2) < 0) __PYX_ERR(0, 4, __pyx_L1_error) __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "borg/hashindex.pyx":5 * from collections import namedtuple * import locale * import os # <<<<<<<<<<<<<< * * cimport cython */ __pyx_t_2 = __Pyx_Import(__pyx_n_s_os, 0, 0); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 5, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); if (PyDict_SetItem(__pyx_d, __pyx_n_s_os, __pyx_t_2) < 0) __PYX_ERR(0, 5, __pyx_L1_error) __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "borg/hashindex.pyx":15 * from cpython.bytes cimport PyBytes_FromStringAndSize, PyBytes_CheckExact, PyBytes_GET_SIZE, PyBytes_AS_STRING * * API_VERSION = '1.1_07' # <<<<<<<<<<<<<< * * */ if (PyDict_SetItem(__pyx_d, __pyx_n_s_API_VERSION, __pyx_kp_u_1_1_07) < 0) __PYX_ERR(0, 15, __pyx_L1_error) /* "borg/hashindex.pyx":61 * * * cdef _NoDefault = object() # <<<<<<<<<<<<<< * * """ */ 
__pyx_t_2 = __Pyx_PyObject_CallNoArg(__pyx_builtin_object); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 61, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_XGOTREF(__pyx_v_4borg_9hashindex__NoDefault); __Pyx_DECREF_SET(__pyx_v_4borg_9hashindex__NoDefault, __pyx_t_2); __Pyx_GIVEREF(__pyx_t_2); __pyx_t_2 = 0; /* "borg/hashindex.pyx":78 * """ * * assert UINT32_MAX == 2**32-1 # <<<<<<<<<<<<<< * * assert _MAX_VALUE % 2 == 1 */ #ifndef CYTHON_WITHOUT_ASSERTIONS if (unlikely(!Py_OptimizeFlag)) { __pyx_t_2 = __Pyx_PyInt_From_uint32_t(UINT32_MAX); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 78, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_1 = PyObject_RichCompare(__pyx_t_2, __pyx_int_4294967295, Py_EQ); __Pyx_XGOTREF(__pyx_t_1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 78, __pyx_L1_error) __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_3 = __Pyx_PyObject_IsTrue(__pyx_t_1); if (unlikely(__pyx_t_3 < 0)) __PYX_ERR(0, 78, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; if (unlikely(!__pyx_t_3)) { PyErr_SetNone(PyExc_AssertionError); __PYX_ERR(0, 78, __pyx_L1_error) } } #endif /* "borg/hashindex.pyx":80 * assert UINT32_MAX == 2**32-1 * * assert _MAX_VALUE % 2 == 1 # <<<<<<<<<<<<<< * * */ #ifndef CYTHON_WITHOUT_ASSERTIONS if (unlikely(!Py_OptimizeFlag)) { if (unlikely(!((__Pyx_mod_long(_MAX_VALUE, 2) == 1) != 0))) { PyErr_SetNone(PyExc_AssertionError); __PYX_ERR(0, 80, __pyx_L1_error) } } #endif /* "borg/hashindex.pyx":88 * cdef int key_size * * _key_size = 32 # <<<<<<<<<<<<<< * * MAX_LOAD_FACTOR = HASH_MAX_LOAD */ if (PyDict_SetItem((PyObject *)__pyx_ptype_4borg_9hashindex_IndexBase->tp_dict, __pyx_n_s_key_size, __pyx_int_32) < 0) __PYX_ERR(0, 88, __pyx_L1_error) PyType_Modified(__pyx_ptype_4borg_9hashindex_IndexBase); /* "borg/hashindex.pyx":90 * _key_size = 32 * * MAX_LOAD_FACTOR = HASH_MAX_LOAD # <<<<<<<<<<<<<< * MAX_VALUE = _MAX_VALUE * */ __pyx_t_1 = PyFloat_FromDouble(HASH_MAX_LOAD); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 90, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); if (PyDict_SetItem((PyObject *)__pyx_ptype_4borg_9hashindex_IndexBase->tp_dict, __pyx_n_s_MAX_LOAD_FACTOR, __pyx_t_1) < 0) __PYX_ERR(0, 90, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; PyType_Modified(__pyx_ptype_4borg_9hashindex_IndexBase); /* "borg/hashindex.pyx":91 * * MAX_LOAD_FACTOR = HASH_MAX_LOAD * MAX_VALUE = _MAX_VALUE # <<<<<<<<<<<<<< * * def __cinit__(self, capacity=0, path=None, permit_compact=False): */ __pyx_t_1 = __Pyx_PyInt_From_uint32_t(_MAX_VALUE); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 91, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); if (PyDict_SetItem((PyObject *)__pyx_ptype_4borg_9hashindex_IndexBase->tp_dict, __pyx_n_s_MAX_VALUE, __pyx_t_1) < 0) __PYX_ERR(0, 91, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; PyType_Modified(__pyx_ptype_4borg_9hashindex_IndexBase); /* "borg/hashindex.pyx":112 * * @classmethod * def read(cls, path, permit_compact=False): # <<<<<<<<<<<<<< * return cls(path=path, permit_compact=permit_compact) * */ __Pyx_GetNameInClass(__pyx_t_1, (PyObject *)__pyx_ptype_4borg_9hashindex_IndexBase, __pyx_n_s_read); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 112, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); /* "borg/hashindex.pyx":111 * hashindex_free(self.index) * * @classmethod # <<<<<<<<<<<<<< * def read(cls, path, permit_compact=False): * return cls(path=path, permit_compact=permit_compact) */ __pyx_t_2 = __Pyx_Method_ClassMethod(__pyx_t_1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 111, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; if (PyDict_SetItem((PyObject 
*)__pyx_ptype_4borg_9hashindex_IndexBase->tp_dict, __pyx_n_s_read, __pyx_t_2) < 0) __PYX_ERR(0, 112, __pyx_L1_error) __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; PyType_Modified(__pyx_ptype_4borg_9hashindex_IndexBase); /* "borg/hashindex.pyx":148 * return default * * def pop(self, key, default=_NoDefault): # <<<<<<<<<<<<<< * try: * value = self[key] */ __Pyx_INCREF(__pyx_v_4borg_9hashindex__NoDefault); __pyx_k__4 = __pyx_v_4borg_9hashindex__NoDefault; __Pyx_GIVEREF(__pyx_v_4borg_9hashindex__NoDefault); /* "borg/hashindex.pyx":171 * cdef class FuseVersionsIndex(IndexBase): * # 4 byte version + 16 byte file contents hash * value_size = 20 # <<<<<<<<<<<<<< * _key_size = 16 * */ if (PyDict_SetItem((PyObject *)__pyx_ptype_4borg_9hashindex_FuseVersionsIndex->tp_dict, __pyx_n_s_value_size, __pyx_int_20) < 0) __PYX_ERR(0, 171, __pyx_L1_error) PyType_Modified(__pyx_ptype_4borg_9hashindex_FuseVersionsIndex); /* "borg/hashindex.pyx":172 * # 4 byte version + 16 byte file contents hash * value_size = 20 * _key_size = 16 # <<<<<<<<<<<<<< * * def __getitem__(self, key): */ if (PyDict_SetItem((PyObject *)__pyx_ptype_4borg_9hashindex_FuseVersionsIndex->tp_dict, __pyx_n_s_key_size, __pyx_int_16) < 0) __PYX_ERR(0, 172, __pyx_L1_error) PyType_Modified(__pyx_ptype_4borg_9hashindex_FuseVersionsIndex); /* "borg/hashindex.pyx":201 * cdef class NSIndex(IndexBase): * * value_size = 8 # <<<<<<<<<<<<<< * * def __getitem__(self, key): */ if (PyDict_SetItem((PyObject *)__pyx_ptype_4borg_9hashindex_NSIndex->tp_dict, __pyx_n_s_value_size, __pyx_int_8) < 0) __PYX_ERR(0, 201, __pyx_L1_error) PyType_Modified(__pyx_ptype_4borg_9hashindex_NSIndex); /* "borg/hashindex.pyx":272 * * * ChunkIndexEntry = namedtuple('ChunkIndexEntry', 'refcount size csize') # <<<<<<<<<<<<<< * * */ __Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_n_s_namedtuple); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 272, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_2, __pyx_tuple__23, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 272, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; if (PyDict_SetItem(__pyx_d, __pyx_n_s_ChunkIndexEntry, __pyx_t_1) < 0) __PYX_ERR(0, 272, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "borg/hashindex.pyx":292 * """ * * value_size = 12 # <<<<<<<<<<<<<< * * def __getitem__(self, key): */ if (PyDict_SetItem((PyObject *)__pyx_ptype_4borg_9hashindex_ChunkIndex->tp_dict, __pyx_n_s_value_size, __pyx_int_12) < 0) __PYX_ERR(0, 292, __pyx_L1_error) PyType_Modified(__pyx_ptype_4borg_9hashindex_ChunkIndex); /* "borg/hashindex.pyx":1 * # cython: language_level=3 # <<<<<<<<<<<<<< * * from collections import namedtuple */ __pyx_t_1 = __Pyx_PyDict_NewPresized(0); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); if (PyDict_SetItem(__pyx_d, __pyx_n_s_test, __pyx_t_1) < 0) __PYX_ERR(0, 1, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /*--- Wrapped vars code ---*/ goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); if (__pyx_m) { if (__pyx_d) { __Pyx_AddTraceback("init borg.hashindex", __pyx_clineno, __pyx_lineno, __pyx_filename); } Py_CLEAR(__pyx_m); } else if (!PyErr_Occurred()) { PyErr_SetString(PyExc_ImportError, "init borg.hashindex"); } __pyx_L0:; __Pyx_RefNannyFinishContext(); #if CYTHON_PEP489_MULTI_PHASE_INIT return (__pyx_m != NULL) ? 
0 : -1; #elif PY_MAJOR_VERSION >= 3 return __pyx_m; #else return; #endif } /* --- Runtime support code --- */ /* Refnanny */ #if CYTHON_REFNANNY static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname) { PyObject *m = NULL, *p = NULL; void *r = NULL; m = PyImport_ImportModule(modname); if (!m) goto end; p = PyObject_GetAttrString(m, "RefNannyAPI"); if (!p) goto end; r = PyLong_AsVoidPtr(p); end: Py_XDECREF(p); Py_XDECREF(m); return (__Pyx_RefNannyAPIStruct *)r; } #endif /* PyObjectGetAttrStr */ #if CYTHON_USE_TYPE_SLOTS static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStr(PyObject* obj, PyObject* attr_name) { PyTypeObject* tp = Py_TYPE(obj); if (likely(tp->tp_getattro)) return tp->tp_getattro(obj, attr_name); #if PY_MAJOR_VERSION < 3 if (likely(tp->tp_getattr)) return tp->tp_getattr(obj, PyString_AS_STRING(attr_name)); #endif return PyObject_GetAttr(obj, attr_name); } #endif /* GetBuiltinName */ static PyObject *__Pyx_GetBuiltinName(PyObject *name) { PyObject* result = __Pyx_PyObject_GetAttrStr(__pyx_b, name); if (unlikely(!result)) { PyErr_Format(PyExc_NameError, #if PY_MAJOR_VERSION >= 3 "name '%U' is not defined", name); #else "name '%.200s' is not defined", PyString_AS_STRING(name)); #endif } return result; } /* RaiseDoubleKeywords */ static void __Pyx_RaiseDoubleKeywordsError( const char* func_name, PyObject* kw_name) { PyErr_Format(PyExc_TypeError, #if PY_MAJOR_VERSION >= 3 "%s() got multiple values for keyword argument '%U'", func_name, kw_name); #else "%s() got multiple values for keyword argument '%s'", func_name, PyString_AsString(kw_name)); #endif } /* ParseKeywords */ static int __Pyx_ParseOptionalKeywords( PyObject *kwds, PyObject **argnames[], PyObject *kwds2, PyObject *values[], Py_ssize_t num_pos_args, const char* function_name) { PyObject *key = 0, *value = 0; Py_ssize_t pos = 0; PyObject*** name; PyObject*** first_kw_arg = argnames + num_pos_args; while (PyDict_Next(kwds, &pos, &key, &value)) { name = first_kw_arg; while (*name && (**name != key)) name++; if (*name) { values[name-argnames] = value; continue; } name = first_kw_arg; #if PY_MAJOR_VERSION < 3 if (likely(PyString_Check(key))) { while (*name) { if ((CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**name) == PyString_GET_SIZE(key)) && _PyString_Eq(**name, key)) { values[name-argnames] = value; break; } name++; } if (*name) continue; else { PyObject*** argname = argnames; while (argname != first_kw_arg) { if ((**argname == key) || ( (CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**argname) == PyString_GET_SIZE(key)) && _PyString_Eq(**argname, key))) { goto arg_passed_twice; } argname++; } } } else #endif if (likely(PyUnicode_Check(key))) { while (*name) { int cmp = (**name == key) ? 0 : #if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3 (__Pyx_PyUnicode_GET_LENGTH(**name) != __Pyx_PyUnicode_GET_LENGTH(key)) ? 1 : #endif PyUnicode_Compare(**name, key); if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad; if (cmp == 0) { values[name-argnames] = value; break; } name++; } if (*name) continue; else { PyObject*** argname = argnames; while (argname != first_kw_arg) { int cmp = (**argname == key) ? 0 : #if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3 (__Pyx_PyUnicode_GET_LENGTH(**argname) != __Pyx_PyUnicode_GET_LENGTH(key)) ? 
1 : #endif PyUnicode_Compare(**argname, key); if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad; if (cmp == 0) goto arg_passed_twice; argname++; } } } else goto invalid_keyword_type; if (kwds2) { if (unlikely(PyDict_SetItem(kwds2, key, value))) goto bad; } else { goto invalid_keyword; } } return 0; arg_passed_twice: __Pyx_RaiseDoubleKeywordsError(function_name, key); goto bad; invalid_keyword_type: PyErr_Format(PyExc_TypeError, "%.200s() keywords must be strings", function_name); goto bad; invalid_keyword: PyErr_Format(PyExc_TypeError, #if PY_MAJOR_VERSION < 3 "%.200s() got an unexpected keyword argument '%.200s'", function_name, PyString_AsString(key)); #else "%s() got an unexpected keyword argument '%U'", function_name, key); #endif bad: return -1; } /* RaiseArgTupleInvalid */ static void __Pyx_RaiseArgtupleInvalid( const char* func_name, int exact, Py_ssize_t num_min, Py_ssize_t num_max, Py_ssize_t num_found) { Py_ssize_t num_expected; const char *more_or_less; if (num_found < num_min) { num_expected = num_min; more_or_less = "at least"; } else { num_expected = num_max; more_or_less = "at most"; } if (exact) { more_or_less = "exactly"; } PyErr_Format(PyExc_TypeError, "%.200s() takes %.8s %" CYTHON_FORMAT_SSIZE_T "d positional argument%.1s (%" CYTHON_FORMAT_SSIZE_T "d given)", func_name, more_or_less, num_expected, (num_expected == 1) ? "" : "s", num_found); } /* PyObjectCall */ #if CYTHON_COMPILING_IN_CPYTHON static CYTHON_INLINE PyObject* __Pyx_PyObject_Call(PyObject *func, PyObject *arg, PyObject *kw) { PyObject *result; ternaryfunc call = func->ob_type->tp_call; if (unlikely(!call)) return PyObject_Call(func, arg, kw); if (unlikely(Py_EnterRecursiveCall((char*)" while calling a Python object"))) return NULL; result = (*call)(func, arg, kw); Py_LeaveRecursiveCall(); if (unlikely(!result) && unlikely(!PyErr_Occurred())) { PyErr_SetString( PyExc_SystemError, "NULL result without error in PyObject_Call"); } return result; } #endif /* PyFunctionFastCall */ #if CYTHON_FAST_PYCALL static PyObject* __Pyx_PyFunction_FastCallNoKw(PyCodeObject *co, PyObject **args, Py_ssize_t na, PyObject *globals) { PyFrameObject *f; PyThreadState *tstate = __Pyx_PyThreadState_Current; PyObject **fastlocals; Py_ssize_t i; PyObject *result; assert(globals != NULL); /* XXX Perhaps we should create a specialized PyFrame_New() that doesn't take locals, but does take builtins without sanity checking them. */ assert(tstate != NULL); f = PyFrame_New(tstate, co, globals, NULL); if (f == NULL) { return NULL; } fastlocals = __Pyx_PyFrame_GetLocalsplus(f); for (i = 0; i < na; i++) { Py_INCREF(*args); fastlocals[i] = *args++; } result = PyEval_EvalFrameEx(f,0); ++tstate->recursion_depth; Py_DECREF(f); --tstate->recursion_depth; return result; } #if 1 || PY_VERSION_HEX < 0x030600B1 static PyObject *__Pyx_PyFunction_FastCallDict(PyObject *func, PyObject **args, Py_ssize_t nargs, PyObject *kwargs) { PyCodeObject *co = (PyCodeObject *)PyFunction_GET_CODE(func); PyObject *globals = PyFunction_GET_GLOBALS(func); PyObject *argdefs = PyFunction_GET_DEFAULTS(func); PyObject *closure; #if PY_MAJOR_VERSION >= 3 PyObject *kwdefs; #endif PyObject *kwtuple, **k; PyObject **d; Py_ssize_t nd; Py_ssize_t nk; PyObject *result; assert(kwargs == NULL || PyDict_Check(kwargs)); nk = kwargs ? 
PyDict_Size(kwargs) : 0; if (Py_EnterRecursiveCall((char*)" while calling a Python object")) { return NULL; } if ( #if PY_MAJOR_VERSION >= 3 co->co_kwonlyargcount == 0 && #endif likely(kwargs == NULL || nk == 0) && co->co_flags == (CO_OPTIMIZED | CO_NEWLOCALS | CO_NOFREE)) { if (argdefs == NULL && co->co_argcount == nargs) { result = __Pyx_PyFunction_FastCallNoKw(co, args, nargs, globals); goto done; } else if (nargs == 0 && argdefs != NULL && co->co_argcount == Py_SIZE(argdefs)) { /* function called with no arguments, but all parameters have a default value: use default values as arguments .*/ args = &PyTuple_GET_ITEM(argdefs, 0); result =__Pyx_PyFunction_FastCallNoKw(co, args, Py_SIZE(argdefs), globals); goto done; } } if (kwargs != NULL) { Py_ssize_t pos, i; kwtuple = PyTuple_New(2 * nk); if (kwtuple == NULL) { result = NULL; goto done; } k = &PyTuple_GET_ITEM(kwtuple, 0); pos = i = 0; while (PyDict_Next(kwargs, &pos, &k[i], &k[i+1])) { Py_INCREF(k[i]); Py_INCREF(k[i+1]); i += 2; } nk = i / 2; } else { kwtuple = NULL; k = NULL; } closure = PyFunction_GET_CLOSURE(func); #if PY_MAJOR_VERSION >= 3 kwdefs = PyFunction_GET_KW_DEFAULTS(func); #endif if (argdefs != NULL) { d = &PyTuple_GET_ITEM(argdefs, 0); nd = Py_SIZE(argdefs); } else { d = NULL; nd = 0; } #if PY_MAJOR_VERSION >= 3 result = PyEval_EvalCodeEx((PyObject*)co, globals, (PyObject *)NULL, args, (int)nargs, k, (int)nk, d, (int)nd, kwdefs, closure); #else result = PyEval_EvalCodeEx(co, globals, (PyObject *)NULL, args, (int)nargs, k, (int)nk, d, (int)nd, closure); #endif Py_XDECREF(kwtuple); done: Py_LeaveRecursiveCall(); return result; } #endif #endif /* PyObjectCallMethO */ #if CYTHON_COMPILING_IN_CPYTHON static CYTHON_INLINE PyObject* __Pyx_PyObject_CallMethO(PyObject *func, PyObject *arg) { PyObject *self, *result; PyCFunction cfunc; cfunc = PyCFunction_GET_FUNCTION(func); self = PyCFunction_GET_SELF(func); if (unlikely(Py_EnterRecursiveCall((char*)" while calling a Python object"))) return NULL; result = cfunc(self, arg); Py_LeaveRecursiveCall(); if (unlikely(!result) && unlikely(!PyErr_Occurred())) { PyErr_SetString( PyExc_SystemError, "NULL result without error in PyObject_Call"); } return result; } #endif /* PyObjectCallNoArg */ #if CYTHON_COMPILING_IN_CPYTHON static CYTHON_INLINE PyObject* __Pyx_PyObject_CallNoArg(PyObject *func) { #if CYTHON_FAST_PYCALL if (PyFunction_Check(func)) { return __Pyx_PyFunction_FastCall(func, NULL, 0); } #endif #ifdef __Pyx_CyFunction_USED if (likely(PyCFunction_Check(func) || __Pyx_CyFunction_Check(func))) #else if (likely(PyCFunction_Check(func))) #endif { if (likely(PyCFunction_GET_FLAGS(func) & METH_NOARGS)) { return __Pyx_PyObject_CallMethO(func, NULL); } } return __Pyx_PyObject_Call(func, __pyx_empty_tuple, NULL); } #endif /* PyCFunctionFastCall */ #if CYTHON_FAST_PYCCALL static CYTHON_INLINE PyObject * __Pyx_PyCFunction_FastCall(PyObject *func_obj, PyObject **args, Py_ssize_t nargs) { PyCFunctionObject *func = (PyCFunctionObject*)func_obj; PyCFunction meth = PyCFunction_GET_FUNCTION(func); PyObject *self = PyCFunction_GET_SELF(func); int flags = PyCFunction_GET_FLAGS(func); assert(PyCFunction_Check(func)); assert(METH_FASTCALL == (flags & ~(METH_CLASS | METH_STATIC | METH_COEXIST | METH_KEYWORDS | METH_STACKLESS))); assert(nargs >= 0); assert(nargs == 0 || args != NULL); /* _PyCFunction_FastCallDict() must not be called with an exception set, because it may clear it (directly or indirectly) and so the caller loses its exception */ assert(!PyErr_Occurred()); if ((PY_VERSION_HEX < 
0x030700A0) || unlikely(flags & METH_KEYWORDS)) { return (*((__Pyx_PyCFunctionFastWithKeywords)(void*)meth)) (self, args, nargs, NULL); } else { return (*((__Pyx_PyCFunctionFast)(void*)meth)) (self, args, nargs); } } #endif /* PyObjectCallOneArg */ #if CYTHON_COMPILING_IN_CPYTHON static PyObject* __Pyx__PyObject_CallOneArg(PyObject *func, PyObject *arg) { PyObject *result; PyObject *args = PyTuple_New(1); if (unlikely(!args)) return NULL; Py_INCREF(arg); PyTuple_SET_ITEM(args, 0, arg); result = __Pyx_PyObject_Call(func, args, NULL); Py_DECREF(args); return result; } static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg) { #if CYTHON_FAST_PYCALL if (PyFunction_Check(func)) { return __Pyx_PyFunction_FastCall(func, &arg, 1); } #endif if (likely(PyCFunction_Check(func))) { if (likely(PyCFunction_GET_FLAGS(func) & METH_O)) { return __Pyx_PyObject_CallMethO(func, arg); #if CYTHON_FAST_PYCCALL } else if (PyCFunction_GET_FLAGS(func) & METH_FASTCALL) { return __Pyx_PyCFunction_FastCall(func, &arg, 1); #endif } } return __Pyx__PyObject_CallOneArg(func, arg); } #else static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg) { PyObject *result; PyObject *args = PyTuple_Pack(1, arg); if (unlikely(!args)) return NULL; result = __Pyx_PyObject_Call(func, args, NULL); Py_DECREF(args); return result; } #endif /* GetTopmostException */ #if CYTHON_USE_EXC_INFO_STACK static _PyErr_StackItem * __Pyx_PyErr_GetTopmostException(PyThreadState *tstate) { _PyErr_StackItem *exc_info = tstate->exc_info; while ((exc_info->exc_type == NULL || exc_info->exc_type == Py_None) && exc_info->previous_item != NULL) { exc_info = exc_info->previous_item; } return exc_info; } #endif /* SaveResetException */ #if CYTHON_FAST_THREAD_STATE static CYTHON_INLINE void __Pyx__ExceptionSave(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) { #if CYTHON_USE_EXC_INFO_STACK _PyErr_StackItem *exc_info = __Pyx_PyErr_GetTopmostException(tstate); *type = exc_info->exc_type; *value = exc_info->exc_value; *tb = exc_info->exc_traceback; #else *type = tstate->exc_type; *value = tstate->exc_value; *tb = tstate->exc_traceback; #endif Py_XINCREF(*type); Py_XINCREF(*value); Py_XINCREF(*tb); } static CYTHON_INLINE void __Pyx__ExceptionReset(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb) { PyObject *tmp_type, *tmp_value, *tmp_tb; #if CYTHON_USE_EXC_INFO_STACK _PyErr_StackItem *exc_info = tstate->exc_info; tmp_type = exc_info->exc_type; tmp_value = exc_info->exc_value; tmp_tb = exc_info->exc_traceback; exc_info->exc_type = type; exc_info->exc_value = value; exc_info->exc_traceback = tb; #else tmp_type = tstate->exc_type; tmp_value = tstate->exc_value; tmp_tb = tstate->exc_traceback; tstate->exc_type = type; tstate->exc_value = value; tstate->exc_traceback = tb; #endif Py_XDECREF(tmp_type); Py_XDECREF(tmp_value); Py_XDECREF(tmp_tb); } #endif /* GetException */ #if CYTHON_FAST_THREAD_STATE static int __Pyx__GetException(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) #else static int __Pyx_GetException(PyObject **type, PyObject **value, PyObject **tb) #endif { PyObject *local_type, *local_value, *local_tb; #if CYTHON_FAST_THREAD_STATE PyObject *tmp_type, *tmp_value, *tmp_tb; local_type = tstate->curexc_type; local_value = tstate->curexc_value; local_tb = tstate->curexc_traceback; tstate->curexc_type = 0; tstate->curexc_value = 0; tstate->curexc_traceback = 0; #else PyErr_Fetch(&local_type, &local_value, &local_tb); #endif 
PyErr_NormalizeException(&local_type, &local_value, &local_tb); #if CYTHON_FAST_THREAD_STATE if (unlikely(tstate->curexc_type)) #else if (unlikely(PyErr_Occurred())) #endif goto bad; #if PY_MAJOR_VERSION >= 3 if (local_tb) { if (unlikely(PyException_SetTraceback(local_value, local_tb) < 0)) goto bad; } #endif Py_XINCREF(local_tb); Py_XINCREF(local_type); Py_XINCREF(local_value); *type = local_type; *value = local_value; *tb = local_tb; #if CYTHON_FAST_THREAD_STATE #if CYTHON_USE_EXC_INFO_STACK { _PyErr_StackItem *exc_info = tstate->exc_info; tmp_type = exc_info->exc_type; tmp_value = exc_info->exc_value; tmp_tb = exc_info->exc_traceback; exc_info->exc_type = local_type; exc_info->exc_value = local_value; exc_info->exc_traceback = local_tb; } #else tmp_type = tstate->exc_type; tmp_value = tstate->exc_value; tmp_tb = tstate->exc_traceback; tstate->exc_type = local_type; tstate->exc_value = local_value; tstate->exc_traceback = local_tb; #endif Py_XDECREF(tmp_type); Py_XDECREF(tmp_value); Py_XDECREF(tmp_tb); #else PyErr_SetExcInfo(local_type, local_value, local_tb); #endif return 0; bad: *type = 0; *value = 0; *tb = 0; Py_XDECREF(local_type); Py_XDECREF(local_value); Py_XDECREF(local_tb); return -1; } /* PyErrFetchRestore */ #if CYTHON_FAST_THREAD_STATE static CYTHON_INLINE void __Pyx_ErrRestoreInState(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb) { PyObject *tmp_type, *tmp_value, *tmp_tb; tmp_type = tstate->curexc_type; tmp_value = tstate->curexc_value; tmp_tb = tstate->curexc_traceback; tstate->curexc_type = type; tstate->curexc_value = value; tstate->curexc_traceback = tb; Py_XDECREF(tmp_type); Py_XDECREF(tmp_value); Py_XDECREF(tmp_tb); } static CYTHON_INLINE void __Pyx_ErrFetchInState(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) { *type = tstate->curexc_type; *value = tstate->curexc_value; *tb = tstate->curexc_traceback; tstate->curexc_type = 0; tstate->curexc_value = 0; tstate->curexc_traceback = 0; } #endif /* RaiseException */ #if PY_MAJOR_VERSION < 3 static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, CYTHON_UNUSED PyObject *cause) { __Pyx_PyThreadState_declare Py_XINCREF(type); if (!value || value == Py_None) value = NULL; else Py_INCREF(value); if (!tb || tb == Py_None) tb = NULL; else { Py_INCREF(tb); if (!PyTraceBack_Check(tb)) { PyErr_SetString(PyExc_TypeError, "raise: arg 3 must be a traceback or None"); goto raise_error; } } if (PyType_Check(type)) { #if CYTHON_COMPILING_IN_PYPY if (!value) { Py_INCREF(Py_None); value = Py_None; } #endif PyErr_NormalizeException(&type, &value, &tb); } else { if (value) { PyErr_SetString(PyExc_TypeError, "instance exception may not have a separate value"); goto raise_error; } value = type; type = (PyObject*) Py_TYPE(type); Py_INCREF(type); if (!PyType_IsSubtype((PyTypeObject *)type, (PyTypeObject *)PyExc_BaseException)) { PyErr_SetString(PyExc_TypeError, "raise: exception class must be a subclass of BaseException"); goto raise_error; } } __Pyx_PyThreadState_assign __Pyx_ErrRestore(type, value, tb); return; raise_error: Py_XDECREF(value); Py_XDECREF(type); Py_XDECREF(tb); return; } #else static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause) { PyObject* owned_instance = NULL; if (tb == Py_None) { tb = 0; } else if (tb && !PyTraceBack_Check(tb)) { PyErr_SetString(PyExc_TypeError, "raise: arg 3 must be a traceback or None"); goto bad; } if (value == Py_None) value = 0; if (PyExceptionInstance_Check(type)) { if (value) { 
PyErr_SetString(PyExc_TypeError, "instance exception may not have a separate value"); goto bad; } value = type; type = (PyObject*) Py_TYPE(value); } else if (PyExceptionClass_Check(type)) { PyObject *instance_class = NULL; if (value && PyExceptionInstance_Check(value)) { instance_class = (PyObject*) Py_TYPE(value); if (instance_class != type) { int is_subclass = PyObject_IsSubclass(instance_class, type); if (!is_subclass) { instance_class = NULL; } else if (unlikely(is_subclass == -1)) { goto bad; } else { type = instance_class; } } } if (!instance_class) { PyObject *args; if (!value) args = PyTuple_New(0); else if (PyTuple_Check(value)) { Py_INCREF(value); args = value; } else args = PyTuple_Pack(1, value); if (!args) goto bad; owned_instance = PyObject_Call(type, args, NULL); Py_DECREF(args); if (!owned_instance) goto bad; value = owned_instance; if (!PyExceptionInstance_Check(value)) { PyErr_Format(PyExc_TypeError, "calling %R should have returned an instance of " "BaseException, not %R", type, Py_TYPE(value)); goto bad; } } } else { PyErr_SetString(PyExc_TypeError, "raise: exception class must be a subclass of BaseException"); goto bad; } if (cause) { PyObject *fixed_cause; if (cause == Py_None) { fixed_cause = NULL; } else if (PyExceptionClass_Check(cause)) { fixed_cause = PyObject_CallObject(cause, NULL); if (fixed_cause == NULL) goto bad; } else if (PyExceptionInstance_Check(cause)) { fixed_cause = cause; Py_INCREF(fixed_cause); } else { PyErr_SetString(PyExc_TypeError, "exception causes must derive from " "BaseException"); goto bad; } PyException_SetCause(value, fixed_cause); } PyErr_SetObject(type, value); if (tb) { #if CYTHON_COMPILING_IN_PYPY PyObject *tmp_type, *tmp_value, *tmp_tb; PyErr_Fetch(&tmp_type, &tmp_value, &tmp_tb); Py_INCREF(tb); PyErr_Restore(tmp_type, tmp_value, tb); Py_XDECREF(tmp_tb); #else PyThreadState *tstate = __Pyx_PyThreadState_Current; PyObject* tmp_tb = tstate->curexc_traceback; if (tb != tmp_tb) { Py_INCREF(tb); tstate->curexc_traceback = tb; Py_XDECREF(tmp_tb); } #endif } bad: Py_XDECREF(owned_instance); return; } #endif /* GetItemInt */ static PyObject *__Pyx_GetItemInt_Generic(PyObject *o, PyObject* j) { PyObject *r; if (!j) return NULL; r = PyObject_GetItem(o, j); Py_DECREF(j); return r; } static CYTHON_INLINE PyObject *__Pyx_GetItemInt_List_Fast(PyObject *o, Py_ssize_t i, CYTHON_NCP_UNUSED int wraparound, CYTHON_NCP_UNUSED int boundscheck) { #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS Py_ssize_t wrapped_i = i; if (wraparound & unlikely(i < 0)) { wrapped_i += PyList_GET_SIZE(o); } if ((!boundscheck) || likely(__Pyx_is_valid_index(wrapped_i, PyList_GET_SIZE(o)))) { PyObject *r = PyList_GET_ITEM(o, wrapped_i); Py_INCREF(r); return r; } return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i)); #else return PySequence_GetItem(o, i); #endif } static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Tuple_Fast(PyObject *o, Py_ssize_t i, CYTHON_NCP_UNUSED int wraparound, CYTHON_NCP_UNUSED int boundscheck) { #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS Py_ssize_t wrapped_i = i; if (wraparound & unlikely(i < 0)) { wrapped_i += PyTuple_GET_SIZE(o); } if ((!boundscheck) || likely(__Pyx_is_valid_index(wrapped_i, PyTuple_GET_SIZE(o)))) { PyObject *r = PyTuple_GET_ITEM(o, wrapped_i); Py_INCREF(r); return r; } return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i)); #else return PySequence_GetItem(o, i); #endif } static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Fast(PyObject *o, Py_ssize_t i, int is_list, CYTHON_NCP_UNUSED int wraparound, 
CYTHON_NCP_UNUSED int boundscheck) { #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS && CYTHON_USE_TYPE_SLOTS if (is_list || PyList_CheckExact(o)) { Py_ssize_t n = ((!wraparound) | likely(i >= 0)) ? i : i + PyList_GET_SIZE(o); if ((!boundscheck) || (likely(__Pyx_is_valid_index(n, PyList_GET_SIZE(o))))) { PyObject *r = PyList_GET_ITEM(o, n); Py_INCREF(r); return r; } } else if (PyTuple_CheckExact(o)) { Py_ssize_t n = ((!wraparound) | likely(i >= 0)) ? i : i + PyTuple_GET_SIZE(o); if ((!boundscheck) || likely(__Pyx_is_valid_index(n, PyTuple_GET_SIZE(o)))) { PyObject *r = PyTuple_GET_ITEM(o, n); Py_INCREF(r); return r; } } else { PySequenceMethods *m = Py_TYPE(o)->tp_as_sequence; if (likely(m && m->sq_item)) { if (wraparound && unlikely(i < 0) && likely(m->sq_length)) { Py_ssize_t l = m->sq_length(o); if (likely(l >= 0)) { i += l; } else { if (!PyErr_ExceptionMatches(PyExc_OverflowError)) return NULL; PyErr_Clear(); } } return m->sq_item(o, i); } } #else if (is_list || PySequence_Check(o)) { return PySequence_GetItem(o, i); } #endif return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i)); } /* ObjectGetItem */ #if CYTHON_USE_TYPE_SLOTS static PyObject *__Pyx_PyObject_GetIndex(PyObject *obj, PyObject* index) { PyObject *runerr; Py_ssize_t key_value; PySequenceMethods *m = Py_TYPE(obj)->tp_as_sequence; if (unlikely(!(m && m->sq_item))) { PyErr_Format(PyExc_TypeError, "'%.200s' object is not subscriptable", Py_TYPE(obj)->tp_name); return NULL; } key_value = __Pyx_PyIndex_AsSsize_t(index); if (likely(key_value != -1 || !(runerr = PyErr_Occurred()))) { return __Pyx_GetItemInt_Fast(obj, key_value, 0, 1, 1); } if (PyErr_GivenExceptionMatches(runerr, PyExc_OverflowError)) { PyErr_Clear(); PyErr_Format(PyExc_IndexError, "cannot fit '%.200s' into an index-sized integer", Py_TYPE(index)->tp_name); } return NULL; } static PyObject *__Pyx_PyObject_GetItem(PyObject *obj, PyObject* key) { PyMappingMethods *m = Py_TYPE(obj)->tp_as_mapping; if (likely(m && m->mp_subscript)) { return m->mp_subscript(obj, key); } return __Pyx_PyObject_GetIndex(obj, key); } #endif /* PyErrExceptionMatches */ #if CYTHON_FAST_THREAD_STATE static int __Pyx_PyErr_ExceptionMatchesTuple(PyObject *exc_type, PyObject *tuple) { Py_ssize_t i, n; n = PyTuple_GET_SIZE(tuple); #if PY_MAJOR_VERSION >= 3 for (i=0; icurexc_type; if (exc_type == err) return 1; if (unlikely(!exc_type)) return 0; if (unlikely(PyTuple_Check(err))) return __Pyx_PyErr_ExceptionMatchesTuple(exc_type, err); return __Pyx_PyErr_GivenExceptionMatches(exc_type, err); } #endif /* PyDictVersioning */ #if CYTHON_USE_DICT_VERSIONS && CYTHON_USE_TYPE_SLOTS static CYTHON_INLINE PY_UINT64_T __Pyx_get_tp_dict_version(PyObject *obj) { PyObject *dict = Py_TYPE(obj)->tp_dict; return likely(dict) ? __PYX_GET_DICT_VERSION(dict) : 0; } static CYTHON_INLINE PY_UINT64_T __Pyx_get_object_dict_version(PyObject *obj) { PyObject **dictptr = NULL; Py_ssize_t offset = Py_TYPE(obj)->tp_dictoffset; if (offset) { #if CYTHON_COMPILING_IN_CPYTHON dictptr = (likely(offset > 0)) ? (PyObject **) ((char *)obj + offset) : _PyObject_GetDictPtr(obj); #else dictptr = _PyObject_GetDictPtr(obj); #endif } return (dictptr && *dictptr) ? 
__PYX_GET_DICT_VERSION(*dictptr) : 0; } static CYTHON_INLINE int __Pyx_object_dict_version_matches(PyObject* obj, PY_UINT64_T tp_dict_version, PY_UINT64_T obj_dict_version) { PyObject *dict = Py_TYPE(obj)->tp_dict; if (unlikely(!dict) || unlikely(tp_dict_version != __PYX_GET_DICT_VERSION(dict))) return 0; return obj_dict_version == __Pyx_get_object_dict_version(obj); } #endif /* GetModuleGlobalName */ #if CYTHON_USE_DICT_VERSIONS static PyObject *__Pyx__GetModuleGlobalName(PyObject *name, PY_UINT64_T *dict_version, PyObject **dict_cached_value) #else static CYTHON_INLINE PyObject *__Pyx__GetModuleGlobalName(PyObject *name) #endif { PyObject *result; #if !CYTHON_AVOID_BORROWED_REFS #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030500A1 result = _PyDict_GetItem_KnownHash(__pyx_d, name, ((PyASCIIObject *) name)->hash); __PYX_UPDATE_DICT_CACHE(__pyx_d, result, *dict_cached_value, *dict_version) if (likely(result)) { return __Pyx_NewRef(result); } else if (unlikely(PyErr_Occurred())) { return NULL; } #else result = PyDict_GetItem(__pyx_d, name); __PYX_UPDATE_DICT_CACHE(__pyx_d, result, *dict_cached_value, *dict_version) if (likely(result)) { return __Pyx_NewRef(result); } #endif #else result = PyObject_GetItem(__pyx_d, name); __PYX_UPDATE_DICT_CACHE(__pyx_d, result, *dict_cached_value, *dict_version) if (likely(result)) { return __Pyx_NewRef(result); } PyErr_Clear(); #endif return __Pyx_GetBuiltinName(name); } /* ArgTypeTest */ static int __Pyx__ArgTypeTest(PyObject *obj, PyTypeObject *type, const char *name, int exact) { if (unlikely(!type)) { PyErr_SetString(PyExc_SystemError, "Missing type object"); return 0; } else if (exact) { #if PY_MAJOR_VERSION == 2 if ((type == &PyBaseString_Type) && likely(__Pyx_PyBaseString_CheckExact(obj))) return 1; #endif } else { if (likely(__Pyx_TypeCheck(obj, type))) return 1; } PyErr_Format(PyExc_TypeError, "Argument '%.200s' has incorrect type (expected %.200s, got %.200s)", name, type->tp_name, Py_TYPE(obj)->tp_name); return 0; } /* ExtTypeTest */ static CYTHON_INLINE int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type) { if (unlikely(!type)) { PyErr_SetString(PyExc_SystemError, "Missing type object"); return 0; } if (likely(__Pyx_TypeCheck(obj, type))) return 1; PyErr_Format(PyExc_TypeError, "Cannot convert %.200s to %.200s", Py_TYPE(obj)->tp_name, type->tp_name); return 0; } /* decode_c_string */ static CYTHON_INLINE PyObject* __Pyx_decode_c_string( const char* cstring, Py_ssize_t start, Py_ssize_t stop, const char* encoding, const char* errors, PyObject* (*decode_func)(const char *s, Py_ssize_t size, const char *errors)) { Py_ssize_t length; if (unlikely((start < 0) | (stop < 0))) { size_t slen = strlen(cstring); if (unlikely(slen > (size_t) PY_SSIZE_T_MAX)) { PyErr_SetString(PyExc_OverflowError, "c-string too long to convert to Python"); return NULL; } length = (Py_ssize_t) slen; if (start < 0) { start += length; if (start < 0) start = 0; } if (stop < 0) stop += length; } if (unlikely(stop <= start)) return __Pyx_NewRef(__pyx_empty_unicode); length = stop - start; cstring += start; if (decode_func) { return decode_func(cstring, length, errors); } else { return PyUnicode_Decode(cstring, length, encoding, errors); } } /* PyObject_GenericGetAttrNoDict */ #if CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP && PY_VERSION_HEX < 0x03070000 static PyObject *__Pyx_RaiseGenericGetAttributeError(PyTypeObject *tp, PyObject *attr_name) { PyErr_Format(PyExc_AttributeError, #if PY_MAJOR_VERSION >= 3 "'%.50s' object has no attribute '%U'", tp->tp_name, 
attr_name); #else "'%.50s' object has no attribute '%.400s'", tp->tp_name, PyString_AS_STRING(attr_name)); #endif return NULL; } static CYTHON_INLINE PyObject* __Pyx_PyObject_GenericGetAttrNoDict(PyObject* obj, PyObject* attr_name) { PyObject *descr; PyTypeObject *tp = Py_TYPE(obj); if (unlikely(!PyString_Check(attr_name))) { return PyObject_GenericGetAttr(obj, attr_name); } assert(!tp->tp_dictoffset); descr = _PyType_Lookup(tp, attr_name); if (unlikely(!descr)) { return __Pyx_RaiseGenericGetAttributeError(tp, attr_name); } Py_INCREF(descr); #if PY_MAJOR_VERSION < 3 if (likely(PyType_HasFeature(Py_TYPE(descr), Py_TPFLAGS_HAVE_CLASS))) #endif { descrgetfunc f = Py_TYPE(descr)->tp_descr_get; if (unlikely(f)) { PyObject *res = f(descr, obj, (PyObject *)tp); Py_DECREF(descr); return res; } } return descr; } #endif /* PyObject_GenericGetAttr */ #if CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP && PY_VERSION_HEX < 0x03070000 static PyObject* __Pyx_PyObject_GenericGetAttr(PyObject* obj, PyObject* attr_name) { if (unlikely(Py_TYPE(obj)->tp_dictoffset)) { return PyObject_GenericGetAttr(obj, attr_name); } return __Pyx_PyObject_GenericGetAttrNoDict(obj, attr_name); } #endif /* PyObjectGetAttrStrNoError */ static void __Pyx_PyObject_GetAttrStr_ClearAttributeError(void) { __Pyx_PyThreadState_declare __Pyx_PyThreadState_assign if (likely(__Pyx_PyErr_ExceptionMatches(PyExc_AttributeError))) __Pyx_PyErr_Clear(); } static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStrNoError(PyObject* obj, PyObject* attr_name) { PyObject *result; #if CYTHON_COMPILING_IN_CPYTHON && CYTHON_USE_TYPE_SLOTS && PY_VERSION_HEX >= 0x030700B1 PyTypeObject* tp = Py_TYPE(obj); if (likely(tp->tp_getattro == PyObject_GenericGetAttr)) { return _PyObject_GenericGetAttrWithDict(obj, attr_name, NULL, 1); } #endif result = __Pyx_PyObject_GetAttrStr(obj, attr_name); if (unlikely(!result)) { __Pyx_PyObject_GetAttrStr_ClearAttributeError(); } return result; } /* SetupReduce */ static int __Pyx_setup_reduce_is_named(PyObject* meth, PyObject* name) { int ret; PyObject *name_attr; name_attr = __Pyx_PyObject_GetAttrStr(meth, __pyx_n_s_name); if (likely(name_attr)) { ret = PyObject_RichCompareBool(name_attr, name, Py_EQ); } else { ret = -1; } if (unlikely(ret < 0)) { PyErr_Clear(); ret = 0; } Py_XDECREF(name_attr); return ret; } static int __Pyx_setup_reduce(PyObject* type_obj) { int ret = 0; PyObject *object_reduce = NULL; PyObject *object_reduce_ex = NULL; PyObject *reduce = NULL; PyObject *reduce_ex = NULL; PyObject *reduce_cython = NULL; PyObject *setstate = NULL; PyObject *setstate_cython = NULL; #if CYTHON_USE_PYTYPE_LOOKUP if (_PyType_Lookup((PyTypeObject*)type_obj, __pyx_n_s_getstate)) goto __PYX_GOOD; #else if (PyObject_HasAttr(type_obj, __pyx_n_s_getstate)) goto __PYX_GOOD; #endif #if CYTHON_USE_PYTYPE_LOOKUP object_reduce_ex = _PyType_Lookup(&PyBaseObject_Type, __pyx_n_s_reduce_ex); if (!object_reduce_ex) goto __PYX_BAD; #else object_reduce_ex = __Pyx_PyObject_GetAttrStr((PyObject*)&PyBaseObject_Type, __pyx_n_s_reduce_ex); if (!object_reduce_ex) goto __PYX_BAD; #endif reduce_ex = __Pyx_PyObject_GetAttrStr(type_obj, __pyx_n_s_reduce_ex); if (unlikely(!reduce_ex)) goto __PYX_BAD; if (reduce_ex == object_reduce_ex) { #if CYTHON_USE_PYTYPE_LOOKUP object_reduce = _PyType_Lookup(&PyBaseObject_Type, __pyx_n_s_reduce); if (!object_reduce) goto __PYX_BAD; #else object_reduce = __Pyx_PyObject_GetAttrStr((PyObject*)&PyBaseObject_Type, __pyx_n_s_reduce); if (!object_reduce) goto __PYX_BAD; #endif reduce = __Pyx_PyObject_GetAttrStr(type_obj, 
__pyx_n_s_reduce); if (unlikely(!reduce)) goto __PYX_BAD; if (reduce == object_reduce || __Pyx_setup_reduce_is_named(reduce, __pyx_n_s_reduce_cython)) { reduce_cython = __Pyx_PyObject_GetAttrStrNoError(type_obj, __pyx_n_s_reduce_cython); if (likely(reduce_cython)) { ret = PyDict_SetItem(((PyTypeObject*)type_obj)->tp_dict, __pyx_n_s_reduce, reduce_cython); if (unlikely(ret < 0)) goto __PYX_BAD; ret = PyDict_DelItem(((PyTypeObject*)type_obj)->tp_dict, __pyx_n_s_reduce_cython); if (unlikely(ret < 0)) goto __PYX_BAD; } else if (reduce == object_reduce || PyErr_Occurred()) { goto __PYX_BAD; } setstate = __Pyx_PyObject_GetAttrStr(type_obj, __pyx_n_s_setstate); if (!setstate) PyErr_Clear(); if (!setstate || __Pyx_setup_reduce_is_named(setstate, __pyx_n_s_setstate_cython)) { setstate_cython = __Pyx_PyObject_GetAttrStrNoError(type_obj, __pyx_n_s_setstate_cython); if (likely(setstate_cython)) { ret = PyDict_SetItem(((PyTypeObject*)type_obj)->tp_dict, __pyx_n_s_setstate, setstate_cython); if (unlikely(ret < 0)) goto __PYX_BAD; ret = PyDict_DelItem(((PyTypeObject*)type_obj)->tp_dict, __pyx_n_s_setstate_cython); if (unlikely(ret < 0)) goto __PYX_BAD; } else if (!setstate || PyErr_Occurred()) { goto __PYX_BAD; } } PyType_Modified((PyTypeObject*)type_obj); } } goto __PYX_GOOD; __PYX_BAD: if (!PyErr_Occurred()) PyErr_Format(PyExc_RuntimeError, "Unable to initialize pickling for %s", ((PyTypeObject*)type_obj)->tp_name); ret = -1; __PYX_GOOD: #if !CYTHON_USE_PYTYPE_LOOKUP Py_XDECREF(object_reduce); Py_XDECREF(object_reduce_ex); #endif Py_XDECREF(reduce); Py_XDECREF(reduce_ex); Py_XDECREF(reduce_cython); Py_XDECREF(setstate); Py_XDECREF(setstate_cython); return ret; } /* SetVTable */ static int __Pyx_SetVtable(PyObject *dict, void *vtable) { #if PY_VERSION_HEX >= 0x02070000 PyObject *ob = PyCapsule_New(vtable, 0, 0); #else PyObject *ob = PyCObject_FromVoidPtr(vtable, 0); #endif if (!ob) goto bad; if (PyDict_SetItem(dict, __pyx_n_s_pyx_vtable, ob) < 0) goto bad; Py_DECREF(ob); return 0; bad: Py_XDECREF(ob); return -1; } /* TypeImport */ #ifndef __PYX_HAVE_RT_ImportType #define __PYX_HAVE_RT_ImportType static PyTypeObject *__Pyx_ImportType(PyObject *module, const char *module_name, const char *class_name, size_t size, enum __Pyx_ImportType_CheckSize check_size) { PyObject *result = 0; char warning[200]; Py_ssize_t basicsize; #ifdef Py_LIMITED_API PyObject *py_basicsize; #endif result = PyObject_GetAttrString(module, class_name); if (!result) goto bad; if (!PyType_Check(result)) { PyErr_Format(PyExc_TypeError, "%.200s.%.200s is not a type object", module_name, class_name); goto bad; } #ifndef Py_LIMITED_API basicsize = ((PyTypeObject *)result)->tp_basicsize; #else py_basicsize = PyObject_GetAttrString(result, "__basicsize__"); if (!py_basicsize) goto bad; basicsize = PyLong_AsSsize_t(py_basicsize); Py_DECREF(py_basicsize); py_basicsize = 0; if (basicsize == (Py_ssize_t)-1 && PyErr_Occurred()) goto bad; #endif if ((size_t)basicsize < size) { PyErr_Format(PyExc_ValueError, "%.200s.%.200s size changed, may indicate binary incompatibility. " "Expected %zd from C header, got %zd from PyObject", module_name, class_name, size, basicsize); goto bad; } if (check_size == __Pyx_ImportType_CheckSize_Error && (size_t)basicsize != size) { PyErr_Format(PyExc_ValueError, "%.200s.%.200s size changed, may indicate binary incompatibility. 
" "Expected %zd from C header, got %zd from PyObject", module_name, class_name, size, basicsize); goto bad; } else if (check_size == __Pyx_ImportType_CheckSize_Warn && (size_t)basicsize > size) { PyOS_snprintf(warning, sizeof(warning), "%s.%s size changed, may indicate binary incompatibility. " "Expected %zd from C header, got %zd from PyObject", module_name, class_name, size, basicsize); if (PyErr_WarnEx(NULL, warning, 0) < 0) goto bad; } return (PyTypeObject *)result; bad: Py_XDECREF(result); return NULL; } #endif /* Import */ static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, int level) { PyObject *empty_list = 0; PyObject *module = 0; PyObject *global_dict = 0; PyObject *empty_dict = 0; PyObject *list; #if PY_MAJOR_VERSION < 3 PyObject *py_import; py_import = __Pyx_PyObject_GetAttrStr(__pyx_b, __pyx_n_s_import); if (!py_import) goto bad; #endif if (from_list) list = from_list; else { empty_list = PyList_New(0); if (!empty_list) goto bad; list = empty_list; } global_dict = PyModule_GetDict(__pyx_m); if (!global_dict) goto bad; empty_dict = PyDict_New(); if (!empty_dict) goto bad; { #if PY_MAJOR_VERSION >= 3 if (level == -1) { if ((1) && (strchr(__Pyx_MODULE_NAME, '.'))) { module = PyImport_ImportModuleLevelObject( name, global_dict, empty_dict, list, 1); if (!module) { if (!PyErr_ExceptionMatches(PyExc_ImportError)) goto bad; PyErr_Clear(); } } level = 0; } #endif if (!module) { #if PY_MAJOR_VERSION < 3 PyObject *py_level = PyInt_FromLong(level); if (!py_level) goto bad; module = PyObject_CallFunctionObjArgs(py_import, name, global_dict, empty_dict, list, py_level, (PyObject *)NULL); Py_DECREF(py_level); #else module = PyImport_ImportModuleLevelObject( name, global_dict, empty_dict, list, level); #endif } } bad: #if PY_MAJOR_VERSION < 3 Py_XDECREF(py_import); #endif Py_XDECREF(empty_list); Py_XDECREF(empty_dict); return module; } /* ImportFrom */ static PyObject* __Pyx_ImportFrom(PyObject* module, PyObject* name) { PyObject* value = __Pyx_PyObject_GetAttrStr(module, name); if (unlikely(!value) && PyErr_ExceptionMatches(PyExc_AttributeError)) { PyErr_Format(PyExc_ImportError, #if PY_MAJOR_VERSION < 3 "cannot import name %.230s", PyString_AS_STRING(name)); #else "cannot import name %S", name); #endif } return value; } /* None */ static CYTHON_INLINE long __Pyx_mod_long(long a, long b) { long r = a % b; r += ((r != 0) & ((r ^ b) < 0)) * b; return r; } /* ClassMethod */ static PyObject* __Pyx_Method_ClassMethod(PyObject *method) { #if CYTHON_COMPILING_IN_PYPY && PYPY_VERSION_NUM <= 0x05080000 if (PyObject_TypeCheck(method, &PyWrapperDescr_Type)) { return PyClassMethod_New(method); } #else #if CYTHON_COMPILING_IN_PYSTON || CYTHON_COMPILING_IN_PYPY if (PyMethodDescr_Check(method)) #else #if PY_MAJOR_VERSION == 2 static PyTypeObject *methoddescr_type = NULL; if (methoddescr_type == NULL) { PyObject *meth = PyObject_GetAttrString((PyObject*)&PyList_Type, "append"); if (!meth) return NULL; methoddescr_type = Py_TYPE(meth); Py_DECREF(meth); } #else PyTypeObject *methoddescr_type = &PyMethodDescr_Type; #endif if (__Pyx_TypeCheck(method, methoddescr_type)) #endif { PyMethodDescrObject *descr = (PyMethodDescrObject *)method; #if PY_VERSION_HEX < 0x03020000 PyTypeObject *d_type = descr->d_type; #else PyTypeObject *d_type = descr->d_common.d_type; #endif return PyDescr_NewClassMethod(d_type, descr->d_method); } #endif else if (PyMethod_Check(method)) { return PyClassMethod_New(PyMethod_GET_FUNCTION(method)); } else { return PyClassMethod_New(method); } } /* GetNameInClass */ static 
PyObject *__Pyx_GetGlobalNameAfterAttributeLookup(PyObject *name) { PyObject *result; __Pyx_PyThreadState_declare __Pyx_PyThreadState_assign if (unlikely(!__Pyx_PyErr_ExceptionMatches(PyExc_AttributeError))) return NULL; __Pyx_PyErr_Clear(); __Pyx_GetModuleGlobalNameUncached(result, name); return result; } static PyObject *__Pyx__GetNameInClass(PyObject *nmspace, PyObject *name) { PyObject *result; result = __Pyx_PyObject_GetAttrStr(nmspace, name); if (!result) { result = __Pyx_GetGlobalNameAfterAttributeLookup(name); } return result; } /* CLineInTraceback */ #ifndef CYTHON_CLINE_IN_TRACEBACK static int __Pyx_CLineForTraceback(CYTHON_NCP_UNUSED PyThreadState *tstate, int c_line) { PyObject *use_cline; PyObject *ptype, *pvalue, *ptraceback; #if CYTHON_COMPILING_IN_CPYTHON PyObject **cython_runtime_dict; #endif if (unlikely(!__pyx_cython_runtime)) { return c_line; } __Pyx_ErrFetchInState(tstate, &ptype, &pvalue, &ptraceback); #if CYTHON_COMPILING_IN_CPYTHON cython_runtime_dict = _PyObject_GetDictPtr(__pyx_cython_runtime); if (likely(cython_runtime_dict)) { __PYX_PY_DICT_LOOKUP_IF_MODIFIED( use_cline, *cython_runtime_dict, __Pyx_PyDict_GetItemStr(*cython_runtime_dict, __pyx_n_s_cline_in_traceback)) } else #endif { PyObject *use_cline_obj = __Pyx_PyObject_GetAttrStr(__pyx_cython_runtime, __pyx_n_s_cline_in_traceback); if (use_cline_obj) { use_cline = PyObject_Not(use_cline_obj) ? Py_False : Py_True; Py_DECREF(use_cline_obj); } else { PyErr_Clear(); use_cline = NULL; } } if (!use_cline) { c_line = 0; PyObject_SetAttr(__pyx_cython_runtime, __pyx_n_s_cline_in_traceback, Py_False); } else if (use_cline == Py_False || (use_cline != Py_True && PyObject_Not(use_cline) != 0)) { c_line = 0; } __Pyx_ErrRestoreInState(tstate, ptype, pvalue, ptraceback); return c_line; } #endif /* CodeObjectCache */ static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line) { int start = 0, mid = 0, end = count - 1; if (end >= 0 && code_line > entries[end].code_line) { return count; } while (start < end) { mid = start + (end - start) / 2; if (code_line < entries[mid].code_line) { end = mid; } else if (code_line > entries[mid].code_line) { start = mid + 1; } else { return mid; } } if (code_line <= entries[mid].code_line) { return mid; } else { return mid + 1; } } static PyCodeObject *__pyx_find_code_object(int code_line) { PyCodeObject* code_object; int pos; if (unlikely(!code_line) || unlikely(!__pyx_code_cache.entries)) { return NULL; } pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line); if (unlikely(pos >= __pyx_code_cache.count) || unlikely(__pyx_code_cache.entries[pos].code_line != code_line)) { return NULL; } code_object = __pyx_code_cache.entries[pos].code_object; Py_INCREF(code_object); return code_object; } static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object) { int pos, i; __Pyx_CodeObjectCacheEntry* entries = __pyx_code_cache.entries; if (unlikely(!code_line)) { return; } if (unlikely(!entries)) { entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Malloc(64*sizeof(__Pyx_CodeObjectCacheEntry)); if (likely(entries)) { __pyx_code_cache.entries = entries; __pyx_code_cache.max_count = 64; __pyx_code_cache.count = 1; entries[0].code_line = code_line; entries[0].code_object = code_object; Py_INCREF(code_object); } return; } pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line); if ((pos < __pyx_code_cache.count) && unlikely(__pyx_code_cache.entries[pos].code_line == code_line)) { 
PyCodeObject* tmp = entries[pos].code_object; entries[pos].code_object = code_object; Py_DECREF(tmp); return; } if (__pyx_code_cache.count == __pyx_code_cache.max_count) { int new_max = __pyx_code_cache.max_count + 64; entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Realloc( __pyx_code_cache.entries, ((size_t)new_max) * sizeof(__Pyx_CodeObjectCacheEntry)); if (unlikely(!entries)) { return; } __pyx_code_cache.entries = entries; __pyx_code_cache.max_count = new_max; } for (i=__pyx_code_cache.count; i>pos; i--) { entries[i] = entries[i-1]; } entries[pos].code_line = code_line; entries[pos].code_object = code_object; __pyx_code_cache.count++; Py_INCREF(code_object); } /* AddTraceback */ #include "compile.h" #include "frameobject.h" #include "traceback.h" static PyCodeObject* __Pyx_CreateCodeObjectForTraceback( const char *funcname, int c_line, int py_line, const char *filename) { PyCodeObject *py_code = 0; PyObject *py_srcfile = 0; PyObject *py_funcname = 0; #if PY_MAJOR_VERSION < 3 py_srcfile = PyString_FromString(filename); #else py_srcfile = PyUnicode_FromString(filename); #endif if (!py_srcfile) goto bad; if (c_line) { #if PY_MAJOR_VERSION < 3 py_funcname = PyString_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line); #else py_funcname = PyUnicode_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line); #endif } else { #if PY_MAJOR_VERSION < 3 py_funcname = PyString_FromString(funcname); #else py_funcname = PyUnicode_FromString(funcname); #endif } if (!py_funcname) goto bad; py_code = __Pyx_PyCode_New( 0, 0, 0, 0, 0, __pyx_empty_bytes, /*PyObject *code,*/ __pyx_empty_tuple, /*PyObject *consts,*/ __pyx_empty_tuple, /*PyObject *names,*/ __pyx_empty_tuple, /*PyObject *varnames,*/ __pyx_empty_tuple, /*PyObject *freevars,*/ __pyx_empty_tuple, /*PyObject *cellvars,*/ py_srcfile, /*PyObject *filename,*/ py_funcname, /*PyObject *name,*/ py_line, __pyx_empty_bytes /*PyObject *lnotab*/ ); Py_DECREF(py_srcfile); Py_DECREF(py_funcname); return py_code; bad: Py_XDECREF(py_srcfile); Py_XDECREF(py_funcname); return NULL; } static void __Pyx_AddTraceback(const char *funcname, int c_line, int py_line, const char *filename) { PyCodeObject *py_code = 0; PyFrameObject *py_frame = 0; PyThreadState *tstate = __Pyx_PyThreadState_Current; if (c_line) { c_line = __Pyx_CLineForTraceback(tstate, c_line); } py_code = __pyx_find_code_object(c_line ? -c_line : py_line); if (!py_code) { py_code = __Pyx_CreateCodeObjectForTraceback( funcname, c_line, py_line, filename); if (!py_code) goto bad; __pyx_insert_code_object(c_line ? 
-c_line : py_line, py_code); } py_frame = PyFrame_New( tstate, /*PyThreadState *tstate,*/ py_code, /*PyCodeObject *code,*/ __pyx_d, /*PyObject *globals,*/ 0 /*PyObject *locals*/ ); if (!py_frame) goto bad; __Pyx_PyFrame_SetLineNumber(py_frame, py_line); PyTraceBack_Here(py_frame); bad: Py_XDECREF(py_code); Py_XDECREF(py_frame); } /* CIntToPy */ static CYTHON_INLINE PyObject* __Pyx_PyInt_From_uint32_t(uint32_t value) { const uint32_t neg_one = (uint32_t) ((uint32_t) 0 - (uint32_t) 1), const_zero = (uint32_t) 0; const int is_unsigned = neg_one > const_zero; if (is_unsigned) { if (sizeof(uint32_t) < sizeof(long)) { return PyInt_FromLong((long) value); } else if (sizeof(uint32_t) <= sizeof(unsigned long)) { return PyLong_FromUnsignedLong((unsigned long) value); #ifdef HAVE_LONG_LONG } else if (sizeof(uint32_t) <= sizeof(unsigned PY_LONG_LONG)) { return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value); #endif } } else { if (sizeof(uint32_t) <= sizeof(long)) { return PyInt_FromLong((long) value); #ifdef HAVE_LONG_LONG } else if (sizeof(uint32_t) <= sizeof(PY_LONG_LONG)) { return PyLong_FromLongLong((PY_LONG_LONG) value); #endif } } { int one = 1; int little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&value; return _PyLong_FromByteArray(bytes, sizeof(uint32_t), little, !is_unsigned); } } /* CIntFromPyVerify */ #define __PYX_VERIFY_RETURN_INT(target_type, func_type, func_value)\ __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, 0) #define __PYX_VERIFY_RETURN_INT_EXC(target_type, func_type, func_value)\ __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, 1) #define __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, exc)\ {\ func_type value = func_value;\ if (sizeof(target_type) < sizeof(func_type)) {\ if (unlikely(value != (func_type) (target_type) value)) {\ func_type zero = 0;\ if (exc && unlikely(value == (func_type)-1 && PyErr_Occurred()))\ return (target_type) -1;\ if (is_unsigned && unlikely(value < zero))\ goto raise_neg_overflow;\ else\ goto raise_overflow;\ }\ }\ return (target_type) value;\ } /* CIntToPy */ static CYTHON_INLINE PyObject* __Pyx_PyInt_From_int(int value) { const int neg_one = (int) ((int) 0 - (int) 1), const_zero = (int) 0; const int is_unsigned = neg_one > const_zero; if (is_unsigned) { if (sizeof(int) < sizeof(long)) { return PyInt_FromLong((long) value); } else if (sizeof(int) <= sizeof(unsigned long)) { return PyLong_FromUnsignedLong((unsigned long) value); #ifdef HAVE_LONG_LONG } else if (sizeof(int) <= sizeof(unsigned PY_LONG_LONG)) { return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value); #endif } } else { if (sizeof(int) <= sizeof(long)) { return PyInt_FromLong((long) value); #ifdef HAVE_LONG_LONG } else if (sizeof(int) <= sizeof(PY_LONG_LONG)) { return PyLong_FromLongLong((PY_LONG_LONG) value); #endif } } { int one = 1; int little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&value; return _PyLong_FromByteArray(bytes, sizeof(int), little, !is_unsigned); } } /* CIntToPy */ static CYTHON_INLINE PyObject* __Pyx_PyInt_From_uint64_t(uint64_t value) { const uint64_t neg_one = (uint64_t) ((uint64_t) 0 - (uint64_t) 1), const_zero = (uint64_t) 0; const int is_unsigned = neg_one > const_zero; if (is_unsigned) { if (sizeof(uint64_t) < sizeof(long)) { return PyInt_FromLong((long) value); } else if (sizeof(uint64_t) <= sizeof(unsigned long)) { return PyLong_FromUnsignedLong((unsigned long) value); #ifdef HAVE_LONG_LONG } else if (sizeof(uint64_t) <= sizeof(unsigned 
PY_LONG_LONG)) { return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value); #endif } } else { if (sizeof(uint64_t) <= sizeof(long)) { return PyInt_FromLong((long) value); #ifdef HAVE_LONG_LONG } else if (sizeof(uint64_t) <= sizeof(PY_LONG_LONG)) { return PyLong_FromLongLong((PY_LONG_LONG) value); #endif } } { int one = 1; int little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&value; return _PyLong_FromByteArray(bytes, sizeof(uint64_t), little, !is_unsigned); } } /* CIntToPy */ static CYTHON_INLINE PyObject* __Pyx_PyInt_From_long(long value) { const long neg_one = (long) ((long) 0 - (long) 1), const_zero = (long) 0; const int is_unsigned = neg_one > const_zero; if (is_unsigned) { if (sizeof(long) < sizeof(long)) { return PyInt_FromLong((long) value); } else if (sizeof(long) <= sizeof(unsigned long)) { return PyLong_FromUnsignedLong((unsigned long) value); #ifdef HAVE_LONG_LONG } else if (sizeof(long) <= sizeof(unsigned PY_LONG_LONG)) { return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value); #endif } } else { if (sizeof(long) <= sizeof(long)) { return PyInt_FromLong((long) value); #ifdef HAVE_LONG_LONG } else if (sizeof(long) <= sizeof(PY_LONG_LONG)) { return PyLong_FromLongLong((PY_LONG_LONG) value); #endif } } { int one = 1; int little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&value; return _PyLong_FromByteArray(bytes, sizeof(long), little, !is_unsigned); } } /* CIntFromPy */ static CYTHON_INLINE int __Pyx_PyInt_As_int(PyObject *x) { const int neg_one = (int) ((int) 0 - (int) 1), const_zero = (int) 0; const int is_unsigned = neg_one > const_zero; #if PY_MAJOR_VERSION < 3 if (likely(PyInt_Check(x))) { if (sizeof(int) < sizeof(long)) { __PYX_VERIFY_RETURN_INT(int, long, PyInt_AS_LONG(x)) } else { long val = PyInt_AS_LONG(x); if (is_unsigned && unlikely(val < 0)) { goto raise_neg_overflow; } return (int) val; } } else #endif if (likely(PyLong_Check(x))) { if (is_unsigned) { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) { case 0: return (int) 0; case 1: __PYX_VERIFY_RETURN_INT(int, digit, digits[0]) case 2: if (8 * sizeof(int) > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) >= 2 * PyLong_SHIFT) { return (int) (((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0])); } } break; case 3: if (8 * sizeof(int) > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) >= 3 * PyLong_SHIFT) { return (int) (((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])); } } break; case 4: if (8 * sizeof(int) > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) >= 4 * PyLong_SHIFT) { return (int) (((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])); } } break; } #endif #if CYTHON_COMPILING_IN_CPYTHON if (unlikely(Py_SIZE(x) < 0)) { 
goto raise_neg_overflow; } #else { int result = PyObject_RichCompareBool(x, Py_False, Py_LT); if (unlikely(result < 0)) return (int) -1; if (unlikely(result == 1)) goto raise_neg_overflow; } #endif if (sizeof(int) <= sizeof(unsigned long)) { __PYX_VERIFY_RETURN_INT_EXC(int, unsigned long, PyLong_AsUnsignedLong(x)) #ifdef HAVE_LONG_LONG } else if (sizeof(int) <= sizeof(unsigned PY_LONG_LONG)) { __PYX_VERIFY_RETURN_INT_EXC(int, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x)) #endif } } else { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) { case 0: return (int) 0; case -1: __PYX_VERIFY_RETURN_INT(int, sdigit, (sdigit) (-(sdigit)digits[0])) case 1: __PYX_VERIFY_RETURN_INT(int, digit, +digits[0]) case -2: if (8 * sizeof(int) - 1 > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) { return (int) (((int)-1)*(((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); } } break; case 2: if (8 * sizeof(int) > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) { return (int) ((((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); } } break; case -3: if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) { return (int) (((int)-1)*(((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); } } break; case 3: if (8 * sizeof(int) > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) { return (int) ((((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); } } break; case -4: if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) - 1 > 4 * PyLong_SHIFT) { return (int) (((int)-1)*(((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); } } break; case 4: if (8 * sizeof(int) > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) - 1 > 4 * PyLong_SHIFT) { return (int) ((((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); } } break; } #endif if (sizeof(int) <= sizeof(long)) { __PYX_VERIFY_RETURN_INT_EXC(int, long, 
PyLong_AsLong(x)) #ifdef HAVE_LONG_LONG } else if (sizeof(int) <= sizeof(PY_LONG_LONG)) { __PYX_VERIFY_RETURN_INT_EXC(int, PY_LONG_LONG, PyLong_AsLongLong(x)) #endif } } { #if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray) PyErr_SetString(PyExc_RuntimeError, "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers"); #else int val; PyObject *v = __Pyx_PyNumber_IntOrLong(x); #if PY_MAJOR_VERSION < 3 if (likely(v) && !PyLong_Check(v)) { PyObject *tmp = v; v = PyNumber_Long(tmp); Py_DECREF(tmp); } #endif if (likely(v)) { int one = 1; int is_little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&val; int ret = _PyLong_AsByteArray((PyLongObject *)v, bytes, sizeof(val), is_little, !is_unsigned); Py_DECREF(v); if (likely(!ret)) return val; } #endif return (int) -1; } } else { int val; PyObject *tmp = __Pyx_PyNumber_IntOrLong(x); if (!tmp) return (int) -1; val = __Pyx_PyInt_As_int(tmp); Py_DECREF(tmp); return val; } raise_overflow: PyErr_SetString(PyExc_OverflowError, "value too large to convert to int"); return (int) -1; raise_neg_overflow: PyErr_SetString(PyExc_OverflowError, "can't convert negative value to int"); return (int) -1; } /* CIntFromPy */ static CYTHON_INLINE uint32_t __Pyx_PyInt_As_uint32_t(PyObject *x) { const uint32_t neg_one = (uint32_t) ((uint32_t) 0 - (uint32_t) 1), const_zero = (uint32_t) 0; const int is_unsigned = neg_one > const_zero; #if PY_MAJOR_VERSION < 3 if (likely(PyInt_Check(x))) { if (sizeof(uint32_t) < sizeof(long)) { __PYX_VERIFY_RETURN_INT(uint32_t, long, PyInt_AS_LONG(x)) } else { long val = PyInt_AS_LONG(x); if (is_unsigned && unlikely(val < 0)) { goto raise_neg_overflow; } return (uint32_t) val; } } else #endif if (likely(PyLong_Check(x))) { if (is_unsigned) { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) { case 0: return (uint32_t) 0; case 1: __PYX_VERIFY_RETURN_INT(uint32_t, digit, digits[0]) case 2: if (8 * sizeof(uint32_t) > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(uint32_t, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(uint32_t) >= 2 * PyLong_SHIFT) { return (uint32_t) (((((uint32_t)digits[1]) << PyLong_SHIFT) | (uint32_t)digits[0])); } } break; case 3: if (8 * sizeof(uint32_t) > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(uint32_t, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(uint32_t) >= 3 * PyLong_SHIFT) { return (uint32_t) (((((((uint32_t)digits[2]) << PyLong_SHIFT) | (uint32_t)digits[1]) << PyLong_SHIFT) | (uint32_t)digits[0])); } } break; case 4: if (8 * sizeof(uint32_t) > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(uint32_t, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(uint32_t) >= 4 * PyLong_SHIFT) { return (uint32_t) (((((((((uint32_t)digits[3]) << PyLong_SHIFT) | (uint32_t)digits[2]) << PyLong_SHIFT) | (uint32_t)digits[1]) << PyLong_SHIFT) | (uint32_t)digits[0])); } } break; } #endif #if CYTHON_COMPILING_IN_CPYTHON if (unlikely(Py_SIZE(x) < 0)) { goto raise_neg_overflow; } #else { int result = PyObject_RichCompareBool(x, Py_False, Py_LT); if 
(unlikely(result < 0)) return (uint32_t) -1; if (unlikely(result == 1)) goto raise_neg_overflow; } #endif if (sizeof(uint32_t) <= sizeof(unsigned long)) { __PYX_VERIFY_RETURN_INT_EXC(uint32_t, unsigned long, PyLong_AsUnsignedLong(x)) #ifdef HAVE_LONG_LONG } else if (sizeof(uint32_t) <= sizeof(unsigned PY_LONG_LONG)) { __PYX_VERIFY_RETURN_INT_EXC(uint32_t, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x)) #endif } } else { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) { case 0: return (uint32_t) 0; case -1: __PYX_VERIFY_RETURN_INT(uint32_t, sdigit, (sdigit) (-(sdigit)digits[0])) case 1: __PYX_VERIFY_RETURN_INT(uint32_t, digit, +digits[0]) case -2: if (8 * sizeof(uint32_t) - 1 > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(uint32_t, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(uint32_t) - 1 > 2 * PyLong_SHIFT) { return (uint32_t) (((uint32_t)-1)*(((((uint32_t)digits[1]) << PyLong_SHIFT) | (uint32_t)digits[0]))); } } break; case 2: if (8 * sizeof(uint32_t) > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(uint32_t, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(uint32_t) - 1 > 2 * PyLong_SHIFT) { return (uint32_t) ((((((uint32_t)digits[1]) << PyLong_SHIFT) | (uint32_t)digits[0]))); } } break; case -3: if (8 * sizeof(uint32_t) - 1 > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(uint32_t, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(uint32_t) - 1 > 3 * PyLong_SHIFT) { return (uint32_t) (((uint32_t)-1)*(((((((uint32_t)digits[2]) << PyLong_SHIFT) | (uint32_t)digits[1]) << PyLong_SHIFT) | (uint32_t)digits[0]))); } } break; case 3: if (8 * sizeof(uint32_t) > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(uint32_t, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(uint32_t) - 1 > 3 * PyLong_SHIFT) { return (uint32_t) ((((((((uint32_t)digits[2]) << PyLong_SHIFT) | (uint32_t)digits[1]) << PyLong_SHIFT) | (uint32_t)digits[0]))); } } break; case -4: if (8 * sizeof(uint32_t) - 1 > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(uint32_t, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(uint32_t) - 1 > 4 * PyLong_SHIFT) { return (uint32_t) (((uint32_t)-1)*(((((((((uint32_t)digits[3]) << PyLong_SHIFT) | (uint32_t)digits[2]) << PyLong_SHIFT) | (uint32_t)digits[1]) << PyLong_SHIFT) | (uint32_t)digits[0]))); } } break; case 4: if (8 * sizeof(uint32_t) > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(uint32_t, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(uint32_t) - 1 > 4 * PyLong_SHIFT) { return (uint32_t) ((((((((((uint32_t)digits[3]) << PyLong_SHIFT) | (uint32_t)digits[2]) << PyLong_SHIFT) 
| (uint32_t)digits[1]) << PyLong_SHIFT) | (uint32_t)digits[0]))); } } break; } #endif if (sizeof(uint32_t) <= sizeof(long)) { __PYX_VERIFY_RETURN_INT_EXC(uint32_t, long, PyLong_AsLong(x)) #ifdef HAVE_LONG_LONG } else if (sizeof(uint32_t) <= sizeof(PY_LONG_LONG)) { __PYX_VERIFY_RETURN_INT_EXC(uint32_t, PY_LONG_LONG, PyLong_AsLongLong(x)) #endif } } { #if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray) PyErr_SetString(PyExc_RuntimeError, "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers"); #else uint32_t val; PyObject *v = __Pyx_PyNumber_IntOrLong(x); #if PY_MAJOR_VERSION < 3 if (likely(v) && !PyLong_Check(v)) { PyObject *tmp = v; v = PyNumber_Long(tmp); Py_DECREF(tmp); } #endif if (likely(v)) { int one = 1; int is_little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&val; int ret = _PyLong_AsByteArray((PyLongObject *)v, bytes, sizeof(val), is_little, !is_unsigned); Py_DECREF(v); if (likely(!ret)) return val; } #endif return (uint32_t) -1; } } else { uint32_t val; PyObject *tmp = __Pyx_PyNumber_IntOrLong(x); if (!tmp) return (uint32_t) -1; val = __Pyx_PyInt_As_uint32_t(tmp); Py_DECREF(tmp); return val; } raise_overflow: PyErr_SetString(PyExc_OverflowError, "value too large to convert to uint32_t"); return (uint32_t) -1; raise_neg_overflow: PyErr_SetString(PyExc_OverflowError, "can't convert negative value to uint32_t"); return (uint32_t) -1; } /* CIntFromPy */ static CYTHON_INLINE long __Pyx_PyInt_As_long(PyObject *x) { const long neg_one = (long) ((long) 0 - (long) 1), const_zero = (long) 0; const int is_unsigned = neg_one > const_zero; #if PY_MAJOR_VERSION < 3 if (likely(PyInt_Check(x))) { if (sizeof(long) < sizeof(long)) { __PYX_VERIFY_RETURN_INT(long, long, PyInt_AS_LONG(x)) } else { long val = PyInt_AS_LONG(x); if (is_unsigned && unlikely(val < 0)) { goto raise_neg_overflow; } return (long) val; } } else #endif if (likely(PyLong_Check(x))) { if (is_unsigned) { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) { case 0: return (long) 0; case 1: __PYX_VERIFY_RETURN_INT(long, digit, digits[0]) case 2: if (8 * sizeof(long) > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) >= 2 * PyLong_SHIFT) { return (long) (((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0])); } } break; case 3: if (8 * sizeof(long) > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) >= 3 * PyLong_SHIFT) { return (long) (((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])); } } break; case 4: if (8 * sizeof(long) > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) >= 4 * PyLong_SHIFT) { return (long) (((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])); } } break; } #endif #if CYTHON_COMPILING_IN_CPYTHON if (unlikely(Py_SIZE(x) < 0)) { 
goto raise_neg_overflow; } #else { int result = PyObject_RichCompareBool(x, Py_False, Py_LT); if (unlikely(result < 0)) return (long) -1; if (unlikely(result == 1)) goto raise_neg_overflow; } #endif if (sizeof(long) <= sizeof(unsigned long)) { __PYX_VERIFY_RETURN_INT_EXC(long, unsigned long, PyLong_AsUnsignedLong(x)) #ifdef HAVE_LONG_LONG } else if (sizeof(long) <= sizeof(unsigned PY_LONG_LONG)) { __PYX_VERIFY_RETURN_INT_EXC(long, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x)) #endif } } else { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) { case 0: return (long) 0; case -1: __PYX_VERIFY_RETURN_INT(long, sdigit, (sdigit) (-(sdigit)digits[0])) case 1: __PYX_VERIFY_RETURN_INT(long, digit, +digits[0]) case -2: if (8 * sizeof(long) - 1 > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { return (long) (((long)-1)*(((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); } } break; case 2: if (8 * sizeof(long) > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { return (long) ((((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); } } break; case -3: if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { return (long) (((long)-1)*(((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); } } break; case 3: if (8 * sizeof(long) > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { return (long) ((((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); } } break; case -4: if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) { return (long) (((long)-1)*(((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); } } break; case 4: if (8 * sizeof(long) > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) { return (long) ((((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); } } break; } #endif if (sizeof(long) <= 
sizeof(long)) { __PYX_VERIFY_RETURN_INT_EXC(long, long, PyLong_AsLong(x)) #ifdef HAVE_LONG_LONG } else if (sizeof(long) <= sizeof(PY_LONG_LONG)) { __PYX_VERIFY_RETURN_INT_EXC(long, PY_LONG_LONG, PyLong_AsLongLong(x)) #endif } } { #if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray) PyErr_SetString(PyExc_RuntimeError, "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers"); #else long val; PyObject *v = __Pyx_PyNumber_IntOrLong(x); #if PY_MAJOR_VERSION < 3 if (likely(v) && !PyLong_Check(v)) { PyObject *tmp = v; v = PyNumber_Long(tmp); Py_DECREF(tmp); } #endif if (likely(v)) { int one = 1; int is_little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&val; int ret = _PyLong_AsByteArray((PyLongObject *)v, bytes, sizeof(val), is_little, !is_unsigned); Py_DECREF(v); if (likely(!ret)) return val; } #endif return (long) -1; } } else { long val; PyObject *tmp = __Pyx_PyNumber_IntOrLong(x); if (!tmp) return (long) -1; val = __Pyx_PyInt_As_long(tmp); Py_DECREF(tmp); return val; } raise_overflow: PyErr_SetString(PyExc_OverflowError, "value too large to convert to long"); return (long) -1; raise_neg_overflow: PyErr_SetString(PyExc_OverflowError, "can't convert negative value to long"); return (long) -1; } /* FastTypeChecks */ #if CYTHON_COMPILING_IN_CPYTHON static int __Pyx_InBases(PyTypeObject *a, PyTypeObject *b) { while (a) { a = a->tp_base; if (a == b) return 1; } return b == &PyBaseObject_Type; } static CYTHON_INLINE int __Pyx_IsSubtype(PyTypeObject *a, PyTypeObject *b) { PyObject *mro; if (a == b) return 1; mro = a->tp_mro; if (likely(mro)) { Py_ssize_t i, n; n = PyTuple_GET_SIZE(mro); for (i = 0; i < n; i++) { if (PyTuple_GET_ITEM(mro, i) == (PyObject *)b) return 1; } return 0; } return __Pyx_InBases(a, b); } #if PY_MAJOR_VERSION == 2 static int __Pyx_inner_PyErr_GivenExceptionMatches2(PyObject *err, PyObject* exc_type1, PyObject* exc_type2) { PyObject *exception, *value, *tb; int res; __Pyx_PyThreadState_declare __Pyx_PyThreadState_assign __Pyx_ErrFetch(&exception, &value, &tb); res = exc_type1 ? PyObject_IsSubclass(err, exc_type1) : 0; if (unlikely(res == -1)) { PyErr_WriteUnraisable(err); res = 0; } if (!res) { res = PyObject_IsSubclass(err, exc_type2); if (unlikely(res == -1)) { PyErr_WriteUnraisable(err); res = 0; } } __Pyx_ErrRestore(exception, value, tb); return res; } #else static CYTHON_INLINE int __Pyx_inner_PyErr_GivenExceptionMatches2(PyObject *err, PyObject* exc_type1, PyObject *exc_type2) { int res = exc_type1 ? 
__Pyx_IsSubtype((PyTypeObject*)err, (PyTypeObject*)exc_type1) : 0; if (!res) { res = __Pyx_IsSubtype((PyTypeObject*)err, (PyTypeObject*)exc_type2); } return res; } #endif static int __Pyx_PyErr_GivenExceptionMatchesTuple(PyObject *exc_type, PyObject *tuple) { Py_ssize_t i, n; assert(PyExceptionClass_Check(exc_type)); n = PyTuple_GET_SIZE(tuple); #if PY_MAJOR_VERSION >= 3 for (i=0; ip) { #if PY_MAJOR_VERSION < 3 if (t->is_unicode) { *t->p = PyUnicode_DecodeUTF8(t->s, t->n - 1, NULL); } else if (t->intern) { *t->p = PyString_InternFromString(t->s); } else { *t->p = PyString_FromStringAndSize(t->s, t->n - 1); } #else if (t->is_unicode | t->is_str) { if (t->intern) { *t->p = PyUnicode_InternFromString(t->s); } else if (t->encoding) { *t->p = PyUnicode_Decode(t->s, t->n - 1, t->encoding, NULL); } else { *t->p = PyUnicode_FromStringAndSize(t->s, t->n - 1); } } else { *t->p = PyBytes_FromStringAndSize(t->s, t->n - 1); } #endif if (!*t->p) return -1; if (PyObject_Hash(*t->p) == -1) return -1; ++t; } return 0; } static CYTHON_INLINE PyObject* __Pyx_PyUnicode_FromString(const char* c_str) { return __Pyx_PyUnicode_FromStringAndSize(c_str, (Py_ssize_t)strlen(c_str)); } static CYTHON_INLINE const char* __Pyx_PyObject_AsString(PyObject* o) { Py_ssize_t ignore; return __Pyx_PyObject_AsStringAndSize(o, &ignore); } #if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT #if !CYTHON_PEP393_ENABLED static const char* __Pyx_PyUnicode_AsStringAndSize(PyObject* o, Py_ssize_t *length) { char* defenc_c; PyObject* defenc = _PyUnicode_AsDefaultEncodedString(o, NULL); if (!defenc) return NULL; defenc_c = PyBytes_AS_STRING(defenc); #if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII { char* end = defenc_c + PyBytes_GET_SIZE(defenc); char* c; for (c = defenc_c; c < end; c++) { if ((unsigned char) (*c) >= 128) { PyUnicode_AsASCIIString(o); return NULL; } } } #endif *length = PyBytes_GET_SIZE(defenc); return defenc_c; } #else static CYTHON_INLINE const char* __Pyx_PyUnicode_AsStringAndSize(PyObject* o, Py_ssize_t *length) { if (unlikely(__Pyx_PyUnicode_READY(o) == -1)) return NULL; #if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII if (likely(PyUnicode_IS_ASCII(o))) { *length = PyUnicode_GET_LENGTH(o); return PyUnicode_AsUTF8(o); } else { PyUnicode_AsASCIIString(o); return NULL; } #else return PyUnicode_AsUTF8AndSize(o, length); #endif } #endif #endif static CYTHON_INLINE const char* __Pyx_PyObject_AsStringAndSize(PyObject* o, Py_ssize_t *length) { #if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT if ( #if PY_MAJOR_VERSION < 3 && __PYX_DEFAULT_STRING_ENCODING_IS_ASCII __Pyx_sys_getdefaultencoding_not_ascii && #endif PyUnicode_Check(o)) { return __Pyx_PyUnicode_AsStringAndSize(o, length); } else #endif #if (!CYTHON_COMPILING_IN_PYPY) || (defined(PyByteArray_AS_STRING) && defined(PyByteArray_GET_SIZE)) if (PyByteArray_Check(o)) { *length = PyByteArray_GET_SIZE(o); return PyByteArray_AS_STRING(o); } else #endif { char* result; int r = PyBytes_AsStringAndSize(o, &result, length); if (unlikely(r < 0)) { return NULL; } else { return result; } } } static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject* x) { int is_true = x == Py_True; if (is_true | (x == Py_False) | (x == Py_None)) return is_true; else return PyObject_IsTrue(x); } static CYTHON_INLINE int __Pyx_PyObject_IsTrueAndDecref(PyObject* x) { int retval; if (unlikely(!x)) return -1; retval = __Pyx_PyObject_IsTrue(x); Py_DECREF(x); return retval; } static PyObject* __Pyx_PyNumber_IntOrLongWrongResultType(PyObject* 
result, const char* type_name) { #if PY_MAJOR_VERSION >= 3 if (PyLong_Check(result)) { if (PyErr_WarnFormat(PyExc_DeprecationWarning, 1, "__int__ returned non-int (type %.200s). " "The ability to return an instance of a strict subclass of int " "is deprecated, and may be removed in a future version of Python.", Py_TYPE(result)->tp_name)) { Py_DECREF(result); return NULL; } return result; } #endif PyErr_Format(PyExc_TypeError, "__%.4s__ returned non-%.4s (type %.200s)", type_name, type_name, Py_TYPE(result)->tp_name); Py_DECREF(result); return NULL; } static CYTHON_INLINE PyObject* __Pyx_PyNumber_IntOrLong(PyObject* x) { #if CYTHON_USE_TYPE_SLOTS PyNumberMethods *m; #endif const char *name = NULL; PyObject *res = NULL; #if PY_MAJOR_VERSION < 3 if (likely(PyInt_Check(x) || PyLong_Check(x))) #else if (likely(PyLong_Check(x))) #endif return __Pyx_NewRef(x); #if CYTHON_USE_TYPE_SLOTS m = Py_TYPE(x)->tp_as_number; #if PY_MAJOR_VERSION < 3 if (m && m->nb_int) { name = "int"; res = m->nb_int(x); } else if (m && m->nb_long) { name = "long"; res = m->nb_long(x); } #else if (likely(m && m->nb_int)) { name = "int"; res = m->nb_int(x); } #endif #else if (!PyBytes_CheckExact(x) && !PyUnicode_CheckExact(x)) { res = PyNumber_Int(x); } #endif if (likely(res)) { #if PY_MAJOR_VERSION < 3 if (unlikely(!PyInt_Check(res) && !PyLong_Check(res))) { #else if (unlikely(!PyLong_CheckExact(res))) { #endif return __Pyx_PyNumber_IntOrLongWrongResultType(res, name); } } else if (!PyErr_Occurred()) { PyErr_SetString(PyExc_TypeError, "an integer is required"); } return res; } static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject* b) { Py_ssize_t ival; PyObject *x; #if PY_MAJOR_VERSION < 3 if (likely(PyInt_CheckExact(b))) { if (sizeof(Py_ssize_t) >= sizeof(long)) return PyInt_AS_LONG(b); else return PyInt_AsSsize_t(b); } #endif if (likely(PyLong_CheckExact(b))) { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)b)->ob_digit; const Py_ssize_t size = Py_SIZE(b); if (likely(__Pyx_sst_abs(size) <= 1)) { ival = likely(size) ? digits[0] : 0; if (size == -1) ival = -ival; return ival; } else { switch (size) { case 2: if (8 * sizeof(Py_ssize_t) > 2 * PyLong_SHIFT) { return (Py_ssize_t) (((((size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); } break; case -2: if (8 * sizeof(Py_ssize_t) > 2 * PyLong_SHIFT) { return -(Py_ssize_t) (((((size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); } break; case 3: if (8 * sizeof(Py_ssize_t) > 3 * PyLong_SHIFT) { return (Py_ssize_t) (((((((size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); } break; case -3: if (8 * sizeof(Py_ssize_t) > 3 * PyLong_SHIFT) { return -(Py_ssize_t) (((((((size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); } break; case 4: if (8 * sizeof(Py_ssize_t) > 4 * PyLong_SHIFT) { return (Py_ssize_t) (((((((((size_t)digits[3]) << PyLong_SHIFT) | (size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); } break; case -4: if (8 * sizeof(Py_ssize_t) > 4 * PyLong_SHIFT) { return -(Py_ssize_t) (((((((((size_t)digits[3]) << PyLong_SHIFT) | (size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); } break; } } #endif return PyLong_AsSsize_t(b); } x = PyNumber_Index(b); if (!x) return -1; ival = PyInt_AsSsize_t(x); Py_DECREF(x); return ival; } static CYTHON_INLINE PyObject * __Pyx_PyBool_FromLong(long b) { return b ? 
__Pyx_NewRef(Py_True) : __Pyx_NewRef(Py_False); } static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t ival) { return PyInt_FromSize_t(ival); } #endif /* Py_PYTHON_H */ borgbackup-1.1.15/src/borg/shellpattern.py0000644000175000017500000000441013771325506020503 0ustar useruser00000000000000import os import re def translate(pat, match_end=r"\Z"): """Translate a shell-style pattern to a regular expression. The pattern may include ``**`` ( stands for the platform-specific path separator; "/" on POSIX systems) for matching zero or more directory levels and "*" for matching zero or more arbitrary characters with the exception of any path separator. Wrap meta-characters in brackets for a literal match (i.e. "[?]" to match the literal character "?"). Using match_end=regex one can give a regular expression that is used to match after the regex that is generated from the pattern. The default is to match the end of the string. This function is derived from the "fnmatch" module distributed with the Python standard library. Copyright (C) 2001-2016 Python Software Foundation. All rights reserved. TODO: support {alt1,alt2} shell-style alternatives """ sep = os.path.sep n = len(pat) i = 0 res = "" while i < n: c = pat[i] i += 1 if c == "*": if i + 1 < n and pat[i] == "*" and pat[i + 1] == sep: # **/ == wildcard for 0+ full (relative) directory names with trailing slashes; the forward slash stands # for the platform-specific path separator res += r"(?:[^\%s]*\%s)*" % (sep, sep) i += 2 else: # * == wildcard for name parts (does not cross path separator) res += r"[^\%s]*" % sep elif c == "?": # ? == any single character excluding path separator res += r"[^\%s]" % sep elif c == "[": j = i if j < n and pat[j] == "!": j += 1 if j < n and pat[j] == "]": j += 1 while j < n and pat[j] != "]": j += 1 if j >= n: res += "\\[" else: stuff = pat[i:j].replace("\\", "\\\\") i = j + 1 if stuff[0] == "!": stuff = "^" + stuff[1:] elif stuff[0] == "^": stuff = "\\" + stuff res += "[%s]" % stuff else: res += re.escape(c) return "(?ms)" + res + match_end borgbackup-1.1.15/src/borg/compress.pyx0000644000175000017500000003576413771325506020041 0ustar useruser00000000000000# cython: language_level=3 """ borg.compress ============= Compression is applied to chunks after ID hashing (so the ID is a direct function of the plain chunk, compression is irrelevant to it), and of course before encryption. The "auto" mode (e.g. --compression auto,lzma,4) is implemented as a meta Compressor, meaning that Auto acts like a Compressor, but defers actual work to others (namely LZ4 as a heuristic whether compression is worth it, and the specified Compressor for the actual compression). Decompression is normally handled through Compressor.decompress which will detect which compressor has been used to compress the data and dispatch to the correct decompressor. 
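A minimal usage sketch (illustrative only; assumes a built/installed borg so this
module is importable, and the chunk contents and spec string are made up)::

    from borg.compress import CompressionSpec, Compressor

    compressor = CompressionSpec('zstd,3').compressor    # zstd at level 3
    compressed = compressor.compress(b'example chunk data')
    # decompression autodetects the algorithm from the 2-byte ID header:
    assert Compressor('lz4').decompress(compressed) == b'example chunk data'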
""" import zlib try: import lzma except ImportError: lzma = None from .helpers import Buffer, DecompressionError API_VERSION = '1.1_06' cdef extern from "algorithms/lz4-libselect.h": int LZ4_compress_default(const char* source, char* dest, int inputSize, int maxOutputSize) nogil int LZ4_decompress_safe(const char* source, char* dest, int inputSize, int maxOutputSize) nogil int LZ4_compressBound(int inputSize) nogil cdef extern from "algorithms/zstd-libselect.h": size_t ZSTD_compress(void* dst, size_t dstCapacity, const void* src, size_t srcSize, int compressionLevel) nogil size_t ZSTD_decompress(void* dst, size_t dstCapacity, const void* src, size_t compressedSize) nogil size_t ZSTD_compressBound(size_t srcSize) nogil unsigned long long ZSTD_CONTENTSIZE_UNKNOWN unsigned long long ZSTD_CONTENTSIZE_ERROR unsigned long long ZSTD_getFrameContentSize(const void *src, size_t srcSize) nogil unsigned ZSTD_isError(size_t code) nogil const char* ZSTD_getErrorName(size_t code) nogil buffer = Buffer(bytearray, size=0) cdef class CompressorBase: """ base class for all (de)compression classes, also handles compression format auto detection and adding/stripping the ID header (which enable auto detection). """ ID = b'\xFF\xFF' # reserved and not used # overwrite with a unique 2-bytes bytestring in child classes name = 'baseclass' @classmethod def detect(cls, data): return data.startswith(cls.ID) def __init__(self, **kwargs): pass def decide(self, data): """ Return which compressor will perform the actual compression for *data*. This exists for a very specific case: If borg recreate is instructed to recompress using Auto compression it needs to determine the _actual_ target compression of a chunk in order to detect whether it should be recompressed. For all Compressors that are not Auto this always returns *self*. """ return self def compress(self, data): """ Compress *data* (bytes) and return bytes result. Prepend the ID bytes of this compressor, which is needed so that the correct decompressor can be used for decompression. """ # add ID bytes return self.ID + data def decompress(self, data): """ Decompress *data* (bytes) and return bytes result. The leading Compressor ID bytes need to be present. Only handles input generated by _this_ Compressor - for a general purpose decompression method see *Compressor.decompress*. """ # strip ID bytes return data[2:] class CNONE(CompressorBase): """ none - no compression, just pass through data """ ID = b'\x00\x00' name = 'none' def compress(self, data): return super().compress(data) def decompress(self, data): data = super().decompress(data) if not isinstance(data, bytes): data = bytes(data) return data class LZ4(CompressorBase): """ raw LZ4 compression / decompression (liblz4). 
Features: - lz4 is super fast - wrapper releases CPython's GIL to support multithreaded code - uses safe lz4 methods that never go beyond the end of the output buffer """ ID = b'\x01\x00' name = 'lz4' def __init__(self, **kwargs): pass def compress(self, idata): if not isinstance(idata, bytes): idata = bytes(idata) # code below does not work with memoryview cdef int isize = len(idata) cdef int osize cdef char *source = idata cdef char *dest osize = LZ4_compressBound(isize) buf = buffer.get(osize) dest = buf osize = LZ4_compress_default(source, dest, isize, osize) if not osize: raise Exception('lz4 compress failed') return super().compress(dest[:osize]) def decompress(self, idata): if not isinstance(idata, bytes): idata = bytes(idata) # code below does not work with memoryview idata = super().decompress(idata) cdef int isize = len(idata) cdef int osize cdef int rsize cdef char *source = idata cdef char *dest # a bit more than 8MB is enough for the usual data sizes yielded by the chunker. # allocate more if isize * 3 is already bigger, to avoid having to resize often. osize = max(int(1.1 * 2**23), isize * 3) while True: try: buf = buffer.get(osize) except MemoryError: raise DecompressionError('MemoryError') dest = buf rsize = LZ4_decompress_safe(source, dest, isize, osize) if rsize >= 0: break if osize > 2 ** 27: # 128MiB (should be enough, considering max. repo obj size and very good compression) # this is insane, get out of here raise DecompressionError('lz4 decompress failed') # likely the buffer was too small, get a bigger one: osize = int(1.5 * osize) return dest[:rsize] class LZMA(CompressorBase): """ lzma compression / decompression """ ID = b'\x02\x00' name = 'lzma' def __init__(self, level=6, **kwargs): super().__init__(**kwargs) self.level = level if lzma is None: raise ValueError('No lzma support found.') def compress(self, data): # we do not need integrity checks in lzma, we do that already data = lzma.compress(data, preset=self.level, check=lzma.CHECK_NONE) return super().compress(data) def decompress(self, data): data = super().decompress(data) try: return lzma.decompress(data) except lzma.LZMAError as e: raise DecompressionError(str(e)) from None class ZSTD(CompressorBase): """zstd compression / decompression (pypi: zstandard, gh: python-zstandard)""" # This is a NOT THREAD SAFE implementation. # Only ONE python context must to be created at a time. # It should work flawlessly as long as borg will call ONLY ONE compression job at time. 
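    # Illustrative note (not from the original source): via the CompressionSpec
    # syntax further below, e.g. "zstd,10", this class gets instantiated as
    # ZSTD(level=10); valid zstd levels there are 1..22, with 3 as the default.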
ID = b'\x03\x00' name = 'zstd' def __init__(self, level=3, **kwargs): super().__init__(**kwargs) self.level = level def compress(self, idata): if not isinstance(idata, bytes): idata = bytes(idata) # code below does not work with memoryview cdef int isize = len(idata) cdef size_t osize cdef char *source = idata cdef char *dest cdef int level = self.level osize = ZSTD_compressBound(isize) buf = buffer.get(osize) dest = buf with nogil: osize = ZSTD_compress(dest, osize, source, isize, level) if ZSTD_isError(osize): raise Exception('zstd compress failed: %s' % ZSTD_getErrorName(osize)) return super().compress(dest[:osize]) def decompress(self, idata): if not isinstance(idata, bytes): idata = bytes(idata) # code below does not work with memoryview idata = super().decompress(idata) cdef int isize = len(idata) cdef unsigned long long osize cdef unsigned long long rsize cdef char *source = idata cdef char *dest osize = ZSTD_getFrameContentSize(source, isize) if osize == ZSTD_CONTENTSIZE_ERROR: raise DecompressionError('zstd get size failed: data was not compressed by zstd') if osize == ZSTD_CONTENTSIZE_UNKNOWN: raise DecompressionError('zstd get size failed: original size unknown') try: buf = buffer.get(osize) except MemoryError: raise DecompressionError('MemoryError') dest = buf with nogil: rsize = ZSTD_decompress(dest, osize, source, isize) if ZSTD_isError(rsize): raise DecompressionError('zstd decompress failed: %s' % ZSTD_getErrorName(rsize)) if rsize != osize: raise DecompressionError('zstd decompress failed: size mismatch') return dest[:osize] class ZLIB(CompressorBase): """ zlib compression / decompression (python stdlib) """ ID = b'\x08\x00' # not used here, see detect() # avoid all 0x.8.. IDs elsewhere! name = 'zlib' @classmethod def detect(cls, data): # matches misc. patterns 0x.8.. used by zlib cmf, flg = data[:2] is_deflate = cmf & 0x0f == 8 check_ok = (cmf * 256 + flg) % 31 == 0 return check_ok and is_deflate def __init__(self, level=6, **kwargs): super().__init__(**kwargs) self.level = level def compress(self, data): # note: for compatibility no super call, do not add ID bytes return zlib.compress(data, self.level) def decompress(self, data): # note: for compatibility no super call, do not strip ID bytes try: return zlib.decompress(data) except zlib.error as e: raise DecompressionError(str(e)) from None class Auto(CompressorBase): """ Meta-Compressor that decides which compression to use based on LZ4's ratio. As a meta-Compressor the actual compression is deferred to other Compressors, therefore this Compressor has no ID, no detect() and no decompress(). """ ID = None name = 'auto' def __init__(self, compressor): super().__init__() self.compressor = compressor self.lz4 = get_compressor('lz4') self.none = get_compressor('none') def _decide(self, data): """ Decides what to do with *data*. Returns (compressor, lz4_data). *lz4_data* is the LZ4 result if *compressor* is LZ4 as well, otherwise it is None. """ lz4_data = self.lz4.compress(data) ratio = len(lz4_data) / len(data) # lz4_data includes the compression type header, while data does not yet ratio = len(lz4_data) / (len(data) + 2) if ratio < 0.97: return self.compressor, lz4_data elif ratio < 1: return self.lz4, lz4_data else: return self.none, None def decide(self, data): return self._decide(data)[0] def compress(self, data): compressor, lz4_data = self._decide(data) if compressor is self.lz4: # we know that trying to compress with expensive compressor is likely pointless, # but lz4 managed to at least squeeze the data a bit. 
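            # Worked example (illustrative numbers, not from the original source):
            # a 1000 byte chunk that lz4 shrinks to 985 bytes (ID header included)
            # gives ratio = 985 / (1000 + 2) ~= 0.983, i.e. in [0.97, 1), so
            # _decide() picked lz4 and we store the lz4 result right away.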
return lz4_data if compressor is self.none: # we know that trying to compress with expensive compressor is likely pointless # and also lz4 did not manage to squeeze the data (not even a bit). uncompressed_data = compressor.compress(data) return uncompressed_data # if we get here, the decider decided to try the expensive compressor. # we also know that lz4_data is smaller than uncompressed data. exp_compressed_data = compressor.compress(data) ratio = len(exp_compressed_data) / len(lz4_data) if ratio < 0.99: # the expensive compressor managed to squeeze the data significantly better than lz4. return exp_compressed_data else: # otherwise let's just store the lz4 data, which decompresses extremely fast. return lz4_data def decompress(self, data): raise NotImplementedError def detect(cls, data): raise NotImplementedError # Maps valid compressor names to their class COMPRESSOR_TABLE = { CNONE.name: CNONE, LZ4.name: LZ4, ZLIB.name: ZLIB, LZMA.name: LZMA, Auto.name: Auto, ZSTD.name: ZSTD, } # List of possible compression types. Does not include Auto, since it is a meta-Compressor. COMPRESSOR_LIST = [LZ4, ZSTD, CNONE, ZLIB, LZMA, ] # check fast stuff first def get_compressor(name, **kwargs): cls = COMPRESSOR_TABLE[name] return cls(**kwargs) class Compressor: """ compresses using a compressor with given name and parameters decompresses everything we can handle (autodetect) """ def __init__(self, name='null', **kwargs): self.params = kwargs self.compressor = get_compressor(name, **self.params) def compress(self, data): return self.compressor.compress(data) def decompress(self, data): compressor_cls = self.detect(data) return compressor_cls(**self.params).decompress(data) @staticmethod def detect(data): hdr = bytes(data[:2]) # detect() does not work with memoryview for cls in COMPRESSOR_LIST: if cls.detect(hdr): return cls else: raise ValueError('No decompressor for this data found: %r.', data[:2]) class CompressionSpec: def __init__(self, s): values = s.split(',') count = len(values) if count < 1: raise ValueError # --compression algo[,level] self.name = values[0] if self.name in ('none', 'lz4', ): return elif self.name in ('zlib', 'lzma', ): if count < 2: level = 6 # default compression level in py stdlib elif count == 2: level = int(values[1]) if not 0 <= level <= 9: raise ValueError else: raise ValueError self.level = level elif self.name in ('zstd', ): if count < 2: level = 3 # default compression level in zstd elif count == 2: level = int(values[1]) if not 1 <= level <= 22: raise ValueError else: raise ValueError self.level = level elif self.name == 'auto': if 2 <= count <= 3: compression = ','.join(values[1:]) else: raise ValueError self.inner = CompressionSpec(compression) else: raise ValueError @property def compressor(self): if self.name in ('none', 'lz4', ): return get_compressor(self.name) elif self.name in ('zlib', 'lzma', 'zstd', ): return get_compressor(self.name, level=self.level) elif self.name == 'auto': return get_compressor(self.name, compressor=self.inner.compressor) borgbackup-1.1.15/src/borg/locking.py0000644000175000017500000003446313771325506017437 0ustar useruser00000000000000import errno import json import os import time from . import platform from .helpers import Error, ErrorWithTraceback from .logger import create_logger ADD, REMOVE = 'add', 'remove' SHARED, EXCLUSIVE = 'shared', 'exclusive' logger = create_logger(__name__) class TimeoutTimer: """ A timer for timeout checks (can also deal with "never timeout"). 
It can also compute and optionally execute a reasonable sleep time (e.g. to avoid polling too often or to support thread/process rescheduling). """ def __init__(self, timeout=None, sleep=None): """ Initialize a timer. :param timeout: time out interval [s] or None (never timeout, wait forever) [default] :param sleep: sleep interval [s] (>= 0: do sleep call, <0: don't call sleep) or None (autocompute: use 10% of timeout [but not more than 60s], or 1s for "never timeout" mode) """ if timeout is not None and timeout < 0: raise ValueError("timeout must be >= 0") self.timeout_interval = timeout if sleep is None: if timeout is None: sleep = 1.0 else: sleep = min(60.0, timeout / 10.0) self.sleep_interval = sleep self.start_time = None self.end_time = None def __repr__(self): return "<%s: start=%r end=%r timeout=%r sleep=%r>" % ( self.__class__.__name__, self.start_time, self.end_time, self.timeout_interval, self.sleep_interval) def start(self): self.start_time = time.time() if self.timeout_interval is not None: self.end_time = self.start_time + self.timeout_interval return self def sleep(self): if self.sleep_interval >= 0: time.sleep(self.sleep_interval) def timed_out(self): return self.end_time is not None and time.time() >= self.end_time def timed_out_or_sleep(self): if self.timed_out(): return True else: self.sleep() return False class LockError(Error): """Failed to acquire the lock {}.""" class LockErrorT(ErrorWithTraceback): """Failed to acquire the lock {}.""" class LockTimeout(LockError): """Failed to create/acquire the lock {} (timeout).""" class LockFailed(LockErrorT): """Failed to create/acquire the lock {} ({}).""" class NotLocked(LockErrorT): """Failed to release the lock {} (was not locked).""" class NotMyLock(LockErrorT): """Failed to release the lock {} (was/is locked, but not by me).""" class ExclusiveLock: """An exclusive Lock based on mkdir fs operation being atomic. If possible, try to use the contextmanager here like:: with ExclusiveLock(...) as lock: ... This makes sure the lock is released again if the block is left, no matter how (e.g. if an exception occurred). 
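    A slightly fuller sketch (hypothetical path and timeout, not part of the
    original docstring)::

        try:
            with ExclusiveLock('/path/to/lockdir', timeout=5) as lock:
                ...  # exclusive work
        except LockTimeout:
            pass  # somebody else holds the lock, give up or retry later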
""" def __init__(self, path, timeout=None, sleep=None, id=None, kill_stale_locks=False): self.timeout = timeout self.sleep = sleep self.path = os.path.abspath(path) self.id = id or platform.get_process_id() self.unique_name = os.path.join(self.path, "%s.%d-%x" % self.id) self.kill_stale_locks = kill_stale_locks self.stale_warning_printed = False def __enter__(self): return self.acquire() def __exit__(self, *exc): self.release() def __repr__(self): return "<%s: %r>" % (self.__class__.__name__, self.unique_name) def acquire(self, timeout=None, sleep=None): if timeout is None: timeout = self.timeout if sleep is None: sleep = self.sleep timer = TimeoutTimer(timeout, sleep).start() while True: try: os.mkdir(self.path) except FileExistsError: # already locked if self.by_me(): return self self.kill_stale_lock() if timer.timed_out_or_sleep(): raise LockTimeout(self.path) except OSError as err: raise LockFailed(self.path, str(err)) from None else: with open(self.unique_name, "wb"): pass return self def release(self): if not self.is_locked(): raise NotLocked(self.path) if not self.by_me(): raise NotMyLock(self.path) os.unlink(self.unique_name) os.rmdir(self.path) def is_locked(self): return os.path.exists(self.path) def by_me(self): return os.path.exists(self.unique_name) def kill_stale_lock(self): for name in os.listdir(self.path): try: host_pid, thread_str = name.rsplit('-', 1) host, pid_str = host_pid.rsplit('.', 1) pid = int(pid_str) thread = int(thread_str) except ValueError: # Malformed lock name? Or just some new format we don't understand? logger.error("Found malformed lock %s in %s. Please check/fix manually.", name, self.path) return False if platform.process_alive(host, pid, thread): return False if not self.kill_stale_locks: if not self.stale_warning_printed: # Log this at warning level to hint the user at the ability logger.warning("Found stale lock %s, but not deleting because BORG_HOSTNAME_IS_UNIQUE is False.", name) self.stale_warning_printed = True return False try: os.unlink(os.path.join(self.path, name)) logger.warning('Killed stale lock %s.', name) except OSError as err: if not self.stale_warning_printed: # This error will bubble up and likely result in locking failure logger.error('Found stale lock %s, but cannot delete due to %s', name, str(err)) self.stale_warning_printed = True return False try: os.rmdir(self.path) except OSError as err: if err.errno == errno.ENOTEMPTY: # Directory is not empty = we lost the race to somebody else return False # EACCES or EIO or ... = we cannot operate anyway logger.error('Failed to remove lock dir: %s', str(err)) return False return True def break_lock(self): if self.is_locked(): for name in os.listdir(self.path): os.unlink(os.path.join(self.path, name)) os.rmdir(self.path) def migrate_lock(self, old_id, new_id): """migrate the lock ownership from old_id to new_id""" assert self.id == old_id new_unique_name = os.path.join(self.path, "%s.%d-%x" % new_id) if self.is_locked() and self.by_me(): with open(new_unique_name, "wb"): pass os.unlink(self.unique_name) self.id, self.unique_name = new_id, new_unique_name class LockRoster: """ A Lock Roster to track shared/exclusive lockers. Note: you usually should call the methods with an exclusive lock held, to avoid conflicting access by multiple threads/processes/machines. 
""" def __init__(self, path, id=None, kill_stale_locks=False): self.path = path self.id = id or platform.get_process_id() self.kill_stale_locks = kill_stale_locks def load(self): try: with open(self.path) as f: data = json.load(f) # Just nuke the stale locks early on load if self.kill_stale_locks: for key in (SHARED, EXCLUSIVE): try: entries = data[key] except KeyError: continue elements = set() for host, pid, thread in entries: if platform.process_alive(host, pid, thread): elements.add((host, pid, thread)) else: logger.warning('Removed stale %s roster lock for host %s pid %d thread %d.', key, host, pid, thread) data[key] = list(elements) except (FileNotFoundError, ValueError): # no or corrupt/empty roster file? data = {} return data def save(self, data): with open(self.path, "w") as f: json.dump(data, f) def remove(self): try: os.unlink(self.path) except FileNotFoundError: pass def get(self, key): roster = self.load() return set(tuple(e) for e in roster.get(key, [])) def empty(self, *keys): return all(not self.get(key) for key in keys) def modify(self, key, op): roster = self.load() try: elements = set(tuple(e) for e in roster[key]) except KeyError: elements = set() if op == ADD: elements.add(self.id) elif op == REMOVE: elements.remove(self.id) else: raise ValueError('Unknown LockRoster op %r' % op) roster[key] = list(list(e) for e in elements) self.save(roster) def migrate_lock(self, key, old_id, new_id): """migrate the lock ownership from old_id to new_id""" assert self.id == old_id # need to temporarily switch off stale lock killing as we want to # rather migrate than kill them (at least the one made by old_id). killing, self.kill_stale_locks = self.kill_stale_locks, False try: try: self.modify(key, REMOVE) except KeyError: # entry was not there, so no need to add a new one, but still update our id self.id = new_id else: # old entry removed, update our id and add a updated entry self.id = new_id self.modify(key, ADD) finally: self.kill_stale_locks = killing class Lock: """ A Lock for a resource that can be accessed in a shared or exclusive way. Typically, write access to a resource needs an exclusive lock (1 writer, noone is allowed reading) and read access to a resource needs a shared lock (multiple readers are allowed). If possible, try to use the contextmanager here like:: with Lock(...) as lock: ... This makes sure the lock is released again if the block is left, no matter how (e.g. if an exception occurred). 
""" def __init__(self, path, exclusive=False, sleep=None, timeout=None, id=None, kill_stale_locks=False): self.path = path self.is_exclusive = exclusive self.sleep = sleep self.timeout = timeout self.id = id or platform.get_process_id() # globally keeping track of shared and exclusive lockers: self._roster = LockRoster(path + '.roster', id=id, kill_stale_locks=kill_stale_locks) # an exclusive lock, used for: # - holding while doing roster queries / updates # - holding while the Lock itself is exclusive self._lock = ExclusiveLock(path + '.exclusive', id=id, timeout=timeout, kill_stale_locks=kill_stale_locks) def __enter__(self): return self.acquire() def __exit__(self, *exc): self.release() def __repr__(self): return "<%s: %r>" % (self.__class__.__name__, self.id) def acquire(self, exclusive=None, remove=None, sleep=None): if exclusive is None: exclusive = self.is_exclusive sleep = sleep or self.sleep or 0.2 if exclusive: self._wait_for_readers_finishing(remove, sleep) self._roster.modify(EXCLUSIVE, ADD) else: with self._lock: if remove is not None: self._roster.modify(remove, REMOVE) self._roster.modify(SHARED, ADD) self.is_exclusive = exclusive return self def _wait_for_readers_finishing(self, remove, sleep): timer = TimeoutTimer(self.timeout, sleep).start() while True: self._lock.acquire() try: if remove is not None: self._roster.modify(remove, REMOVE) if len(self._roster.get(SHARED)) == 0: return # we are the only one and we keep the lock! # restore the roster state as before (undo the roster change): if remove is not None: self._roster.modify(remove, ADD) except: # avoid orphan lock when an exception happens here, e.g. Ctrl-C! self._lock.release() raise else: self._lock.release() if timer.timed_out_or_sleep(): raise LockTimeout(self.path) def release(self): if self.is_exclusive: self._roster.modify(EXCLUSIVE, REMOVE) if self._roster.empty(EXCLUSIVE, SHARED): self._roster.remove() self._lock.release() else: with self._lock: self._roster.modify(SHARED, REMOVE) if self._roster.empty(EXCLUSIVE, SHARED): self._roster.remove() def upgrade(self): # WARNING: if multiple read-lockers want to upgrade, it will deadlock because they # all will wait until the other read locks go away - and that won't happen. if not self.is_exclusive: self.acquire(exclusive=True, remove=SHARED) def downgrade(self): if self.is_exclusive: self.acquire(exclusive=False, remove=EXCLUSIVE) def got_exclusive_lock(self): return self.is_exclusive and self._lock.is_locked() and self._lock.by_me() def break_lock(self): self._roster.remove() self._lock.break_lock() def migrate_lock(self, old_id, new_id): assert self.id == old_id self.id = new_id if self.is_exclusive: self._lock.migrate_lock(old_id, new_id) self._roster.migrate_lock(EXCLUSIVE, old_id, new_id) else: with self._lock: self._lock.migrate_lock(old_id, new_id) self._roster.migrate_lock(SHARED, old_id, new_id) borgbackup-1.1.15/src/borg/compress.c0000644000175000017500000241025213771325771017436 0ustar useruser00000000000000/* Generated by Cython 0.29.21 */ #define PY_SSIZE_T_CLEAN #include "Python.h" #ifndef Py_PYTHON_H #error Python headers needed to compile C extensions, please install development version of Python. #elif PY_VERSION_HEX < 0x02060000 || (0x03000000 <= PY_VERSION_HEX && PY_VERSION_HEX < 0x03030000) #error Cython requires Python 2.6+ or Python 3.3+. 
#else #define CYTHON_ABI "0_29_21" #define CYTHON_HEX_VERSION 0x001D15F0 #define CYTHON_FUTURE_DIVISION 1 #include #ifndef offsetof #define offsetof(type, member) ( (size_t) & ((type*)0) -> member ) #endif #if !defined(WIN32) && !defined(MS_WINDOWS) #ifndef __stdcall #define __stdcall #endif #ifndef __cdecl #define __cdecl #endif #ifndef __fastcall #define __fastcall #endif #endif #ifndef DL_IMPORT #define DL_IMPORT(t) t #endif #ifndef DL_EXPORT #define DL_EXPORT(t) t #endif #define __PYX_COMMA , #ifndef HAVE_LONG_LONG #if PY_VERSION_HEX >= 0x02070000 #define HAVE_LONG_LONG #endif #endif #ifndef PY_LONG_LONG #define PY_LONG_LONG LONG_LONG #endif #ifndef Py_HUGE_VAL #define Py_HUGE_VAL HUGE_VAL #endif #ifdef PYPY_VERSION #define CYTHON_COMPILING_IN_PYPY 1 #define CYTHON_COMPILING_IN_PYSTON 0 #define CYTHON_COMPILING_IN_CPYTHON 0 #undef CYTHON_USE_TYPE_SLOTS #define CYTHON_USE_TYPE_SLOTS 0 #undef CYTHON_USE_PYTYPE_LOOKUP #define CYTHON_USE_PYTYPE_LOOKUP 0 #if PY_VERSION_HEX < 0x03050000 #undef CYTHON_USE_ASYNC_SLOTS #define CYTHON_USE_ASYNC_SLOTS 0 #elif !defined(CYTHON_USE_ASYNC_SLOTS) #define CYTHON_USE_ASYNC_SLOTS 1 #endif #undef CYTHON_USE_PYLIST_INTERNALS #define CYTHON_USE_PYLIST_INTERNALS 0 #undef CYTHON_USE_UNICODE_INTERNALS #define CYTHON_USE_UNICODE_INTERNALS 0 #undef CYTHON_USE_UNICODE_WRITER #define CYTHON_USE_UNICODE_WRITER 0 #undef CYTHON_USE_PYLONG_INTERNALS #define CYTHON_USE_PYLONG_INTERNALS 0 #undef CYTHON_AVOID_BORROWED_REFS #define CYTHON_AVOID_BORROWED_REFS 1 #undef CYTHON_ASSUME_SAFE_MACROS #define CYTHON_ASSUME_SAFE_MACROS 0 #undef CYTHON_UNPACK_METHODS #define CYTHON_UNPACK_METHODS 0 #undef CYTHON_FAST_THREAD_STATE #define CYTHON_FAST_THREAD_STATE 0 #undef CYTHON_FAST_PYCALL #define CYTHON_FAST_PYCALL 0 #undef CYTHON_PEP489_MULTI_PHASE_INIT #define CYTHON_PEP489_MULTI_PHASE_INIT 0 #undef CYTHON_USE_TP_FINALIZE #define CYTHON_USE_TP_FINALIZE 0 #undef CYTHON_USE_DICT_VERSIONS #define CYTHON_USE_DICT_VERSIONS 0 #undef CYTHON_USE_EXC_INFO_STACK #define CYTHON_USE_EXC_INFO_STACK 0 #elif defined(PYSTON_VERSION) #define CYTHON_COMPILING_IN_PYPY 0 #define CYTHON_COMPILING_IN_PYSTON 1 #define CYTHON_COMPILING_IN_CPYTHON 0 #ifndef CYTHON_USE_TYPE_SLOTS #define CYTHON_USE_TYPE_SLOTS 1 #endif #undef CYTHON_USE_PYTYPE_LOOKUP #define CYTHON_USE_PYTYPE_LOOKUP 0 #undef CYTHON_USE_ASYNC_SLOTS #define CYTHON_USE_ASYNC_SLOTS 0 #undef CYTHON_USE_PYLIST_INTERNALS #define CYTHON_USE_PYLIST_INTERNALS 0 #ifndef CYTHON_USE_UNICODE_INTERNALS #define CYTHON_USE_UNICODE_INTERNALS 1 #endif #undef CYTHON_USE_UNICODE_WRITER #define CYTHON_USE_UNICODE_WRITER 0 #undef CYTHON_USE_PYLONG_INTERNALS #define CYTHON_USE_PYLONG_INTERNALS 0 #ifndef CYTHON_AVOID_BORROWED_REFS #define CYTHON_AVOID_BORROWED_REFS 0 #endif #ifndef CYTHON_ASSUME_SAFE_MACROS #define CYTHON_ASSUME_SAFE_MACROS 1 #endif #ifndef CYTHON_UNPACK_METHODS #define CYTHON_UNPACK_METHODS 1 #endif #undef CYTHON_FAST_THREAD_STATE #define CYTHON_FAST_THREAD_STATE 0 #undef CYTHON_FAST_PYCALL #define CYTHON_FAST_PYCALL 0 #undef CYTHON_PEP489_MULTI_PHASE_INIT #define CYTHON_PEP489_MULTI_PHASE_INIT 0 #undef CYTHON_USE_TP_FINALIZE #define CYTHON_USE_TP_FINALIZE 0 #undef CYTHON_USE_DICT_VERSIONS #define CYTHON_USE_DICT_VERSIONS 0 #undef CYTHON_USE_EXC_INFO_STACK #define CYTHON_USE_EXC_INFO_STACK 0 #else #define CYTHON_COMPILING_IN_PYPY 0 #define CYTHON_COMPILING_IN_PYSTON 0 #define CYTHON_COMPILING_IN_CPYTHON 1 #ifndef CYTHON_USE_TYPE_SLOTS #define CYTHON_USE_TYPE_SLOTS 1 #endif #if PY_VERSION_HEX < 0x02070000 #undef CYTHON_USE_PYTYPE_LOOKUP #define 
CYTHON_USE_PYTYPE_LOOKUP 0 #elif !defined(CYTHON_USE_PYTYPE_LOOKUP) #define CYTHON_USE_PYTYPE_LOOKUP 1 #endif #if PY_MAJOR_VERSION < 3 #undef CYTHON_USE_ASYNC_SLOTS #define CYTHON_USE_ASYNC_SLOTS 0 #elif !defined(CYTHON_USE_ASYNC_SLOTS) #define CYTHON_USE_ASYNC_SLOTS 1 #endif #if PY_VERSION_HEX < 0x02070000 #undef CYTHON_USE_PYLONG_INTERNALS #define CYTHON_USE_PYLONG_INTERNALS 0 #elif !defined(CYTHON_USE_PYLONG_INTERNALS) #define CYTHON_USE_PYLONG_INTERNALS 1 #endif #ifndef CYTHON_USE_PYLIST_INTERNALS #define CYTHON_USE_PYLIST_INTERNALS 1 #endif #ifndef CYTHON_USE_UNICODE_INTERNALS #define CYTHON_USE_UNICODE_INTERNALS 1 #endif #if PY_VERSION_HEX < 0x030300F0 #undef CYTHON_USE_UNICODE_WRITER #define CYTHON_USE_UNICODE_WRITER 0 #elif !defined(CYTHON_USE_UNICODE_WRITER) #define CYTHON_USE_UNICODE_WRITER 1 #endif #ifndef CYTHON_AVOID_BORROWED_REFS #define CYTHON_AVOID_BORROWED_REFS 0 #endif #ifndef CYTHON_ASSUME_SAFE_MACROS #define CYTHON_ASSUME_SAFE_MACROS 1 #endif #ifndef CYTHON_UNPACK_METHODS #define CYTHON_UNPACK_METHODS 1 #endif #ifndef CYTHON_FAST_THREAD_STATE #define CYTHON_FAST_THREAD_STATE 1 #endif #ifndef CYTHON_FAST_PYCALL #define CYTHON_FAST_PYCALL 1 #endif #ifndef CYTHON_PEP489_MULTI_PHASE_INIT #define CYTHON_PEP489_MULTI_PHASE_INIT (PY_VERSION_HEX >= 0x03050000) #endif #ifndef CYTHON_USE_TP_FINALIZE #define CYTHON_USE_TP_FINALIZE (PY_VERSION_HEX >= 0x030400a1) #endif #ifndef CYTHON_USE_DICT_VERSIONS #define CYTHON_USE_DICT_VERSIONS (PY_VERSION_HEX >= 0x030600B1) #endif #ifndef CYTHON_USE_EXC_INFO_STACK #define CYTHON_USE_EXC_INFO_STACK (PY_VERSION_HEX >= 0x030700A3) #endif #endif #if !defined(CYTHON_FAST_PYCCALL) #define CYTHON_FAST_PYCCALL (CYTHON_FAST_PYCALL && PY_VERSION_HEX >= 0x030600B1) #endif #if CYTHON_USE_PYLONG_INTERNALS #include "longintrepr.h" #undef SHIFT #undef BASE #undef MASK #ifdef SIZEOF_VOID_P enum { __pyx_check_sizeof_voidp = 1 / (int)(SIZEOF_VOID_P == sizeof(void*)) }; #endif #endif #ifndef __has_attribute #define __has_attribute(x) 0 #endif #ifndef __has_cpp_attribute #define __has_cpp_attribute(x) 0 #endif #ifndef CYTHON_RESTRICT #if defined(__GNUC__) #define CYTHON_RESTRICT __restrict__ #elif defined(_MSC_VER) && _MSC_VER >= 1400 #define CYTHON_RESTRICT __restrict #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L #define CYTHON_RESTRICT restrict #else #define CYTHON_RESTRICT #endif #endif #ifndef CYTHON_UNUSED # if defined(__GNUC__) # if !(defined(__cplusplus)) || (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4)) # define CYTHON_UNUSED __attribute__ ((__unused__)) # else # define CYTHON_UNUSED # endif # elif defined(__ICC) || (defined(__INTEL_COMPILER) && !defined(_MSC_VER)) # define CYTHON_UNUSED __attribute__ ((__unused__)) # else # define CYTHON_UNUSED # endif #endif #ifndef CYTHON_MAYBE_UNUSED_VAR # if defined(__cplusplus) template void CYTHON_MAYBE_UNUSED_VAR( const T& ) { } # else # define CYTHON_MAYBE_UNUSED_VAR(x) (void)(x) # endif #endif #ifndef CYTHON_NCP_UNUSED # if CYTHON_COMPILING_IN_CPYTHON # define CYTHON_NCP_UNUSED # else # define CYTHON_NCP_UNUSED CYTHON_UNUSED # endif #endif #define __Pyx_void_to_None(void_result) ((void)(void_result), Py_INCREF(Py_None), Py_None) #ifdef _MSC_VER #ifndef _MSC_STDINT_H_ #if _MSC_VER < 1300 typedef unsigned char uint8_t; typedef unsigned int uint32_t; #else typedef unsigned __int8 uint8_t; typedef unsigned __int32 uint32_t; #endif #endif #else #include #endif #ifndef CYTHON_FALLTHROUGH #if defined(__cplusplus) && __cplusplus >= 201103L #if __has_cpp_attribute(fallthrough) #define 
CYTHON_FALLTHROUGH [[fallthrough]] #elif __has_cpp_attribute(clang::fallthrough) #define CYTHON_FALLTHROUGH [[clang::fallthrough]] #elif __has_cpp_attribute(gnu::fallthrough) #define CYTHON_FALLTHROUGH [[gnu::fallthrough]] #endif #endif #ifndef CYTHON_FALLTHROUGH #if __has_attribute(fallthrough) #define CYTHON_FALLTHROUGH __attribute__((fallthrough)) #else #define CYTHON_FALLTHROUGH #endif #endif #if defined(__clang__ ) && defined(__apple_build_version__) #if __apple_build_version__ < 7000000 #undef CYTHON_FALLTHROUGH #define CYTHON_FALLTHROUGH #endif #endif #endif #ifndef CYTHON_INLINE #if defined(__clang__) #define CYTHON_INLINE __inline__ __attribute__ ((__unused__)) #elif defined(__GNUC__) #define CYTHON_INLINE __inline__ #elif defined(_MSC_VER) #define CYTHON_INLINE __inline #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L #define CYTHON_INLINE inline #else #define CYTHON_INLINE #endif #endif #if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX < 0x02070600 && !defined(Py_OptimizeFlag) #define Py_OptimizeFlag 0 #endif #define __PYX_BUILD_PY_SSIZE_T "n" #define CYTHON_FORMAT_SSIZE_T "z" #if PY_MAJOR_VERSION < 3 #define __Pyx_BUILTIN_MODULE_NAME "__builtin__" #define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\ PyCode_New(a+k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) #define __Pyx_DefaultClassType PyClass_Type #else #define __Pyx_BUILTIN_MODULE_NAME "builtins" #if PY_VERSION_HEX >= 0x030800A4 && PY_VERSION_HEX < 0x030800B2 #define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\ PyCode_New(a, 0, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) #else #define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\ PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) #endif #define __Pyx_DefaultClassType PyType_Type #endif #ifndef Py_TPFLAGS_CHECKTYPES #define Py_TPFLAGS_CHECKTYPES 0 #endif #ifndef Py_TPFLAGS_HAVE_INDEX #define Py_TPFLAGS_HAVE_INDEX 0 #endif #ifndef Py_TPFLAGS_HAVE_NEWBUFFER #define Py_TPFLAGS_HAVE_NEWBUFFER 0 #endif #ifndef Py_TPFLAGS_HAVE_FINALIZE #define Py_TPFLAGS_HAVE_FINALIZE 0 #endif #ifndef METH_STACKLESS #define METH_STACKLESS 0 #endif #if PY_VERSION_HEX <= 0x030700A3 || !defined(METH_FASTCALL) #ifndef METH_FASTCALL #define METH_FASTCALL 0x80 #endif typedef PyObject *(*__Pyx_PyCFunctionFast) (PyObject *self, PyObject *const *args, Py_ssize_t nargs); typedef PyObject *(*__Pyx_PyCFunctionFastWithKeywords) (PyObject *self, PyObject *const *args, Py_ssize_t nargs, PyObject *kwnames); #else #define __Pyx_PyCFunctionFast _PyCFunctionFast #define __Pyx_PyCFunctionFastWithKeywords _PyCFunctionFastWithKeywords #endif #if CYTHON_FAST_PYCCALL #define __Pyx_PyFastCFunction_Check(func)\ ((PyCFunction_Check(func) && (METH_FASTCALL == (PyCFunction_GET_FLAGS(func) & ~(METH_CLASS | METH_STATIC | METH_COEXIST | METH_KEYWORDS | METH_STACKLESS))))) #else #define __Pyx_PyFastCFunction_Check(func) 0 #endif #if CYTHON_COMPILING_IN_PYPY && !defined(PyObject_Malloc) #define PyObject_Malloc(s) PyMem_Malloc(s) #define PyObject_Free(p) PyMem_Free(p) #define PyObject_Realloc(p) PyMem_Realloc(p) #endif #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX < 0x030400A1 #define PyMem_RawMalloc(n) PyMem_Malloc(n) #define PyMem_RawRealloc(p, n) PyMem_Realloc(p, n) #define PyMem_RawFree(p) PyMem_Free(p) #endif #if CYTHON_COMPILING_IN_PYSTON #define __Pyx_PyCode_HasFreeVars(co) PyCode_HasFreeVars(co) #define __Pyx_PyFrame_SetLineNumber(frame, lineno) 
PyFrame_SetLineNumber(frame, lineno) #else #define __Pyx_PyCode_HasFreeVars(co) (PyCode_GetNumFree(co) > 0) #define __Pyx_PyFrame_SetLineNumber(frame, lineno) (frame)->f_lineno = (lineno) #endif #if !CYTHON_FAST_THREAD_STATE || PY_VERSION_HEX < 0x02070000 #define __Pyx_PyThreadState_Current PyThreadState_GET() #elif PY_VERSION_HEX >= 0x03060000 #define __Pyx_PyThreadState_Current _PyThreadState_UncheckedGet() #elif PY_VERSION_HEX >= 0x03000000 #define __Pyx_PyThreadState_Current PyThreadState_GET() #else #define __Pyx_PyThreadState_Current _PyThreadState_Current #endif #if PY_VERSION_HEX < 0x030700A2 && !defined(PyThread_tss_create) && !defined(Py_tss_NEEDS_INIT) #include "pythread.h" #define Py_tss_NEEDS_INIT 0 typedef int Py_tss_t; static CYTHON_INLINE int PyThread_tss_create(Py_tss_t *key) { *key = PyThread_create_key(); return 0; } static CYTHON_INLINE Py_tss_t * PyThread_tss_alloc(void) { Py_tss_t *key = (Py_tss_t *)PyObject_Malloc(sizeof(Py_tss_t)); *key = Py_tss_NEEDS_INIT; return key; } static CYTHON_INLINE void PyThread_tss_free(Py_tss_t *key) { PyObject_Free(key); } static CYTHON_INLINE int PyThread_tss_is_created(Py_tss_t *key) { return *key != Py_tss_NEEDS_INIT; } static CYTHON_INLINE void PyThread_tss_delete(Py_tss_t *key) { PyThread_delete_key(*key); *key = Py_tss_NEEDS_INIT; } static CYTHON_INLINE int PyThread_tss_set(Py_tss_t *key, void *value) { return PyThread_set_key_value(*key, value); } static CYTHON_INLINE void * PyThread_tss_get(Py_tss_t *key) { return PyThread_get_key_value(*key); } #endif #if CYTHON_COMPILING_IN_CPYTHON || defined(_PyDict_NewPresized) #define __Pyx_PyDict_NewPresized(n) ((n <= 8) ? PyDict_New() : _PyDict_NewPresized(n)) #else #define __Pyx_PyDict_NewPresized(n) PyDict_New() #endif #if PY_MAJOR_VERSION >= 3 || CYTHON_FUTURE_DIVISION #define __Pyx_PyNumber_Divide(x,y) PyNumber_TrueDivide(x,y) #define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceTrueDivide(x,y) #else #define __Pyx_PyNumber_Divide(x,y) PyNumber_Divide(x,y) #define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceDivide(x,y) #endif #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030500A1 && CYTHON_USE_UNICODE_INTERNALS #define __Pyx_PyDict_GetItemStr(dict, name) _PyDict_GetItem_KnownHash(dict, name, ((PyASCIIObject *) name)->hash) #else #define __Pyx_PyDict_GetItemStr(dict, name) PyDict_GetItem(dict, name) #endif #if PY_VERSION_HEX > 0x03030000 && defined(PyUnicode_KIND) #define CYTHON_PEP393_ENABLED 1 #define __Pyx_PyUnicode_READY(op) (likely(PyUnicode_IS_READY(op)) ?\ 0 : _PyUnicode_Ready((PyObject *)(op))) #define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_LENGTH(u) #define __Pyx_PyUnicode_READ_CHAR(u, i) PyUnicode_READ_CHAR(u, i) #define __Pyx_PyUnicode_MAX_CHAR_VALUE(u) PyUnicode_MAX_CHAR_VALUE(u) #define __Pyx_PyUnicode_KIND(u) PyUnicode_KIND(u) #define __Pyx_PyUnicode_DATA(u) PyUnicode_DATA(u) #define __Pyx_PyUnicode_READ(k, d, i) PyUnicode_READ(k, d, i) #define __Pyx_PyUnicode_WRITE(k, d, i, ch) PyUnicode_WRITE(k, d, i, ch) #if defined(PyUnicode_IS_READY) && defined(PyUnicode_GET_SIZE) #define __Pyx_PyUnicode_IS_TRUE(u) (0 != (likely(PyUnicode_IS_READY(u)) ? 
PyUnicode_GET_LENGTH(u) : PyUnicode_GET_SIZE(u))) #else #define __Pyx_PyUnicode_IS_TRUE(u) (0 != PyUnicode_GET_LENGTH(u)) #endif #else #define CYTHON_PEP393_ENABLED 0 #define PyUnicode_1BYTE_KIND 1 #define PyUnicode_2BYTE_KIND 2 #define PyUnicode_4BYTE_KIND 4 #define __Pyx_PyUnicode_READY(op) (0) #define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_SIZE(u) #define __Pyx_PyUnicode_READ_CHAR(u, i) ((Py_UCS4)(PyUnicode_AS_UNICODE(u)[i])) #define __Pyx_PyUnicode_MAX_CHAR_VALUE(u) ((sizeof(Py_UNICODE) == 2) ? 65535 : 1114111) #define __Pyx_PyUnicode_KIND(u) (sizeof(Py_UNICODE)) #define __Pyx_PyUnicode_DATA(u) ((void*)PyUnicode_AS_UNICODE(u)) #define __Pyx_PyUnicode_READ(k, d, i) ((void)(k), (Py_UCS4)(((Py_UNICODE*)d)[i])) #define __Pyx_PyUnicode_WRITE(k, d, i, ch) (((void)(k)), ((Py_UNICODE*)d)[i] = ch) #define __Pyx_PyUnicode_IS_TRUE(u) (0 != PyUnicode_GET_SIZE(u)) #endif #if CYTHON_COMPILING_IN_PYPY #define __Pyx_PyUnicode_Concat(a, b) PyNumber_Add(a, b) #define __Pyx_PyUnicode_ConcatSafe(a, b) PyNumber_Add(a, b) #else #define __Pyx_PyUnicode_Concat(a, b) PyUnicode_Concat(a, b) #define __Pyx_PyUnicode_ConcatSafe(a, b) ((unlikely((a) == Py_None) || unlikely((b) == Py_None)) ?\ PyNumber_Add(a, b) : __Pyx_PyUnicode_Concat(a, b)) #endif #if CYTHON_COMPILING_IN_PYPY && !defined(PyUnicode_Contains) #define PyUnicode_Contains(u, s) PySequence_Contains(u, s) #endif #if CYTHON_COMPILING_IN_PYPY && !defined(PyByteArray_Check) #define PyByteArray_Check(obj) PyObject_TypeCheck(obj, &PyByteArray_Type) #endif #if CYTHON_COMPILING_IN_PYPY && !defined(PyObject_Format) #define PyObject_Format(obj, fmt) PyObject_CallMethod(obj, "__format__", "O", fmt) #endif #define __Pyx_PyString_FormatSafe(a, b) ((unlikely((a) == Py_None || (PyString_Check(b) && !PyString_CheckExact(b)))) ? PyNumber_Remainder(a, b) : __Pyx_PyString_Format(a, b)) #define __Pyx_PyUnicode_FormatSafe(a, b) ((unlikely((a) == Py_None || (PyUnicode_Check(b) && !PyUnicode_CheckExact(b)))) ? 
PyNumber_Remainder(a, b) : PyUnicode_Format(a, b)) #if PY_MAJOR_VERSION >= 3 #define __Pyx_PyString_Format(a, b) PyUnicode_Format(a, b) #else #define __Pyx_PyString_Format(a, b) PyString_Format(a, b) #endif #if PY_MAJOR_VERSION < 3 && !defined(PyObject_ASCII) #define PyObject_ASCII(o) PyObject_Repr(o) #endif #if PY_MAJOR_VERSION >= 3 #define PyBaseString_Type PyUnicode_Type #define PyStringObject PyUnicodeObject #define PyString_Type PyUnicode_Type #define PyString_Check PyUnicode_Check #define PyString_CheckExact PyUnicode_CheckExact #ifndef PyObject_Unicode #define PyObject_Unicode PyObject_Str #endif #endif #if PY_MAJOR_VERSION >= 3 #define __Pyx_PyBaseString_Check(obj) PyUnicode_Check(obj) #define __Pyx_PyBaseString_CheckExact(obj) PyUnicode_CheckExact(obj) #else #define __Pyx_PyBaseString_Check(obj) (PyString_Check(obj) || PyUnicode_Check(obj)) #define __Pyx_PyBaseString_CheckExact(obj) (PyString_CheckExact(obj) || PyUnicode_CheckExact(obj)) #endif #ifndef PySet_CheckExact #define PySet_CheckExact(obj) (Py_TYPE(obj) == &PySet_Type) #endif #if PY_VERSION_HEX >= 0x030900A4 #define __Pyx_SET_REFCNT(obj, refcnt) Py_SET_REFCNT(obj, refcnt) #define __Pyx_SET_SIZE(obj, size) Py_SET_SIZE(obj, size) #else #define __Pyx_SET_REFCNT(obj, refcnt) Py_REFCNT(obj) = (refcnt) #define __Pyx_SET_SIZE(obj, size) Py_SIZE(obj) = (size) #endif #if CYTHON_ASSUME_SAFE_MACROS #define __Pyx_PySequence_SIZE(seq) Py_SIZE(seq) #else #define __Pyx_PySequence_SIZE(seq) PySequence_Size(seq) #endif #if PY_MAJOR_VERSION >= 3 #define PyIntObject PyLongObject #define PyInt_Type PyLong_Type #define PyInt_Check(op) PyLong_Check(op) #define PyInt_CheckExact(op) PyLong_CheckExact(op) #define PyInt_FromString PyLong_FromString #define PyInt_FromUnicode PyLong_FromUnicode #define PyInt_FromLong PyLong_FromLong #define PyInt_FromSize_t PyLong_FromSize_t #define PyInt_FromSsize_t PyLong_FromSsize_t #define PyInt_AsLong PyLong_AsLong #define PyInt_AS_LONG PyLong_AS_LONG #define PyInt_AsSsize_t PyLong_AsSsize_t #define PyInt_AsUnsignedLongMask PyLong_AsUnsignedLongMask #define PyInt_AsUnsignedLongLongMask PyLong_AsUnsignedLongLongMask #define PyNumber_Int PyNumber_Long #endif #if PY_MAJOR_VERSION >= 3 #define PyBoolObject PyLongObject #endif #if PY_MAJOR_VERSION >= 3 && CYTHON_COMPILING_IN_PYPY #ifndef PyUnicode_InternFromString #define PyUnicode_InternFromString(s) PyUnicode_FromString(s) #endif #endif #if PY_VERSION_HEX < 0x030200A4 typedef long Py_hash_t; #define __Pyx_PyInt_FromHash_t PyInt_FromLong #define __Pyx_PyInt_AsHash_t PyInt_AsLong #else #define __Pyx_PyInt_FromHash_t PyInt_FromSsize_t #define __Pyx_PyInt_AsHash_t PyInt_AsSsize_t #endif #if PY_MAJOR_VERSION >= 3 #define __Pyx_PyMethod_New(func, self, klass) ((self) ? 
((void)(klass), PyMethod_New(func, self)) : __Pyx_NewRef(func)) #else #define __Pyx_PyMethod_New(func, self, klass) PyMethod_New(func, self, klass) #endif #if CYTHON_USE_ASYNC_SLOTS #if PY_VERSION_HEX >= 0x030500B1 #define __Pyx_PyAsyncMethodsStruct PyAsyncMethods #define __Pyx_PyType_AsAsync(obj) (Py_TYPE(obj)->tp_as_async) #else #define __Pyx_PyType_AsAsync(obj) ((__Pyx_PyAsyncMethodsStruct*) (Py_TYPE(obj)->tp_reserved)) #endif #else #define __Pyx_PyType_AsAsync(obj) NULL #endif #ifndef __Pyx_PyAsyncMethodsStruct typedef struct { unaryfunc am_await; unaryfunc am_aiter; unaryfunc am_anext; } __Pyx_PyAsyncMethodsStruct; #endif #if defined(WIN32) || defined(MS_WINDOWS) #define _USE_MATH_DEFINES #endif #include <math.h> #ifdef NAN #define __PYX_NAN() ((float) NAN) #else static CYTHON_INLINE float __PYX_NAN() { float value; memset(&value, 0xFF, sizeof(value)); return value; } #endif #if defined(__CYGWIN__) && defined(_LDBL_EQ_DBL) #define __Pyx_truncl trunc #else #define __Pyx_truncl truncl #endif #define __PYX_MARK_ERR_POS(f_index, lineno) \ { __pyx_filename = __pyx_f[f_index]; (void)__pyx_filename; __pyx_lineno = lineno; (void)__pyx_lineno; __pyx_clineno = __LINE__; (void)__pyx_clineno; } #define __PYX_ERR(f_index, lineno, Ln_error) \ { __PYX_MARK_ERR_POS(f_index, lineno) goto Ln_error; } #ifndef __PYX_EXTERN_C #ifdef __cplusplus #define __PYX_EXTERN_C extern "C" #else #define __PYX_EXTERN_C extern #endif #endif #define __PYX_HAVE__borg__compress #define __PYX_HAVE_API__borg__compress /* Early includes */ #include "algorithms/lz4-libselect.h" #include "algorithms/zstd-libselect.h" #ifdef _OPENMP #include <omp.h> #endif /* _OPENMP */ #if defined(PYREX_WITHOUT_ASSERTIONS) && !defined(CYTHON_WITHOUT_ASSERTIONS) #define CYTHON_WITHOUT_ASSERTIONS #endif typedef struct {PyObject **p; const char *s; const Py_ssize_t n; const char* encoding; const char is_unicode; const char is_str; const char intern; } __Pyx_StringTabEntry; #define __PYX_DEFAULT_STRING_ENCODING_IS_ASCII 0 #define __PYX_DEFAULT_STRING_ENCODING_IS_UTF8 0 #define __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT (PY_MAJOR_VERSION >= 3 && __PYX_DEFAULT_STRING_ENCODING_IS_UTF8) #define __PYX_DEFAULT_STRING_ENCODING "" #define __Pyx_PyObject_FromString __Pyx_PyBytes_FromString #define __Pyx_PyObject_FromStringAndSize __Pyx_PyBytes_FromStringAndSize #define __Pyx_uchar_cast(c) ((unsigned char)c) #define __Pyx_long_cast(x) ((long)x) #define __Pyx_fits_Py_ssize_t(v, type, is_signed) (\ (sizeof(type) < sizeof(Py_ssize_t)) ||\ (sizeof(type) > sizeof(Py_ssize_t) &&\ likely(v < (type)PY_SSIZE_T_MAX ||\ v == (type)PY_SSIZE_T_MAX) &&\ (!is_signed || likely(v > (type)PY_SSIZE_T_MIN ||\ v == (type)PY_SSIZE_T_MIN))) ||\ (sizeof(type) == sizeof(Py_ssize_t) &&\ (is_signed || likely(v < (type)PY_SSIZE_T_MAX ||\ v == (type)PY_SSIZE_T_MAX))) ) static CYTHON_INLINE int __Pyx_is_valid_index(Py_ssize_t i, Py_ssize_t limit) { return (size_t) i < (size_t) limit; } #if defined (__cplusplus) && __cplusplus >= 201103L #include <cstdlib> #define __Pyx_sst_abs(value) std::abs(value) #elif SIZEOF_INT >= SIZEOF_SIZE_T #define __Pyx_sst_abs(value) abs(value) #elif SIZEOF_LONG >= SIZEOF_SIZE_T #define __Pyx_sst_abs(value) labs(value) #elif defined (_MSC_VER) #define __Pyx_sst_abs(value) ((Py_ssize_t)_abs64(value)) #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L #define __Pyx_sst_abs(value) llabs(value) #elif defined (__GNUC__) #define __Pyx_sst_abs(value) __builtin_llabs(value) #else #define __Pyx_sst_abs(value) ((value<0) ?
-value : value) #endif static CYTHON_INLINE const char* __Pyx_PyObject_AsString(PyObject*); static CYTHON_INLINE const char* __Pyx_PyObject_AsStringAndSize(PyObject*, Py_ssize_t* length); #define __Pyx_PyByteArray_FromString(s) PyByteArray_FromStringAndSize((const char*)s, strlen((const char*)s)) #define __Pyx_PyByteArray_FromStringAndSize(s, l) PyByteArray_FromStringAndSize((const char*)s, l) #define __Pyx_PyBytes_FromString PyBytes_FromString #define __Pyx_PyBytes_FromStringAndSize PyBytes_FromStringAndSize static CYTHON_INLINE PyObject* __Pyx_PyUnicode_FromString(const char*); #if PY_MAJOR_VERSION < 3 #define __Pyx_PyStr_FromString __Pyx_PyBytes_FromString #define __Pyx_PyStr_FromStringAndSize __Pyx_PyBytes_FromStringAndSize #else #define __Pyx_PyStr_FromString __Pyx_PyUnicode_FromString #define __Pyx_PyStr_FromStringAndSize __Pyx_PyUnicode_FromStringAndSize #endif #define __Pyx_PyBytes_AsWritableString(s) ((char*) PyBytes_AS_STRING(s)) #define __Pyx_PyBytes_AsWritableSString(s) ((signed char*) PyBytes_AS_STRING(s)) #define __Pyx_PyBytes_AsWritableUString(s) ((unsigned char*) PyBytes_AS_STRING(s)) #define __Pyx_PyBytes_AsString(s) ((const char*) PyBytes_AS_STRING(s)) #define __Pyx_PyBytes_AsSString(s) ((const signed char*) PyBytes_AS_STRING(s)) #define __Pyx_PyBytes_AsUString(s) ((const unsigned char*) PyBytes_AS_STRING(s)) #define __Pyx_PyObject_AsWritableString(s) ((char*) __Pyx_PyObject_AsString(s)) #define __Pyx_PyObject_AsWritableSString(s) ((signed char*) __Pyx_PyObject_AsString(s)) #define __Pyx_PyObject_AsWritableUString(s) ((unsigned char*) __Pyx_PyObject_AsString(s)) #define __Pyx_PyObject_AsSString(s) ((const signed char*) __Pyx_PyObject_AsString(s)) #define __Pyx_PyObject_AsUString(s) ((const unsigned char*) __Pyx_PyObject_AsString(s)) #define __Pyx_PyObject_FromCString(s) __Pyx_PyObject_FromString((const char*)s) #define __Pyx_PyBytes_FromCString(s) __Pyx_PyBytes_FromString((const char*)s) #define __Pyx_PyByteArray_FromCString(s) __Pyx_PyByteArray_FromString((const char*)s) #define __Pyx_PyStr_FromCString(s) __Pyx_PyStr_FromString((const char*)s) #define __Pyx_PyUnicode_FromCString(s) __Pyx_PyUnicode_FromString((const char*)s) static CYTHON_INLINE size_t __Pyx_Py_UNICODE_strlen(const Py_UNICODE *u) { const Py_UNICODE *u_end = u; while (*u_end++) ; return (size_t)(u_end - u - 1); } #define __Pyx_PyUnicode_FromUnicode(u) PyUnicode_FromUnicode(u, __Pyx_Py_UNICODE_strlen(u)) #define __Pyx_PyUnicode_FromUnicodeAndLength PyUnicode_FromUnicode #define __Pyx_PyUnicode_AsUnicode PyUnicode_AsUnicode #define __Pyx_NewRef(obj) (Py_INCREF(obj), obj) #define __Pyx_Owned_Py_None(b) __Pyx_NewRef(Py_None) static CYTHON_INLINE PyObject * __Pyx_PyBool_FromLong(long b); static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject*); static CYTHON_INLINE int __Pyx_PyObject_IsTrueAndDecref(PyObject*); static CYTHON_INLINE PyObject* __Pyx_PyNumber_IntOrLong(PyObject* x); #define __Pyx_PySequence_Tuple(obj)\ (likely(PyTuple_CheckExact(obj)) ? __Pyx_NewRef(obj) : PySequence_Tuple(obj)) static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject*); static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t); #if CYTHON_ASSUME_SAFE_MACROS #define __pyx_PyFloat_AsDouble(x) (PyFloat_CheckExact(x) ? PyFloat_AS_DOUBLE(x) : PyFloat_AsDouble(x)) #else #define __pyx_PyFloat_AsDouble(x) PyFloat_AsDouble(x) #endif #define __pyx_PyFloat_AsFloat(x) ((float) __pyx_PyFloat_AsDouble(x)) #if PY_MAJOR_VERSION >= 3 #define __Pyx_PyNumber_Int(x) (PyLong_CheckExact(x) ? 
__Pyx_NewRef(x) : PyNumber_Long(x)) #else #define __Pyx_PyNumber_Int(x) (PyInt_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Int(x)) #endif #define __Pyx_PyNumber_Float(x) (PyFloat_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Float(x)) #if PY_MAJOR_VERSION < 3 && __PYX_DEFAULT_STRING_ENCODING_IS_ASCII static int __Pyx_sys_getdefaultencoding_not_ascii; static int __Pyx_init_sys_getdefaultencoding_params(void) { PyObject* sys; PyObject* default_encoding = NULL; PyObject* ascii_chars_u = NULL; PyObject* ascii_chars_b = NULL; const char* default_encoding_c; sys = PyImport_ImportModule("sys"); if (!sys) goto bad; default_encoding = PyObject_CallMethod(sys, (char*) "getdefaultencoding", NULL); Py_DECREF(sys); if (!default_encoding) goto bad; default_encoding_c = PyBytes_AsString(default_encoding); if (!default_encoding_c) goto bad; if (strcmp(default_encoding_c, "ascii") == 0) { __Pyx_sys_getdefaultencoding_not_ascii = 0; } else { char ascii_chars[128]; int c; for (c = 0; c < 128; c++) { ascii_chars[c] = c; } __Pyx_sys_getdefaultencoding_not_ascii = 1; ascii_chars_u = PyUnicode_DecodeASCII(ascii_chars, 128, NULL); if (!ascii_chars_u) goto bad; ascii_chars_b = PyUnicode_AsEncodedString(ascii_chars_u, default_encoding_c, NULL); if (!ascii_chars_b || !PyBytes_Check(ascii_chars_b) || memcmp(ascii_chars, PyBytes_AS_STRING(ascii_chars_b), 128) != 0) { PyErr_Format( PyExc_ValueError, "This module compiled with c_string_encoding=ascii, but default encoding '%.200s' is not a superset of ascii.", default_encoding_c); goto bad; } Py_DECREF(ascii_chars_u); Py_DECREF(ascii_chars_b); } Py_DECREF(default_encoding); return 0; bad: Py_XDECREF(default_encoding); Py_XDECREF(ascii_chars_u); Py_XDECREF(ascii_chars_b); return -1; } #endif #if __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT && PY_MAJOR_VERSION >= 3 #define __Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_DecodeUTF8(c_str, size, NULL) #else #define __Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_Decode(c_str, size, __PYX_DEFAULT_STRING_ENCODING, NULL) #if __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT static char* __PYX_DEFAULT_STRING_ENCODING; static int __Pyx_init_sys_getdefaultencoding_params(void) { PyObject* sys; PyObject* default_encoding = NULL; char* default_encoding_c; sys = PyImport_ImportModule("sys"); if (!sys) goto bad; default_encoding = PyObject_CallMethod(sys, (char*) (const char*) "getdefaultencoding", NULL); Py_DECREF(sys); if (!default_encoding) goto bad; default_encoding_c = PyBytes_AsString(default_encoding); if (!default_encoding_c) goto bad; __PYX_DEFAULT_STRING_ENCODING = (char*) malloc(strlen(default_encoding_c) + 1); if (!__PYX_DEFAULT_STRING_ENCODING) goto bad; strcpy(__PYX_DEFAULT_STRING_ENCODING, default_encoding_c); Py_DECREF(default_encoding); return 0; bad: Py_XDECREF(default_encoding); return -1; } #endif #endif /* Test for GCC > 2.95 */ #if defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))) #define likely(x) __builtin_expect(!!(x), 1) #define unlikely(x) __builtin_expect(!!(x), 0) #else /* !__GNUC__ or GCC < 2.95 */ #define likely(x) (x) #define unlikely(x) (x) #endif /* __GNUC__ */ static CYTHON_INLINE void __Pyx_pretend_to_initialize(void* ptr) { (void)ptr; } static PyObject *__pyx_m = NULL; static PyObject *__pyx_d; static PyObject *__pyx_b; static PyObject *__pyx_cython_runtime = NULL; static PyObject *__pyx_empty_tuple; static PyObject *__pyx_empty_bytes; static PyObject *__pyx_empty_unicode; static int __pyx_lineno; static int __pyx_clineno = 0; static const char * __pyx_cfilenm= 
__FILE__; static const char *__pyx_filename; static const char *__pyx_f[] = { "src/borg/compress.pyx", "stringsource", }; /* NoFastGil.proto */ #define __Pyx_PyGILState_Ensure PyGILState_Ensure #define __Pyx_PyGILState_Release PyGILState_Release #define __Pyx_FastGIL_Remember() #define __Pyx_FastGIL_Forget() #define __Pyx_FastGilFuncInit() /* ForceInitThreads.proto */ #ifndef __PYX_FORCE_INIT_THREADS #define __PYX_FORCE_INIT_THREADS 0 #endif /*--- Type declarations ---*/ struct __pyx_obj_4borg_8compress_CompressorBase; /* "borg/compress.pyx":52 * * * cdef class CompressorBase: # <<<<<<<<<<<<<< * """ * base class for all (de)compression classes, */ struct __pyx_obj_4borg_8compress_CompressorBase { PyObject_HEAD }; /* --- Runtime support code (head) --- */ /* Refnanny.proto */ #ifndef CYTHON_REFNANNY #define CYTHON_REFNANNY 0 #endif #if CYTHON_REFNANNY typedef struct { void (*INCREF)(void*, PyObject*, int); void (*DECREF)(void*, PyObject*, int); void (*GOTREF)(void*, PyObject*, int); void (*GIVEREF)(void*, PyObject*, int); void* (*SetupContext)(const char*, int, const char*); void (*FinishContext)(void**); } __Pyx_RefNannyAPIStruct; static __Pyx_RefNannyAPIStruct *__Pyx_RefNanny = NULL; static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname); #define __Pyx_RefNannyDeclarations void *__pyx_refnanny = NULL; #ifdef WITH_THREAD #define __Pyx_RefNannySetupContext(name, acquire_gil)\ if (acquire_gil) {\ PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();\ __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__);\ PyGILState_Release(__pyx_gilstate_save);\ } else {\ __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__);\ } #else #define __Pyx_RefNannySetupContext(name, acquire_gil)\ __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__) #endif #define __Pyx_RefNannyFinishContext()\ __Pyx_RefNanny->FinishContext(&__pyx_refnanny) #define __Pyx_INCREF(r) __Pyx_RefNanny->INCREF(__pyx_refnanny, (PyObject *)(r), __LINE__) #define __Pyx_DECREF(r) __Pyx_RefNanny->DECREF(__pyx_refnanny, (PyObject *)(r), __LINE__) #define __Pyx_GOTREF(r) __Pyx_RefNanny->GOTREF(__pyx_refnanny, (PyObject *)(r), __LINE__) #define __Pyx_GIVEREF(r) __Pyx_RefNanny->GIVEREF(__pyx_refnanny, (PyObject *)(r), __LINE__) #define __Pyx_XINCREF(r) do { if((r) != NULL) {__Pyx_INCREF(r); }} while(0) #define __Pyx_XDECREF(r) do { if((r) != NULL) {__Pyx_DECREF(r); }} while(0) #define __Pyx_XGOTREF(r) do { if((r) != NULL) {__Pyx_GOTREF(r); }} while(0) #define __Pyx_XGIVEREF(r) do { if((r) != NULL) {__Pyx_GIVEREF(r);}} while(0) #else #define __Pyx_RefNannyDeclarations #define __Pyx_RefNannySetupContext(name, acquire_gil) #define __Pyx_RefNannyFinishContext() #define __Pyx_INCREF(r) Py_INCREF(r) #define __Pyx_DECREF(r) Py_DECREF(r) #define __Pyx_GOTREF(r) #define __Pyx_GIVEREF(r) #define __Pyx_XINCREF(r) Py_XINCREF(r) #define __Pyx_XDECREF(r) Py_XDECREF(r) #define __Pyx_XGOTREF(r) #define __Pyx_XGIVEREF(r) #endif #define __Pyx_XDECREF_SET(r, v) do {\ PyObject *tmp = (PyObject *) r;\ r = v; __Pyx_XDECREF(tmp);\ } while (0) #define __Pyx_DECREF_SET(r, v) do {\ PyObject *tmp = (PyObject *) r;\ r = v; __Pyx_DECREF(tmp);\ } while (0) #define __Pyx_CLEAR(r) do { PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);} while(0) #define __Pyx_XCLEAR(r) do { if((r) != NULL) {PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);}} while(0) /* PyObjectGetAttrStr.proto */ #if CYTHON_USE_TYPE_SLOTS static CYTHON_INLINE PyObject* 
__Pyx_PyObject_GetAttrStr(PyObject* obj, PyObject* attr_name); #else #define __Pyx_PyObject_GetAttrStr(o,n) PyObject_GetAttr(o,n) #endif /* GetBuiltinName.proto */ static PyObject *__Pyx_GetBuiltinName(PyObject *name); /* PyCFunctionFastCall.proto */ #if CYTHON_FAST_PYCCALL static CYTHON_INLINE PyObject *__Pyx_PyCFunction_FastCall(PyObject *func, PyObject **args, Py_ssize_t nargs); #else #define __Pyx_PyCFunction_FastCall(func, args, nargs) (assert(0), NULL) #endif /* PyFunctionFastCall.proto */ #if CYTHON_FAST_PYCALL #define __Pyx_PyFunction_FastCall(func, args, nargs)\ __Pyx_PyFunction_FastCallDict((func), (args), (nargs), NULL) #if 1 || PY_VERSION_HEX < 0x030600B1 static PyObject *__Pyx_PyFunction_FastCallDict(PyObject *func, PyObject **args, Py_ssize_t nargs, PyObject *kwargs); #else #define __Pyx_PyFunction_FastCallDict(func, args, nargs, kwargs) _PyFunction_FastCallDict(func, args, nargs, kwargs) #endif #define __Pyx_BUILD_ASSERT_EXPR(cond)\ (sizeof(char [1 - 2*!(cond)]) - 1) #ifndef Py_MEMBER_SIZE #define Py_MEMBER_SIZE(type, member) sizeof(((type *)0)->member) #endif static size_t __pyx_pyframe_localsplus_offset = 0; #include "frameobject.h" #define __Pxy_PyFrame_Initialize_Offsets()\ ((void)__Pyx_BUILD_ASSERT_EXPR(sizeof(PyFrameObject) == offsetof(PyFrameObject, f_localsplus) + Py_MEMBER_SIZE(PyFrameObject, f_localsplus)),\ (void)(__pyx_pyframe_localsplus_offset = ((size_t)PyFrame_Type.tp_basicsize) - Py_MEMBER_SIZE(PyFrameObject, f_localsplus))) #define __Pyx_PyFrame_GetLocalsplus(frame)\ (assert(__pyx_pyframe_localsplus_offset), (PyObject **)(((char *)(frame)) + __pyx_pyframe_localsplus_offset)) #endif /* PyObjectCall.proto */ #if CYTHON_COMPILING_IN_CPYTHON static CYTHON_INLINE PyObject* __Pyx_PyObject_Call(PyObject *func, PyObject *arg, PyObject *kw); #else #define __Pyx_PyObject_Call(func, arg, kw) PyObject_Call(func, arg, kw) #endif /* PyObjectCall2Args.proto */ static CYTHON_UNUSED PyObject* __Pyx_PyObject_Call2Args(PyObject* function, PyObject* arg1, PyObject* arg2); /* PyObjectCallMethO.proto */ #if CYTHON_COMPILING_IN_CPYTHON static CYTHON_INLINE PyObject* __Pyx_PyObject_CallMethO(PyObject *func, PyObject *arg); #endif /* PyObjectCallOneArg.proto */ static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg); /* RaiseArgTupleInvalid.proto */ static void __Pyx_RaiseArgtupleInvalid(const char* func_name, int exact, Py_ssize_t num_min, Py_ssize_t num_max, Py_ssize_t num_found); /* KeywordStringCheck.proto */ static int __Pyx_CheckKeywordStrings(PyObject *kwdict, const char* function_name, int kw_allowed); /* SliceObject.proto */ static CYTHON_INLINE PyObject* __Pyx_PyObject_GetSlice( PyObject* obj, Py_ssize_t cstart, Py_ssize_t cstop, PyObject** py_start, PyObject** py_stop, PyObject** py_slice, int has_cstart, int has_cstop, int wraparound); /* PyErrExceptionMatches.proto */ #if CYTHON_FAST_THREAD_STATE #define __Pyx_PyErr_ExceptionMatches(err) __Pyx_PyErr_ExceptionMatchesInState(__pyx_tstate, err) static CYTHON_INLINE int __Pyx_PyErr_ExceptionMatchesInState(PyThreadState* tstate, PyObject* err); #else #define __Pyx_PyErr_ExceptionMatches(err) PyErr_ExceptionMatches(err) #endif /* PyThreadStateGet.proto */ #if CYTHON_FAST_THREAD_STATE #define __Pyx_PyThreadState_declare PyThreadState *__pyx_tstate; #define __Pyx_PyThreadState_assign __pyx_tstate = __Pyx_PyThreadState_Current; #define __Pyx_PyErr_Occurred() __pyx_tstate->curexc_type #else #define __Pyx_PyThreadState_declare #define __Pyx_PyThreadState_assign #define __Pyx_PyErr_Occurred() 
PyErr_Occurred() #endif /* PyErrFetchRestore.proto */ #if CYTHON_FAST_THREAD_STATE #define __Pyx_PyErr_Clear() __Pyx_ErrRestore(NULL, NULL, NULL) #define __Pyx_ErrRestoreWithState(type, value, tb) __Pyx_ErrRestoreInState(PyThreadState_GET(), type, value, tb) #define __Pyx_ErrFetchWithState(type, value, tb) __Pyx_ErrFetchInState(PyThreadState_GET(), type, value, tb) #define __Pyx_ErrRestore(type, value, tb) __Pyx_ErrRestoreInState(__pyx_tstate, type, value, tb) #define __Pyx_ErrFetch(type, value, tb) __Pyx_ErrFetchInState(__pyx_tstate, type, value, tb) static CYTHON_INLINE void __Pyx_ErrRestoreInState(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb); static CYTHON_INLINE void __Pyx_ErrFetchInState(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb); #if CYTHON_COMPILING_IN_CPYTHON #define __Pyx_PyErr_SetNone(exc) (Py_INCREF(exc), __Pyx_ErrRestore((exc), NULL, NULL)) #else #define __Pyx_PyErr_SetNone(exc) PyErr_SetNone(exc) #endif #else #define __Pyx_PyErr_Clear() PyErr_Clear() #define __Pyx_PyErr_SetNone(exc) PyErr_SetNone(exc) #define __Pyx_ErrRestoreWithState(type, value, tb) PyErr_Restore(type, value, tb) #define __Pyx_ErrFetchWithState(type, value, tb) PyErr_Fetch(type, value, tb) #define __Pyx_ErrRestoreInState(tstate, type, value, tb) PyErr_Restore(type, value, tb) #define __Pyx_ErrFetchInState(tstate, type, value, tb) PyErr_Fetch(type, value, tb) #define __Pyx_ErrRestore(type, value, tb) PyErr_Restore(type, value, tb) #define __Pyx_ErrFetch(type, value, tb) PyErr_Fetch(type, value, tb) #endif /* GetAttr.proto */ static CYTHON_INLINE PyObject *__Pyx_GetAttr(PyObject *, PyObject *); /* GetAttr3.proto */ static CYTHON_INLINE PyObject *__Pyx_GetAttr3(PyObject *, PyObject *, PyObject *); /* PyDictVersioning.proto */ #if CYTHON_USE_DICT_VERSIONS && CYTHON_USE_TYPE_SLOTS #define __PYX_DICT_VERSION_INIT ((PY_UINT64_T) -1) #define __PYX_GET_DICT_VERSION(dict) (((PyDictObject*)(dict))->ma_version_tag) #define __PYX_UPDATE_DICT_CACHE(dict, value, cache_var, version_var)\ (version_var) = __PYX_GET_DICT_VERSION(dict);\ (cache_var) = (value); #define __PYX_PY_DICT_LOOKUP_IF_MODIFIED(VAR, DICT, LOOKUP) {\ static PY_UINT64_T __pyx_dict_version = 0;\ static PyObject *__pyx_dict_cached_value = NULL;\ if (likely(__PYX_GET_DICT_VERSION(DICT) == __pyx_dict_version)) {\ (VAR) = __pyx_dict_cached_value;\ } else {\ (VAR) = __pyx_dict_cached_value = (LOOKUP);\ __pyx_dict_version = __PYX_GET_DICT_VERSION(DICT);\ }\ } static CYTHON_INLINE PY_UINT64_T __Pyx_get_tp_dict_version(PyObject *obj); static CYTHON_INLINE PY_UINT64_T __Pyx_get_object_dict_version(PyObject *obj); static CYTHON_INLINE int __Pyx_object_dict_version_matches(PyObject* obj, PY_UINT64_T tp_dict_version, PY_UINT64_T obj_dict_version); #else #define __PYX_GET_DICT_VERSION(dict) (0) #define __PYX_UPDATE_DICT_CACHE(dict, value, cache_var, version_var) #define __PYX_PY_DICT_LOOKUP_IF_MODIFIED(VAR, DICT, LOOKUP) (VAR) = (LOOKUP); #endif /* GetModuleGlobalName.proto */ #if CYTHON_USE_DICT_VERSIONS #define __Pyx_GetModuleGlobalName(var, name) {\ static PY_UINT64_T __pyx_dict_version = 0;\ static PyObject *__pyx_dict_cached_value = NULL;\ (var) = (likely(__pyx_dict_version == __PYX_GET_DICT_VERSION(__pyx_d))) ?\ (likely(__pyx_dict_cached_value) ? 
__Pyx_NewRef(__pyx_dict_cached_value) : __Pyx_GetBuiltinName(name)) :\ __Pyx__GetModuleGlobalName(name, &__pyx_dict_version, &__pyx_dict_cached_value);\ } #define __Pyx_GetModuleGlobalNameUncached(var, name) {\ PY_UINT64_T __pyx_dict_version;\ PyObject *__pyx_dict_cached_value;\ (var) = __Pyx__GetModuleGlobalName(name, &__pyx_dict_version, &__pyx_dict_cached_value);\ } static PyObject *__Pyx__GetModuleGlobalName(PyObject *name, PY_UINT64_T *dict_version, PyObject **dict_cached_value); #else #define __Pyx_GetModuleGlobalName(var, name) (var) = __Pyx__GetModuleGlobalName(name) #define __Pyx_GetModuleGlobalNameUncached(var, name) (var) = __Pyx__GetModuleGlobalName(name) static CYTHON_INLINE PyObject *__Pyx__GetModuleGlobalName(PyObject *name); #endif /* RaiseDoubleKeywords.proto */ static void __Pyx_RaiseDoubleKeywordsError(const char* func_name, PyObject* kw_name); /* ParseKeywords.proto */ static int __Pyx_ParseOptionalKeywords(PyObject *kwds, PyObject **argnames[],\ PyObject *kwds2, PyObject *values[], Py_ssize_t num_pos_args,\ const char* function_name); /* RaiseException.proto */ static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause); /* PyIntFromDouble.proto */ #if PY_MAJOR_VERSION < 3 static CYTHON_INLINE PyObject* __Pyx_PyInt_FromDouble(double value); #else #define __Pyx_PyInt_FromDouble(value) PyLong_FromDouble(value) #endif /* GetTopmostException.proto */ #if CYTHON_USE_EXC_INFO_STACK static _PyErr_StackItem * __Pyx_PyErr_GetTopmostException(PyThreadState *tstate); #endif /* SaveResetException.proto */ #if CYTHON_FAST_THREAD_STATE #define __Pyx_ExceptionSave(type, value, tb) __Pyx__ExceptionSave(__pyx_tstate, type, value, tb) static CYTHON_INLINE void __Pyx__ExceptionSave(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb); #define __Pyx_ExceptionReset(type, value, tb) __Pyx__ExceptionReset(__pyx_tstate, type, value, tb) static CYTHON_INLINE void __Pyx__ExceptionReset(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb); #else #define __Pyx_ExceptionSave(type, value, tb) PyErr_GetExcInfo(type, value, tb) #define __Pyx_ExceptionReset(type, value, tb) PyErr_SetExcInfo(type, value, tb) #endif /* GetException.proto */ #if CYTHON_FAST_THREAD_STATE #define __Pyx_GetException(type, value, tb) __Pyx__GetException(__pyx_tstate, type, value, tb) static int __Pyx__GetException(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb); #else static int __Pyx_GetException(PyObject **type, PyObject **value, PyObject **tb); #endif /* PyObjectSetAttrStr.proto */ #if CYTHON_USE_TYPE_SLOTS #define __Pyx_PyObject_DelAttrStr(o,n) __Pyx_PyObject_SetAttrStr(o, n, NULL) static CYTHON_INLINE int __Pyx_PyObject_SetAttrStr(PyObject* obj, PyObject* attr_name, PyObject* value); #else #define __Pyx_PyObject_DelAttrStr(o,n) PyObject_DelAttr(o,n) #define __Pyx_PyObject_SetAttrStr(o,n,v) PyObject_SetAttr(o,n,v) #endif /* FastTypeChecks.proto */ #if CYTHON_COMPILING_IN_CPYTHON #define __Pyx_TypeCheck(obj, type) __Pyx_IsSubtype(Py_TYPE(obj), (PyTypeObject *)type) static CYTHON_INLINE int __Pyx_IsSubtype(PyTypeObject *a, PyTypeObject *b); static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches(PyObject *err, PyObject *type); static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches2(PyObject *err, PyObject *type1, PyObject *type2); #else #define __Pyx_TypeCheck(obj, type) PyObject_TypeCheck(obj, (PyTypeObject *)type) #define __Pyx_PyErr_GivenExceptionMatches(err, type) PyErr_GivenExceptionMatches(err, type) #define 
__Pyx_PyErr_GivenExceptionMatches2(err, type1, type2) (PyErr_GivenExceptionMatches(err, type1) || PyErr_GivenExceptionMatches(err, type2)) #endif #define __Pyx_PyException_Check(obj) __Pyx_TypeCheck(obj, PyExc_Exception) /* SwapException.proto */ #if CYTHON_FAST_THREAD_STATE #define __Pyx_ExceptionSwap(type, value, tb) __Pyx__ExceptionSwap(__pyx_tstate, type, value, tb) static CYTHON_INLINE void __Pyx__ExceptionSwap(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb); #else static CYTHON_INLINE void __Pyx_ExceptionSwap(PyObject **type, PyObject **value, PyObject **tb); #endif /* RaiseTooManyValuesToUnpack.proto */ static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected); /* RaiseNeedMoreValuesToUnpack.proto */ static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index); /* IterFinish.proto */ static CYTHON_INLINE int __Pyx_IterFinish(void); /* UnpackItemEndCheck.proto */ static int __Pyx_IternextUnpackEndCheck(PyObject *retval, Py_ssize_t expected); /* PyIntCompare.proto */ static CYTHON_INLINE PyObject* __Pyx_PyInt_EqObjC(PyObject *op1, PyObject *op2, long intval, long inplace); /* PyIntBinop.proto */ #if !CYTHON_COMPILING_IN_PYPY static PyObject* __Pyx_PyInt_AndObjC(PyObject *op1, PyObject *op2, long intval, int inplace, int zerodivision_check); #else #define __Pyx_PyInt_AndObjC(op1, op2, intval, inplace, zerodivision_check)\ (inplace ? PyNumber_InPlaceAnd(op1, op2) : PyNumber_And(op1, op2)) #endif /* PyIntBinop.proto */ #if !CYTHON_COMPILING_IN_PYPY static PyObject* __Pyx_PyInt_RemainderObjC(PyObject *op1, PyObject *op2, long intval, int inplace, int zerodivision_check); #else #define __Pyx_PyInt_RemainderObjC(op1, op2, intval, inplace, zerodivision_check)\ (inplace ? PyNumber_InPlaceRemainder(op1, op2) : PyNumber_Remainder(op1, op2)) #endif /* PyObjectCallNoArg.proto */ #if CYTHON_COMPILING_IN_CPYTHON static CYTHON_INLINE PyObject* __Pyx_PyObject_CallNoArg(PyObject *func); #else #define __Pyx_PyObject_CallNoArg(func) __Pyx_PyObject_Call(func, __pyx_empty_tuple, NULL) #endif /* GetItemInt.proto */ #define __Pyx_GetItemInt(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck)\ (__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\ __Pyx_GetItemInt_Fast(o, (Py_ssize_t)i, is_list, wraparound, boundscheck) :\ (is_list ? 
(PyErr_SetString(PyExc_IndexError, "list index out of range"), (PyObject*)NULL) :\ __Pyx_GetItemInt_Generic(o, to_py_func(i)))) #define __Pyx_GetItemInt_List(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck)\ (__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\ __Pyx_GetItemInt_List_Fast(o, (Py_ssize_t)i, wraparound, boundscheck) :\ (PyErr_SetString(PyExc_IndexError, "list index out of range"), (PyObject*)NULL)) static CYTHON_INLINE PyObject *__Pyx_GetItemInt_List_Fast(PyObject *o, Py_ssize_t i, int wraparound, int boundscheck); #define __Pyx_GetItemInt_Tuple(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck)\ (__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\ __Pyx_GetItemInt_Tuple_Fast(o, (Py_ssize_t)i, wraparound, boundscheck) :\ (PyErr_SetString(PyExc_IndexError, "tuple index out of range"), (PyObject*)NULL)) static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Tuple_Fast(PyObject *o, Py_ssize_t i, int wraparound, int boundscheck); static PyObject *__Pyx_GetItemInt_Generic(PyObject *o, PyObject* j); static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Fast(PyObject *o, Py_ssize_t i, int is_list, int wraparound, int boundscheck); /* ObjectGetItem.proto */ #if CYTHON_USE_TYPE_SLOTS static CYTHON_INLINE PyObject *__Pyx_PyObject_GetItem(PyObject *obj, PyObject* key); #else #define __Pyx_PyObject_GetItem(obj, key) PyObject_GetItem(obj, key) #endif /* IncludeStringH.proto */ #include <string.h> /* BytesEquals.proto */ static CYTHON_INLINE int __Pyx_PyBytes_Equals(PyObject* s1, PyObject* s2, int equals); /* UnicodeEquals.proto */ static CYTHON_INLINE int __Pyx_PyUnicode_Equals(PyObject* s1, PyObject* s2, int equals); /* Import.proto */ static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, int level); /* ImportFrom.proto */ static PyObject* __Pyx_ImportFrom(PyObject* module, PyObject* name); /* HasAttr.proto */ static CYTHON_INLINE int __Pyx_HasAttr(PyObject *, PyObject *); /* PyObject_GenericGetAttrNoDict.proto */ #if CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP && PY_VERSION_HEX < 0x03070000 static CYTHON_INLINE PyObject* __Pyx_PyObject_GenericGetAttrNoDict(PyObject* obj, PyObject* attr_name); #else #define __Pyx_PyObject_GenericGetAttrNoDict PyObject_GenericGetAttr #endif /* PyObject_GenericGetAttr.proto */ #if CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP && PY_VERSION_HEX < 0x03070000 static PyObject* __Pyx_PyObject_GenericGetAttr(PyObject* obj, PyObject* attr_name); #else #define __Pyx_PyObject_GenericGetAttr PyObject_GenericGetAttr #endif /* PyObjectGetAttrStrNoError.proto */ static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStrNoError(PyObject* obj, PyObject* attr_name); /* SetupReduce.proto */ static int __Pyx_setup_reduce(PyObject* type_obj); /* ClassMethod.proto */ #include "descrobject.h" static CYTHON_UNUSED PyObject* __Pyx_Method_ClassMethod(PyObject *method); /* GetNameInClass.proto */ #define __Pyx_GetNameInClass(var, nmspace, name) (var) = __Pyx__GetNameInClass(nmspace, name) static PyObject *__Pyx__GetNameInClass(PyObject *nmspace, PyObject *name); /* CalculateMetaclass.proto */ static PyObject *__Pyx_CalculateMetaclass(PyTypeObject *metaclass, PyObject *bases); /* SetNameInClass.proto */ #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030500A1 #define __Pyx_SetNameInClass(ns, name, value)\ (likely(PyDict_CheckExact(ns)) ?
_PyDict_SetItem_KnownHash(ns, name, value, ((PyASCIIObject *) name)->hash) : PyObject_SetItem(ns, name, value)) #elif CYTHON_COMPILING_IN_CPYTHON #define __Pyx_SetNameInClass(ns, name, value)\ (likely(PyDict_CheckExact(ns)) ? PyDict_SetItem(ns, name, value) : PyObject_SetItem(ns, name, value)) #else #define __Pyx_SetNameInClass(ns, name, value) PyObject_SetItem(ns, name, value) #endif /* FetchCommonType.proto */ static PyTypeObject* __Pyx_FetchCommonType(PyTypeObject* type); /* CythonFunctionShared.proto */ #define __Pyx_CyFunction_USED 1 #define __Pyx_CYFUNCTION_STATICMETHOD 0x01 #define __Pyx_CYFUNCTION_CLASSMETHOD 0x02 #define __Pyx_CYFUNCTION_CCLASS 0x04 #define __Pyx_CyFunction_GetClosure(f)\ (((__pyx_CyFunctionObject *) (f))->func_closure) #define __Pyx_CyFunction_GetClassObj(f)\ (((__pyx_CyFunctionObject *) (f))->func_classobj) #define __Pyx_CyFunction_Defaults(type, f)\ ((type *)(((__pyx_CyFunctionObject *) (f))->defaults)) #define __Pyx_CyFunction_SetDefaultsGetter(f, g)\ ((__pyx_CyFunctionObject *) (f))->defaults_getter = (g) typedef struct { PyCFunctionObject func; #if PY_VERSION_HEX < 0x030500A0 PyObject *func_weakreflist; #endif PyObject *func_dict; PyObject *func_name; PyObject *func_qualname; PyObject *func_doc; PyObject *func_globals; PyObject *func_code; PyObject *func_closure; PyObject *func_classobj; void *defaults; int defaults_pyobjects; size_t defaults_size; // used by FusedFunction for copying defaults int flags; PyObject *defaults_tuple; PyObject *defaults_kwdict; PyObject *(*defaults_getter)(PyObject *); PyObject *func_annotations; } __pyx_CyFunctionObject; static PyTypeObject *__pyx_CyFunctionType = 0; #define __Pyx_CyFunction_Check(obj) (__Pyx_TypeCheck(obj, __pyx_CyFunctionType)) static PyObject *__Pyx_CyFunction_Init(__pyx_CyFunctionObject* op, PyMethodDef *ml, int flags, PyObject* qualname, PyObject *self, PyObject *module, PyObject *globals, PyObject* code); static CYTHON_INLINE void *__Pyx_CyFunction_InitDefaults(PyObject *m, size_t size, int pyobjects); static CYTHON_INLINE void __Pyx_CyFunction_SetDefaultsTuple(PyObject *m, PyObject *tuple); static CYTHON_INLINE void __Pyx_CyFunction_SetDefaultsKwDict(PyObject *m, PyObject *dict); static CYTHON_INLINE void __Pyx_CyFunction_SetAnnotationsDict(PyObject *m, PyObject *dict); static int __pyx_CyFunction_init(void); /* CythonFunction.proto */ static PyObject *__Pyx_CyFunction_New(PyMethodDef *ml, int flags, PyObject* qualname, PyObject *closure, PyObject *module, PyObject *globals, PyObject* code); /* Py3ClassCreate.proto */ static PyObject *__Pyx_Py3MetaclassPrepare(PyObject *metaclass, PyObject *bases, PyObject *name, PyObject *qualname, PyObject *mkw, PyObject *modname, PyObject *doc); static PyObject *__Pyx_Py3ClassCreate(PyObject *metaclass, PyObject *name, PyObject *bases, PyObject *dict, PyObject *mkw, int calculate_metaclass, int allow_py2_metaclass); /* CyFunctionClassCell.proto */ static int __Pyx_CyFunction_InitClassCell(PyObject *cyfunctions, PyObject *classobj); /* CLineInTraceback.proto */ #ifdef CYTHON_CLINE_IN_TRACEBACK #define __Pyx_CLineForTraceback(tstate, c_line) (((CYTHON_CLINE_IN_TRACEBACK)) ? 
c_line : 0) #else static int __Pyx_CLineForTraceback(PyThreadState *tstate, int c_line); #endif /* CodeObjectCache.proto */ typedef struct { PyCodeObject* code_object; int code_line; } __Pyx_CodeObjectCacheEntry; struct __Pyx_CodeObjectCache { int count; int max_count; __Pyx_CodeObjectCacheEntry* entries; }; static struct __Pyx_CodeObjectCache __pyx_code_cache = {0,0,NULL}; static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line); static PyCodeObject *__pyx_find_code_object(int code_line); static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object); /* AddTraceback.proto */ static void __Pyx_AddTraceback(const char *funcname, int c_line, int py_line, const char *filename); /* CIntToPy.proto */ static CYTHON_INLINE PyObject* __Pyx_PyInt_From_int(int value); /* CIntToPy.proto */ static CYTHON_INLINE PyObject* __Pyx_PyInt_From_long(long value); /* CIntToPy.proto */ static CYTHON_INLINE PyObject* __Pyx_PyInt_From_unsigned_PY_LONG_LONG(unsigned PY_LONG_LONG value); /* CIntFromPy.proto */ static CYTHON_INLINE long __Pyx_PyInt_As_long(PyObject *); /* CIntFromPy.proto */ static CYTHON_INLINE int __Pyx_PyInt_As_int(PyObject *); /* CheckBinaryVersion.proto */ static int __Pyx_check_binary_version(void); /* InitStrings.proto */ static int __Pyx_InitStrings(__Pyx_StringTabEntry *t); /* Module declarations from 'borg.compress' */ static PyTypeObject *__pyx_ptype_4borg_8compress_CompressorBase = 0; static PyObject *__pyx_f_4borg_8compress___pyx_unpickle_CompressorBase__set_state(struct __pyx_obj_4borg_8compress_CompressorBase *, PyObject *); /*proto*/ #define __Pyx_MODULE_NAME "borg.compress" extern int __pyx_module_is_main_borg__compress; int __pyx_module_is_main_borg__compress = 0; /* Implementation of 'borg.compress' */ static PyObject *__pyx_builtin_ImportError; static PyObject *__pyx_builtin_staticmethod; static PyObject *__pyx_builtin_property; static PyObject *__pyx_builtin_super; static PyObject *__pyx_builtin_MemoryError; static PyObject *__pyx_builtin_ValueError; static PyObject *__pyx_builtin_NotImplementedError; static const char __pyx_k_e[] = "e"; static const char __pyx_k_s[] = "s"; static const char __pyx_k_ID[] = "ID"; static const char __pyx_k__5[] = ","; static const char __pyx_k__7[] = "\377\377"; static const char __pyx_k__8[] = "\000\000"; static const char __pyx_k_LZ4[] = "LZ4"; static const char __pyx_k__13[] = "\001\000"; static const char __pyx_k__20[] = "\002\000"; static const char __pyx_k__28[] = "\003\000"; static const char __pyx_k__36[] = "\010\000"; static const char __pyx_k_buf[] = "buf"; static const char __pyx_k_cls[] = "cls"; static const char __pyx_k_cmf[] = "cmf"; static const char __pyx_k_doc[] = "__doc__"; static const char __pyx_k_flg[] = "flg"; static const char __pyx_k_get[] = "get"; static const char __pyx_k_hdr[] = "hdr"; static const char __pyx_k_lz4[] = "lz4"; static const char __pyx_k_new[] = "__new__"; static const char __pyx_k_Auto[] = "Auto"; static const char __pyx_k_LZMA[] = "LZMA"; static const char __pyx_k_ZLIB[] = "ZLIB"; static const char __pyx_k_ZSTD[] = "ZSTD"; static const char __pyx_k_auto[] = "auto"; static const char __pyx_k_data[] = "data"; static const char __pyx_k_dest[] = "dest"; static const char __pyx_k_dict[] = "__dict__"; static const char __pyx_k_init[] = "__init__"; static const char __pyx_k_lzma[] = "lzma"; static const char __pyx_k_main[] = "__main__"; static const char __pyx_k_name[] = "name"; static const char __pyx_k_none[] = "none"; static const char __pyx_k_null[] 
= "null"; static const char __pyx_k_self[] = "self"; static const char __pyx_k_size[] = "size"; static const char __pyx_k_test[] = "__test__"; static const char __pyx_k_zlib[] = "zlib"; static const char __pyx_k_zstd[] = "zstd"; static const char __pyx_k_CNONE[] = "CNONE"; static const char __pyx_k_check[] = "check"; static const char __pyx_k_count[] = "count"; static const char __pyx_k_error[] = "error"; static const char __pyx_k_idata[] = "idata"; static const char __pyx_k_inner[] = "inner"; static const char __pyx_k_isize[] = "isize"; static const char __pyx_k_level[] = "level"; static const char __pyx_k_osize[] = "osize"; static const char __pyx_k_ratio[] = "ratio"; static const char __pyx_k_rsize[] = "rsize"; static const char __pyx_k_split[] = "split"; static const char __pyx_k_super[] = "super"; static const char __pyx_k_1_1_06[] = "1.1_06"; static const char __pyx_k_Buffer[] = "Buffer"; static const char __pyx_k_buffer[] = "buffer"; static const char __pyx_k_decide[] = "_decide"; static const char __pyx_k_detect[] = "detect"; static const char __pyx_k_import[] = "__import__"; static const char __pyx_k_kwargs[] = "kwargs"; static const char __pyx_k_module[] = "__module__"; static const char __pyx_k_name_2[] = "__name__"; static const char __pyx_k_params[] = "params"; static const char __pyx_k_pickle[] = "pickle"; static const char __pyx_k_preset[] = "preset"; static const char __pyx_k_reduce[] = "__reduce__"; static const char __pyx_k_source[] = "source"; static const char __pyx_k_update[] = "update"; static const char __pyx_k_values[] = "values"; static const char __pyx_k_helpers[] = "helpers"; static const char __pyx_k_prepare[] = "__prepare__"; static const char __pyx_k_check_ok[] = "check_ok"; static const char __pyx_k_compress[] = "compress"; static const char __pyx_k_decide_2[] = "decide"; static const char __pyx_k_getstate[] = "__getstate__"; static const char __pyx_k_lz4_data[] = "lz4_data"; static const char __pyx_k_property[] = "property"; static const char __pyx_k_pyx_type[] = "__pyx_type"; static const char __pyx_k_qualname[] = "__qualname__"; static const char __pyx_k_setstate[] = "__setstate__"; static const char __pyx_k_LZMAError[] = "LZMAError"; static const char __pyx_k_baseclass[] = "baseclass"; static const char __pyx_k_metaclass[] = "__metaclass__"; static const char __pyx_k_pyx_state[] = "__pyx_state"; static const char __pyx_k_reduce_ex[] = "__reduce_ex__"; static const char __pyx_k_CHECK_NONE[] = "CHECK_NONE"; static const char __pyx_k_Compressor[] = "Compressor"; static const char __pyx_k_LZ4___init[] = "LZ4.__init__"; static const char __pyx_k_ValueError[] = "ValueError"; static const char __pyx_k_compressor[] = "compressor"; static const char __pyx_k_decompress[] = "decompress"; static const char __pyx_k_is_deflate[] = "is_deflate"; static const char __pyx_k_pyx_result[] = "__pyx_result"; static const char __pyx_k_startswith[] = "startswith"; static const char __pyx_k_API_VERSION[] = "API_VERSION"; static const char __pyx_k_Auto___init[] = "Auto.__init__"; static const char __pyx_k_Auto_decide[] = "Auto.decide"; static const char __pyx_k_Auto_detect[] = "Auto.detect"; static const char __pyx_k_ImportError[] = "ImportError"; static const char __pyx_k_LZMA___init[] = "LZMA.__init__"; static const char __pyx_k_MemoryError[] = "MemoryError"; static const char __pyx_k_PickleError[] = "PickleError"; static const char __pyx_k_ZLIB___init[] = "ZLIB.__init__"; static const char __pyx_k_ZLIB_detect[] = "ZLIB.detect"; static const char __pyx_k_ZSTD___init[] = 
"ZSTD.__init__"; static const char __pyx_k_compression[] = "compression"; static const char __pyx_k_Auto__decide[] = "Auto._decide"; static const char __pyx_k_LZ4_compress[] = "LZ4.compress"; static const char __pyx_k_pyx_checksum[] = "__pyx_checksum"; static const char __pyx_k_staticmethod[] = "staticmethod"; static const char __pyx_k_stringsource[] = "stringsource"; static const char __pyx_k_Auto_compress[] = "Auto.compress"; static const char __pyx_k_LZMA_compress[] = "LZMA.compress"; static const char __pyx_k_ZLIB_compress[] = "ZLIB.compress"; static const char __pyx_k_ZSTD_compress[] = "ZSTD.compress"; static const char __pyx_k_borg_compress[] = "borg.compress"; static const char __pyx_k_reduce_cython[] = "__reduce_cython__"; static const char __pyx_k_CNONE_compress[] = "CNONE.compress"; static const char __pyx_k_CompressorBase[] = "CompressorBase"; static const char __pyx_k_LZ4_decompress[] = "LZ4.decompress"; static const char __pyx_k_compressor_cls[] = "compressor_cls"; static const char __pyx_k_get_compressor[] = "get_compressor"; static const char __pyx_k_Auto_decompress[] = "Auto.decompress"; static const char __pyx_k_COMPRESSOR_LIST[] = "COMPRESSOR_LIST"; static const char __pyx_k_CompressionSpec[] = "CompressionSpec"; static const char __pyx_k_LZMA_decompress[] = "LZMA.decompress"; static const char __pyx_k_ZLIB_decompress[] = "ZLIB.decompress"; static const char __pyx_k_ZSTD_decompress[] = "ZSTD.decompress"; static const char __pyx_k_pyx_PickleError[] = "__pyx_PickleError"; static const char __pyx_k_setstate_cython[] = "__setstate_cython__"; static const char __pyx_k_CNONE_decompress[] = "CNONE.decompress"; static const char __pyx_k_COMPRESSOR_TABLE[] = "COMPRESSOR_TABLE"; static const char __pyx_k_Compressor___init[] = "Compressor.__init__"; static const char __pyx_k_Compressor_detect[] = "Compressor.detect"; static const char __pyx_k_uncompressed_data[] = "uncompressed_data"; static const char __pyx_k_DecompressionError[] = "DecompressionError"; static const char __pyx_k_cline_in_traceback[] = "cline_in_traceback"; static const char __pyx_k_Compressor_compress[] = "Compressor.compress"; static const char __pyx_k_NotImplementedError[] = "NotImplementedError"; static const char __pyx_k_exp_compressed_data[] = "exp_compressed_data"; static const char __pyx_k_lz4_compress_failed[] = "lz4 compress failed"; static const char __pyx_k_Compressor_decompress[] = "Compressor.decompress"; static const char __pyx_k_No_lzma_support_found[] = "No lzma support found."; static const char __pyx_k_lz4_decompress_failed[] = "lz4 decompress failed"; static const char __pyx_k_src_borg_compress_pyx[] = "src/borg/compress.pyx"; static const char __pyx_k_CompressionSpec___init[] = "CompressionSpec.__init__"; static const char __pyx_k_zstd_compress_failed_s[] = "zstd compress failed: %s"; static const char __pyx_k_zstd_decompress_failed_s[] = "zstd decompress failed: %s"; static const char __pyx_k_CompressionSpec_compressor[] = "CompressionSpec.compressor"; static const char __pyx_k_pyx_unpickle_CompressorBase[] = "__pyx_unpickle_CompressorBase"; static const char __pyx_k_lzma_compression_decompression[] = "\n lzma compression / decompression\n "; static const char __pyx_k_zlib_compression_decompression[] = "\n zlib compression / decompression (python stdlib)\n "; static const char __pyx_k_Meta_Compressor_that_decides_wh[] = "\n Meta-Compressor that decides which compression to use based on LZ4's ratio.\n\n As a meta-Compressor the actual compression is deferred to other Compressors,\n therefore 
this Compressor has no ID, no detect() and no decompress().\n "; static const char __pyx_k_borg_compress_Compression_is_ap[] = "\nborg.compress\n=============\n\nCompression is applied to chunks after ID hashing (so the ID is a direct function of the\nplain chunk, compression is irrelevant to it), and of course before encryption.\n\nThe \"auto\" mode (e.g. --compression auto,lzma,4) is implemented as a meta Compressor,\nmeaning that Auto acts like a Compressor, but defers actual work to others (namely\nLZ4 as a heuristic whether compression is worth it, and the specified Compressor\nfor the actual compression).\n\nDecompression is normally handled through Compressor.decompress which will detect\nwhich compressor has been used to compress the data and dispatch to the correct\ndecompressor.\n"; static const char __pyx_k_compresses_using_a_compressor_w[] = "\n compresses using a compressor with given name and parameters\n decompresses everything we can handle (autodetect)\n "; static const char __pyx_k_none_no_compression_just_pass_t[] = "\n none - no compression, just pass through data\n "; static const char __pyx_k_raw_LZ4_compression_decompressi[] = "\n raw LZ4 compression / decompression (liblz4).\n\n Features:\n - lz4 is super fast\n - wrapper releases CPython's GIL to support multithreaded code\n - uses safe lz4 methods that never go beyond the end of the output buffer\n "; static const char __pyx_k_Incompatible_checksums_s_vs_0xd4[] = "Incompatible checksums (%s vs 0xd41d8cd = ())"; static const char __pyx_k_No_decompressor_for_this_data_fo[] = "No decompressor for this data found: %r."; static const char __pyx_k_zstd_compression_decompression_p[] = "zstd compression / decompression (pypi: zstandard, gh: python-zstandard)"; static const char __pyx_k_zstd_decompress_failed_size_mism[] = "zstd decompress failed: size mismatch"; static const char __pyx_k_zstd_get_size_failed_data_was_no[] = "zstd get size failed: data was not compressed by zstd"; static const char __pyx_k_zstd_get_size_failed_original_si[] = "zstd get size failed: original size unknown"; static PyObject *__pyx_kp_u_1_1_06; static PyObject *__pyx_n_s_API_VERSION; static PyObject *__pyx_n_s_Auto; static PyObject *__pyx_n_s_Auto___init; static PyObject *__pyx_n_s_Auto__decide; static PyObject *__pyx_n_s_Auto_compress; static PyObject *__pyx_n_s_Auto_decide; static PyObject *__pyx_n_s_Auto_decompress; static PyObject *__pyx_n_s_Auto_detect; static PyObject *__pyx_n_s_Buffer; static PyObject *__pyx_n_s_CHECK_NONE; static PyObject *__pyx_n_s_CNONE; static PyObject *__pyx_n_s_CNONE_compress; static PyObject *__pyx_n_s_CNONE_decompress; static PyObject *__pyx_n_s_COMPRESSOR_LIST; static PyObject *__pyx_n_s_COMPRESSOR_TABLE; static PyObject *__pyx_n_s_CompressionSpec; static PyObject *__pyx_n_s_CompressionSpec___init; static PyObject *__pyx_n_s_CompressionSpec_compressor; static PyObject *__pyx_n_s_Compressor; static PyObject *__pyx_n_s_CompressorBase; static PyObject *__pyx_n_s_Compressor___init; static PyObject *__pyx_n_s_Compressor_compress; static PyObject *__pyx_n_s_Compressor_decompress; static PyObject *__pyx_n_s_Compressor_detect; static PyObject *__pyx_n_s_DecompressionError; static PyObject *__pyx_n_s_ID; static PyObject *__pyx_n_s_ImportError; static PyObject *__pyx_kp_s_Incompatible_checksums_s_vs_0xd4; static PyObject *__pyx_n_s_LZ4; static PyObject *__pyx_n_s_LZ4___init; static PyObject *__pyx_n_s_LZ4_compress; static PyObject *__pyx_n_s_LZ4_decompress; static PyObject *__pyx_n_s_LZMA; static PyObject 
*__pyx_n_s_LZMAError; static PyObject *__pyx_n_s_LZMA___init; static PyObject *__pyx_n_s_LZMA_compress; static PyObject *__pyx_n_s_LZMA_decompress; static PyObject *__pyx_n_s_MemoryError; static PyObject *__pyx_n_u_MemoryError; static PyObject *__pyx_kp_s_Meta_Compressor_that_decides_wh; static PyObject *__pyx_kp_u_No_decompressor_for_this_data_fo; static PyObject *__pyx_kp_u_No_lzma_support_found; static PyObject *__pyx_n_s_NotImplementedError; static PyObject *__pyx_n_s_PickleError; static PyObject *__pyx_n_s_ValueError; static PyObject *__pyx_n_s_ZLIB; static PyObject *__pyx_n_s_ZLIB___init; static PyObject *__pyx_n_s_ZLIB_compress; static PyObject *__pyx_n_s_ZLIB_decompress; static PyObject *__pyx_n_s_ZLIB_detect; static PyObject *__pyx_n_s_ZSTD; static PyObject *__pyx_n_s_ZSTD___init; static PyObject *__pyx_n_s_ZSTD_compress; static PyObject *__pyx_n_s_ZSTD_decompress; static PyObject *__pyx_kp_b__13; static PyObject *__pyx_kp_b__20; static PyObject *__pyx_kp_b__28; static PyObject *__pyx_kp_b__36; static PyObject *__pyx_kp_u__5; static PyObject *__pyx_kp_b__7; static PyObject *__pyx_kp_b__8; static PyObject *__pyx_n_u_auto; static PyObject *__pyx_n_u_baseclass; static PyObject *__pyx_n_s_borg_compress; static PyObject *__pyx_n_s_buf; static PyObject *__pyx_n_s_buffer; static PyObject *__pyx_n_s_check; static PyObject *__pyx_n_s_check_ok; static PyObject *__pyx_n_s_cline_in_traceback; static PyObject *__pyx_n_s_cls; static PyObject *__pyx_n_s_cmf; static PyObject *__pyx_n_s_compress; static PyObject *__pyx_kp_s_compresses_using_a_compressor_w; static PyObject *__pyx_n_s_compression; static PyObject *__pyx_n_s_compressor; static PyObject *__pyx_n_s_compressor_cls; static PyObject *__pyx_n_s_count; static PyObject *__pyx_n_s_data; static PyObject *__pyx_n_s_decide; static PyObject *__pyx_n_s_decide_2; static PyObject *__pyx_n_s_decompress; static PyObject *__pyx_n_s_dest; static PyObject *__pyx_n_s_detect; static PyObject *__pyx_n_s_dict; static PyObject *__pyx_n_s_doc; static PyObject *__pyx_n_s_e; static PyObject *__pyx_n_s_error; static PyObject *__pyx_n_s_exp_compressed_data; static PyObject *__pyx_n_s_flg; static PyObject *__pyx_n_s_get; static PyObject *__pyx_n_s_get_compressor; static PyObject *__pyx_n_s_getstate; static PyObject *__pyx_n_s_hdr; static PyObject *__pyx_n_s_helpers; static PyObject *__pyx_n_s_idata; static PyObject *__pyx_n_s_import; static PyObject *__pyx_n_s_init; static PyObject *__pyx_n_s_inner; static PyObject *__pyx_n_s_is_deflate; static PyObject *__pyx_n_s_isize; static PyObject *__pyx_n_s_kwargs; static PyObject *__pyx_n_s_level; static PyObject *__pyx_n_s_lz4; static PyObject *__pyx_n_u_lz4; static PyObject *__pyx_kp_u_lz4_compress_failed; static PyObject *__pyx_n_s_lz4_data; static PyObject *__pyx_kp_u_lz4_decompress_failed; static PyObject *__pyx_n_s_lzma; static PyObject *__pyx_n_u_lzma; static PyObject *__pyx_kp_s_lzma_compression_decompression; static PyObject *__pyx_n_s_main; static PyObject *__pyx_n_s_metaclass; static PyObject *__pyx_n_s_module; static PyObject *__pyx_n_s_name; static PyObject *__pyx_n_s_name_2; static PyObject *__pyx_n_s_new; static PyObject *__pyx_n_s_none; static PyObject *__pyx_n_u_none; static PyObject *__pyx_kp_s_none_no_compression_just_pass_t; static PyObject *__pyx_n_u_null; static PyObject *__pyx_n_s_osize; static PyObject *__pyx_n_s_params; static PyObject *__pyx_n_s_pickle; static PyObject *__pyx_n_s_prepare; static PyObject *__pyx_n_s_preset; static PyObject *__pyx_n_s_property; static PyObject 
*__pyx_n_s_pyx_PickleError; static PyObject *__pyx_n_s_pyx_checksum; static PyObject *__pyx_n_s_pyx_result; static PyObject *__pyx_n_s_pyx_state; static PyObject *__pyx_n_s_pyx_type; static PyObject *__pyx_n_s_pyx_unpickle_CompressorBase; static PyObject *__pyx_n_s_qualname; static PyObject *__pyx_n_s_ratio; static PyObject *__pyx_kp_s_raw_LZ4_compression_decompressi; static PyObject *__pyx_n_s_reduce; static PyObject *__pyx_n_s_reduce_cython; static PyObject *__pyx_n_s_reduce_ex; static PyObject *__pyx_n_s_rsize; static PyObject *__pyx_n_s_s; static PyObject *__pyx_n_s_self; static PyObject *__pyx_n_s_setstate; static PyObject *__pyx_n_s_setstate_cython; static PyObject *__pyx_n_s_size; static PyObject *__pyx_n_s_source; static PyObject *__pyx_n_s_split; static PyObject *__pyx_kp_s_src_borg_compress_pyx; static PyObject *__pyx_n_s_startswith; static PyObject *__pyx_n_s_staticmethod; static PyObject *__pyx_kp_s_stringsource; static PyObject *__pyx_n_s_super; static PyObject *__pyx_n_s_test; static PyObject *__pyx_n_s_uncompressed_data; static PyObject *__pyx_n_s_update; static PyObject *__pyx_n_s_values; static PyObject *__pyx_n_s_zlib; static PyObject *__pyx_n_u_zlib; static PyObject *__pyx_kp_s_zlib_compression_decompression; static PyObject *__pyx_n_u_zstd; static PyObject *__pyx_kp_u_zstd_compress_failed_s; static PyObject *__pyx_kp_s_zstd_compression_decompression_p; static PyObject *__pyx_kp_u_zstd_decompress_failed_s; static PyObject *__pyx_kp_u_zstd_decompress_failed_size_mism; static PyObject *__pyx_kp_u_zstd_get_size_failed_data_was_no; static PyObject *__pyx_kp_u_zstd_get_size_failed_original_si; static PyObject *__pyx_pf_4borg_8compress_14CompressorBase_detect(PyTypeObject *__pyx_v_cls, PyObject *__pyx_v_data); /* proto */ static int __pyx_pf_4borg_8compress_14CompressorBase_2__init__(CYTHON_UNUSED struct __pyx_obj_4borg_8compress_CompressorBase *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v_kwargs); /* proto */ static PyObject *__pyx_pf_4borg_8compress_14CompressorBase_4decide(struct __pyx_obj_4borg_8compress_CompressorBase *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v_data); /* proto */ static PyObject *__pyx_pf_4borg_8compress_14CompressorBase_6compress(struct __pyx_obj_4borg_8compress_CompressorBase *__pyx_v_self, PyObject *__pyx_v_data); /* proto */ static PyObject *__pyx_pf_4borg_8compress_14CompressorBase_8decompress(CYTHON_UNUSED struct __pyx_obj_4borg_8compress_CompressorBase *__pyx_v_self, PyObject *__pyx_v_data); /* proto */ static PyObject *__pyx_pf_4borg_8compress_14CompressorBase_10__reduce_cython__(struct __pyx_obj_4borg_8compress_CompressorBase *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_4borg_8compress_14CompressorBase_12__setstate_cython__(struct __pyx_obj_4borg_8compress_CompressorBase *__pyx_v_self, PyObject *__pyx_v___pyx_state); /* proto */ static PyObject *__pyx_pf_4borg_8compress_5CNONE_compress(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_self, PyObject *__pyx_v_data); /* proto */ static PyObject *__pyx_pf_4borg_8compress_5CNONE_2decompress(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_self, PyObject *__pyx_v_data); /* proto */ static PyObject *__pyx_pf_4borg_8compress_3LZ4___init__(CYTHON_UNUSED PyObject *__pyx_self, CYTHON_UNUSED PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v_kwargs); /* proto */ static PyObject *__pyx_pf_4borg_8compress_3LZ4_2compress(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_self, PyObject *__pyx_v_idata); /* proto */ static PyObject 
*__pyx_pf_4borg_8compress_3LZ4_4decompress(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_self, PyObject *__pyx_v_idata); /* proto */ static PyObject *__pyx_pf_4borg_8compress_4LZMA___init__(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_self, PyObject *__pyx_v_level, PyObject *__pyx_v_kwargs); /* proto */ static PyObject *__pyx_pf_4borg_8compress_4LZMA_2compress(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_self, PyObject *__pyx_v_data); /* proto */ static PyObject *__pyx_pf_4borg_8compress_4LZMA_4decompress(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_self, PyObject *__pyx_v_data); /* proto */ static PyObject *__pyx_pf_4borg_8compress_4ZSTD___init__(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_self, PyObject *__pyx_v_level, PyObject *__pyx_v_kwargs); /* proto */ static PyObject *__pyx_pf_4borg_8compress_4ZSTD_2compress(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_self, PyObject *__pyx_v_idata); /* proto */ static PyObject *__pyx_pf_4borg_8compress_4ZSTD_4decompress(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_self, PyObject *__pyx_v_idata); /* proto */ static PyObject *__pyx_pf_4borg_8compress_4ZLIB_detect(CYTHON_UNUSED PyObject *__pyx_self, CYTHON_UNUSED PyObject *__pyx_v_cls, PyObject *__pyx_v_data); /* proto */ static PyObject *__pyx_pf_4borg_8compress_4ZLIB_2__init__(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_self, PyObject *__pyx_v_level, PyObject *__pyx_v_kwargs); /* proto */ static PyObject *__pyx_pf_4borg_8compress_4ZLIB_4compress(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_self, PyObject *__pyx_v_data); /* proto */ static PyObject *__pyx_pf_4borg_8compress_4ZLIB_6decompress(CYTHON_UNUSED PyObject *__pyx_self, CYTHON_UNUSED PyObject *__pyx_v_self, PyObject *__pyx_v_data); /* proto */ static PyObject *__pyx_pf_4borg_8compress_4Auto___init__(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_self, PyObject *__pyx_v_compressor); /* proto */ static PyObject *__pyx_pf_4borg_8compress_4Auto_2_decide(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_self, PyObject *__pyx_v_data); /* proto */ static PyObject *__pyx_pf_4borg_8compress_4Auto_4decide(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_self, PyObject *__pyx_v_data); /* proto */ static PyObject *__pyx_pf_4borg_8compress_4Auto_6compress(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_self, PyObject *__pyx_v_data); /* proto */ static PyObject *__pyx_pf_4borg_8compress_4Auto_8decompress(CYTHON_UNUSED PyObject *__pyx_self, CYTHON_UNUSED PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v_data); /* proto */ static PyObject *__pyx_pf_4borg_8compress_4Auto_10detect(CYTHON_UNUSED PyObject *__pyx_self, CYTHON_UNUSED PyObject *__pyx_v_cls, CYTHON_UNUSED PyObject *__pyx_v_data); /* proto */ static PyObject *__pyx_pf_4borg_8compress_get_compressor(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_name, PyObject *__pyx_v_kwargs); /* proto */ static PyObject *__pyx_pf_4borg_8compress_10Compressor___init__(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_self, PyObject *__pyx_v_name, PyObject *__pyx_v_kwargs); /* proto */ static PyObject *__pyx_pf_4borg_8compress_10Compressor_2compress(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_self, PyObject *__pyx_v_data); /* proto */ static PyObject *__pyx_pf_4borg_8compress_10Compressor_4decompress(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_self, PyObject *__pyx_v_data); /* proto */ static PyObject *__pyx_pf_4borg_8compress_10Compressor_6detect(CYTHON_UNUSED PyObject 
*__pyx_self, PyObject *__pyx_v_data); /* proto */ static PyObject *__pyx_pf_4borg_8compress_15CompressionSpec___init__(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_self, PyObject *__pyx_v_s); /* proto */ static PyObject *__pyx_pf_4borg_8compress_15CompressionSpec_2compressor(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_4borg_8compress_2__pyx_unpickle_CompressorBase(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v___pyx_type, long __pyx_v___pyx_checksum, PyObject *__pyx_v___pyx_state); /* proto */ static PyObject *__pyx_tp_new_4borg_8compress_CompressorBase(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/ static PyObject *__pyx_int_0; static PyObject *__pyx_int_1; static PyObject *__pyx_int_2; static PyObject *__pyx_int_3; static PyObject *__pyx_int_6; static PyObject *__pyx_int_8; static PyObject *__pyx_int_9; static PyObject *__pyx_int_15; static PyObject *__pyx_int_22; static PyObject *__pyx_int_31; static PyObject *__pyx_int_256; static PyObject *__pyx_int_222419149; static PyObject *__pyx_slice_; static PyObject *__pyx_slice__4; static PyObject *__pyx_slice__6; static PyObject *__pyx_tuple__2; static PyObject *__pyx_tuple__3; static PyObject *__pyx_tuple__9; static PyObject *__pyx_tuple__11; static PyObject *__pyx_tuple__14; static PyObject *__pyx_tuple__16; static PyObject *__pyx_tuple__18; static PyObject *__pyx_tuple__21; static PyObject *__pyx_tuple__23; static PyObject *__pyx_tuple__24; static PyObject *__pyx_tuple__26; static PyObject *__pyx_tuple__29; static PyObject *__pyx_tuple__31; static PyObject *__pyx_tuple__32; static PyObject *__pyx_tuple__34; static PyObject *__pyx_tuple__37; static PyObject *__pyx_tuple__39; static PyObject *__pyx_tuple__41; static PyObject *__pyx_tuple__42; static PyObject *__pyx_tuple__44; static PyObject *__pyx_tuple__46; static PyObject *__pyx_tuple__48; static PyObject *__pyx_tuple__50; static PyObject *__pyx_tuple__52; static PyObject *__pyx_tuple__54; static PyObject *__pyx_tuple__56; static PyObject *__pyx_tuple__58; static PyObject *__pyx_tuple__60; static PyObject *__pyx_tuple__62; static PyObject *__pyx_tuple__63; static PyObject *__pyx_tuple__65; static PyObject *__pyx_tuple__67; static PyObject *__pyx_tuple__69; static PyObject *__pyx_tuple__71; static PyObject *__pyx_tuple__73; static PyObject *__pyx_codeobj__10; static PyObject *__pyx_codeobj__12; static PyObject *__pyx_codeobj__15; static PyObject *__pyx_codeobj__17; static PyObject *__pyx_codeobj__19; static PyObject *__pyx_codeobj__22; static PyObject *__pyx_codeobj__25; static PyObject *__pyx_codeobj__27; static PyObject *__pyx_codeobj__30; static PyObject *__pyx_codeobj__33; static PyObject *__pyx_codeobj__35; static PyObject *__pyx_codeobj__38; static PyObject *__pyx_codeobj__40; static PyObject *__pyx_codeobj__43; static PyObject *__pyx_codeobj__45; static PyObject *__pyx_codeobj__47; static PyObject *__pyx_codeobj__49; static PyObject *__pyx_codeobj__51; static PyObject *__pyx_codeobj__53; static PyObject *__pyx_codeobj__55; static PyObject *__pyx_codeobj__57; static PyObject *__pyx_codeobj__59; static PyObject *__pyx_codeobj__61; static PyObject *__pyx_codeobj__64; static PyObject *__pyx_codeobj__66; static PyObject *__pyx_codeobj__68; static PyObject *__pyx_codeobj__70; static PyObject *__pyx_codeobj__72; static PyObject *__pyx_codeobj__74; /* Late includes */ /* "borg/compress.pyx":63 * * @classmethod * def detect(cls, data): # <<<<<<<<<<<<<< * return data.startswith(cls.ID) * */ /* Python wrapper */ 
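/*
 * Descriptive note on the CompressorBase methods wrapped below, summarizing the
 * framing contract stated in compress.pyx: compress() prepends the compressor's
 * ID bytes to the payload, decompress() strips them again (data[2:], i.e. a
 * two-byte ID), and detect() simply checks data.startswith(cls.ID), which is
 * what lets Compressor.decompress() autodetect the matching decompressor.
 * Illustrative Python-level sketch (assumed usage, not part of this generated
 * module; names as declared in borg.compress):
 *
 *     blob = get_compressor('lz4').compress(b'chunk')   # ID bytes + LZ4 stream
 *     assert LZ4.detect(blob)
 *     assert Compressor('none').decompress(blob) == b'chunk'   # autodetects LZ4
 */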
static PyObject *__pyx_pw_4borg_8compress_14CompressorBase_1detect(PyObject *__pyx_v_cls, PyObject *__pyx_v_data); /*proto*/ static PyObject *__pyx_pw_4borg_8compress_14CompressorBase_1detect(PyObject *__pyx_v_cls, PyObject *__pyx_v_data) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("detect (wrapper)", 0); __pyx_r = __pyx_pf_4borg_8compress_14CompressorBase_detect(((PyTypeObject*)__pyx_v_cls), ((PyObject *)__pyx_v_data)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4borg_8compress_14CompressorBase_detect(PyTypeObject *__pyx_v_cls, PyObject *__pyx_v_data) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("detect", 0); /* "borg/compress.pyx":64 * @classmethod * def detect(cls, data): * return data.startswith(cls.ID) # <<<<<<<<<<<<<< * * def __init__(self, **kwargs): */ __Pyx_XDECREF(__pyx_r); __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_v_data, __pyx_n_s_startswith); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 64, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_cls), __pyx_n_s_ID); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 64, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = NULL; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_2))) { __pyx_t_4 = PyMethod_GET_SELF(__pyx_t_2); if (likely(__pyx_t_4)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2); __Pyx_INCREF(__pyx_t_4); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_2, function); } } __pyx_t_1 = (__pyx_t_4) ? __Pyx_PyObject_Call2Args(__pyx_t_2, __pyx_t_4, __pyx_t_3) : __Pyx_PyObject_CallOneArg(__pyx_t_2, __pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 64, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "borg/compress.pyx":63 * * @classmethod * def detect(cls, data): # <<<<<<<<<<<<<< * return data.startswith(cls.ID) * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_AddTraceback("borg.compress.CompressorBase.detect", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "borg/compress.pyx":66 * return data.startswith(cls.ID) * * def __init__(self, **kwargs): # <<<<<<<<<<<<<< * pass * */ /* Python wrapper */ static int __pyx_pw_4borg_8compress_14CompressorBase_3__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static int __pyx_pw_4borg_8compress_14CompressorBase_3__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { CYTHON_UNUSED PyObject *__pyx_v_kwargs = 0; int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__init__ (wrapper)", 0); if (unlikely(PyTuple_GET_SIZE(__pyx_args) > 0)) { __Pyx_RaiseArgtupleInvalid("__init__", 1, 0, 0, PyTuple_GET_SIZE(__pyx_args)); return -1;} if (unlikely(__pyx_kwds) && unlikely(!__Pyx_CheckKeywordStrings(__pyx_kwds, "__init__", 1))) return -1; __pyx_r = __pyx_pf_4borg_8compress_14CompressorBase_2__init__(((struct __pyx_obj_4borg_8compress_CompressorBase *)__pyx_v_self), __pyx_v_kwargs); /* 
function exit code */ __Pyx_XDECREF(__pyx_v_kwargs); __Pyx_RefNannyFinishContext(); return __pyx_r; } static int __pyx_pf_4borg_8compress_14CompressorBase_2__init__(CYTHON_UNUSED struct __pyx_obj_4borg_8compress_CompressorBase *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v_kwargs) { int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__init__", 0); /* function exit code */ __pyx_r = 0; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "borg/compress.pyx":69 * pass * * def decide(self, data): # <<<<<<<<<<<<<< * """ * Return which compressor will perform the actual compression for *data*. */ /* Python wrapper */ static PyObject *__pyx_pw_4borg_8compress_14CompressorBase_5decide(PyObject *__pyx_v_self, PyObject *__pyx_v_data); /*proto*/ static char __pyx_doc_4borg_8compress_14CompressorBase_4decide[] = "\n Return which compressor will perform the actual compression for *data*.\n\n This exists for a very specific case: If borg recreate is instructed to recompress\n using Auto compression it needs to determine the _actual_ target compression of a chunk\n in order to detect whether it should be recompressed.\n\n For all Compressors that are not Auto this always returns *self*.\n "; static PyObject *__pyx_pw_4borg_8compress_14CompressorBase_5decide(PyObject *__pyx_v_self, PyObject *__pyx_v_data) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("decide (wrapper)", 0); __pyx_r = __pyx_pf_4borg_8compress_14CompressorBase_4decide(((struct __pyx_obj_4borg_8compress_CompressorBase *)__pyx_v_self), ((PyObject *)__pyx_v_data)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4borg_8compress_14CompressorBase_4decide(struct __pyx_obj_4borg_8compress_CompressorBase *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v_data) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("decide", 0); /* "borg/compress.pyx":79 * For all Compressors that are not Auto this always returns *self*. * """ * return self # <<<<<<<<<<<<<< * * def compress(self, data): */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(((PyObject *)__pyx_v_self)); __pyx_r = ((PyObject *)__pyx_v_self); goto __pyx_L0; /* "borg/compress.pyx":69 * pass * * def decide(self, data): # <<<<<<<<<<<<<< * """ * Return which compressor will perform the actual compression for *data*. */ /* function exit code */ __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "borg/compress.pyx":81 * return self * * def compress(self, data): # <<<<<<<<<<<<<< * """ * Compress *data* (bytes) and return bytes result. Prepend the ID bytes of this compressor, */ /* Python wrapper */ static PyObject *__pyx_pw_4borg_8compress_14CompressorBase_7compress(PyObject *__pyx_v_self, PyObject *__pyx_v_data); /*proto*/ static char __pyx_doc_4borg_8compress_14CompressorBase_6compress[] = "\n Compress *data* (bytes) and return bytes result. 
Prepend the ID bytes of this compressor,\n which is needed so that the correct decompressor can be used for decompression.\n "; static PyObject *__pyx_pw_4borg_8compress_14CompressorBase_7compress(PyObject *__pyx_v_self, PyObject *__pyx_v_data) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("compress (wrapper)", 0); __pyx_r = __pyx_pf_4borg_8compress_14CompressorBase_6compress(((struct __pyx_obj_4borg_8compress_CompressorBase *)__pyx_v_self), ((PyObject *)__pyx_v_data)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4borg_8compress_14CompressorBase_6compress(struct __pyx_obj_4borg_8compress_CompressorBase *__pyx_v_self, PyObject *__pyx_v_data) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("compress", 0); /* "borg/compress.pyx":87 * """ * # add ID bytes * return self.ID + data # <<<<<<<<<<<<<< * * def decompress(self, data): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_ID); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 87, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = PyNumber_Add(__pyx_t_1, __pyx_v_data); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 87, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "borg/compress.pyx":81 * return self * * def compress(self, data): # <<<<<<<<<<<<<< * """ * Compress *data* (bytes) and return bytes result. Prepend the ID bytes of this compressor, */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_AddTraceback("borg.compress.CompressorBase.compress", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "borg/compress.pyx":89 * return self.ID + data * * def decompress(self, data): # <<<<<<<<<<<<<< * """ * Decompress *data* (bytes) and return bytes result. The leading Compressor ID */ /* Python wrapper */ static PyObject *__pyx_pw_4borg_8compress_14CompressorBase_9decompress(PyObject *__pyx_v_self, PyObject *__pyx_v_data); /*proto*/ static char __pyx_doc_4borg_8compress_14CompressorBase_8decompress[] = "\n Decompress *data* (bytes) and return bytes result. 
The leading Compressor ID\n bytes need to be present.\n\n Only handles input generated by _this_ Compressor - for a general purpose\n decompression method see *Compressor.decompress*.\n "; static PyObject *__pyx_pw_4borg_8compress_14CompressorBase_9decompress(PyObject *__pyx_v_self, PyObject *__pyx_v_data) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("decompress (wrapper)", 0); __pyx_r = __pyx_pf_4borg_8compress_14CompressorBase_8decompress(((struct __pyx_obj_4borg_8compress_CompressorBase *)__pyx_v_self), ((PyObject *)__pyx_v_data)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4borg_8compress_14CompressorBase_8decompress(CYTHON_UNUSED struct __pyx_obj_4borg_8compress_CompressorBase *__pyx_v_self, PyObject *__pyx_v_data) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("decompress", 0); /* "borg/compress.pyx":98 * """ * # strip ID bytes * return data[2:] # <<<<<<<<<<<<<< * * */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_PyObject_GetSlice(__pyx_v_data, 2, 0, NULL, NULL, &__pyx_slice_, 1, 0, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 98, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "borg/compress.pyx":89 * return self.ID + data * * def decompress(self, data): # <<<<<<<<<<<<<< * """ * Decompress *data* (bytes) and return bytes result. The leading Compressor ID */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("borg.compress.CompressorBase.decompress", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "(tree fragment)":1 * def __reduce_cython__(self): # <<<<<<<<<<<<<< * cdef tuple state * cdef object _dict */ /* Python wrapper */ static PyObject *__pyx_pw_4borg_8compress_14CompressorBase_11__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ static PyObject *__pyx_pw_4borg_8compress_14CompressorBase_11__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0); __pyx_r = __pyx_pf_4borg_8compress_14CompressorBase_10__reduce_cython__(((struct __pyx_obj_4borg_8compress_CompressorBase *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4borg_8compress_14CompressorBase_10__reduce_cython__(struct __pyx_obj_4borg_8compress_CompressorBase *__pyx_v_self) { PyObject *__pyx_v_state = 0; PyObject *__pyx_v__dict = 0; int __pyx_v_use_setstate; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_t_2; int __pyx_t_3; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__reduce_cython__", 0); /* "(tree fragment)":5 * cdef object _dict * cdef bint use_setstate * state = () # <<<<<<<<<<<<<< * _dict = getattr(self, '__dict__', None) * if _dict is not None: */ __Pyx_INCREF(__pyx_empty_tuple); __pyx_v_state = __pyx_empty_tuple; /* "(tree fragment)":6 * cdef bint use_setstate * state = () * _dict = getattr(self, '__dict__', None) # <<<<<<<<<<<<<< * if _dict is not None: * state += (_dict,) */ __pyx_t_1 = __Pyx_GetAttr3(((PyObject 
*)__pyx_v_self), __pyx_n_s_dict, Py_None); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 6, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_v__dict = __pyx_t_1; __pyx_t_1 = 0; /* "(tree fragment)":7 * state = () * _dict = getattr(self, '__dict__', None) * if _dict is not None: # <<<<<<<<<<<<<< * state += (_dict,) * use_setstate = True */ __pyx_t_2 = (__pyx_v__dict != Py_None); __pyx_t_3 = (__pyx_t_2 != 0); if (__pyx_t_3) { /* "(tree fragment)":8 * _dict = getattr(self, '__dict__', None) * if _dict is not None: * state += (_dict,) # <<<<<<<<<<<<<< * use_setstate = True * else: */ __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 8, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_INCREF(__pyx_v__dict); __Pyx_GIVEREF(__pyx_v__dict); PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_v__dict); __pyx_t_4 = PyNumber_InPlaceAdd(__pyx_v_state, __pyx_t_1); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 8, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF_SET(__pyx_v_state, ((PyObject*)__pyx_t_4)); __pyx_t_4 = 0; /* "(tree fragment)":9 * if _dict is not None: * state += (_dict,) * use_setstate = True # <<<<<<<<<<<<<< * else: * use_setstate = False */ __pyx_v_use_setstate = 1; /* "(tree fragment)":7 * state = () * _dict = getattr(self, '__dict__', None) * if _dict is not None: # <<<<<<<<<<<<<< * state += (_dict,) * use_setstate = True */ goto __pyx_L3; } /* "(tree fragment)":11 * use_setstate = True * else: * use_setstate = False # <<<<<<<<<<<<<< * if use_setstate: * return __pyx_unpickle_CompressorBase, (type(self), 0xd41d8cd, None), state */ /*else*/ { __pyx_v_use_setstate = 0; } __pyx_L3:; /* "(tree fragment)":12 * else: * use_setstate = False * if use_setstate: # <<<<<<<<<<<<<< * return __pyx_unpickle_CompressorBase, (type(self), 0xd41d8cd, None), state * else: */ __pyx_t_3 = (__pyx_v_use_setstate != 0); if (__pyx_t_3) { /* "(tree fragment)":13 * use_setstate = False * if use_setstate: * return __pyx_unpickle_CompressorBase, (type(self), 0xd41d8cd, None), state # <<<<<<<<<<<<<< * else: * return __pyx_unpickle_CompressorBase, (type(self), 0xd41d8cd, state) */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_n_s_pyx_unpickle_CompressorBase); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 13, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_1 = PyTuple_New(3); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 13, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_INCREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self)))); __Pyx_GIVEREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self)))); PyTuple_SET_ITEM(__pyx_t_1, 0, ((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self)))); __Pyx_INCREF(__pyx_int_222419149); __Pyx_GIVEREF(__pyx_int_222419149); PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_int_222419149); __Pyx_INCREF(Py_None); __Pyx_GIVEREF(Py_None); PyTuple_SET_ITEM(__pyx_t_1, 2, Py_None); __pyx_t_5 = PyTuple_New(3); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 13, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_4); __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_t_1); __Pyx_INCREF(__pyx_v_state); __Pyx_GIVEREF(__pyx_v_state); PyTuple_SET_ITEM(__pyx_t_5, 2, __pyx_v_state); __pyx_t_4 = 0; __pyx_t_1 = 0; __pyx_r = __pyx_t_5; __pyx_t_5 = 0; goto __pyx_L0; /* "(tree fragment)":12 * else: * use_setstate = False * if use_setstate: # <<<<<<<<<<<<<< * return __pyx_unpickle_CompressorBase, (type(self), 0xd41d8cd, None), state * else: */ } /* "(tree fragment)":15 * return __pyx_unpickle_CompressorBase, (type(self), 0xd41d8cd, None), 
state * else: * return __pyx_unpickle_CompressorBase, (type(self), 0xd41d8cd, state) # <<<<<<<<<<<<<< * def __setstate_cython__(self, __pyx_state): * __pyx_unpickle_CompressorBase__set_state(self, __pyx_state) */ /*else*/ { __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_n_s_pyx_unpickle_CompressorBase); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 15, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_1 = PyTuple_New(3); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 15, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_INCREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self)))); __Pyx_GIVEREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self)))); PyTuple_SET_ITEM(__pyx_t_1, 0, ((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self)))); __Pyx_INCREF(__pyx_int_222419149); __Pyx_GIVEREF(__pyx_int_222419149); PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_int_222419149); __Pyx_INCREF(__pyx_v_state); __Pyx_GIVEREF(__pyx_v_state); PyTuple_SET_ITEM(__pyx_t_1, 2, __pyx_v_state); __pyx_t_4 = PyTuple_New(2); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 15, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_GIVEREF(__pyx_t_5); PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_5); __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_4, 1, __pyx_t_1); __pyx_t_5 = 0; __pyx_t_1 = 0; __pyx_r = __pyx_t_4; __pyx_t_4 = 0; goto __pyx_L0; } /* "(tree fragment)":1 * def __reduce_cython__(self): # <<<<<<<<<<<<<< * cdef tuple state * cdef object _dict */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("borg.compress.CompressorBase.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_state); __Pyx_XDECREF(__pyx_v__dict); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "(tree fragment)":16 * else: * return __pyx_unpickle_CompressorBase, (type(self), 0xd41d8cd, state) * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< * __pyx_unpickle_CompressorBase__set_state(self, __pyx_state) */ /* Python wrapper */ static PyObject *__pyx_pw_4borg_8compress_14CompressorBase_13__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state); /*proto*/ static PyObject *__pyx_pw_4borg_8compress_14CompressorBase_13__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0); __pyx_r = __pyx_pf_4borg_8compress_14CompressorBase_12__setstate_cython__(((struct __pyx_obj_4borg_8compress_CompressorBase *)__pyx_v_self), ((PyObject *)__pyx_v___pyx_state)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4borg_8compress_14CompressorBase_12__setstate_cython__(struct __pyx_obj_4borg_8compress_CompressorBase *__pyx_v_self, PyObject *__pyx_v___pyx_state) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__setstate_cython__", 0); /* "(tree fragment)":17 * return __pyx_unpickle_CompressorBase, (type(self), 0xd41d8cd, state) * def __setstate_cython__(self, __pyx_state): * __pyx_unpickle_CompressorBase__set_state(self, __pyx_state) # <<<<<<<<<<<<<< */ if (!(likely(PyTuple_CheckExact(__pyx_v___pyx_state))||((__pyx_v___pyx_state) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "tuple", Py_TYPE(__pyx_v___pyx_state)->tp_name), 0))) __PYX_ERR(1, 17, 
__pyx_L1_error) __pyx_t_1 = __pyx_f_4borg_8compress___pyx_unpickle_CompressorBase__set_state(__pyx_v_self, ((PyObject*)__pyx_v___pyx_state)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 17, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "(tree fragment)":16 * else: * return __pyx_unpickle_CompressorBase, (type(self), 0xd41d8cd, state) * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< * __pyx_unpickle_CompressorBase__set_state(self, __pyx_state) */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("borg.compress.CompressorBase.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "borg/compress.pyx":108 * name = 'none' * * def compress(self, data): # <<<<<<<<<<<<<< * return super().compress(data) * */ /* Python wrapper */ static PyObject *__pyx_pw_4borg_8compress_5CNONE_1compress(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static PyMethodDef __pyx_mdef_4borg_8compress_5CNONE_1compress = {"compress", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_4borg_8compress_5CNONE_1compress, METH_VARARGS|METH_KEYWORDS, 0}; static PyObject *__pyx_pw_4borg_8compress_5CNONE_1compress(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyObject *__pyx_v_self = 0; PyObject *__pyx_v_data = 0; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("compress (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_self,&__pyx_n_s_data,0}; PyObject* values[2] = {0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); CYTHON_FALLTHROUGH; case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_self)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_data)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("compress", 1, 2, 2, 1); __PYX_ERR(0, 108, __pyx_L3_error) } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "compress") < 0)) __PYX_ERR(0, 108, __pyx_L3_error) } } else if (PyTuple_GET_SIZE(__pyx_args) != 2) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); } __pyx_v_self = values[0]; __pyx_v_data = values[1]; } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("compress", 1, 2, 2, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 108, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("borg.compress.CNONE.compress", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_4borg_8compress_5CNONE_compress(__pyx_self, __pyx_v_self, __pyx_v_data); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4borg_8compress_5CNONE_compress(CYTHON_UNUSED 
PyObject *__pyx_self, PyObject *__pyx_v_self, PyObject *__pyx_v_data) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("compress", 0); /* "borg/compress.pyx":109 * * def compress(self, data): * return super().compress(data) # <<<<<<<<<<<<<< * * def decompress(self, data): */ __Pyx_XDECREF(__pyx_r); __pyx_t_2 = __Pyx_CyFunction_GetClassObj(__pyx_self); if (!__pyx_t_2) { PyErr_SetString(PyExc_SystemError, "super(): empty __class__ cell"); __PYX_ERR(0, 109, __pyx_L1_error) } __Pyx_INCREF(__pyx_t_2); __pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 109, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_2); __Pyx_INCREF(__pyx_v_self); __Pyx_GIVEREF(__pyx_v_self); PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_v_self); __pyx_t_2 = 0; __pyx_t_2 = __Pyx_PyObject_Call(__pyx_builtin_super, __pyx_t_3, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 109, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_compress); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 109, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = NULL; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_3))) { __pyx_t_2 = PyMethod_GET_SELF(__pyx_t_3); if (likely(__pyx_t_2)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_3); __Pyx_INCREF(__pyx_t_2); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_3, function); } } __pyx_t_1 = (__pyx_t_2) ? __Pyx_PyObject_Call2Args(__pyx_t_3, __pyx_t_2, __pyx_v_data) : __Pyx_PyObject_CallOneArg(__pyx_t_3, __pyx_v_data); __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 109, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "borg/compress.pyx":108 * name = 'none' * * def compress(self, data): # <<<<<<<<<<<<<< * return super().compress(data) * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("borg.compress.CNONE.compress", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "borg/compress.pyx":111 * return super().compress(data) * * def decompress(self, data): # <<<<<<<<<<<<<< * data = super().decompress(data) * if not isinstance(data, bytes): */ /* Python wrapper */ static PyObject *__pyx_pw_4borg_8compress_5CNONE_3decompress(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static PyMethodDef __pyx_mdef_4borg_8compress_5CNONE_3decompress = {"decompress", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_4borg_8compress_5CNONE_3decompress, METH_VARARGS|METH_KEYWORDS, 0}; static PyObject *__pyx_pw_4borg_8compress_5CNONE_3decompress(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyObject *__pyx_v_self = 0; PyObject *__pyx_v_data = 0; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("decompress (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_self,&__pyx_n_s_data,0}; PyObject* values[2] = {0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t 
kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); CYTHON_FALLTHROUGH; case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_self)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_data)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("decompress", 1, 2, 2, 1); __PYX_ERR(0, 111, __pyx_L3_error) } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "decompress") < 0)) __PYX_ERR(0, 111, __pyx_L3_error) } } else if (PyTuple_GET_SIZE(__pyx_args) != 2) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); } __pyx_v_self = values[0]; __pyx_v_data = values[1]; } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("decompress", 1, 2, 2, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 111, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("borg.compress.CNONE.decompress", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_4borg_8compress_5CNONE_2decompress(__pyx_self, __pyx_v_self, __pyx_v_data); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4borg_8compress_5CNONE_2decompress(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_self, PyObject *__pyx_v_data) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; int __pyx_t_4; int __pyx_t_5; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("decompress", 0); __Pyx_INCREF(__pyx_v_data); /* "borg/compress.pyx":112 * * def decompress(self, data): * data = super().decompress(data) # <<<<<<<<<<<<<< * if not isinstance(data, bytes): * data = bytes(data) */ __pyx_t_2 = __Pyx_CyFunction_GetClassObj(__pyx_self); if (!__pyx_t_2) { PyErr_SetString(PyExc_SystemError, "super(): empty __class__ cell"); __PYX_ERR(0, 112, __pyx_L1_error) } __Pyx_INCREF(__pyx_t_2); __pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 112, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_2); __Pyx_INCREF(__pyx_v_self); __Pyx_GIVEREF(__pyx_v_self); PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_v_self); __pyx_t_2 = 0; __pyx_t_2 = __Pyx_PyObject_Call(__pyx_builtin_super, __pyx_t_3, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 112, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_decompress); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 112, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = NULL; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_3))) { __pyx_t_2 = PyMethod_GET_SELF(__pyx_t_3); if (likely(__pyx_t_2)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_3); __Pyx_INCREF(__pyx_t_2); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_3, function); } } __pyx_t_1 = (__pyx_t_2) ? 
__Pyx_PyObject_Call2Args(__pyx_t_3, __pyx_t_2, __pyx_v_data) : __Pyx_PyObject_CallOneArg(__pyx_t_3, __pyx_v_data); __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 112, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF_SET(__pyx_v_data, __pyx_t_1); __pyx_t_1 = 0; /* "borg/compress.pyx":113 * def decompress(self, data): * data = super().decompress(data) * if not isinstance(data, bytes): # <<<<<<<<<<<<<< * data = bytes(data) * return data */ __pyx_t_4 = PyBytes_Check(__pyx_v_data); __pyx_t_5 = ((!(__pyx_t_4 != 0)) != 0); if (__pyx_t_5) { /* "borg/compress.pyx":114 * data = super().decompress(data) * if not isinstance(data, bytes): * data = bytes(data) # <<<<<<<<<<<<<< * return data * */ __pyx_t_1 = __Pyx_PyObject_CallOneArg(((PyObject *)(&PyBytes_Type)), __pyx_v_data); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 114, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF_SET(__pyx_v_data, __pyx_t_1); __pyx_t_1 = 0; /* "borg/compress.pyx":113 * def decompress(self, data): * data = super().decompress(data) * if not isinstance(data, bytes): # <<<<<<<<<<<<<< * data = bytes(data) * return data */ } /* "borg/compress.pyx":115 * if not isinstance(data, bytes): * data = bytes(data) * return data # <<<<<<<<<<<<<< * * */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(__pyx_v_data); __pyx_r = __pyx_v_data; goto __pyx_L0; /* "borg/compress.pyx":111 * return super().compress(data) * * def decompress(self, data): # <<<<<<<<<<<<<< * data = super().decompress(data) * if not isinstance(data, bytes): */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("borg.compress.CNONE.decompress", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_data); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "borg/compress.pyx":130 * name = 'lz4' * * def __init__(self, **kwargs): # <<<<<<<<<<<<<< * pass * */ /* Python wrapper */ static PyObject *__pyx_pw_4borg_8compress_3LZ4_1__init__(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static PyMethodDef __pyx_mdef_4borg_8compress_3LZ4_1__init__ = {"__init__", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_4borg_8compress_3LZ4_1__init__, METH_VARARGS|METH_KEYWORDS, 0}; static PyObject *__pyx_pw_4borg_8compress_3LZ4_1__init__(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { CYTHON_UNUSED PyObject *__pyx_v_self = 0; CYTHON_UNUSED PyObject *__pyx_v_kwargs = 0; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__init__ (wrapper)", 0); __pyx_v_kwargs = PyDict_New(); if (unlikely(!__pyx_v_kwargs)) return NULL; __Pyx_GOTREF(__pyx_v_kwargs); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_self,0}; PyObject* values[1] = {0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_self)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, __pyx_v_kwargs, values, pos_args, "__init__") 
< 0)) __PYX_ERR(0, 130, __pyx_L3_error) } } else if (PyTuple_GET_SIZE(__pyx_args) != 1) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); } __pyx_v_self = values[0]; } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("__init__", 1, 1, 1, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 130, __pyx_L3_error) __pyx_L3_error:; __Pyx_DECREF(__pyx_v_kwargs); __pyx_v_kwargs = 0; __Pyx_AddTraceback("borg.compress.LZ4.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_4borg_8compress_3LZ4___init__(__pyx_self, __pyx_v_self, __pyx_v_kwargs); /* function exit code */ __Pyx_XDECREF(__pyx_v_kwargs); __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4borg_8compress_3LZ4___init__(CYTHON_UNUSED PyObject *__pyx_self, CYTHON_UNUSED PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v_kwargs) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__init__", 0); /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "borg/compress.pyx":133 * pass * * def compress(self, idata): # <<<<<<<<<<<<<< * if not isinstance(idata, bytes): * idata = bytes(idata) # code below does not work with memoryview */ /* Python wrapper */ static PyObject *__pyx_pw_4borg_8compress_3LZ4_3compress(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static PyMethodDef __pyx_mdef_4borg_8compress_3LZ4_3compress = {"compress", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_4borg_8compress_3LZ4_3compress, METH_VARARGS|METH_KEYWORDS, 0}; static PyObject *__pyx_pw_4borg_8compress_3LZ4_3compress(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyObject *__pyx_v_self = 0; PyObject *__pyx_v_idata = 0; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("compress (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_self,&__pyx_n_s_idata,0}; PyObject* values[2] = {0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); CYTHON_FALLTHROUGH; case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_self)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_idata)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("compress", 1, 2, 2, 1); __PYX_ERR(0, 133, __pyx_L3_error) } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "compress") < 0)) __PYX_ERR(0, 133, __pyx_L3_error) } } else if (PyTuple_GET_SIZE(__pyx_args) != 2) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); } __pyx_v_self = values[0]; __pyx_v_idata = values[1]; } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("compress", 1, 2, 2, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 133, 
__pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("borg.compress.LZ4.compress", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_4borg_8compress_3LZ4_2compress(__pyx_self, __pyx_v_self, __pyx_v_idata); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4borg_8compress_3LZ4_2compress(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_self, PyObject *__pyx_v_idata) { int __pyx_v_isize; int __pyx_v_osize; char *__pyx_v_source; char *__pyx_v_dest; PyObject *__pyx_v_buf = NULL; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; Py_ssize_t __pyx_t_4; char *__pyx_t_5; PyObject *__pyx_t_6 = NULL; PyObject *__pyx_t_7 = NULL; PyObject *__pyx_t_8 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("compress", 0); __Pyx_INCREF(__pyx_v_idata); /* "borg/compress.pyx":134 * * def compress(self, idata): * if not isinstance(idata, bytes): # <<<<<<<<<<<<<< * idata = bytes(idata) # code below does not work with memoryview * cdef int isize = len(idata) */ __pyx_t_1 = PyBytes_Check(__pyx_v_idata); __pyx_t_2 = ((!(__pyx_t_1 != 0)) != 0); if (__pyx_t_2) { /* "borg/compress.pyx":135 * def compress(self, idata): * if not isinstance(idata, bytes): * idata = bytes(idata) # code below does not work with memoryview # <<<<<<<<<<<<<< * cdef int isize = len(idata) * cdef int osize */ __pyx_t_3 = __Pyx_PyObject_CallOneArg(((PyObject *)(&PyBytes_Type)), __pyx_v_idata); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 135, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF_SET(__pyx_v_idata, __pyx_t_3); __pyx_t_3 = 0; /* "borg/compress.pyx":134 * * def compress(self, idata): * if not isinstance(idata, bytes): # <<<<<<<<<<<<<< * idata = bytes(idata) # code below does not work with memoryview * cdef int isize = len(idata) */ } /* "borg/compress.pyx":136 * if not isinstance(idata, bytes): * idata = bytes(idata) # code below does not work with memoryview * cdef int isize = len(idata) # <<<<<<<<<<<<<< * cdef int osize * cdef char *source = idata */ __pyx_t_4 = PyObject_Length(__pyx_v_idata); if (unlikely(__pyx_t_4 == ((Py_ssize_t)-1))) __PYX_ERR(0, 136, __pyx_L1_error) __pyx_v_isize = __pyx_t_4; /* "borg/compress.pyx":138 * cdef int isize = len(idata) * cdef int osize * cdef char *source = idata # <<<<<<<<<<<<<< * cdef char *dest * osize = LZ4_compressBound(isize) */ __pyx_t_5 = __Pyx_PyObject_AsWritableString(__pyx_v_idata); if (unlikely((!__pyx_t_5) && PyErr_Occurred())) __PYX_ERR(0, 138, __pyx_L1_error) __pyx_v_source = __pyx_t_5; /* "borg/compress.pyx":140 * cdef char *source = idata * cdef char *dest * osize = LZ4_compressBound(isize) # <<<<<<<<<<<<<< * buf = buffer.get(osize) * dest = buf */ __pyx_v_osize = LZ4_compressBound(__pyx_v_isize); /* "borg/compress.pyx":141 * cdef char *dest * osize = LZ4_compressBound(isize) * buf = buffer.get(osize) # <<<<<<<<<<<<<< * dest = buf * osize = LZ4_compress_default(source, dest, isize, osize) */ __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_n_s_buffer); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 141, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_n_s_get); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 141, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __pyx_t_6 = __Pyx_PyInt_From_int(__pyx_v_osize); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 141, 
__pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __pyx_t_8 = NULL; if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_7))) { __pyx_t_8 = PyMethod_GET_SELF(__pyx_t_7); if (likely(__pyx_t_8)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_7); __Pyx_INCREF(__pyx_t_8); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_7, function); } } __pyx_t_3 = (__pyx_t_8) ? __Pyx_PyObject_Call2Args(__pyx_t_7, __pyx_t_8, __pyx_t_6) : __Pyx_PyObject_CallOneArg(__pyx_t_7, __pyx_t_6); __Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0; __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 141, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; __pyx_v_buf = __pyx_t_3; __pyx_t_3 = 0; /* "borg/compress.pyx":142 * osize = LZ4_compressBound(isize) * buf = buffer.get(osize) * dest = buf # <<<<<<<<<<<<<< * osize = LZ4_compress_default(source, dest, isize, osize) * if not osize: */ __pyx_t_5 = __Pyx_PyObject_AsWritableString(__pyx_v_buf); if (unlikely((!__pyx_t_5) && PyErr_Occurred())) __PYX_ERR(0, 142, __pyx_L1_error) __pyx_v_dest = ((char *)__pyx_t_5); /* "borg/compress.pyx":143 * buf = buffer.get(osize) * dest = buf * osize = LZ4_compress_default(source, dest, isize, osize) # <<<<<<<<<<<<<< * if not osize: * raise Exception('lz4 compress failed') */ __pyx_v_osize = LZ4_compress_default(__pyx_v_source, __pyx_v_dest, __pyx_v_isize, __pyx_v_osize); /* "borg/compress.pyx":144 * dest = buf * osize = LZ4_compress_default(source, dest, isize, osize) * if not osize: # <<<<<<<<<<<<<< * raise Exception('lz4 compress failed') * return super().compress(dest[:osize]) */ __pyx_t_2 = ((!(__pyx_v_osize != 0)) != 0); if (unlikely(__pyx_t_2)) { /* "borg/compress.pyx":145 * osize = LZ4_compress_default(source, dest, isize, osize) * if not osize: * raise Exception('lz4 compress failed') # <<<<<<<<<<<<<< * return super().compress(dest[:osize]) * */ __pyx_t_3 = __Pyx_PyObject_Call(((PyObject *)(&((PyTypeObject*)PyExc_Exception)[0])), __pyx_tuple__2, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 145, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(0, 145, __pyx_L1_error) /* "borg/compress.pyx":144 * dest = buf * osize = LZ4_compress_default(source, dest, isize, osize) * if not osize: # <<<<<<<<<<<<<< * raise Exception('lz4 compress failed') * return super().compress(dest[:osize]) */ } /* "borg/compress.pyx":146 * if not osize: * raise Exception('lz4 compress failed') * return super().compress(dest[:osize]) # <<<<<<<<<<<<<< * * def decompress(self, idata): */ __Pyx_XDECREF(__pyx_r); __pyx_t_7 = __Pyx_CyFunction_GetClassObj(__pyx_self); if (!__pyx_t_7) { PyErr_SetString(PyExc_SystemError, "super(): empty __class__ cell"); __PYX_ERR(0, 146, __pyx_L1_error) } __Pyx_INCREF(__pyx_t_7); __pyx_t_6 = PyTuple_New(2); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 146, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_GIVEREF(__pyx_t_7); PyTuple_SET_ITEM(__pyx_t_6, 0, __pyx_t_7); __Pyx_INCREF(__pyx_v_self); __Pyx_GIVEREF(__pyx_v_self); PyTuple_SET_ITEM(__pyx_t_6, 1, __pyx_v_self); __pyx_t_7 = 0; __pyx_t_7 = __Pyx_PyObject_Call(__pyx_builtin_super, __pyx_t_6, NULL); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 146, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_t_7, __pyx_n_s_compress); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 146, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; __pyx_t_7 = 
__Pyx_PyBytes_FromStringAndSize(__pyx_v_dest + 0, __pyx_v_osize - 0); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 146, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __pyx_t_8 = NULL; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_6))) { __pyx_t_8 = PyMethod_GET_SELF(__pyx_t_6); if (likely(__pyx_t_8)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_6); __Pyx_INCREF(__pyx_t_8); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_6, function); } } __pyx_t_3 = (__pyx_t_8) ? __Pyx_PyObject_Call2Args(__pyx_t_6, __pyx_t_8, __pyx_t_7) : __Pyx_PyObject_CallOneArg(__pyx_t_6, __pyx_t_7); __Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0; __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 146, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __pyx_r = __pyx_t_3; __pyx_t_3 = 0; goto __pyx_L0; /* "borg/compress.pyx":133 * pass * * def compress(self, idata): # <<<<<<<<<<<<<< * if not isinstance(idata, bytes): * idata = bytes(idata) # code below does not work with memoryview */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_6); __Pyx_XDECREF(__pyx_t_7); __Pyx_XDECREF(__pyx_t_8); __Pyx_AddTraceback("borg.compress.LZ4.compress", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_buf); __Pyx_XDECREF(__pyx_v_idata); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "borg/compress.pyx":148 * return super().compress(dest[:osize]) * * def decompress(self, idata): # <<<<<<<<<<<<<< * if not isinstance(idata, bytes): * idata = bytes(idata) # code below does not work with memoryview */ /* Python wrapper */ static PyObject *__pyx_pw_4borg_8compress_3LZ4_5decompress(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static PyMethodDef __pyx_mdef_4borg_8compress_3LZ4_5decompress = {"decompress", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_4borg_8compress_3LZ4_5decompress, METH_VARARGS|METH_KEYWORDS, 0}; static PyObject *__pyx_pw_4borg_8compress_3LZ4_5decompress(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyObject *__pyx_v_self = 0; PyObject *__pyx_v_idata = 0; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("decompress (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_self,&__pyx_n_s_idata,0}; PyObject* values[2] = {0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); CYTHON_FALLTHROUGH; case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_self)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_idata)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("decompress", 1, 2, 2, 1); __PYX_ERR(0, 148, __pyx_L3_error) } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "decompress") < 0)) __PYX_ERR(0, 148, __pyx_L3_error) } } else if (PyTuple_GET_SIZE(__pyx_args) != 2) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); values[1] = 
PyTuple_GET_ITEM(__pyx_args, 1); } __pyx_v_self = values[0]; __pyx_v_idata = values[1]; } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("decompress", 1, 2, 2, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 148, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("borg.compress.LZ4.decompress", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_4borg_8compress_3LZ4_4decompress(__pyx_self, __pyx_v_self, __pyx_v_idata); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4borg_8compress_3LZ4_4decompress(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_self, PyObject *__pyx_v_idata) { int __pyx_v_isize; int __pyx_v_osize; int __pyx_v_rsize; char *__pyx_v_source; char *__pyx_v_dest; PyObject *__pyx_v_buf = NULL; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; Py_ssize_t __pyx_t_6; char *__pyx_t_7; long __pyx_t_8; PyObject *__pyx_t_9 = NULL; int __pyx_t_10; PyObject *__pyx_t_11 = NULL; PyObject *__pyx_t_12 = NULL; PyObject *__pyx_t_13 = NULL; PyObject *__pyx_t_14 = NULL; PyObject *__pyx_t_15 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("decompress", 0); __Pyx_INCREF(__pyx_v_idata); /* "borg/compress.pyx":149 * * def decompress(self, idata): * if not isinstance(idata, bytes): # <<<<<<<<<<<<<< * idata = bytes(idata) # code below does not work with memoryview * idata = super().decompress(idata) */ __pyx_t_1 = PyBytes_Check(__pyx_v_idata); __pyx_t_2 = ((!(__pyx_t_1 != 0)) != 0); if (__pyx_t_2) { /* "borg/compress.pyx":150 * def decompress(self, idata): * if not isinstance(idata, bytes): * idata = bytes(idata) # code below does not work with memoryview # <<<<<<<<<<<<<< * idata = super().decompress(idata) * cdef int isize = len(idata) */ __pyx_t_3 = __Pyx_PyObject_CallOneArg(((PyObject *)(&PyBytes_Type)), __pyx_v_idata); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 150, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF_SET(__pyx_v_idata, __pyx_t_3); __pyx_t_3 = 0; /* "borg/compress.pyx":149 * * def decompress(self, idata): * if not isinstance(idata, bytes): # <<<<<<<<<<<<<< * idata = bytes(idata) # code below does not work with memoryview * idata = super().decompress(idata) */ } /* "borg/compress.pyx":151 * if not isinstance(idata, bytes): * idata = bytes(idata) # code below does not work with memoryview * idata = super().decompress(idata) # <<<<<<<<<<<<<< * cdef int isize = len(idata) * cdef int osize */ __pyx_t_4 = __Pyx_CyFunction_GetClassObj(__pyx_self); if (!__pyx_t_4) { PyErr_SetString(PyExc_SystemError, "super(): empty __class__ cell"); __PYX_ERR(0, 151, __pyx_L1_error) } __Pyx_INCREF(__pyx_t_4); __pyx_t_5 = PyTuple_New(2); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 151, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_4); __Pyx_INCREF(__pyx_v_self); __Pyx_GIVEREF(__pyx_v_self); PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_v_self); __pyx_t_4 = 0; __pyx_t_4 = __Pyx_PyObject_Call(__pyx_builtin_super, __pyx_t_5, NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 151, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_n_s_decompress); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 151, __pyx_L1_error) 
__Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = NULL; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_5))) { __pyx_t_4 = PyMethod_GET_SELF(__pyx_t_5); if (likely(__pyx_t_4)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_5); __Pyx_INCREF(__pyx_t_4); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_5, function); } } __pyx_t_3 = (__pyx_t_4) ? __Pyx_PyObject_Call2Args(__pyx_t_5, __pyx_t_4, __pyx_v_idata) : __Pyx_PyObject_CallOneArg(__pyx_t_5, __pyx_v_idata); __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 151, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF_SET(__pyx_v_idata, __pyx_t_3); __pyx_t_3 = 0; /* "borg/compress.pyx":152 * idata = bytes(idata) # code below does not work with memoryview * idata = super().decompress(idata) * cdef int isize = len(idata) # <<<<<<<<<<<<<< * cdef int osize * cdef int rsize */ __pyx_t_6 = PyObject_Length(__pyx_v_idata); if (unlikely(__pyx_t_6 == ((Py_ssize_t)-1))) __PYX_ERR(0, 152, __pyx_L1_error) __pyx_v_isize = __pyx_t_6; /* "borg/compress.pyx":155 * cdef int osize * cdef int rsize * cdef char *source = idata # <<<<<<<<<<<<<< * cdef char *dest * # a bit more than 8MB is enough for the usual data sizes yielded by the chunker. */ __pyx_t_7 = __Pyx_PyObject_AsWritableString(__pyx_v_idata); if (unlikely((!__pyx_t_7) && PyErr_Occurred())) __PYX_ERR(0, 155, __pyx_L1_error) __pyx_v_source = __pyx_t_7; /* "borg/compress.pyx":159 * # a bit more than 8MB is enough for the usual data sizes yielded by the chunker. * # allocate more if isize * 3 is already bigger, to avoid having to resize often. * osize = max(int(1.1 * 2**23), isize * 3) # <<<<<<<<<<<<<< * while True: * try: */ __pyx_t_8 = (__pyx_v_isize * 3); __pyx_t_3 = __Pyx_PyInt_FromDouble((1.1 * 8388608.0)); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 159, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = __Pyx_PyInt_From_long(__pyx_t_8); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 159, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_9 = PyObject_RichCompare(__pyx_t_4, __pyx_t_3, Py_GT); __Pyx_XGOTREF(__pyx_t_9); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 159, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_2 = __Pyx_PyObject_IsTrue(__pyx_t_9); if (unlikely(__pyx_t_2 < 0)) __PYX_ERR(0, 159, __pyx_L1_error) __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; if (__pyx_t_2) { __pyx_t_9 = __Pyx_PyInt_From_long(__pyx_t_8); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 159, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __pyx_t_5 = __pyx_t_9; __pyx_t_9 = 0; } else { __Pyx_INCREF(__pyx_t_3); __pyx_t_5 = __pyx_t_3; } __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_10 = __Pyx_PyInt_As_int(__pyx_t_5); if (unlikely((__pyx_t_10 == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 159, __pyx_L1_error) __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_v_osize = __pyx_t_10; /* "borg/compress.pyx":160 * # allocate more if isize * 3 is already bigger, to avoid having to resize often. 
* osize = max(int(1.1 * 2**23), isize * 3) * while True: # <<<<<<<<<<<<<< * try: * buf = buffer.get(osize) */ while (1) { /* "borg/compress.pyx":161 * osize = max(int(1.1 * 2**23), isize * 3) * while True: * try: # <<<<<<<<<<<<<< * buf = buffer.get(osize) * except MemoryError: */ { __Pyx_PyThreadState_declare __Pyx_PyThreadState_assign __Pyx_ExceptionSave(&__pyx_t_11, &__pyx_t_12, &__pyx_t_13); __Pyx_XGOTREF(__pyx_t_11); __Pyx_XGOTREF(__pyx_t_12); __Pyx_XGOTREF(__pyx_t_13); /*try:*/ { /* "borg/compress.pyx":162 * while True: * try: * buf = buffer.get(osize) # <<<<<<<<<<<<<< * except MemoryError: * raise DecompressionError('MemoryError') */ __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_n_s_buffer); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 162, __pyx_L6_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s_get); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 162, __pyx_L6_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = __Pyx_PyInt_From_int(__pyx_v_osize); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 162, __pyx_L6_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = NULL; if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_9))) { __pyx_t_4 = PyMethod_GET_SELF(__pyx_t_9); if (likely(__pyx_t_4)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_9); __Pyx_INCREF(__pyx_t_4); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_9, function); } } __pyx_t_5 = (__pyx_t_4) ? __Pyx_PyObject_Call2Args(__pyx_t_9, __pyx_t_4, __pyx_t_3) : __Pyx_PyObject_CallOneArg(__pyx_t_9, __pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 162, __pyx_L6_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __Pyx_XDECREF_SET(__pyx_v_buf, __pyx_t_5); __pyx_t_5 = 0; /* "borg/compress.pyx":161 * osize = max(int(1.1 * 2**23), isize * 3) * while True: * try: # <<<<<<<<<<<<<< * buf = buffer.get(osize) * except MemoryError: */ } __Pyx_XDECREF(__pyx_t_11); __pyx_t_11 = 0; __Pyx_XDECREF(__pyx_t_12); __pyx_t_12 = 0; __Pyx_XDECREF(__pyx_t_13); __pyx_t_13 = 0; goto __pyx_L13_try_end; __pyx_L6_error:; __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_XDECREF(__pyx_t_9); __pyx_t_9 = 0; /* "borg/compress.pyx":163 * try: * buf = buffer.get(osize) * except MemoryError: # <<<<<<<<<<<<<< * raise DecompressionError('MemoryError') * dest = buf */ __pyx_t_10 = __Pyx_PyErr_ExceptionMatches(__pyx_builtin_MemoryError); if (__pyx_t_10) { __Pyx_AddTraceback("borg.compress.LZ4.decompress", __pyx_clineno, __pyx_lineno, __pyx_filename); if (__Pyx_GetException(&__pyx_t_5, &__pyx_t_9, &__pyx_t_3) < 0) __PYX_ERR(0, 163, __pyx_L8_except_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_GOTREF(__pyx_t_9); __Pyx_GOTREF(__pyx_t_3); /* "borg/compress.pyx":164 * buf = buffer.get(osize) * except MemoryError: * raise DecompressionError('MemoryError') # <<<<<<<<<<<<<< * dest = buf * rsize = LZ4_decompress_safe(source, dest, isize, osize) */ __Pyx_GetModuleGlobalName(__pyx_t_14, __pyx_n_s_DecompressionError); if (unlikely(!__pyx_t_14)) __PYX_ERR(0, 164, __pyx_L8_except_error) __Pyx_GOTREF(__pyx_t_14); __pyx_t_15 = NULL; if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_14))) { __pyx_t_15 = PyMethod_GET_SELF(__pyx_t_14); if (likely(__pyx_t_15)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_14); __Pyx_INCREF(__pyx_t_15); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_14, function); } } __pyx_t_4 = (__pyx_t_15) ? 
__Pyx_PyObject_Call2Args(__pyx_t_14, __pyx_t_15, __pyx_n_u_MemoryError) : __Pyx_PyObject_CallOneArg(__pyx_t_14, __pyx_n_u_MemoryError); __Pyx_XDECREF(__pyx_t_15); __pyx_t_15 = 0; if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 164, __pyx_L8_except_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_14); __pyx_t_14 = 0; __Pyx_Raise(__pyx_t_4, 0, 0, 0); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __PYX_ERR(0, 164, __pyx_L8_except_error) } goto __pyx_L8_except_error; __pyx_L8_except_error:; /* "borg/compress.pyx":161 * osize = max(int(1.1 * 2**23), isize * 3) * while True: * try: # <<<<<<<<<<<<<< * buf = buffer.get(osize) * except MemoryError: */ __Pyx_XGIVEREF(__pyx_t_11); __Pyx_XGIVEREF(__pyx_t_12); __Pyx_XGIVEREF(__pyx_t_13); __Pyx_ExceptionReset(__pyx_t_11, __pyx_t_12, __pyx_t_13); goto __pyx_L1_error; __pyx_L13_try_end:; } /* "borg/compress.pyx":165 * except MemoryError: * raise DecompressionError('MemoryError') * dest = buf # <<<<<<<<<<<<<< * rsize = LZ4_decompress_safe(source, dest, isize, osize) * if rsize >= 0: */ __pyx_t_7 = __Pyx_PyObject_AsWritableString(__pyx_v_buf); if (unlikely((!__pyx_t_7) && PyErr_Occurred())) __PYX_ERR(0, 165, __pyx_L1_error) __pyx_v_dest = ((char *)__pyx_t_7); /* "borg/compress.pyx":166 * raise DecompressionError('MemoryError') * dest = buf * rsize = LZ4_decompress_safe(source, dest, isize, osize) # <<<<<<<<<<<<<< * if rsize >= 0: * break */ __pyx_v_rsize = LZ4_decompress_safe(__pyx_v_source, __pyx_v_dest, __pyx_v_isize, __pyx_v_osize); /* "borg/compress.pyx":167 * dest = buf * rsize = LZ4_decompress_safe(source, dest, isize, osize) * if rsize >= 0: # <<<<<<<<<<<<<< * break * if osize > 2 ** 27: # 128MiB (should be enough, considering max. repo obj size and very good compression) */ __pyx_t_2 = ((__pyx_v_rsize >= 0) != 0); if (__pyx_t_2) { /* "borg/compress.pyx":168 * rsize = LZ4_decompress_safe(source, dest, isize, osize) * if rsize >= 0: * break # <<<<<<<<<<<<<< * if osize > 2 ** 27: # 128MiB (should be enough, considering max. repo obj size and very good compression) * # this is insane, get out of here */ goto __pyx_L5_break; /* "borg/compress.pyx":167 * dest = buf * rsize = LZ4_decompress_safe(source, dest, isize, osize) * if rsize >= 0: # <<<<<<<<<<<<<< * break * if osize > 2 ** 27: # 128MiB (should be enough, considering max. repo obj size and very good compression) */ } /* "borg/compress.pyx":169 * if rsize >= 0: * break * if osize > 2 ** 27: # 128MiB (should be enough, considering max. repo obj size and very good compression) # <<<<<<<<<<<<<< * # this is insane, get out of here * raise DecompressionError('lz4 decompress failed') */ __pyx_t_2 = ((__pyx_v_osize > 0x8000000) != 0); if (unlikely(__pyx_t_2)) { /* "borg/compress.pyx":171 * if osize > 2 ** 27: # 128MiB (should be enough, considering max. repo obj size and very good compression) * # this is insane, get out of here * raise DecompressionError('lz4 decompress failed') # <<<<<<<<<<<<<< * # likely the buffer was too small, get a bigger one: * osize = int(1.5 * osize) */ __Pyx_GetModuleGlobalName(__pyx_t_9, __pyx_n_s_DecompressionError); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 171, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __pyx_t_5 = NULL; if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_9))) { __pyx_t_5 = PyMethod_GET_SELF(__pyx_t_9); if (likely(__pyx_t_5)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_9); __Pyx_INCREF(__pyx_t_5); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_9, function); } } __pyx_t_3 = (__pyx_t_5) ? 
__Pyx_PyObject_Call2Args(__pyx_t_9, __pyx_t_5, __pyx_kp_u_lz4_decompress_failed) : __Pyx_PyObject_CallOneArg(__pyx_t_9, __pyx_kp_u_lz4_decompress_failed); __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 171, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(0, 171, __pyx_L1_error) /* "borg/compress.pyx":169 * if rsize >= 0: * break * if osize > 2 ** 27: # 128MiB (should be enough, considering max. repo obj size and very good compression) # <<<<<<<<<<<<<< * # this is insane, get out of here * raise DecompressionError('lz4 decompress failed') */ } /* "borg/compress.pyx":173 * raise DecompressionError('lz4 decompress failed') * # likely the buffer was too small, get a bigger one: * osize = int(1.5 * osize) # <<<<<<<<<<<<<< * return dest[:rsize] * */ __pyx_v_osize = ((int)(1.5 * __pyx_v_osize)); } __pyx_L5_break:; /* "borg/compress.pyx":174 * # likely the buffer was too small, get a bigger one: * osize = int(1.5 * osize) * return dest[:rsize] # <<<<<<<<<<<<<< * * */ __Pyx_XDECREF(__pyx_r); __pyx_t_3 = __Pyx_PyBytes_FromStringAndSize(__pyx_v_dest + 0, __pyx_v_rsize - 0); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 174, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_r = __pyx_t_3; __pyx_t_3 = 0; goto __pyx_L0; /* "borg/compress.pyx":148 * return super().compress(dest[:osize]) * * def decompress(self, idata): # <<<<<<<<<<<<<< * if not isinstance(idata, bytes): * idata = bytes(idata) # code below does not work with memoryview */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_XDECREF(__pyx_t_9); __Pyx_XDECREF(__pyx_t_14); __Pyx_XDECREF(__pyx_t_15); __Pyx_AddTraceback("borg.compress.LZ4.decompress", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_buf); __Pyx_XDECREF(__pyx_v_idata); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "borg/compress.pyx":184 * name = 'lzma' * * def __init__(self, level=6, **kwargs): # <<<<<<<<<<<<<< * super().__init__(**kwargs) * self.level = level */ /* Python wrapper */ static PyObject *__pyx_pw_4borg_8compress_4LZMA_1__init__(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static PyMethodDef __pyx_mdef_4borg_8compress_4LZMA_1__init__ = {"__init__", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_4borg_8compress_4LZMA_1__init__, METH_VARARGS|METH_KEYWORDS, 0}; static PyObject *__pyx_pw_4borg_8compress_4LZMA_1__init__(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyObject *__pyx_v_self = 0; PyObject *__pyx_v_level = 0; PyObject *__pyx_v_kwargs = 0; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__init__ (wrapper)", 0); __pyx_v_kwargs = PyDict_New(); if (unlikely(!__pyx_v_kwargs)) return NULL; __Pyx_GOTREF(__pyx_v_kwargs); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_self,&__pyx_n_s_level,0}; PyObject* values[2] = {0,0}; values[1] = ((PyObject *)((PyObject *)__pyx_int_6)); if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); CYTHON_FALLTHROUGH; case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = 
PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_self)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: if (kw_args > 0) { PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_level); if (value) { values[1] = value; kw_args--; } } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, __pyx_v_kwargs, values, pos_args, "__init__") < 0)) __PYX_ERR(0, 184, __pyx_L3_error) } } else { switch (PyTuple_GET_SIZE(__pyx_args)) { case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); CYTHON_FALLTHROUGH; case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); break; default: goto __pyx_L5_argtuple_error; } } __pyx_v_self = values[0]; __pyx_v_level = values[1]; } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("__init__", 0, 1, 2, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 184, __pyx_L3_error) __pyx_L3_error:; __Pyx_DECREF(__pyx_v_kwargs); __pyx_v_kwargs = 0; __Pyx_AddTraceback("borg.compress.LZMA.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_4borg_8compress_4LZMA___init__(__pyx_self, __pyx_v_self, __pyx_v_level, __pyx_v_kwargs); /* function exit code */ __Pyx_XDECREF(__pyx_v_kwargs); __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4borg_8compress_4LZMA___init__(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_self, PyObject *__pyx_v_level, PyObject *__pyx_v_kwargs) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; int __pyx_t_3; int __pyx_t_4; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__init__", 0); /* "borg/compress.pyx":185 * * def __init__(self, level=6, **kwargs): * super().__init__(**kwargs) # <<<<<<<<<<<<<< * self.level = level * if lzma is None: */ __pyx_t_1 = __Pyx_CyFunction_GetClassObj(__pyx_self); if (!__pyx_t_1) { PyErr_SetString(PyExc_SystemError, "super(): empty __class__ cell"); __PYX_ERR(0, 185, __pyx_L1_error) } __Pyx_INCREF(__pyx_t_1); __pyx_t_2 = PyTuple_New(2); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 185, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_1); __Pyx_INCREF(__pyx_v_self); __Pyx_GIVEREF(__pyx_v_self); PyTuple_SET_ITEM(__pyx_t_2, 1, __pyx_v_self); __pyx_t_1 = 0; __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_super, __pyx_t_2, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 185, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_init); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 185, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_2, __pyx_empty_tuple, __pyx_v_kwargs); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 185, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "borg/compress.pyx":186 * def __init__(self, level=6, **kwargs): * super().__init__(**kwargs) * self.level = level # <<<<<<<<<<<<<< * if lzma is None: * raise ValueError('No lzma support found.') */ if (__Pyx_PyObject_SetAttrStr(__pyx_v_self, __pyx_n_s_level, __pyx_v_level) < 0) __PYX_ERR(0, 186, __pyx_L1_error) /* "borg/compress.pyx":187 * super().__init__(**kwargs) * 
self.level = level * if lzma is None: # <<<<<<<<<<<<<< * raise ValueError('No lzma support found.') * */ __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_lzma); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 187, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_3 = (__pyx_t_1 == Py_None); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_4 = (__pyx_t_3 != 0); if (unlikely(__pyx_t_4)) { /* "borg/compress.pyx":188 * self.level = level * if lzma is None: * raise ValueError('No lzma support found.') # <<<<<<<<<<<<<< * * def compress(self, data): */ __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__3, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 188, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_Raise(__pyx_t_1, 0, 0, 0); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __PYX_ERR(0, 188, __pyx_L1_error) /* "borg/compress.pyx":187 * super().__init__(**kwargs) * self.level = level * if lzma is None: # <<<<<<<<<<<<<< * raise ValueError('No lzma support found.') * */ } /* "borg/compress.pyx":184 * name = 'lzma' * * def __init__(self, level=6, **kwargs): # <<<<<<<<<<<<<< * super().__init__(**kwargs) * self.level = level */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_AddTraceback("borg.compress.LZMA.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "borg/compress.pyx":190 * raise ValueError('No lzma support found.') * * def compress(self, data): # <<<<<<<<<<<<<< * # we do not need integrity checks in lzma, we do that already * data = lzma.compress(data, preset=self.level, check=lzma.CHECK_NONE) */ /* Python wrapper */ static PyObject *__pyx_pw_4borg_8compress_4LZMA_3compress(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static PyMethodDef __pyx_mdef_4borg_8compress_4LZMA_3compress = {"compress", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_4borg_8compress_4LZMA_3compress, METH_VARARGS|METH_KEYWORDS, 0}; static PyObject *__pyx_pw_4borg_8compress_4LZMA_3compress(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyObject *__pyx_v_self = 0; PyObject *__pyx_v_data = 0; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("compress (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_self,&__pyx_n_s_data,0}; PyObject* values[2] = {0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); CYTHON_FALLTHROUGH; case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_self)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_data)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("compress", 1, 2, 2, 1); __PYX_ERR(0, 190, __pyx_L3_error) } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "compress") < 0)) __PYX_ERR(0, 190, __pyx_L3_error) } } else if (PyTuple_GET_SIZE(__pyx_args) != 2) { goto 
__pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); } __pyx_v_self = values[0]; __pyx_v_data = values[1]; } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("compress", 1, 2, 2, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 190, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("borg.compress.LZMA.compress", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_4borg_8compress_4LZMA_2compress(__pyx_self, __pyx_v_self, __pyx_v_data); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4borg_8compress_4LZMA_2compress(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_self, PyObject *__pyx_v_data) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("compress", 0); __Pyx_INCREF(__pyx_v_data); /* "borg/compress.pyx":192 * def compress(self, data): * # we do not need integrity checks in lzma, we do that already * data = lzma.compress(data, preset=self.level, check=lzma.CHECK_NONE) # <<<<<<<<<<<<<< * return super().compress(data) * */ __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_lzma); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 192, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_compress); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 192, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 192, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_INCREF(__pyx_v_data); __Pyx_GIVEREF(__pyx_v_data); PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_v_data); __pyx_t_3 = __Pyx_PyDict_NewPresized(2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 192, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_level); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 192, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); if (PyDict_SetItem(__pyx_t_3, __pyx_n_s_preset, __pyx_t_4) < 0) __PYX_ERR(0, 192, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_n_s_lzma); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 192, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_n_s_CHECK_NONE); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 192, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (PyDict_SetItem(__pyx_t_3, __pyx_n_s_check, __pyx_t_5) < 0) __PYX_ERR(0, 192, __pyx_L1_error) __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_5 = __Pyx_PyObject_Call(__pyx_t_2, __pyx_t_1, __pyx_t_3); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 192, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF_SET(__pyx_v_data, __pyx_t_5); __pyx_t_5 = 0; /* "borg/compress.pyx":193 * # we do not need integrity checks in lzma, we do that already * data = lzma.compress(data, preset=self.level, check=lzma.CHECK_NONE) * return super().compress(data) # <<<<<<<<<<<<<< * * def decompress(self, data): */ __Pyx_XDECREF(__pyx_r); __pyx_t_3 = __Pyx_CyFunction_GetClassObj(__pyx_self); if 
(!__pyx_t_3) { PyErr_SetString(PyExc_SystemError, "super(): empty __class__ cell"); __PYX_ERR(0, 193, __pyx_L1_error) } __Pyx_INCREF(__pyx_t_3); __pyx_t_1 = PyTuple_New(2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 193, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_t_3); __Pyx_INCREF(__pyx_v_self); __Pyx_GIVEREF(__pyx_v_self); PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_v_self); __pyx_t_3 = 0; __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_super, __pyx_t_1, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 193, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s_compress); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 193, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = NULL; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_1))) { __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_1); if (likely(__pyx_t_3)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_1); __Pyx_INCREF(__pyx_t_3); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_1, function); } } __pyx_t_5 = (__pyx_t_3) ? __Pyx_PyObject_Call2Args(__pyx_t_1, __pyx_t_3, __pyx_v_data) : __Pyx_PyObject_CallOneArg(__pyx_t_1, __pyx_v_data); __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 193, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_r = __pyx_t_5; __pyx_t_5 = 0; goto __pyx_L0; /* "borg/compress.pyx":190 * raise ValueError('No lzma support found.') * * def compress(self, data): # <<<<<<<<<<<<<< * # we do not need integrity checks in lzma, we do that already * data = lzma.compress(data, preset=self.level, check=lzma.CHECK_NONE) */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("borg.compress.LZMA.compress", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_data); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "borg/compress.pyx":195 * return super().compress(data) * * def decompress(self, data): # <<<<<<<<<<<<<< * data = super().decompress(data) * try: */ /* Python wrapper */ static PyObject *__pyx_pw_4borg_8compress_4LZMA_5decompress(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static PyMethodDef __pyx_mdef_4borg_8compress_4LZMA_5decompress = {"decompress", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_4borg_8compress_4LZMA_5decompress, METH_VARARGS|METH_KEYWORDS, 0}; static PyObject *__pyx_pw_4borg_8compress_4LZMA_5decompress(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyObject *__pyx_v_self = 0; PyObject *__pyx_v_data = 0; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("decompress (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_self,&__pyx_n_s_data,0}; PyObject* values[2] = {0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); CYTHON_FALLTHROUGH; case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = 
__Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_self)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_data)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("decompress", 1, 2, 2, 1); __PYX_ERR(0, 195, __pyx_L3_error) } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "decompress") < 0)) __PYX_ERR(0, 195, __pyx_L3_error) } } else if (PyTuple_GET_SIZE(__pyx_args) != 2) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); } __pyx_v_self = values[0]; __pyx_v_data = values[1]; } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("decompress", 1, 2, 2, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 195, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("borg.compress.LZMA.decompress", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_4borg_8compress_4LZMA_4decompress(__pyx_self, __pyx_v_self, __pyx_v_data); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4borg_8compress_4LZMA_4decompress(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_self, PyObject *__pyx_v_data) { PyObject *__pyx_v_e = NULL; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; PyObject *__pyx_t_6 = NULL; PyObject *__pyx_t_7 = NULL; PyObject *__pyx_t_8 = NULL; int __pyx_t_9; PyObject *__pyx_t_10 = NULL; PyObject *__pyx_t_11 = NULL; int __pyx_t_12; char const *__pyx_t_13; PyObject *__pyx_t_14 = NULL; PyObject *__pyx_t_15 = NULL; PyObject *__pyx_t_16 = NULL; PyObject *__pyx_t_17 = NULL; PyObject *__pyx_t_18 = NULL; PyObject *__pyx_t_19 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("decompress", 0); __Pyx_INCREF(__pyx_v_data); /* "borg/compress.pyx":196 * * def decompress(self, data): * data = super().decompress(data) # <<<<<<<<<<<<<< * try: * return lzma.decompress(data) */ __pyx_t_2 = __Pyx_CyFunction_GetClassObj(__pyx_self); if (!__pyx_t_2) { PyErr_SetString(PyExc_SystemError, "super(): empty __class__ cell"); __PYX_ERR(0, 196, __pyx_L1_error) } __Pyx_INCREF(__pyx_t_2); __pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 196, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_2); __Pyx_INCREF(__pyx_v_self); __Pyx_GIVEREF(__pyx_v_self); PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_v_self); __pyx_t_2 = 0; __pyx_t_2 = __Pyx_PyObject_Call(__pyx_builtin_super, __pyx_t_3, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 196, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_decompress); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 196, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = NULL; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_3))) { __pyx_t_2 = PyMethod_GET_SELF(__pyx_t_3); if (likely(__pyx_t_2)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_3); __Pyx_INCREF(__pyx_t_2); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_3, function); } } __pyx_t_1 = (__pyx_t_2) ? 
__Pyx_PyObject_Call2Args(__pyx_t_3, __pyx_t_2, __pyx_v_data) : __Pyx_PyObject_CallOneArg(__pyx_t_3, __pyx_v_data); __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 196, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF_SET(__pyx_v_data, __pyx_t_1); __pyx_t_1 = 0; /* "borg/compress.pyx":197 * def decompress(self, data): * data = super().decompress(data) * try: # <<<<<<<<<<<<<< * return lzma.decompress(data) * except lzma.LZMAError as e: */ { __Pyx_PyThreadState_declare __Pyx_PyThreadState_assign __Pyx_ExceptionSave(&__pyx_t_4, &__pyx_t_5, &__pyx_t_6); __Pyx_XGOTREF(__pyx_t_4); __Pyx_XGOTREF(__pyx_t_5); __Pyx_XGOTREF(__pyx_t_6); /*try:*/ { /* "borg/compress.pyx":198 * data = super().decompress(data) * try: * return lzma.decompress(data) # <<<<<<<<<<<<<< * except lzma.LZMAError as e: * raise DecompressionError(str(e)) from None */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_n_s_lzma); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 198, __pyx_L3_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s_decompress); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 198, __pyx_L3_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = NULL; if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_2))) { __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_2); if (likely(__pyx_t_3)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2); __Pyx_INCREF(__pyx_t_3); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_2, function); } } __pyx_t_1 = (__pyx_t_3) ? __Pyx_PyObject_Call2Args(__pyx_t_2, __pyx_t_3, __pyx_v_data) : __Pyx_PyObject_CallOneArg(__pyx_t_2, __pyx_v_data); __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 198, __pyx_L3_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L7_try_return; /* "borg/compress.pyx":197 * def decompress(self, data): * data = super().decompress(data) * try: # <<<<<<<<<<<<<< * return lzma.decompress(data) * except lzma.LZMAError as e: */ } __pyx_L3_error:; __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; /* "borg/compress.pyx":199 * try: * return lzma.decompress(data) * except lzma.LZMAError as e: # <<<<<<<<<<<<<< * raise DecompressionError(str(e)) from None * */ __Pyx_ErrFetch(&__pyx_t_1, &__pyx_t_2, &__pyx_t_3); __Pyx_GetModuleGlobalName(__pyx_t_7, __pyx_n_s_lzma); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 199, __pyx_L5_except_error) __Pyx_GOTREF(__pyx_t_7); __pyx_t_8 = __Pyx_PyObject_GetAttrStr(__pyx_t_7, __pyx_n_s_LZMAError); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 199, __pyx_L5_except_error) __Pyx_GOTREF(__pyx_t_8); __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; __pyx_t_9 = __Pyx_PyErr_GivenExceptionMatches(__pyx_t_1, __pyx_t_8); __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; __Pyx_ErrRestore(__pyx_t_1, __pyx_t_2, __pyx_t_3); __pyx_t_1 = 0; __pyx_t_2 = 0; __pyx_t_3 = 0; if (__pyx_t_9) { __Pyx_AddTraceback("borg.compress.LZMA.decompress", __pyx_clineno, __pyx_lineno, __pyx_filename); if (__Pyx_GetException(&__pyx_t_3, &__pyx_t_2, &__pyx_t_1) < 0) __PYX_ERR(0, 199, __pyx_L5_except_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_GOTREF(__pyx_t_2); __Pyx_GOTREF(__pyx_t_1); __Pyx_INCREF(__pyx_t_2); __pyx_v_e = __pyx_t_2; /*try:*/ { /* "borg/compress.pyx":200 * return lzma.decompress(data) * except lzma.LZMAError as e: * raise DecompressionError(str(e)) from None # <<<<<<<<<<<<<< * * */ 
__Pyx_GetModuleGlobalName(__pyx_t_7, __pyx_n_s_DecompressionError); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 200, __pyx_L14_error) __Pyx_GOTREF(__pyx_t_7); __pyx_t_10 = __Pyx_PyObject_CallOneArg(((PyObject *)(&PyUnicode_Type)), __pyx_v_e); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 200, __pyx_L14_error) __Pyx_GOTREF(__pyx_t_10); __pyx_t_11 = NULL; if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_7))) { __pyx_t_11 = PyMethod_GET_SELF(__pyx_t_7); if (likely(__pyx_t_11)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_7); __Pyx_INCREF(__pyx_t_11); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_7, function); } } __pyx_t_8 = (__pyx_t_11) ? __Pyx_PyObject_Call2Args(__pyx_t_7, __pyx_t_11, __pyx_t_10) : __Pyx_PyObject_CallOneArg(__pyx_t_7, __pyx_t_10); __Pyx_XDECREF(__pyx_t_11); __pyx_t_11 = 0; __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 200, __pyx_L14_error) __Pyx_GOTREF(__pyx_t_8); __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_Raise(__pyx_t_8, 0, 0, Py_None); __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; __PYX_ERR(0, 200, __pyx_L14_error) } /* "borg/compress.pyx":199 * try: * return lzma.decompress(data) * except lzma.LZMAError as e: # <<<<<<<<<<<<<< * raise DecompressionError(str(e)) from None * */ /*finally:*/ { __pyx_L14_error:; /*exception exit:*/{ __Pyx_PyThreadState_declare __Pyx_PyThreadState_assign __pyx_t_14 = 0; __pyx_t_15 = 0; __pyx_t_16 = 0; __pyx_t_17 = 0; __pyx_t_18 = 0; __pyx_t_19 = 0; __Pyx_XDECREF(__pyx_t_10); __pyx_t_10 = 0; __Pyx_XDECREF(__pyx_t_11); __pyx_t_11 = 0; __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0; if (PY_MAJOR_VERSION >= 3) __Pyx_ExceptionSwap(&__pyx_t_17, &__pyx_t_18, &__pyx_t_19); if ((PY_MAJOR_VERSION < 3) || unlikely(__Pyx_GetException(&__pyx_t_14, &__pyx_t_15, &__pyx_t_16) < 0)) __Pyx_ErrFetch(&__pyx_t_14, &__pyx_t_15, &__pyx_t_16); __Pyx_XGOTREF(__pyx_t_14); __Pyx_XGOTREF(__pyx_t_15); __Pyx_XGOTREF(__pyx_t_16); __Pyx_XGOTREF(__pyx_t_17); __Pyx_XGOTREF(__pyx_t_18); __Pyx_XGOTREF(__pyx_t_19); __pyx_t_9 = __pyx_lineno; __pyx_t_12 = __pyx_clineno; __pyx_t_13 = __pyx_filename; { __Pyx_DECREF(__pyx_v_e); __pyx_v_e = NULL; } if (PY_MAJOR_VERSION >= 3) { __Pyx_XGIVEREF(__pyx_t_17); __Pyx_XGIVEREF(__pyx_t_18); __Pyx_XGIVEREF(__pyx_t_19); __Pyx_ExceptionReset(__pyx_t_17, __pyx_t_18, __pyx_t_19); } __Pyx_XGIVEREF(__pyx_t_14); __Pyx_XGIVEREF(__pyx_t_15); __Pyx_XGIVEREF(__pyx_t_16); __Pyx_ErrRestore(__pyx_t_14, __pyx_t_15, __pyx_t_16); __pyx_t_14 = 0; __pyx_t_15 = 0; __pyx_t_16 = 0; __pyx_t_17 = 0; __pyx_t_18 = 0; __pyx_t_19 = 0; __pyx_lineno = __pyx_t_9; __pyx_clineno = __pyx_t_12; __pyx_filename = __pyx_t_13; goto __pyx_L5_except_error; } } } goto __pyx_L5_except_error; __pyx_L5_except_error:; /* "borg/compress.pyx":197 * def decompress(self, data): * data = super().decompress(data) * try: # <<<<<<<<<<<<<< * return lzma.decompress(data) * except lzma.LZMAError as e: */ __Pyx_XGIVEREF(__pyx_t_4); __Pyx_XGIVEREF(__pyx_t_5); __Pyx_XGIVEREF(__pyx_t_6); __Pyx_ExceptionReset(__pyx_t_4, __pyx_t_5, __pyx_t_6); goto __pyx_L1_error; __pyx_L7_try_return:; __Pyx_XGIVEREF(__pyx_t_4); __Pyx_XGIVEREF(__pyx_t_5); __Pyx_XGIVEREF(__pyx_t_6); __Pyx_ExceptionReset(__pyx_t_4, __pyx_t_5, __pyx_t_6); goto __pyx_L0; } /* "borg/compress.pyx":195 * return super().compress(data) * * def decompress(self, data): # <<<<<<<<<<<<<< * data = super().decompress(data) * try: */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_7); 
__Pyx_XDECREF(__pyx_t_8); __Pyx_XDECREF(__pyx_t_10); __Pyx_XDECREF(__pyx_t_11); __Pyx_AddTraceback("borg.compress.LZMA.decompress", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_e); __Pyx_XDECREF(__pyx_v_data); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "borg/compress.pyx":211 * name = 'zstd' * * def __init__(self, level=3, **kwargs): # <<<<<<<<<<<<<< * super().__init__(**kwargs) * self.level = level */ /* Python wrapper */ static PyObject *__pyx_pw_4borg_8compress_4ZSTD_1__init__(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static PyMethodDef __pyx_mdef_4borg_8compress_4ZSTD_1__init__ = {"__init__", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_4borg_8compress_4ZSTD_1__init__, METH_VARARGS|METH_KEYWORDS, 0}; static PyObject *__pyx_pw_4borg_8compress_4ZSTD_1__init__(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyObject *__pyx_v_self = 0; PyObject *__pyx_v_level = 0; PyObject *__pyx_v_kwargs = 0; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__init__ (wrapper)", 0); __pyx_v_kwargs = PyDict_New(); if (unlikely(!__pyx_v_kwargs)) return NULL; __Pyx_GOTREF(__pyx_v_kwargs); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_self,&__pyx_n_s_level,0}; PyObject* values[2] = {0,0}; values[1] = ((PyObject *)((PyObject *)__pyx_int_3)); if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); CYTHON_FALLTHROUGH; case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_self)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: if (kw_args > 0) { PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_level); if (value) { values[1] = value; kw_args--; } } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, __pyx_v_kwargs, values, pos_args, "__init__") < 0)) __PYX_ERR(0, 211, __pyx_L3_error) } } else { switch (PyTuple_GET_SIZE(__pyx_args)) { case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); CYTHON_FALLTHROUGH; case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); break; default: goto __pyx_L5_argtuple_error; } } __pyx_v_self = values[0]; __pyx_v_level = values[1]; } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("__init__", 0, 1, 2, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 211, __pyx_L3_error) __pyx_L3_error:; __Pyx_DECREF(__pyx_v_kwargs); __pyx_v_kwargs = 0; __Pyx_AddTraceback("borg.compress.ZSTD.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_4borg_8compress_4ZSTD___init__(__pyx_self, __pyx_v_self, __pyx_v_level, __pyx_v_kwargs); /* function exit code */ __Pyx_XDECREF(__pyx_v_kwargs); __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4borg_8compress_4ZSTD___init__(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_self, PyObject *__pyx_v_level, PyObject *__pyx_v_kwargs) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject 
*__pyx_t_2 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__init__", 0); /* "borg/compress.pyx":212 * * def __init__(self, level=3, **kwargs): * super().__init__(**kwargs) # <<<<<<<<<<<<<< * self.level = level * */ __pyx_t_1 = __Pyx_CyFunction_GetClassObj(__pyx_self); if (!__pyx_t_1) { PyErr_SetString(PyExc_SystemError, "super(): empty __class__ cell"); __PYX_ERR(0, 212, __pyx_L1_error) } __Pyx_INCREF(__pyx_t_1); __pyx_t_2 = PyTuple_New(2); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 212, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_1); __Pyx_INCREF(__pyx_v_self); __Pyx_GIVEREF(__pyx_v_self); PyTuple_SET_ITEM(__pyx_t_2, 1, __pyx_v_self); __pyx_t_1 = 0; __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_super, __pyx_t_2, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 212, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_init); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 212, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_2, __pyx_empty_tuple, __pyx_v_kwargs); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 212, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "borg/compress.pyx":213 * def __init__(self, level=3, **kwargs): * super().__init__(**kwargs) * self.level = level # <<<<<<<<<<<<<< * * def compress(self, idata): */ if (__Pyx_PyObject_SetAttrStr(__pyx_v_self, __pyx_n_s_level, __pyx_v_level) < 0) __PYX_ERR(0, 213, __pyx_L1_error) /* "borg/compress.pyx":211 * name = 'zstd' * * def __init__(self, level=3, **kwargs): # <<<<<<<<<<<<<< * super().__init__(**kwargs) * self.level = level */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_AddTraceback("borg.compress.ZSTD.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "borg/compress.pyx":215 * self.level = level * * def compress(self, idata): # <<<<<<<<<<<<<< * if not isinstance(idata, bytes): * idata = bytes(idata) # code below does not work with memoryview */ /* Python wrapper */ static PyObject *__pyx_pw_4borg_8compress_4ZSTD_3compress(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static PyMethodDef __pyx_mdef_4borg_8compress_4ZSTD_3compress = {"compress", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_4borg_8compress_4ZSTD_3compress, METH_VARARGS|METH_KEYWORDS, 0}; static PyObject *__pyx_pw_4borg_8compress_4ZSTD_3compress(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyObject *__pyx_v_self = 0; PyObject *__pyx_v_idata = 0; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("compress (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_self,&__pyx_n_s_idata,0}; PyObject* values[2] = {0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); CYTHON_FALLTHROUGH; case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto 
__pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_self)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_idata)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("compress", 1, 2, 2, 1); __PYX_ERR(0, 215, __pyx_L3_error) } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "compress") < 0)) __PYX_ERR(0, 215, __pyx_L3_error) } } else if (PyTuple_GET_SIZE(__pyx_args) != 2) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); } __pyx_v_self = values[0]; __pyx_v_idata = values[1]; } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("compress", 1, 2, 2, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 215, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("borg.compress.ZSTD.compress", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_4borg_8compress_4ZSTD_2compress(__pyx_self, __pyx_v_self, __pyx_v_idata); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4borg_8compress_4ZSTD_2compress(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_self, PyObject *__pyx_v_idata) { int __pyx_v_isize; size_t __pyx_v_osize; char *__pyx_v_source; char *__pyx_v_dest; int __pyx_v_level; PyObject *__pyx_v_buf = NULL; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; Py_ssize_t __pyx_t_4; char *__pyx_t_5; int __pyx_t_6; PyObject *__pyx_t_7 = NULL; PyObject *__pyx_t_8 = NULL; PyObject *__pyx_t_9 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("compress", 0); __Pyx_INCREF(__pyx_v_idata); /* "borg/compress.pyx":216 * * def compress(self, idata): * if not isinstance(idata, bytes): # <<<<<<<<<<<<<< * idata = bytes(idata) # code below does not work with memoryview * cdef int isize = len(idata) */ __pyx_t_1 = PyBytes_Check(__pyx_v_idata); __pyx_t_2 = ((!(__pyx_t_1 != 0)) != 0); if (__pyx_t_2) { /* "borg/compress.pyx":217 * def compress(self, idata): * if not isinstance(idata, bytes): * idata = bytes(idata) # code below does not work with memoryview # <<<<<<<<<<<<<< * cdef int isize = len(idata) * cdef size_t osize */ __pyx_t_3 = __Pyx_PyObject_CallOneArg(((PyObject *)(&PyBytes_Type)), __pyx_v_idata); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 217, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF_SET(__pyx_v_idata, __pyx_t_3); __pyx_t_3 = 0; /* "borg/compress.pyx":216 * * def compress(self, idata): * if not isinstance(idata, bytes): # <<<<<<<<<<<<<< * idata = bytes(idata) # code below does not work with memoryview * cdef int isize = len(idata) */ } /* "borg/compress.pyx":218 * if not isinstance(idata, bytes): * idata = bytes(idata) # code below does not work with memoryview * cdef int isize = len(idata) # <<<<<<<<<<<<<< * cdef size_t osize * cdef char *source = idata */ __pyx_t_4 = PyObject_Length(__pyx_v_idata); if (unlikely(__pyx_t_4 == ((Py_ssize_t)-1))) __PYX_ERR(0, 218, __pyx_L1_error) __pyx_v_isize = __pyx_t_4; /* "borg/compress.pyx":220 * cdef int isize = len(idata) * cdef size_t osize * cdef char *source = idata # <<<<<<<<<<<<<< * cdef char 
*dest * cdef int level = self.level */ __pyx_t_5 = __Pyx_PyObject_AsWritableString(__pyx_v_idata); if (unlikely((!__pyx_t_5) && PyErr_Occurred())) __PYX_ERR(0, 220, __pyx_L1_error) __pyx_v_source = __pyx_t_5; /* "borg/compress.pyx":222 * cdef char *source = idata * cdef char *dest * cdef int level = self.level # <<<<<<<<<<<<<< * osize = ZSTD_compressBound(isize) * buf = buffer.get(osize) */ __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_level); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 222, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_6 = __Pyx_PyInt_As_int(__pyx_t_3); if (unlikely((__pyx_t_6 == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 222, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_v_level = __pyx_t_6; /* "borg/compress.pyx":223 * cdef char *dest * cdef int level = self.level * osize = ZSTD_compressBound(isize) # <<<<<<<<<<<<<< * buf = buffer.get(osize) * dest = buf */ __pyx_v_osize = ZSTD_compressBound(__pyx_v_isize); /* "borg/compress.pyx":224 * cdef int level = self.level * osize = ZSTD_compressBound(isize) * buf = buffer.get(osize) # <<<<<<<<<<<<<< * dest = buf * with nogil: */ __Pyx_GetModuleGlobalName(__pyx_t_7, __pyx_n_s_buffer); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 224, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __pyx_t_8 = __Pyx_PyObject_GetAttrStr(__pyx_t_7, __pyx_n_s_get); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 224, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_8); __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; __pyx_t_7 = __Pyx_PyInt_FromSize_t(__pyx_v_osize); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 224, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __pyx_t_9 = NULL; if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_8))) { __pyx_t_9 = PyMethod_GET_SELF(__pyx_t_8); if (likely(__pyx_t_9)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_8); __Pyx_INCREF(__pyx_t_9); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_8, function); } } __pyx_t_3 = (__pyx_t_9) ? 
__Pyx_PyObject_Call2Args(__pyx_t_8, __pyx_t_9, __pyx_t_7) : __Pyx_PyObject_CallOneArg(__pyx_t_8, __pyx_t_7); __Pyx_XDECREF(__pyx_t_9); __pyx_t_9 = 0; __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 224, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; __pyx_v_buf = __pyx_t_3; __pyx_t_3 = 0; /* "borg/compress.pyx":225 * osize = ZSTD_compressBound(isize) * buf = buffer.get(osize) * dest = buf # <<<<<<<<<<<<<< * with nogil: * osize = ZSTD_compress(dest, osize, source, isize, level) */ __pyx_t_5 = __Pyx_PyObject_AsWritableString(__pyx_v_buf); if (unlikely((!__pyx_t_5) && PyErr_Occurred())) __PYX_ERR(0, 225, __pyx_L1_error) __pyx_v_dest = ((char *)__pyx_t_5); /* "borg/compress.pyx":226 * buf = buffer.get(osize) * dest = buf * with nogil: # <<<<<<<<<<<<<< * osize = ZSTD_compress(dest, osize, source, isize, level) * if ZSTD_isError(osize): */ { #ifdef WITH_THREAD PyThreadState *_save; Py_UNBLOCK_THREADS __Pyx_FastGIL_Remember(); #endif /*try:*/ { /* "borg/compress.pyx":227 * dest = buf * with nogil: * osize = ZSTD_compress(dest, osize, source, isize, level) # <<<<<<<<<<<<<< * if ZSTD_isError(osize): * raise Exception('zstd compress failed: %s' % ZSTD_getErrorName(osize)) */ __pyx_v_osize = ZSTD_compress(__pyx_v_dest, __pyx_v_osize, __pyx_v_source, __pyx_v_isize, __pyx_v_level); } /* "borg/compress.pyx":226 * buf = buffer.get(osize) * dest = buf * with nogil: # <<<<<<<<<<<<<< * osize = ZSTD_compress(dest, osize, source, isize, level) * if ZSTD_isError(osize): */ /*finally:*/ { /*normal exit:*/{ #ifdef WITH_THREAD __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS #endif goto __pyx_L6; } __pyx_L6:; } } /* "borg/compress.pyx":228 * with nogil: * osize = ZSTD_compress(dest, osize, source, isize, level) * if ZSTD_isError(osize): # <<<<<<<<<<<<<< * raise Exception('zstd compress failed: %s' % ZSTD_getErrorName(osize)) * return super().compress(dest[:osize]) */ __pyx_t_2 = (ZSTD_isError(__pyx_v_osize) != 0); if (unlikely(__pyx_t_2)) { /* "borg/compress.pyx":229 * osize = ZSTD_compress(dest, osize, source, isize, level) * if ZSTD_isError(osize): * raise Exception('zstd compress failed: %s' % ZSTD_getErrorName(osize)) # <<<<<<<<<<<<<< * return super().compress(dest[:osize]) * */ __pyx_t_3 = __Pyx_PyBytes_FromString(ZSTD_getErrorName(__pyx_v_osize)); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 229, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_8 = PyUnicode_Format(__pyx_kp_u_zstd_compress_failed_s, __pyx_t_3); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 229, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_8); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = __Pyx_PyObject_CallOneArg(((PyObject *)(&((PyTypeObject*)PyExc_Exception)[0])), __pyx_t_8); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 229, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(0, 229, __pyx_L1_error) /* "borg/compress.pyx":228 * with nogil: * osize = ZSTD_compress(dest, osize, source, isize, level) * if ZSTD_isError(osize): # <<<<<<<<<<<<<< * raise Exception('zstd compress failed: %s' % ZSTD_getErrorName(osize)) * return super().compress(dest[:osize]) */ } /* "borg/compress.pyx":230 * if ZSTD_isError(osize): * raise Exception('zstd compress failed: %s' % ZSTD_getErrorName(osize)) * return super().compress(dest[:osize]) # <<<<<<<<<<<<<< * * def decompress(self, idata): */ __Pyx_XDECREF(__pyx_r); __pyx_t_8 = __Pyx_CyFunction_GetClassObj(__pyx_self); if (!__pyx_t_8) { 
PyErr_SetString(PyExc_SystemError, "super(): empty __class__ cell"); __PYX_ERR(0, 230, __pyx_L1_error) } __Pyx_INCREF(__pyx_t_8); __pyx_t_7 = PyTuple_New(2); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 230, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_GIVEREF(__pyx_t_8); PyTuple_SET_ITEM(__pyx_t_7, 0, __pyx_t_8); __Pyx_INCREF(__pyx_v_self); __Pyx_GIVEREF(__pyx_v_self); PyTuple_SET_ITEM(__pyx_t_7, 1, __pyx_v_self); __pyx_t_8 = 0; __pyx_t_8 = __Pyx_PyObject_Call(__pyx_builtin_super, __pyx_t_7, NULL); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 230, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_8); __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_8, __pyx_n_s_compress); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 230, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; __pyx_t_8 = __Pyx_PyBytes_FromStringAndSize(__pyx_v_dest + 0, __pyx_v_osize - 0); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 230, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_8); __pyx_t_9 = NULL; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_7))) { __pyx_t_9 = PyMethod_GET_SELF(__pyx_t_7); if (likely(__pyx_t_9)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_7); __Pyx_INCREF(__pyx_t_9); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_7, function); } } __pyx_t_3 = (__pyx_t_9) ? __Pyx_PyObject_Call2Args(__pyx_t_7, __pyx_t_9, __pyx_t_8) : __Pyx_PyObject_CallOneArg(__pyx_t_7, __pyx_t_8); __Pyx_XDECREF(__pyx_t_9); __pyx_t_9 = 0; __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 230, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; __pyx_r = __pyx_t_3; __pyx_t_3 = 0; goto __pyx_L0; /* "borg/compress.pyx":215 * self.level = level * * def compress(self, idata): # <<<<<<<<<<<<<< * if not isinstance(idata, bytes): * idata = bytes(idata) # code below does not work with memoryview */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_7); __Pyx_XDECREF(__pyx_t_8); __Pyx_XDECREF(__pyx_t_9); __Pyx_AddTraceback("borg.compress.ZSTD.compress", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_buf); __Pyx_XDECREF(__pyx_v_idata); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "borg/compress.pyx":232 * return super().compress(dest[:osize]) * * def decompress(self, idata): # <<<<<<<<<<<<<< * if not isinstance(idata, bytes): * idata = bytes(idata) # code below does not work with memoryview */ /* Python wrapper */ static PyObject *__pyx_pw_4borg_8compress_4ZSTD_5decompress(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static PyMethodDef __pyx_mdef_4borg_8compress_4ZSTD_5decompress = {"decompress", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_4borg_8compress_4ZSTD_5decompress, METH_VARARGS|METH_KEYWORDS, 0}; static PyObject *__pyx_pw_4borg_8compress_4ZSTD_5decompress(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyObject *__pyx_v_self = 0; PyObject *__pyx_v_idata = 0; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("decompress (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_self,&__pyx_n_s_idata,0}; PyObject* values[2] = {0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); CYTHON_FALLTHROUGH; case 1: values[0] 
= PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_self)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_idata)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("decompress", 1, 2, 2, 1); __PYX_ERR(0, 232, __pyx_L3_error) } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "decompress") < 0)) __PYX_ERR(0, 232, __pyx_L3_error) } } else if (PyTuple_GET_SIZE(__pyx_args) != 2) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); } __pyx_v_self = values[0]; __pyx_v_idata = values[1]; } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("decompress", 1, 2, 2, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 232, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("borg.compress.ZSTD.decompress", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_4borg_8compress_4ZSTD_4decompress(__pyx_self, __pyx_v_self, __pyx_v_idata); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4borg_8compress_4ZSTD_4decompress(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_self, PyObject *__pyx_v_idata) { int __pyx_v_isize; unsigned PY_LONG_LONG __pyx_v_osize; unsigned PY_LONG_LONG __pyx_v_rsize; char *__pyx_v_source; char *__pyx_v_dest; PyObject *__pyx_v_buf = NULL; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; Py_ssize_t __pyx_t_6; char *__pyx_t_7; PyObject *__pyx_t_8 = NULL; PyObject *__pyx_t_9 = NULL; PyObject *__pyx_t_10 = NULL; PyObject *__pyx_t_11 = NULL; int __pyx_t_12; PyObject *__pyx_t_13 = NULL; PyObject *__pyx_t_14 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("decompress", 0); __Pyx_INCREF(__pyx_v_idata); /* "borg/compress.pyx":233 * * def decompress(self, idata): * if not isinstance(idata, bytes): # <<<<<<<<<<<<<< * idata = bytes(idata) # code below does not work with memoryview * idata = super().decompress(idata) */ __pyx_t_1 = PyBytes_Check(__pyx_v_idata); __pyx_t_2 = ((!(__pyx_t_1 != 0)) != 0); if (__pyx_t_2) { /* "borg/compress.pyx":234 * def decompress(self, idata): * if not isinstance(idata, bytes): * idata = bytes(idata) # code below does not work with memoryview # <<<<<<<<<<<<<< * idata = super().decompress(idata) * cdef int isize = len(idata) */ __pyx_t_3 = __Pyx_PyObject_CallOneArg(((PyObject *)(&PyBytes_Type)), __pyx_v_idata); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 234, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF_SET(__pyx_v_idata, __pyx_t_3); __pyx_t_3 = 0; /* "borg/compress.pyx":233 * * def decompress(self, idata): * if not isinstance(idata, bytes): # <<<<<<<<<<<<<< * idata = bytes(idata) # code below does not work with memoryview * idata = super().decompress(idata) */ } /* "borg/compress.pyx":235 * if not isinstance(idata, bytes): * idata = bytes(idata) # code below does not work with memoryview * idata = super().decompress(idata) # <<<<<<<<<<<<<< * cdef int isize = 
len(idata) * cdef unsigned long long osize */ __pyx_t_4 = __Pyx_CyFunction_GetClassObj(__pyx_self); if (!__pyx_t_4) { PyErr_SetString(PyExc_SystemError, "super(): empty __class__ cell"); __PYX_ERR(0, 235, __pyx_L1_error) } __Pyx_INCREF(__pyx_t_4); __pyx_t_5 = PyTuple_New(2); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 235, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_4); __Pyx_INCREF(__pyx_v_self); __Pyx_GIVEREF(__pyx_v_self); PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_v_self); __pyx_t_4 = 0; __pyx_t_4 = __Pyx_PyObject_Call(__pyx_builtin_super, __pyx_t_5, NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 235, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_n_s_decompress); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 235, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = NULL; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_5))) { __pyx_t_4 = PyMethod_GET_SELF(__pyx_t_5); if (likely(__pyx_t_4)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_5); __Pyx_INCREF(__pyx_t_4); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_5, function); } } __pyx_t_3 = (__pyx_t_4) ? __Pyx_PyObject_Call2Args(__pyx_t_5, __pyx_t_4, __pyx_v_idata) : __Pyx_PyObject_CallOneArg(__pyx_t_5, __pyx_v_idata); __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 235, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF_SET(__pyx_v_idata, __pyx_t_3); __pyx_t_3 = 0; /* "borg/compress.pyx":236 * idata = bytes(idata) # code below does not work with memoryview * idata = super().decompress(idata) * cdef int isize = len(idata) # <<<<<<<<<<<<<< * cdef unsigned long long osize * cdef unsigned long long rsize */ __pyx_t_6 = PyObject_Length(__pyx_v_idata); if (unlikely(__pyx_t_6 == ((Py_ssize_t)-1))) __PYX_ERR(0, 236, __pyx_L1_error) __pyx_v_isize = __pyx_t_6; /* "borg/compress.pyx":239 * cdef unsigned long long osize * cdef unsigned long long rsize * cdef char *source = idata # <<<<<<<<<<<<<< * cdef char *dest * osize = ZSTD_getFrameContentSize(source, isize) */ __pyx_t_7 = __Pyx_PyObject_AsWritableString(__pyx_v_idata); if (unlikely((!__pyx_t_7) && PyErr_Occurred())) __PYX_ERR(0, 239, __pyx_L1_error) __pyx_v_source = __pyx_t_7; /* "borg/compress.pyx":241 * cdef char *source = idata * cdef char *dest * osize = ZSTD_getFrameContentSize(source, isize) # <<<<<<<<<<<<<< * if osize == ZSTD_CONTENTSIZE_ERROR: * raise DecompressionError('zstd get size failed: data was not compressed by zstd') */ __pyx_v_osize = ZSTD_getFrameContentSize(__pyx_v_source, __pyx_v_isize); /* "borg/compress.pyx":242 * cdef char *dest * osize = ZSTD_getFrameContentSize(source, isize) * if osize == ZSTD_CONTENTSIZE_ERROR: # <<<<<<<<<<<<<< * raise DecompressionError('zstd get size failed: data was not compressed by zstd') * if osize == ZSTD_CONTENTSIZE_UNKNOWN: */ __pyx_t_2 = ((__pyx_v_osize == ZSTD_CONTENTSIZE_ERROR) != 0); if (unlikely(__pyx_t_2)) { /* "borg/compress.pyx":243 * osize = ZSTD_getFrameContentSize(source, isize) * if osize == ZSTD_CONTENTSIZE_ERROR: * raise DecompressionError('zstd get size failed: data was not compressed by zstd') # <<<<<<<<<<<<<< * if osize == ZSTD_CONTENTSIZE_UNKNOWN: * raise DecompressionError('zstd get size failed: original size unknown') */ __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_n_s_DecompressionError); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 243, __pyx_L1_error) 
__Pyx_GOTREF(__pyx_t_5); __pyx_t_4 = NULL; if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_5))) { __pyx_t_4 = PyMethod_GET_SELF(__pyx_t_5); if (likely(__pyx_t_4)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_5); __Pyx_INCREF(__pyx_t_4); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_5, function); } } __pyx_t_3 = (__pyx_t_4) ? __Pyx_PyObject_Call2Args(__pyx_t_5, __pyx_t_4, __pyx_kp_u_zstd_get_size_failed_data_was_no) : __Pyx_PyObject_CallOneArg(__pyx_t_5, __pyx_kp_u_zstd_get_size_failed_data_was_no); __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 243, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(0, 243, __pyx_L1_error) /* "borg/compress.pyx":242 * cdef char *dest * osize = ZSTD_getFrameContentSize(source, isize) * if osize == ZSTD_CONTENTSIZE_ERROR: # <<<<<<<<<<<<<< * raise DecompressionError('zstd get size failed: data was not compressed by zstd') * if osize == ZSTD_CONTENTSIZE_UNKNOWN: */ } /* "borg/compress.pyx":244 * if osize == ZSTD_CONTENTSIZE_ERROR: * raise DecompressionError('zstd get size failed: data was not compressed by zstd') * if osize == ZSTD_CONTENTSIZE_UNKNOWN: # <<<<<<<<<<<<<< * raise DecompressionError('zstd get size failed: original size unknown') * try: */ __pyx_t_2 = ((__pyx_v_osize == ZSTD_CONTENTSIZE_UNKNOWN) != 0); if (unlikely(__pyx_t_2)) { /* "borg/compress.pyx":245 * raise DecompressionError('zstd get size failed: data was not compressed by zstd') * if osize == ZSTD_CONTENTSIZE_UNKNOWN: * raise DecompressionError('zstd get size failed: original size unknown') # <<<<<<<<<<<<<< * try: * buf = buffer.get(osize) */ __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_n_s_DecompressionError); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 245, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_4 = NULL; if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_5))) { __pyx_t_4 = PyMethod_GET_SELF(__pyx_t_5); if (likely(__pyx_t_4)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_5); __Pyx_INCREF(__pyx_t_4); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_5, function); } } __pyx_t_3 = (__pyx_t_4) ? 
__Pyx_PyObject_Call2Args(__pyx_t_5, __pyx_t_4, __pyx_kp_u_zstd_get_size_failed_original_si) : __Pyx_PyObject_CallOneArg(__pyx_t_5, __pyx_kp_u_zstd_get_size_failed_original_si); __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 245, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(0, 245, __pyx_L1_error) /* "borg/compress.pyx":244 * if osize == ZSTD_CONTENTSIZE_ERROR: * raise DecompressionError('zstd get size failed: data was not compressed by zstd') * if osize == ZSTD_CONTENTSIZE_UNKNOWN: # <<<<<<<<<<<<<< * raise DecompressionError('zstd get size failed: original size unknown') * try: */ } /* "borg/compress.pyx":246 * if osize == ZSTD_CONTENTSIZE_UNKNOWN: * raise DecompressionError('zstd get size failed: original size unknown') * try: # <<<<<<<<<<<<<< * buf = buffer.get(osize) * except MemoryError: */ { __Pyx_PyThreadState_declare __Pyx_PyThreadState_assign __Pyx_ExceptionSave(&__pyx_t_8, &__pyx_t_9, &__pyx_t_10); __Pyx_XGOTREF(__pyx_t_8); __Pyx_XGOTREF(__pyx_t_9); __Pyx_XGOTREF(__pyx_t_10); /*try:*/ { /* "borg/compress.pyx":247 * raise DecompressionError('zstd get size failed: original size unknown') * try: * buf = buffer.get(osize) # <<<<<<<<<<<<<< * except MemoryError: * raise DecompressionError('MemoryError') */ __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_n_s_buffer); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 247, __pyx_L6_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_5, __pyx_n_s_get); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 247, __pyx_L6_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_5 = __Pyx_PyInt_From_unsigned_PY_LONG_LONG(__pyx_v_osize); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 247, __pyx_L6_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_11 = NULL; if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_4))) { __pyx_t_11 = PyMethod_GET_SELF(__pyx_t_4); if (likely(__pyx_t_11)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_4); __Pyx_INCREF(__pyx_t_11); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_4, function); } } __pyx_t_3 = (__pyx_t_11) ? 
__Pyx_PyObject_Call2Args(__pyx_t_4, __pyx_t_11, __pyx_t_5) : __Pyx_PyObject_CallOneArg(__pyx_t_4, __pyx_t_5); __Pyx_XDECREF(__pyx_t_11); __pyx_t_11 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 247, __pyx_L6_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_v_buf = __pyx_t_3; __pyx_t_3 = 0; /* "borg/compress.pyx":246 * if osize == ZSTD_CONTENTSIZE_UNKNOWN: * raise DecompressionError('zstd get size failed: original size unknown') * try: # <<<<<<<<<<<<<< * buf = buffer.get(osize) * except MemoryError: */ } __Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0; __Pyx_XDECREF(__pyx_t_9); __pyx_t_9 = 0; __Pyx_XDECREF(__pyx_t_10); __pyx_t_10 = 0; goto __pyx_L11_try_end; __pyx_L6_error:; __Pyx_XDECREF(__pyx_t_11); __pyx_t_11 = 0; __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; /* "borg/compress.pyx":248 * try: * buf = buffer.get(osize) * except MemoryError: # <<<<<<<<<<<<<< * raise DecompressionError('MemoryError') * dest = buf */ __pyx_t_12 = __Pyx_PyErr_ExceptionMatches(__pyx_builtin_MemoryError); if (__pyx_t_12) { __Pyx_AddTraceback("borg.compress.ZSTD.decompress", __pyx_clineno, __pyx_lineno, __pyx_filename); if (__Pyx_GetException(&__pyx_t_3, &__pyx_t_4, &__pyx_t_5) < 0) __PYX_ERR(0, 248, __pyx_L8_except_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_GOTREF(__pyx_t_4); __Pyx_GOTREF(__pyx_t_5); /* "borg/compress.pyx":249 * buf = buffer.get(osize) * except MemoryError: * raise DecompressionError('MemoryError') # <<<<<<<<<<<<<< * dest = buf * with nogil: */ __Pyx_GetModuleGlobalName(__pyx_t_13, __pyx_n_s_DecompressionError); if (unlikely(!__pyx_t_13)) __PYX_ERR(0, 249, __pyx_L8_except_error) __Pyx_GOTREF(__pyx_t_13); __pyx_t_14 = NULL; if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_13))) { __pyx_t_14 = PyMethod_GET_SELF(__pyx_t_13); if (likely(__pyx_t_14)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_13); __Pyx_INCREF(__pyx_t_14); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_13, function); } } __pyx_t_11 = (__pyx_t_14) ? 
__Pyx_PyObject_Call2Args(__pyx_t_13, __pyx_t_14, __pyx_n_u_MemoryError) : __Pyx_PyObject_CallOneArg(__pyx_t_13, __pyx_n_u_MemoryError); __Pyx_XDECREF(__pyx_t_14); __pyx_t_14 = 0; if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 249, __pyx_L8_except_error) __Pyx_GOTREF(__pyx_t_11); __Pyx_DECREF(__pyx_t_13); __pyx_t_13 = 0; __Pyx_Raise(__pyx_t_11, 0, 0, 0); __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0; __PYX_ERR(0, 249, __pyx_L8_except_error) } goto __pyx_L8_except_error; __pyx_L8_except_error:; /* "borg/compress.pyx":246 * if osize == ZSTD_CONTENTSIZE_UNKNOWN: * raise DecompressionError('zstd get size failed: original size unknown') * try: # <<<<<<<<<<<<<< * buf = buffer.get(osize) * except MemoryError: */ __Pyx_XGIVEREF(__pyx_t_8); __Pyx_XGIVEREF(__pyx_t_9); __Pyx_XGIVEREF(__pyx_t_10); __Pyx_ExceptionReset(__pyx_t_8, __pyx_t_9, __pyx_t_10); goto __pyx_L1_error; __pyx_L11_try_end:; } /* "borg/compress.pyx":250 * except MemoryError: * raise DecompressionError('MemoryError') * dest = buf # <<<<<<<<<<<<<< * with nogil: * rsize = ZSTD_decompress(dest, osize, source, isize) */ __pyx_t_7 = __Pyx_PyObject_AsWritableString(__pyx_v_buf); if (unlikely((!__pyx_t_7) && PyErr_Occurred())) __PYX_ERR(0, 250, __pyx_L1_error) __pyx_v_dest = ((char *)__pyx_t_7); /* "borg/compress.pyx":251 * raise DecompressionError('MemoryError') * dest = buf * with nogil: # <<<<<<<<<<<<<< * rsize = ZSTD_decompress(dest, osize, source, isize) * if ZSTD_isError(rsize): */ { #ifdef WITH_THREAD PyThreadState *_save; Py_UNBLOCK_THREADS __Pyx_FastGIL_Remember(); #endif /*try:*/ { /* "borg/compress.pyx":252 * dest = buf * with nogil: * rsize = ZSTD_decompress(dest, osize, source, isize) # <<<<<<<<<<<<<< * if ZSTD_isError(rsize): * raise DecompressionError('zstd decompress failed: %s' % ZSTD_getErrorName(rsize)) */ __pyx_v_rsize = ZSTD_decompress(__pyx_v_dest, __pyx_v_osize, __pyx_v_source, __pyx_v_isize); } /* "borg/compress.pyx":251 * raise DecompressionError('MemoryError') * dest = buf * with nogil: # <<<<<<<<<<<<<< * rsize = ZSTD_decompress(dest, osize, source, isize) * if ZSTD_isError(rsize): */ /*finally:*/ { /*normal exit:*/{ #ifdef WITH_THREAD __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS #endif goto __pyx_L16; } __pyx_L16:; } } /* "borg/compress.pyx":253 * with nogil: * rsize = ZSTD_decompress(dest, osize, source, isize) * if ZSTD_isError(rsize): # <<<<<<<<<<<<<< * raise DecompressionError('zstd decompress failed: %s' % ZSTD_getErrorName(rsize)) * if rsize != osize: */ __pyx_t_2 = (ZSTD_isError(__pyx_v_rsize) != 0); if (unlikely(__pyx_t_2)) { /* "borg/compress.pyx":254 * rsize = ZSTD_decompress(dest, osize, source, isize) * if ZSTD_isError(rsize): * raise DecompressionError('zstd decompress failed: %s' % ZSTD_getErrorName(rsize)) # <<<<<<<<<<<<<< * if rsize != osize: * raise DecompressionError('zstd decompress failed: size mismatch') */ __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_n_s_DecompressionError); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 254, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = __Pyx_PyBytes_FromString(ZSTD_getErrorName(__pyx_v_rsize)); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 254, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_11 = PyUnicode_Format(__pyx_kp_u_zstd_decompress_failed_s, __pyx_t_3); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 254, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_11); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = NULL; if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_4))) { __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_4); if (likely(__pyx_t_3)) { PyObject* function = 
PyMethod_GET_FUNCTION(__pyx_t_4); __Pyx_INCREF(__pyx_t_3); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_4, function); } } __pyx_t_5 = (__pyx_t_3) ? __Pyx_PyObject_Call2Args(__pyx_t_4, __pyx_t_3, __pyx_t_11) : __Pyx_PyObject_CallOneArg(__pyx_t_4, __pyx_t_11); __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0; if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 254, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_Raise(__pyx_t_5, 0, 0, 0); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __PYX_ERR(0, 254, __pyx_L1_error) /* "borg/compress.pyx":253 * with nogil: * rsize = ZSTD_decompress(dest, osize, source, isize) * if ZSTD_isError(rsize): # <<<<<<<<<<<<<< * raise DecompressionError('zstd decompress failed: %s' % ZSTD_getErrorName(rsize)) * if rsize != osize: */ } /* "borg/compress.pyx":255 * if ZSTD_isError(rsize): * raise DecompressionError('zstd decompress failed: %s' % ZSTD_getErrorName(rsize)) * if rsize != osize: # <<<<<<<<<<<<<< * raise DecompressionError('zstd decompress failed: size mismatch') * return dest[:osize] */ __pyx_t_2 = ((__pyx_v_rsize != __pyx_v_osize) != 0); if (unlikely(__pyx_t_2)) { /* "borg/compress.pyx":256 * raise DecompressionError('zstd decompress failed: %s' % ZSTD_getErrorName(rsize)) * if rsize != osize: * raise DecompressionError('zstd decompress failed: size mismatch') # <<<<<<<<<<<<<< * return dest[:osize] * */ __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_n_s_DecompressionError); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 256, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_11 = NULL; if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_4))) { __pyx_t_11 = PyMethod_GET_SELF(__pyx_t_4); if (likely(__pyx_t_11)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_4); __Pyx_INCREF(__pyx_t_11); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_4, function); } } __pyx_t_5 = (__pyx_t_11) ? 
__Pyx_PyObject_Call2Args(__pyx_t_4, __pyx_t_11, __pyx_kp_u_zstd_decompress_failed_size_mism) : __Pyx_PyObject_CallOneArg(__pyx_t_4, __pyx_kp_u_zstd_decompress_failed_size_mism); __Pyx_XDECREF(__pyx_t_11); __pyx_t_11 = 0; if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 256, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_Raise(__pyx_t_5, 0, 0, 0); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __PYX_ERR(0, 256, __pyx_L1_error) /* "borg/compress.pyx":255 * if ZSTD_isError(rsize): * raise DecompressionError('zstd decompress failed: %s' % ZSTD_getErrorName(rsize)) * if rsize != osize: # <<<<<<<<<<<<<< * raise DecompressionError('zstd decompress failed: size mismatch') * return dest[:osize] */ } /* "borg/compress.pyx":257 * if rsize != osize: * raise DecompressionError('zstd decompress failed: size mismatch') * return dest[:osize] # <<<<<<<<<<<<<< * * */ __Pyx_XDECREF(__pyx_r); __pyx_t_5 = __Pyx_PyBytes_FromStringAndSize(__pyx_v_dest + 0, __pyx_v_osize - 0); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 257, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_r = __pyx_t_5; __pyx_t_5 = 0; goto __pyx_L0; /* "borg/compress.pyx":232 * return super().compress(dest[:osize]) * * def decompress(self, idata): # <<<<<<<<<<<<<< * if not isinstance(idata, bytes): * idata = bytes(idata) # code below does not work with memoryview */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_XDECREF(__pyx_t_11); __Pyx_XDECREF(__pyx_t_13); __Pyx_XDECREF(__pyx_t_14); __Pyx_AddTraceback("borg.compress.ZSTD.decompress", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_buf); __Pyx_XDECREF(__pyx_v_idata); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "borg/compress.pyx":269 * * @classmethod * def detect(cls, data): # <<<<<<<<<<<<<< * # matches misc. patterns 0x.8.. 
used by zlib * cmf, flg = data[:2] */ /* Python wrapper */ static PyObject *__pyx_pw_4borg_8compress_4ZLIB_1detect(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static PyMethodDef __pyx_mdef_4borg_8compress_4ZLIB_1detect = {"detect", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_4borg_8compress_4ZLIB_1detect, METH_VARARGS|METH_KEYWORDS, 0}; static PyObject *__pyx_pw_4borg_8compress_4ZLIB_1detect(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { CYTHON_UNUSED PyObject *__pyx_v_cls = 0; PyObject *__pyx_v_data = 0; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("detect (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_cls,&__pyx_n_s_data,0}; PyObject* values[2] = {0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); CYTHON_FALLTHROUGH; case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_cls)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_data)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("detect", 1, 2, 2, 1); __PYX_ERR(0, 269, __pyx_L3_error) } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "detect") < 0)) __PYX_ERR(0, 269, __pyx_L3_error) } } else if (PyTuple_GET_SIZE(__pyx_args) != 2) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); } __pyx_v_cls = values[0]; __pyx_v_data = values[1]; } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("detect", 1, 2, 2, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 269, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("borg.compress.ZLIB.detect", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_4borg_8compress_4ZLIB_detect(__pyx_self, __pyx_v_cls, __pyx_v_data); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4borg_8compress_4ZLIB_detect(CYTHON_UNUSED PyObject *__pyx_self, CYTHON_UNUSED PyObject *__pyx_v_cls, PyObject *__pyx_v_data) { PyObject *__pyx_v_cmf = NULL; PyObject *__pyx_v_flg = NULL; PyObject *__pyx_v_is_deflate = NULL; PyObject *__pyx_v_check_ok = NULL; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *(*__pyx_t_5)(PyObject *); int __pyx_t_6; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("detect", 0); /* "borg/compress.pyx":271 * def detect(cls, data): * # matches misc. patterns 0x.8.. 
used by zlib * cmf, flg = data[:2] # <<<<<<<<<<<<<< * is_deflate = cmf & 0x0f == 8 * check_ok = (cmf * 256 + flg) % 31 == 0 */ __pyx_t_1 = __Pyx_PyObject_GetSlice(__pyx_v_data, 0, 2, NULL, NULL, &__pyx_slice__4, 0, 1, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 271, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); if ((likely(PyTuple_CheckExact(__pyx_t_1))) || (PyList_CheckExact(__pyx_t_1))) { PyObject* sequence = __pyx_t_1; Py_ssize_t size = __Pyx_PySequence_SIZE(sequence); if (unlikely(size != 2)) { if (size > 2) __Pyx_RaiseTooManyValuesError(2); else if (size >= 0) __Pyx_RaiseNeedMoreValuesError(size); __PYX_ERR(0, 271, __pyx_L1_error) } #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS if (likely(PyTuple_CheckExact(sequence))) { __pyx_t_2 = PyTuple_GET_ITEM(sequence, 0); __pyx_t_3 = PyTuple_GET_ITEM(sequence, 1); } else { __pyx_t_2 = PyList_GET_ITEM(sequence, 0); __pyx_t_3 = PyList_GET_ITEM(sequence, 1); } __Pyx_INCREF(__pyx_t_2); __Pyx_INCREF(__pyx_t_3); #else __pyx_t_2 = PySequence_ITEM(sequence, 0); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 271, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = PySequence_ITEM(sequence, 1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 271, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); #endif __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; } else { Py_ssize_t index = -1; __pyx_t_4 = PyObject_GetIter(__pyx_t_1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 271, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_5 = Py_TYPE(__pyx_t_4)->tp_iternext; index = 0; __pyx_t_2 = __pyx_t_5(__pyx_t_4); if (unlikely(!__pyx_t_2)) goto __pyx_L3_unpacking_failed; __Pyx_GOTREF(__pyx_t_2); index = 1; __pyx_t_3 = __pyx_t_5(__pyx_t_4); if (unlikely(!__pyx_t_3)) goto __pyx_L3_unpacking_failed; __Pyx_GOTREF(__pyx_t_3); if (__Pyx_IternextUnpackEndCheck(__pyx_t_5(__pyx_t_4), 2) < 0) __PYX_ERR(0, 271, __pyx_L1_error) __pyx_t_5 = NULL; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; goto __pyx_L4_unpacking_done; __pyx_L3_unpacking_failed:; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_5 = NULL; if (__Pyx_IterFinish() == 0) __Pyx_RaiseNeedMoreValuesError(index); __PYX_ERR(0, 271, __pyx_L1_error) __pyx_L4_unpacking_done:; } __pyx_v_cmf = __pyx_t_2; __pyx_t_2 = 0; __pyx_v_flg = __pyx_t_3; __pyx_t_3 = 0; /* "borg/compress.pyx":272 * # matches misc. patterns 0x.8.. 
used by zlib * cmf, flg = data[:2] * is_deflate = cmf & 0x0f == 8 # <<<<<<<<<<<<<< * check_ok = (cmf * 256 + flg) % 31 == 0 * return check_ok and is_deflate */ __pyx_t_1 = __Pyx_PyInt_AndObjC(__pyx_v_cmf, __pyx_int_15, 0x0f, 0, 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 272, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_3 = __Pyx_PyInt_EqObjC(__pyx_t_1, __pyx_int_8, 8, 0); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 272, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_v_is_deflate = __pyx_t_3; __pyx_t_3 = 0; /* "borg/compress.pyx":273 * cmf, flg = data[:2] * is_deflate = cmf & 0x0f == 8 * check_ok = (cmf * 256 + flg) % 31 == 0 # <<<<<<<<<<<<<< * return check_ok and is_deflate * */ __pyx_t_3 = PyNumber_Multiply(__pyx_v_cmf, __pyx_int_256); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 273, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_1 = PyNumber_Add(__pyx_t_3, __pyx_v_flg); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 273, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = __Pyx_PyInt_RemainderObjC(__pyx_t_1, __pyx_int_31, 31, 0, 0); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 273, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = __Pyx_PyInt_EqObjC(__pyx_t_3, __pyx_int_0, 0, 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 273, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_v_check_ok = __pyx_t_1; __pyx_t_1 = 0; /* "borg/compress.pyx":274 * is_deflate = cmf & 0x0f == 8 * check_ok = (cmf * 256 + flg) % 31 == 0 * return check_ok and is_deflate # <<<<<<<<<<<<<< * * def __init__(self, level=6, **kwargs): */ __Pyx_XDECREF(__pyx_r); __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_v_check_ok); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(0, 274, __pyx_L1_error) if (__pyx_t_6) { } else { __Pyx_INCREF(__pyx_v_check_ok); __pyx_t_1 = __pyx_v_check_ok; goto __pyx_L5_bool_binop_done; } __Pyx_INCREF(__pyx_v_is_deflate); __pyx_t_1 = __pyx_v_is_deflate; __pyx_L5_bool_binop_done:; __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "borg/compress.pyx":269 * * @classmethod * def detect(cls, data): # <<<<<<<<<<<<<< * # matches misc. patterns 0x.8.. 
used by zlib * cmf, flg = data[:2] */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_AddTraceback("borg.compress.ZLIB.detect", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_cmf); __Pyx_XDECREF(__pyx_v_flg); __Pyx_XDECREF(__pyx_v_is_deflate); __Pyx_XDECREF(__pyx_v_check_ok); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "borg/compress.pyx":276 * return check_ok and is_deflate * * def __init__(self, level=6, **kwargs): # <<<<<<<<<<<<<< * super().__init__(**kwargs) * self.level = level */ /* Python wrapper */ static PyObject *__pyx_pw_4borg_8compress_4ZLIB_3__init__(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static PyMethodDef __pyx_mdef_4borg_8compress_4ZLIB_3__init__ = {"__init__", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_4borg_8compress_4ZLIB_3__init__, METH_VARARGS|METH_KEYWORDS, 0}; static PyObject *__pyx_pw_4borg_8compress_4ZLIB_3__init__(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyObject *__pyx_v_self = 0; PyObject *__pyx_v_level = 0; PyObject *__pyx_v_kwargs = 0; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__init__ (wrapper)", 0); __pyx_v_kwargs = PyDict_New(); if (unlikely(!__pyx_v_kwargs)) return NULL; __Pyx_GOTREF(__pyx_v_kwargs); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_self,&__pyx_n_s_level,0}; PyObject* values[2] = {0,0}; values[1] = ((PyObject *)((PyObject *)__pyx_int_6)); if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); CYTHON_FALLTHROUGH; case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_self)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: if (kw_args > 0) { PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_level); if (value) { values[1] = value; kw_args--; } } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, __pyx_v_kwargs, values, pos_args, "__init__") < 0)) __PYX_ERR(0, 276, __pyx_L3_error) } } else { switch (PyTuple_GET_SIZE(__pyx_args)) { case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); CYTHON_FALLTHROUGH; case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); break; default: goto __pyx_L5_argtuple_error; } } __pyx_v_self = values[0]; __pyx_v_level = values[1]; } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("__init__", 0, 1, 2, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 276, __pyx_L3_error) __pyx_L3_error:; __Pyx_DECREF(__pyx_v_kwargs); __pyx_v_kwargs = 0; __Pyx_AddTraceback("borg.compress.ZLIB.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_4borg_8compress_4ZLIB_2__init__(__pyx_self, __pyx_v_self, __pyx_v_level, __pyx_v_kwargs); /* function exit code */ __Pyx_XDECREF(__pyx_v_kwargs); __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4borg_8compress_4ZLIB_2__init__(CYTHON_UNUSED 
PyObject *__pyx_self, PyObject *__pyx_v_self, PyObject *__pyx_v_level, PyObject *__pyx_v_kwargs) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__init__", 0); /* "borg/compress.pyx":277 * * def __init__(self, level=6, **kwargs): * super().__init__(**kwargs) # <<<<<<<<<<<<<< * self.level = level * */ __pyx_t_1 = __Pyx_CyFunction_GetClassObj(__pyx_self); if (!__pyx_t_1) { PyErr_SetString(PyExc_SystemError, "super(): empty __class__ cell"); __PYX_ERR(0, 277, __pyx_L1_error) } __Pyx_INCREF(__pyx_t_1); __pyx_t_2 = PyTuple_New(2); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 277, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_1); __Pyx_INCREF(__pyx_v_self); __Pyx_GIVEREF(__pyx_v_self); PyTuple_SET_ITEM(__pyx_t_2, 1, __pyx_v_self); __pyx_t_1 = 0; __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_super, __pyx_t_2, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 277, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_init); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 277, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_2, __pyx_empty_tuple, __pyx_v_kwargs); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 277, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "borg/compress.pyx":278 * def __init__(self, level=6, **kwargs): * super().__init__(**kwargs) * self.level = level # <<<<<<<<<<<<<< * * def compress(self, data): */ if (__Pyx_PyObject_SetAttrStr(__pyx_v_self, __pyx_n_s_level, __pyx_v_level) < 0) __PYX_ERR(0, 278, __pyx_L1_error) /* "borg/compress.pyx":276 * return check_ok and is_deflate * * def __init__(self, level=6, **kwargs): # <<<<<<<<<<<<<< * super().__init__(**kwargs) * self.level = level */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_AddTraceback("borg.compress.ZLIB.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "borg/compress.pyx":280 * self.level = level * * def compress(self, data): # <<<<<<<<<<<<<< * # note: for compatibility no super call, do not add ID bytes * return zlib.compress(data, self.level) */ /* Python wrapper */ static PyObject *__pyx_pw_4borg_8compress_4ZLIB_5compress(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static PyMethodDef __pyx_mdef_4borg_8compress_4ZLIB_5compress = {"compress", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_4borg_8compress_4ZLIB_5compress, METH_VARARGS|METH_KEYWORDS, 0}; static PyObject *__pyx_pw_4borg_8compress_4ZLIB_5compress(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyObject *__pyx_v_self = 0; PyObject *__pyx_v_data = 0; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("compress (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_self,&__pyx_n_s_data,0}; PyObject* values[2] = {0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch 
(pos_args) { case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); CYTHON_FALLTHROUGH; case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_self)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_data)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("compress", 1, 2, 2, 1); __PYX_ERR(0, 280, __pyx_L3_error) } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "compress") < 0)) __PYX_ERR(0, 280, __pyx_L3_error) } } else if (PyTuple_GET_SIZE(__pyx_args) != 2) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); } __pyx_v_self = values[0]; __pyx_v_data = values[1]; } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("compress", 1, 2, 2, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 280, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("borg.compress.ZLIB.compress", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_4borg_8compress_4ZLIB_4compress(__pyx_self, __pyx_v_self, __pyx_v_data); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4borg_8compress_4ZLIB_4compress(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_self, PyObject *__pyx_v_data) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; int __pyx_t_5; PyObject *__pyx_t_6 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("compress", 0); /* "borg/compress.pyx":282 * def compress(self, data): * # note: for compatibility no super call, do not add ID bytes * return zlib.compress(data, self.level) # <<<<<<<<<<<<<< * * def decompress(self, data): */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_n_s_zlib); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 282, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_compress); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 282, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_level); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 282, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_4 = NULL; __pyx_t_5 = 0; if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_3))) { __pyx_t_4 = PyMethod_GET_SELF(__pyx_t_3); if (likely(__pyx_t_4)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_3); __Pyx_INCREF(__pyx_t_4); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_3, function); __pyx_t_5 = 1; } } #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_3)) { PyObject *__pyx_temp[3] = {__pyx_t_4, __pyx_v_data, __pyx_t_2}; __pyx_t_1 = __Pyx_PyFunction_FastCall(__pyx_t_3, __pyx_temp+1-__pyx_t_5, 2+__pyx_t_5); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 282, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; } else #endif #if CYTHON_FAST_PYCCALL if 
(__Pyx_PyFastCFunction_Check(__pyx_t_3)) { PyObject *__pyx_temp[3] = {__pyx_t_4, __pyx_v_data, __pyx_t_2}; __pyx_t_1 = __Pyx_PyCFunction_FastCall(__pyx_t_3, __pyx_temp+1-__pyx_t_5, 2+__pyx_t_5); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 282, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; } else #endif { __pyx_t_6 = PyTuple_New(2+__pyx_t_5); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 282, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); if (__pyx_t_4) { __Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_6, 0, __pyx_t_4); __pyx_t_4 = NULL; } __Pyx_INCREF(__pyx_v_data); __Pyx_GIVEREF(__pyx_v_data); PyTuple_SET_ITEM(__pyx_t_6, 0+__pyx_t_5, __pyx_v_data); __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_6, 1+__pyx_t_5, __pyx_t_2); __pyx_t_2 = 0; __pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_3, __pyx_t_6, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 282, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; } __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "borg/compress.pyx":280 * self.level = level * * def compress(self, data): # <<<<<<<<<<<<<< * # note: for compatibility no super call, do not add ID bytes * return zlib.compress(data, self.level) */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_6); __Pyx_AddTraceback("borg.compress.ZLIB.compress", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "borg/compress.pyx":284 * return zlib.compress(data, self.level) * * def decompress(self, data): # <<<<<<<<<<<<<< * # note: for compatibility no super call, do not strip ID bytes * try: */ /* Python wrapper */ static PyObject *__pyx_pw_4borg_8compress_4ZLIB_7decompress(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static PyMethodDef __pyx_mdef_4borg_8compress_4ZLIB_7decompress = {"decompress", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_4borg_8compress_4ZLIB_7decompress, METH_VARARGS|METH_KEYWORDS, 0}; static PyObject *__pyx_pw_4borg_8compress_4ZLIB_7decompress(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { CYTHON_UNUSED PyObject *__pyx_v_self = 0; PyObject *__pyx_v_data = 0; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("decompress (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_self,&__pyx_n_s_data,0}; PyObject* values[2] = {0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); CYTHON_FALLTHROUGH; case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_self)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_data)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("decompress", 1, 2, 2, 1); __PYX_ERR(0, 284, __pyx_L3_error) } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, 
values, pos_args, "decompress") < 0)) __PYX_ERR(0, 284, __pyx_L3_error) } } else if (PyTuple_GET_SIZE(__pyx_args) != 2) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); } __pyx_v_self = values[0]; __pyx_v_data = values[1]; } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("decompress", 1, 2, 2, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 284, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("borg.compress.ZLIB.decompress", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_4borg_8compress_4ZLIB_6decompress(__pyx_self, __pyx_v_self, __pyx_v_data); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4borg_8compress_4ZLIB_6decompress(CYTHON_UNUSED PyObject *__pyx_self, CYTHON_UNUSED PyObject *__pyx_v_self, PyObject *__pyx_v_data) { PyObject *__pyx_v_e = NULL; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; PyObject *__pyx_t_6 = NULL; PyObject *__pyx_t_7 = NULL; PyObject *__pyx_t_8 = NULL; int __pyx_t_9; PyObject *__pyx_t_10 = NULL; PyObject *__pyx_t_11 = NULL; int __pyx_t_12; char const *__pyx_t_13; PyObject *__pyx_t_14 = NULL; PyObject *__pyx_t_15 = NULL; PyObject *__pyx_t_16 = NULL; PyObject *__pyx_t_17 = NULL; PyObject *__pyx_t_18 = NULL; PyObject *__pyx_t_19 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("decompress", 0); /* "borg/compress.pyx":286 * def decompress(self, data): * # note: for compatibility no super call, do not strip ID bytes * try: # <<<<<<<<<<<<<< * return zlib.decompress(data) * except zlib.error as e: */ { __Pyx_PyThreadState_declare __Pyx_PyThreadState_assign __Pyx_ExceptionSave(&__pyx_t_1, &__pyx_t_2, &__pyx_t_3); __Pyx_XGOTREF(__pyx_t_1); __Pyx_XGOTREF(__pyx_t_2); __Pyx_XGOTREF(__pyx_t_3); /*try:*/ { /* "borg/compress.pyx":287 * # note: for compatibility no super call, do not strip ID bytes * try: * return zlib.decompress(data) # <<<<<<<<<<<<<< * except zlib.error as e: * raise DecompressionError(str(e)) from None */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_n_s_zlib); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 287, __pyx_L3_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_t_5, __pyx_n_s_decompress); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 287, __pyx_L3_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_5 = NULL; if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_6))) { __pyx_t_5 = PyMethod_GET_SELF(__pyx_t_6); if (likely(__pyx_t_5)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_6); __Pyx_INCREF(__pyx_t_5); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_6, function); } } __pyx_t_4 = (__pyx_t_5) ? 
__Pyx_PyObject_Call2Args(__pyx_t_6, __pyx_t_5, __pyx_v_data) : __Pyx_PyObject_CallOneArg(__pyx_t_6, __pyx_v_data); __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 287, __pyx_L3_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __pyx_r = __pyx_t_4; __pyx_t_4 = 0; goto __pyx_L7_try_return; /* "borg/compress.pyx":286 * def decompress(self, data): * # note: for compatibility no super call, do not strip ID bytes * try: # <<<<<<<<<<<<<< * return zlib.decompress(data) * except zlib.error as e: */ } __pyx_L3_error:; __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; /* "borg/compress.pyx":288 * try: * return zlib.decompress(data) * except zlib.error as e: # <<<<<<<<<<<<<< * raise DecompressionError(str(e)) from None * */ __Pyx_ErrFetch(&__pyx_t_4, &__pyx_t_6, &__pyx_t_5); __Pyx_GetModuleGlobalName(__pyx_t_7, __pyx_n_s_zlib); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 288, __pyx_L5_except_error) __Pyx_GOTREF(__pyx_t_7); __pyx_t_8 = __Pyx_PyObject_GetAttrStr(__pyx_t_7, __pyx_n_s_error); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 288, __pyx_L5_except_error) __Pyx_GOTREF(__pyx_t_8); __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; __pyx_t_9 = __Pyx_PyErr_GivenExceptionMatches(__pyx_t_4, __pyx_t_8); __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; __Pyx_ErrRestore(__pyx_t_4, __pyx_t_6, __pyx_t_5); __pyx_t_4 = 0; __pyx_t_6 = 0; __pyx_t_5 = 0; if (__pyx_t_9) { __Pyx_AddTraceback("borg.compress.ZLIB.decompress", __pyx_clineno, __pyx_lineno, __pyx_filename); if (__Pyx_GetException(&__pyx_t_5, &__pyx_t_6, &__pyx_t_4) < 0) __PYX_ERR(0, 288, __pyx_L5_except_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_GOTREF(__pyx_t_6); __Pyx_GOTREF(__pyx_t_4); __Pyx_INCREF(__pyx_t_6); __pyx_v_e = __pyx_t_6; /*try:*/ { /* "borg/compress.pyx":289 * return zlib.decompress(data) * except zlib.error as e: * raise DecompressionError(str(e)) from None # <<<<<<<<<<<<<< * * */ __Pyx_GetModuleGlobalName(__pyx_t_7, __pyx_n_s_DecompressionError); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 289, __pyx_L14_error) __Pyx_GOTREF(__pyx_t_7); __pyx_t_10 = __Pyx_PyObject_CallOneArg(((PyObject *)(&PyUnicode_Type)), __pyx_v_e); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 289, __pyx_L14_error) __Pyx_GOTREF(__pyx_t_10); __pyx_t_11 = NULL; if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_7))) { __pyx_t_11 = PyMethod_GET_SELF(__pyx_t_7); if (likely(__pyx_t_11)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_7); __Pyx_INCREF(__pyx_t_11); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_7, function); } } __pyx_t_8 = (__pyx_t_11) ? 
__Pyx_PyObject_Call2Args(__pyx_t_7, __pyx_t_11, __pyx_t_10) : __Pyx_PyObject_CallOneArg(__pyx_t_7, __pyx_t_10); __Pyx_XDECREF(__pyx_t_11); __pyx_t_11 = 0; __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 289, __pyx_L14_error) __Pyx_GOTREF(__pyx_t_8); __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_Raise(__pyx_t_8, 0, 0, Py_None); __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; __PYX_ERR(0, 289, __pyx_L14_error) } /* "borg/compress.pyx":288 * try: * return zlib.decompress(data) * except zlib.error as e: # <<<<<<<<<<<<<< * raise DecompressionError(str(e)) from None * */ /*finally:*/ { __pyx_L14_error:; /*exception exit:*/{ __Pyx_PyThreadState_declare __Pyx_PyThreadState_assign __pyx_t_14 = 0; __pyx_t_15 = 0; __pyx_t_16 = 0; __pyx_t_17 = 0; __pyx_t_18 = 0; __pyx_t_19 = 0; __Pyx_XDECREF(__pyx_t_10); __pyx_t_10 = 0; __Pyx_XDECREF(__pyx_t_11); __pyx_t_11 = 0; __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0; if (PY_MAJOR_VERSION >= 3) __Pyx_ExceptionSwap(&__pyx_t_17, &__pyx_t_18, &__pyx_t_19); if ((PY_MAJOR_VERSION < 3) || unlikely(__Pyx_GetException(&__pyx_t_14, &__pyx_t_15, &__pyx_t_16) < 0)) __Pyx_ErrFetch(&__pyx_t_14, &__pyx_t_15, &__pyx_t_16); __Pyx_XGOTREF(__pyx_t_14); __Pyx_XGOTREF(__pyx_t_15); __Pyx_XGOTREF(__pyx_t_16); __Pyx_XGOTREF(__pyx_t_17); __Pyx_XGOTREF(__pyx_t_18); __Pyx_XGOTREF(__pyx_t_19); __pyx_t_9 = __pyx_lineno; __pyx_t_12 = __pyx_clineno; __pyx_t_13 = __pyx_filename; { __Pyx_DECREF(__pyx_v_e); __pyx_v_e = NULL; } if (PY_MAJOR_VERSION >= 3) { __Pyx_XGIVEREF(__pyx_t_17); __Pyx_XGIVEREF(__pyx_t_18); __Pyx_XGIVEREF(__pyx_t_19); __Pyx_ExceptionReset(__pyx_t_17, __pyx_t_18, __pyx_t_19); } __Pyx_XGIVEREF(__pyx_t_14); __Pyx_XGIVEREF(__pyx_t_15); __Pyx_XGIVEREF(__pyx_t_16); __Pyx_ErrRestore(__pyx_t_14, __pyx_t_15, __pyx_t_16); __pyx_t_14 = 0; __pyx_t_15 = 0; __pyx_t_16 = 0; __pyx_t_17 = 0; __pyx_t_18 = 0; __pyx_t_19 = 0; __pyx_lineno = __pyx_t_9; __pyx_clineno = __pyx_t_12; __pyx_filename = __pyx_t_13; goto __pyx_L5_except_error; } } } goto __pyx_L5_except_error; __pyx_L5_except_error:; /* "borg/compress.pyx":286 * def decompress(self, data): * # note: for compatibility no super call, do not strip ID bytes * try: # <<<<<<<<<<<<<< * return zlib.decompress(data) * except zlib.error as e: */ __Pyx_XGIVEREF(__pyx_t_1); __Pyx_XGIVEREF(__pyx_t_2); __Pyx_XGIVEREF(__pyx_t_3); __Pyx_ExceptionReset(__pyx_t_1, __pyx_t_2, __pyx_t_3); goto __pyx_L1_error; __pyx_L7_try_return:; __Pyx_XGIVEREF(__pyx_t_1); __Pyx_XGIVEREF(__pyx_t_2); __Pyx_XGIVEREF(__pyx_t_3); __Pyx_ExceptionReset(__pyx_t_1, __pyx_t_2, __pyx_t_3); goto __pyx_L0; } /* "borg/compress.pyx":284 * return zlib.compress(data, self.level) * * def decompress(self, data): # <<<<<<<<<<<<<< * # note: for compatibility no super call, do not strip ID bytes * try: */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_XDECREF(__pyx_t_6); __Pyx_XDECREF(__pyx_t_7); __Pyx_XDECREF(__pyx_t_8); __Pyx_XDECREF(__pyx_t_10); __Pyx_XDECREF(__pyx_t_11); __Pyx_AddTraceback("borg.compress.ZLIB.decompress", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_e); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "borg/compress.pyx":303 * name = 'auto' * * def __init__(self, compressor): # <<<<<<<<<<<<<< * super().__init__() * self.compressor = compressor */ /* Python wrapper */ static PyObject *__pyx_pw_4borg_8compress_4Auto_1__init__(PyObject *__pyx_self, PyObject *__pyx_args, PyObject 
*__pyx_kwds); /*proto*/ static PyMethodDef __pyx_mdef_4borg_8compress_4Auto_1__init__ = {"__init__", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_4borg_8compress_4Auto_1__init__, METH_VARARGS|METH_KEYWORDS, 0}; static PyObject *__pyx_pw_4borg_8compress_4Auto_1__init__(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyObject *__pyx_v_self = 0; PyObject *__pyx_v_compressor = 0; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__init__ (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_self,&__pyx_n_s_compressor,0}; PyObject* values[2] = {0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); CYTHON_FALLTHROUGH; case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_self)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_compressor)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("__init__", 1, 2, 2, 1); __PYX_ERR(0, 303, __pyx_L3_error) } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__init__") < 0)) __PYX_ERR(0, 303, __pyx_L3_error) } } else if (PyTuple_GET_SIZE(__pyx_args) != 2) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); } __pyx_v_self = values[0]; __pyx_v_compressor = values[1]; } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("__init__", 1, 2, 2, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 303, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("borg.compress.Auto.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_4borg_8compress_4Auto___init__(__pyx_self, __pyx_v_self, __pyx_v_compressor); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4borg_8compress_4Auto___init__(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_self, PyObject *__pyx_v_compressor) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__init__", 0); /* "borg/compress.pyx":304 * * def __init__(self, compressor): * super().__init__() # <<<<<<<<<<<<<< * self.compressor = compressor * self.lz4 = get_compressor('lz4') */ __pyx_t_2 = __Pyx_CyFunction_GetClassObj(__pyx_self); if (!__pyx_t_2) { PyErr_SetString(PyExc_SystemError, "super(): empty __class__ cell"); __PYX_ERR(0, 304, __pyx_L1_error) } __Pyx_INCREF(__pyx_t_2); __pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 304, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_2); __Pyx_INCREF(__pyx_v_self); __Pyx_GIVEREF(__pyx_v_self); PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_v_self); __pyx_t_2 = 0; __pyx_t_2 = __Pyx_PyObject_Call(__pyx_builtin_super, __pyx_t_3, 
NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 304, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_init); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 304, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = NULL; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_3))) { __pyx_t_2 = PyMethod_GET_SELF(__pyx_t_3); if (likely(__pyx_t_2)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_3); __Pyx_INCREF(__pyx_t_2); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_3, function); } } __pyx_t_1 = (__pyx_t_2) ? __Pyx_PyObject_CallOneArg(__pyx_t_3, __pyx_t_2) : __Pyx_PyObject_CallNoArg(__pyx_t_3); __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 304, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "borg/compress.pyx":305 * def __init__(self, compressor): * super().__init__() * self.compressor = compressor # <<<<<<<<<<<<<< * self.lz4 = get_compressor('lz4') * self.none = get_compressor('none') */ if (__Pyx_PyObject_SetAttrStr(__pyx_v_self, __pyx_n_s_compressor, __pyx_v_compressor) < 0) __PYX_ERR(0, 305, __pyx_L1_error) /* "borg/compress.pyx":306 * super().__init__() * self.compressor = compressor * self.lz4 = get_compressor('lz4') # <<<<<<<<<<<<<< * self.none = get_compressor('none') * */ __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_n_s_get_compressor); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 306, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_2 = NULL; if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_3))) { __pyx_t_2 = PyMethod_GET_SELF(__pyx_t_3); if (likely(__pyx_t_2)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_3); __Pyx_INCREF(__pyx_t_2); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_3, function); } } __pyx_t_1 = (__pyx_t_2) ? __Pyx_PyObject_Call2Args(__pyx_t_3, __pyx_t_2, __pyx_n_u_lz4) : __Pyx_PyObject_CallOneArg(__pyx_t_3, __pyx_n_u_lz4); __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 306, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__Pyx_PyObject_SetAttrStr(__pyx_v_self, __pyx_n_s_lz4, __pyx_t_1) < 0) __PYX_ERR(0, 306, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "borg/compress.pyx":307 * self.compressor = compressor * self.lz4 = get_compressor('lz4') * self.none = get_compressor('none') # <<<<<<<<<<<<<< * * def _decide(self, data): */ __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_n_s_get_compressor); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 307, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_2 = NULL; if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_3))) { __pyx_t_2 = PyMethod_GET_SELF(__pyx_t_3); if (likely(__pyx_t_2)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_3); __Pyx_INCREF(__pyx_t_2); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_3, function); } } __pyx_t_1 = (__pyx_t_2) ? 
__Pyx_PyObject_Call2Args(__pyx_t_3, __pyx_t_2, __pyx_n_u_none) : __Pyx_PyObject_CallOneArg(__pyx_t_3, __pyx_n_u_none); __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 307, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__Pyx_PyObject_SetAttrStr(__pyx_v_self, __pyx_n_s_none, __pyx_t_1) < 0) __PYX_ERR(0, 307, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "borg/compress.pyx":303 * name = 'auto' * * def __init__(self, compressor): # <<<<<<<<<<<<<< * super().__init__() * self.compressor = compressor */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("borg.compress.Auto.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "borg/compress.pyx":309 * self.none = get_compressor('none') * * def _decide(self, data): # <<<<<<<<<<<<<< * """ * Decides what to do with *data*. Returns (compressor, lz4_data). */ /* Python wrapper */ static PyObject *__pyx_pw_4borg_8compress_4Auto_3_decide(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static char __pyx_doc_4borg_8compress_4Auto_2_decide[] = "\n Decides what to do with *data*. Returns (compressor, lz4_data).\n\n *lz4_data* is the LZ4 result if *compressor* is LZ4 as well, otherwise it is None.\n "; static PyMethodDef __pyx_mdef_4borg_8compress_4Auto_3_decide = {"_decide", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_4borg_8compress_4Auto_3_decide, METH_VARARGS|METH_KEYWORDS, __pyx_doc_4borg_8compress_4Auto_2_decide}; static PyObject *__pyx_pw_4borg_8compress_4Auto_3_decide(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyObject *__pyx_v_self = 0; PyObject *__pyx_v_data = 0; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("_decide (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_self,&__pyx_n_s_data,0}; PyObject* values[2] = {0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); CYTHON_FALLTHROUGH; case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_self)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_data)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("_decide", 1, 2, 2, 1); __PYX_ERR(0, 309, __pyx_L3_error) } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "_decide") < 0)) __PYX_ERR(0, 309, __pyx_L3_error) } } else if (PyTuple_GET_SIZE(__pyx_args) != 2) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); } __pyx_v_self = values[0]; __pyx_v_data = values[1]; } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("_decide", 1, 2, 2, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 309, __pyx_L3_error) 
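/* For orientation, the Python-level logic of Auto._decide() can be reconstructed from the
 * "borg/compress.pyx" source comments that Cython embeds in the generated code below.
 * This is a hedged readability sketch, not generated output:
 *
 *     def _decide(self, data):
 *         lz4_data = self.lz4.compress(data)
 *         ratio = len(lz4_data) / len(data)
 *         # lz4_data includes the compression type header, while data does not yet
 *         ratio = len(lz4_data) / (len(data) + 2)
 *         if ratio < 0.97:
 *             return self.compressor, lz4_data
 *         elif ratio < 1:
 *             return self.lz4, lz4_data
 *         else:
 *             return self.none, None
 *
 * Interpretation (not text from the pyx source): a ratio below 0.97 suggests the data is
 * compressible enough to be worth handing to the expensive compressor, a ratio between
 * 0.97 and 1 keeps the cheap lz4 result, and a ratio of 1 or more stores it uncompressed.
 */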
__pyx_L3_error:; __Pyx_AddTraceback("borg.compress.Auto._decide", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_4borg_8compress_4Auto_2_decide(__pyx_self, __pyx_v_self, __pyx_v_data); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4borg_8compress_4Auto_2_decide(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_self, PyObject *__pyx_v_data) { PyObject *__pyx_v_lz4_data = NULL; double __pyx_v_ratio; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; Py_ssize_t __pyx_t_4; Py_ssize_t __pyx_t_5; Py_ssize_t __pyx_t_6; int __pyx_t_7; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("_decide", 0); /* "borg/compress.pyx":315 * *lz4_data* is the LZ4 result if *compressor* is LZ4 as well, otherwise it is None. * """ * lz4_data = self.lz4.compress(data) # <<<<<<<<<<<<<< * ratio = len(lz4_data) / len(data) * # lz4_data includes the compression type header, while data does not yet */ __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_lz4); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 315, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_compress); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 315, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = NULL; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_3))) { __pyx_t_2 = PyMethod_GET_SELF(__pyx_t_3); if (likely(__pyx_t_2)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_3); __Pyx_INCREF(__pyx_t_2); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_3, function); } } __pyx_t_1 = (__pyx_t_2) ? 
__Pyx_PyObject_Call2Args(__pyx_t_3, __pyx_t_2, __pyx_v_data) : __Pyx_PyObject_CallOneArg(__pyx_t_3, __pyx_v_data); __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 315, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_v_lz4_data = __pyx_t_1; __pyx_t_1 = 0; /* "borg/compress.pyx":316 * """ * lz4_data = self.lz4.compress(data) * ratio = len(lz4_data) / len(data) # <<<<<<<<<<<<<< * # lz4_data includes the compression type header, while data does not yet * ratio = len(lz4_data) / (len(data) + 2) */ __pyx_t_4 = PyObject_Length(__pyx_v_lz4_data); if (unlikely(__pyx_t_4 == ((Py_ssize_t)-1))) __PYX_ERR(0, 316, __pyx_L1_error) __pyx_t_5 = PyObject_Length(__pyx_v_data); if (unlikely(__pyx_t_5 == ((Py_ssize_t)-1))) __PYX_ERR(0, 316, __pyx_L1_error) if (unlikely(__pyx_t_5 == 0)) { PyErr_SetString(PyExc_ZeroDivisionError, "float division"); __PYX_ERR(0, 316, __pyx_L1_error) } __pyx_v_ratio = (((double)__pyx_t_4) / ((double)__pyx_t_5)); /* "borg/compress.pyx":318 * ratio = len(lz4_data) / len(data) * # lz4_data includes the compression type header, while data does not yet * ratio = len(lz4_data) / (len(data) + 2) # <<<<<<<<<<<<<< * if ratio < 0.97: * return self.compressor, lz4_data */ __pyx_t_5 = PyObject_Length(__pyx_v_lz4_data); if (unlikely(__pyx_t_5 == ((Py_ssize_t)-1))) __PYX_ERR(0, 318, __pyx_L1_error) __pyx_t_4 = PyObject_Length(__pyx_v_data); if (unlikely(__pyx_t_4 == ((Py_ssize_t)-1))) __PYX_ERR(0, 318, __pyx_L1_error) __pyx_t_6 = (__pyx_t_4 + 2); if (unlikely(__pyx_t_6 == 0)) { PyErr_SetString(PyExc_ZeroDivisionError, "float division"); __PYX_ERR(0, 318, __pyx_L1_error) } __pyx_v_ratio = (((double)__pyx_t_5) / ((double)__pyx_t_6)); /* "borg/compress.pyx":319 * # lz4_data includes the compression type header, while data does not yet * ratio = len(lz4_data) / (len(data) + 2) * if ratio < 0.97: # <<<<<<<<<<<<<< * return self.compressor, lz4_data * elif ratio < 1: */ __pyx_t_7 = ((__pyx_v_ratio < 0.97) != 0); if (__pyx_t_7) { /* "borg/compress.pyx":320 * ratio = len(lz4_data) / (len(data) + 2) * if ratio < 0.97: * return self.compressor, lz4_data # <<<<<<<<<<<<<< * elif ratio < 1: * return self.lz4, lz4_data */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_compressor); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 320, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 320, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_1); __Pyx_INCREF(__pyx_v_lz4_data); __Pyx_GIVEREF(__pyx_v_lz4_data); PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_v_lz4_data); __pyx_t_1 = 0; __pyx_r = __pyx_t_3; __pyx_t_3 = 0; goto __pyx_L0; /* "borg/compress.pyx":319 * # lz4_data includes the compression type header, while data does not yet * ratio = len(lz4_data) / (len(data) + 2) * if ratio < 0.97: # <<<<<<<<<<<<<< * return self.compressor, lz4_data * elif ratio < 1: */ } /* "borg/compress.pyx":321 * if ratio < 0.97: * return self.compressor, lz4_data * elif ratio < 1: # <<<<<<<<<<<<<< * return self.lz4, lz4_data * else: */ __pyx_t_7 = ((__pyx_v_ratio < 1.0) != 0); if (__pyx_t_7) { /* "borg/compress.pyx":322 * return self.compressor, lz4_data * elif ratio < 1: * return self.lz4, lz4_data # <<<<<<<<<<<<<< * else: * return self.none, None */ __Pyx_XDECREF(__pyx_r); __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_lz4); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 322, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); 
__pyx_t_1 = PyTuple_New(2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 322, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_t_3); __Pyx_INCREF(__pyx_v_lz4_data); __Pyx_GIVEREF(__pyx_v_lz4_data); PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_v_lz4_data); __pyx_t_3 = 0; __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "borg/compress.pyx":321 * if ratio < 0.97: * return self.compressor, lz4_data * elif ratio < 1: # <<<<<<<<<<<<<< * return self.lz4, lz4_data * else: */ } /* "borg/compress.pyx":324 * return self.lz4, lz4_data * else: * return self.none, None # <<<<<<<<<<<<<< * * def decide(self, data): */ /*else*/ { __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_none); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 324, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 324, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_1); __Pyx_INCREF(Py_None); __Pyx_GIVEREF(Py_None); PyTuple_SET_ITEM(__pyx_t_3, 1, Py_None); __pyx_t_1 = 0; __pyx_r = __pyx_t_3; __pyx_t_3 = 0; goto __pyx_L0; } /* "borg/compress.pyx":309 * self.none = get_compressor('none') * * def _decide(self, data): # <<<<<<<<<<<<<< * """ * Decides what to do with *data*. Returns (compressor, lz4_data). */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("borg.compress.Auto._decide", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_lz4_data); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "borg/compress.pyx":326 * return self.none, None * * def decide(self, data): # <<<<<<<<<<<<<< * return self._decide(data)[0] * */ /* Python wrapper */ static PyObject *__pyx_pw_4borg_8compress_4Auto_5decide(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static PyMethodDef __pyx_mdef_4borg_8compress_4Auto_5decide = {"decide", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_4borg_8compress_4Auto_5decide, METH_VARARGS|METH_KEYWORDS, 0}; static PyObject *__pyx_pw_4borg_8compress_4Auto_5decide(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyObject *__pyx_v_self = 0; PyObject *__pyx_v_data = 0; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("decide (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_self,&__pyx_n_s_data,0}; PyObject* values[2] = {0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); CYTHON_FALLTHROUGH; case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_self)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_data)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("decide", 1, 2, 2, 1); __PYX_ERR(0, 326, __pyx_L3_error) } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "decide") < 0)) __PYX_ERR(0, 
326, __pyx_L3_error) } } else if (PyTuple_GET_SIZE(__pyx_args) != 2) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); } __pyx_v_self = values[0]; __pyx_v_data = values[1]; } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("decide", 1, 2, 2, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 326, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("borg.compress.Auto.decide", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_4borg_8compress_4Auto_4decide(__pyx_self, __pyx_v_self, __pyx_v_data); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4borg_8compress_4Auto_4decide(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_self, PyObject *__pyx_v_data) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("decide", 0); /* "borg/compress.pyx":327 * * def decide(self, data): * return self._decide(data)[0] # <<<<<<<<<<<<<< * * def compress(self, data): */ __Pyx_XDECREF(__pyx_r); __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_decide); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 327, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = NULL; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_2))) { __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_2); if (likely(__pyx_t_3)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2); __Pyx_INCREF(__pyx_t_3); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_2, function); } } __pyx_t_1 = (__pyx_t_3) ? 
__Pyx_PyObject_Call2Args(__pyx_t_2, __pyx_t_3, __pyx_v_data) : __Pyx_PyObject_CallOneArg(__pyx_t_2, __pyx_v_data); __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 327, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = __Pyx_GetItemInt(__pyx_t_1, 0, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 327, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "borg/compress.pyx":326 * return self.none, None * * def decide(self, data): # <<<<<<<<<<<<<< * return self._decide(data)[0] * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("borg.compress.Auto.decide", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "borg/compress.pyx":329 * return self._decide(data)[0] * * def compress(self, data): # <<<<<<<<<<<<<< * compressor, lz4_data = self._decide(data) * if compressor is self.lz4: */ /* Python wrapper */ static PyObject *__pyx_pw_4borg_8compress_4Auto_7compress(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static PyMethodDef __pyx_mdef_4borg_8compress_4Auto_7compress = {"compress", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_4borg_8compress_4Auto_7compress, METH_VARARGS|METH_KEYWORDS, 0}; static PyObject *__pyx_pw_4borg_8compress_4Auto_7compress(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyObject *__pyx_v_self = 0; PyObject *__pyx_v_data = 0; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("compress (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_self,&__pyx_n_s_data,0}; PyObject* values[2] = {0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); CYTHON_FALLTHROUGH; case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_self)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_data)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("compress", 1, 2, 2, 1); __PYX_ERR(0, 329, __pyx_L3_error) } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "compress") < 0)) __PYX_ERR(0, 329, __pyx_L3_error) } } else if (PyTuple_GET_SIZE(__pyx_args) != 2) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); } __pyx_v_self = values[0]; __pyx_v_data = values[1]; } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("compress", 1, 2, 2, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 329, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("borg.compress.Auto.compress", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = 
__pyx_pf_4borg_8compress_4Auto_6compress(__pyx_self, __pyx_v_self, __pyx_v_data); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4borg_8compress_4Auto_6compress(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_self, PyObject *__pyx_v_data) { PyObject *__pyx_v_compressor = NULL; PyObject *__pyx_v_lz4_data = NULL; PyObject *__pyx_v_uncompressed_data = NULL; PyObject *__pyx_v_exp_compressed_data = NULL; double __pyx_v_ratio; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *(*__pyx_t_5)(PyObject *); int __pyx_t_6; int __pyx_t_7; Py_ssize_t __pyx_t_8; Py_ssize_t __pyx_t_9; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("compress", 0); /* "borg/compress.pyx":330 * * def compress(self, data): * compressor, lz4_data = self._decide(data) # <<<<<<<<<<<<<< * if compressor is self.lz4: * # we know that trying to compress with expensive compressor is likely pointless, */ __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_decide); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 330, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = NULL; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_2))) { __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_2); if (likely(__pyx_t_3)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2); __Pyx_INCREF(__pyx_t_3); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_2, function); } } __pyx_t_1 = (__pyx_t_3) ? __Pyx_PyObject_Call2Args(__pyx_t_2, __pyx_t_3, __pyx_v_data) : __Pyx_PyObject_CallOneArg(__pyx_t_2, __pyx_v_data); __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 330, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; if ((likely(PyTuple_CheckExact(__pyx_t_1))) || (PyList_CheckExact(__pyx_t_1))) { PyObject* sequence = __pyx_t_1; Py_ssize_t size = __Pyx_PySequence_SIZE(sequence); if (unlikely(size != 2)) { if (size > 2) __Pyx_RaiseTooManyValuesError(2); else if (size >= 0) __Pyx_RaiseNeedMoreValuesError(size); __PYX_ERR(0, 330, __pyx_L1_error) } #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS if (likely(PyTuple_CheckExact(sequence))) { __pyx_t_2 = PyTuple_GET_ITEM(sequence, 0); __pyx_t_3 = PyTuple_GET_ITEM(sequence, 1); } else { __pyx_t_2 = PyList_GET_ITEM(sequence, 0); __pyx_t_3 = PyList_GET_ITEM(sequence, 1); } __Pyx_INCREF(__pyx_t_2); __Pyx_INCREF(__pyx_t_3); #else __pyx_t_2 = PySequence_ITEM(sequence, 0); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 330, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = PySequence_ITEM(sequence, 1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 330, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); #endif __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; } else { Py_ssize_t index = -1; __pyx_t_4 = PyObject_GetIter(__pyx_t_1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 330, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_5 = Py_TYPE(__pyx_t_4)->tp_iternext; index = 0; __pyx_t_2 = __pyx_t_5(__pyx_t_4); if (unlikely(!__pyx_t_2)) goto __pyx_L3_unpacking_failed; __Pyx_GOTREF(__pyx_t_2); index = 1; __pyx_t_3 = __pyx_t_5(__pyx_t_4); if (unlikely(!__pyx_t_3)) goto __pyx_L3_unpacking_failed; __Pyx_GOTREF(__pyx_t_3); if (__Pyx_IternextUnpackEndCheck(__pyx_t_5(__pyx_t_4), 2) < 0) __PYX_ERR(0, 330, __pyx_L1_error) __pyx_t_5 = NULL; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; goto __pyx_L4_unpacking_done; 
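/* Python-level view of Auto.compress(), assembled from the "borg/compress.pyx" source
 * comments embedded in this function; a sketch for orientation, not generated output:
 *
 *     def compress(self, data):
 *         compressor, lz4_data = self._decide(data)
 *         if compressor is self.lz4:
 *             # trying the expensive compressor is likely pointless,
 *             # but lz4 managed to at least squeeze the data a bit
 *             return lz4_data
 *         if compressor is self.none:
 *             # lz4 did not manage to squeeze the data (not even a bit)
 *             uncompressed_data = compressor.compress(data)
 *             return uncompressed_data
 *         # the decider chose to try the expensive compressor;
 *         # lz4_data is already known to be smaller than the uncompressed data
 *         exp_compressed_data = compressor.compress(data)
 *         ratio = len(exp_compressed_data) / len(lz4_data)
 *         if ratio < 0.99:
 *             # the expensive compressor squeezed the data significantly better than lz4
 *             return exp_compressed_data
 *         else:
 *             # otherwise store the lz4 data, which decompresses extremely fast
 *             return lz4_data
 */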
__pyx_L3_unpacking_failed:; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_5 = NULL; if (__Pyx_IterFinish() == 0) __Pyx_RaiseNeedMoreValuesError(index); __PYX_ERR(0, 330, __pyx_L1_error) __pyx_L4_unpacking_done:; } __pyx_v_compressor = __pyx_t_2; __pyx_t_2 = 0; __pyx_v_lz4_data = __pyx_t_3; __pyx_t_3 = 0; /* "borg/compress.pyx":331 * def compress(self, data): * compressor, lz4_data = self._decide(data) * if compressor is self.lz4: # <<<<<<<<<<<<<< * # we know that trying to compress with expensive compressor is likely pointless, * # but lz4 managed to at least squeeze the data a bit. */ __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_lz4); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 331, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_6 = (__pyx_v_compressor == __pyx_t_1); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_7 = (__pyx_t_6 != 0); if (__pyx_t_7) { /* "borg/compress.pyx":334 * # we know that trying to compress with expensive compressor is likely pointless, * # but lz4 managed to at least squeeze the data a bit. * return lz4_data # <<<<<<<<<<<<<< * if compressor is self.none: * # we know that trying to compress with expensive compressor is likely pointless */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(__pyx_v_lz4_data); __pyx_r = __pyx_v_lz4_data; goto __pyx_L0; /* "borg/compress.pyx":331 * def compress(self, data): * compressor, lz4_data = self._decide(data) * if compressor is self.lz4: # <<<<<<<<<<<<<< * # we know that trying to compress with expensive compressor is likely pointless, * # but lz4 managed to at least squeeze the data a bit. */ } /* "borg/compress.pyx":335 * # but lz4 managed to at least squeeze the data a bit. * return lz4_data * if compressor is self.none: # <<<<<<<<<<<<<< * # we know that trying to compress with expensive compressor is likely pointless * # and also lz4 did not manage to squeeze the data (not even a bit). */ __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_none); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 335, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_7 = (__pyx_v_compressor == __pyx_t_1); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_6 = (__pyx_t_7 != 0); if (__pyx_t_6) { /* "borg/compress.pyx":338 * # we know that trying to compress with expensive compressor is likely pointless * # and also lz4 did not manage to squeeze the data (not even a bit). * uncompressed_data = compressor.compress(data) # <<<<<<<<<<<<<< * return uncompressed_data * # if we get here, the decider decided to try the expensive compressor. */ __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_v_compressor, __pyx_n_s_compress); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 338, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_2 = NULL; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_3))) { __pyx_t_2 = PyMethod_GET_SELF(__pyx_t_3); if (likely(__pyx_t_2)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_3); __Pyx_INCREF(__pyx_t_2); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_3, function); } } __pyx_t_1 = (__pyx_t_2) ? __Pyx_PyObject_Call2Args(__pyx_t_3, __pyx_t_2, __pyx_v_data) : __Pyx_PyObject_CallOneArg(__pyx_t_3, __pyx_v_data); __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 338, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_v_uncompressed_data = __pyx_t_1; __pyx_t_1 = 0; /* "borg/compress.pyx":339 * # and also lz4 did not manage to squeeze the data (not even a bit). 
* uncompressed_data = compressor.compress(data) * return uncompressed_data # <<<<<<<<<<<<<< * # if we get here, the decider decided to try the expensive compressor. * # we also know that lz4_data is smaller than uncompressed data. */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(__pyx_v_uncompressed_data); __pyx_r = __pyx_v_uncompressed_data; goto __pyx_L0; /* "borg/compress.pyx":335 * # but lz4 managed to at least squeeze the data a bit. * return lz4_data * if compressor is self.none: # <<<<<<<<<<<<<< * # we know that trying to compress with expensive compressor is likely pointless * # and also lz4 did not manage to squeeze the data (not even a bit). */ } /* "borg/compress.pyx":342 * # if we get here, the decider decided to try the expensive compressor. * # we also know that lz4_data is smaller than uncompressed data. * exp_compressed_data = compressor.compress(data) # <<<<<<<<<<<<<< * ratio = len(exp_compressed_data) / len(lz4_data) * if ratio < 0.99: */ __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_v_compressor, __pyx_n_s_compress); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 342, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_2 = NULL; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_3))) { __pyx_t_2 = PyMethod_GET_SELF(__pyx_t_3); if (likely(__pyx_t_2)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_3); __Pyx_INCREF(__pyx_t_2); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_3, function); } } __pyx_t_1 = (__pyx_t_2) ? __Pyx_PyObject_Call2Args(__pyx_t_3, __pyx_t_2, __pyx_v_data) : __Pyx_PyObject_CallOneArg(__pyx_t_3, __pyx_v_data); __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 342, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_v_exp_compressed_data = __pyx_t_1; __pyx_t_1 = 0; /* "borg/compress.pyx":343 * # we also know that lz4_data is smaller than uncompressed data. * exp_compressed_data = compressor.compress(data) * ratio = len(exp_compressed_data) / len(lz4_data) # <<<<<<<<<<<<<< * if ratio < 0.99: * # the expensive compressor managed to squeeze the data significantly better than lz4. */ __pyx_t_8 = PyObject_Length(__pyx_v_exp_compressed_data); if (unlikely(__pyx_t_8 == ((Py_ssize_t)-1))) __PYX_ERR(0, 343, __pyx_L1_error) __pyx_t_9 = PyObject_Length(__pyx_v_lz4_data); if (unlikely(__pyx_t_9 == ((Py_ssize_t)-1))) __PYX_ERR(0, 343, __pyx_L1_error) if (unlikely(__pyx_t_9 == 0)) { PyErr_SetString(PyExc_ZeroDivisionError, "float division"); __PYX_ERR(0, 343, __pyx_L1_error) } __pyx_v_ratio = (((double)__pyx_t_8) / ((double)__pyx_t_9)); /* "borg/compress.pyx":344 * exp_compressed_data = compressor.compress(data) * ratio = len(exp_compressed_data) / len(lz4_data) * if ratio < 0.99: # <<<<<<<<<<<<<< * # the expensive compressor managed to squeeze the data significantly better than lz4. * return exp_compressed_data */ __pyx_t_6 = ((__pyx_v_ratio < 0.99) != 0); if (__pyx_t_6) { /* "borg/compress.pyx":346 * if ratio < 0.99: * # the expensive compressor managed to squeeze the data significantly better than lz4. * return exp_compressed_data # <<<<<<<<<<<<<< * else: * # otherwise let's just store the lz4 data, which decompresses extremely fast. */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(__pyx_v_exp_compressed_data); __pyx_r = __pyx_v_exp_compressed_data; goto __pyx_L0; /* "borg/compress.pyx":344 * exp_compressed_data = compressor.compress(data) * ratio = len(exp_compressed_data) / len(lz4_data) * if ratio < 0.99: # <<<<<<<<<<<<<< * # the expensive compressor managed to squeeze the data significantly better than lz4. 
* return exp_compressed_data */ } /* "borg/compress.pyx":349 * else: * # otherwise let's just store the lz4 data, which decompresses extremely fast. * return lz4_data # <<<<<<<<<<<<<< * * def decompress(self, data): */ /*else*/ { __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(__pyx_v_lz4_data); __pyx_r = __pyx_v_lz4_data; goto __pyx_L0; } /* "borg/compress.pyx":329 * return self._decide(data)[0] * * def compress(self, data): # <<<<<<<<<<<<<< * compressor, lz4_data = self._decide(data) * if compressor is self.lz4: */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_AddTraceback("borg.compress.Auto.compress", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_compressor); __Pyx_XDECREF(__pyx_v_lz4_data); __Pyx_XDECREF(__pyx_v_uncompressed_data); __Pyx_XDECREF(__pyx_v_exp_compressed_data); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "borg/compress.pyx":351 * return lz4_data * * def decompress(self, data): # <<<<<<<<<<<<<< * raise NotImplementedError * */ /* Python wrapper */ static PyObject *__pyx_pw_4borg_8compress_4Auto_9decompress(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static PyMethodDef __pyx_mdef_4borg_8compress_4Auto_9decompress = {"decompress", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_4borg_8compress_4Auto_9decompress, METH_VARARGS|METH_KEYWORDS, 0}; static PyObject *__pyx_pw_4borg_8compress_4Auto_9decompress(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { CYTHON_UNUSED PyObject *__pyx_v_self = 0; CYTHON_UNUSED PyObject *__pyx_v_data = 0; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("decompress (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_self,&__pyx_n_s_data,0}; PyObject* values[2] = {0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); CYTHON_FALLTHROUGH; case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_self)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_data)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("decompress", 1, 2, 2, 1); __PYX_ERR(0, 351, __pyx_L3_error) } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "decompress") < 0)) __PYX_ERR(0, 351, __pyx_L3_error) } } else if (PyTuple_GET_SIZE(__pyx_args) != 2) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); } __pyx_v_self = values[0]; __pyx_v_data = values[1]; } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("decompress", 1, 2, 2, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 351, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("borg.compress.Auto.decompress", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = 
__pyx_pf_4borg_8compress_4Auto_8decompress(__pyx_self, __pyx_v_self, __pyx_v_data); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4borg_8compress_4Auto_8decompress(CYTHON_UNUSED PyObject *__pyx_self, CYTHON_UNUSED PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v_data) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("decompress", 0); /* "borg/compress.pyx":352 * * def decompress(self, data): * raise NotImplementedError # <<<<<<<<<<<<<< * * def detect(cls, data): */ __Pyx_Raise(__pyx_builtin_NotImplementedError, 0, 0, 0); __PYX_ERR(0, 352, __pyx_L1_error) /* "borg/compress.pyx":351 * return lz4_data * * def decompress(self, data): # <<<<<<<<<<<<<< * raise NotImplementedError * */ /* function exit code */ __pyx_L1_error:; __Pyx_AddTraceback("borg.compress.Auto.decompress", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "borg/compress.pyx":354 * raise NotImplementedError * * def detect(cls, data): # <<<<<<<<<<<<<< * raise NotImplementedError * */ /* Python wrapper */ static PyObject *__pyx_pw_4borg_8compress_4Auto_11detect(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static PyMethodDef __pyx_mdef_4borg_8compress_4Auto_11detect = {"detect", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_4borg_8compress_4Auto_11detect, METH_VARARGS|METH_KEYWORDS, 0}; static PyObject *__pyx_pw_4borg_8compress_4Auto_11detect(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { CYTHON_UNUSED PyObject *__pyx_v_cls = 0; CYTHON_UNUSED PyObject *__pyx_v_data = 0; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("detect (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_cls,&__pyx_n_s_data,0}; PyObject* values[2] = {0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); CYTHON_FALLTHROUGH; case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_cls)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_data)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("detect", 1, 2, 2, 1); __PYX_ERR(0, 354, __pyx_L3_error) } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "detect") < 0)) __PYX_ERR(0, 354, __pyx_L3_error) } } else if (PyTuple_GET_SIZE(__pyx_args) != 2) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); } __pyx_v_cls = values[0]; __pyx_v_data = values[1]; } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("detect", 1, 2, 2, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 354, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("borg.compress.Auto.detect", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; 
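/* Note on the two stubs generated around this point: Auto.decompress() and Auto.detect()
 * both just raise NotImplementedError. Auto only decides *how* to compress; data written
 * out carries the type header of the concrete compressor that produced it, so
 * decompression presumably never routes through Auto. A hedged usage sketch (the 'zlib'
 * name and level parameter are illustrative assumptions, not taken from this file):
 *
 *     auto = Auto(get_compressor('zlib', level=6))
 *     blob = auto.compress(data)   # the lz4 trial decides zlib vs. lz4 vs. uncompressed
 */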
__pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_4borg_8compress_4Auto_10detect(__pyx_self, __pyx_v_cls, __pyx_v_data); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4borg_8compress_4Auto_10detect(CYTHON_UNUSED PyObject *__pyx_self, CYTHON_UNUSED PyObject *__pyx_v_cls, CYTHON_UNUSED PyObject *__pyx_v_data) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("detect", 0); /* "borg/compress.pyx":355 * * def detect(cls, data): * raise NotImplementedError # <<<<<<<<<<<<<< * * */ __Pyx_Raise(__pyx_builtin_NotImplementedError, 0, 0, 0); __PYX_ERR(0, 355, __pyx_L1_error) /* "borg/compress.pyx":354 * raise NotImplementedError * * def detect(cls, data): # <<<<<<<<<<<<<< * raise NotImplementedError * */ /* function exit code */ __pyx_L1_error:; __Pyx_AddTraceback("borg.compress.Auto.detect", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "borg/compress.pyx":370 * COMPRESSOR_LIST = [LZ4, ZSTD, CNONE, ZLIB, LZMA, ] # check fast stuff first * * def get_compressor(name, **kwargs): # <<<<<<<<<<<<<< * cls = COMPRESSOR_TABLE[name] * return cls(**kwargs) */ /* Python wrapper */ static PyObject *__pyx_pw_4borg_8compress_1get_compressor(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static PyMethodDef __pyx_mdef_4borg_8compress_1get_compressor = {"get_compressor", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_4borg_8compress_1get_compressor, METH_VARARGS|METH_KEYWORDS, 0}; static PyObject *__pyx_pw_4borg_8compress_1get_compressor(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyObject *__pyx_v_name = 0; PyObject *__pyx_v_kwargs = 0; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("get_compressor (wrapper)", 0); __pyx_v_kwargs = PyDict_New(); if (unlikely(!__pyx_v_kwargs)) return NULL; __Pyx_GOTREF(__pyx_v_kwargs); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_name,0}; PyObject* values[1] = {0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_name)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, __pyx_v_kwargs, values, pos_args, "get_compressor") < 0)) __PYX_ERR(0, 370, __pyx_L3_error) } } else if (PyTuple_GET_SIZE(__pyx_args) != 1) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); } __pyx_v_name = values[0]; } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("get_compressor", 1, 1, 1, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 370, __pyx_L3_error) __pyx_L3_error:; __Pyx_DECREF(__pyx_v_kwargs); __pyx_v_kwargs = 0; __Pyx_AddTraceback("borg.compress.get_compressor", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_4borg_8compress_get_compressor(__pyx_self, __pyx_v_name, 
__pyx_v_kwargs); /* function exit code */ __Pyx_XDECREF(__pyx_v_kwargs); __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4borg_8compress_get_compressor(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_name, PyObject *__pyx_v_kwargs) { PyObject *__pyx_v_cls = NULL; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("get_compressor", 0); /* "borg/compress.pyx":371 * * def get_compressor(name, **kwargs): * cls = COMPRESSOR_TABLE[name] # <<<<<<<<<<<<<< * return cls(**kwargs) * */ __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_COMPRESSOR_TABLE); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 371, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __Pyx_PyObject_GetItem(__pyx_t_1, __pyx_v_name); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 371, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_v_cls = __pyx_t_2; __pyx_t_2 = 0; /* "borg/compress.pyx":372 * def get_compressor(name, **kwargs): * cls = COMPRESSOR_TABLE[name] * return cls(**kwargs) # <<<<<<<<<<<<<< * * */ __Pyx_XDECREF(__pyx_r); __pyx_t_2 = __Pyx_PyObject_Call(__pyx_v_cls, __pyx_empty_tuple, __pyx_v_kwargs); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 372, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "borg/compress.pyx":370 * COMPRESSOR_LIST = [LZ4, ZSTD, CNONE, ZLIB, LZMA, ] # check fast stuff first * * def get_compressor(name, **kwargs): # <<<<<<<<<<<<<< * cls = COMPRESSOR_TABLE[name] * return cls(**kwargs) */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_AddTraceback("borg.compress.get_compressor", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_cls); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "borg/compress.pyx":380 * decompresses everything we can handle (autodetect) * """ * def __init__(self, name='null', **kwargs): # <<<<<<<<<<<<<< * self.params = kwargs * self.compressor = get_compressor(name, **self.params) */ /* Python wrapper */ static PyObject *__pyx_pw_4borg_8compress_10Compressor_1__init__(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static PyMethodDef __pyx_mdef_4borg_8compress_10Compressor_1__init__ = {"__init__", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_4borg_8compress_10Compressor_1__init__, METH_VARARGS|METH_KEYWORDS, 0}; static PyObject *__pyx_pw_4borg_8compress_10Compressor_1__init__(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyObject *__pyx_v_self = 0; PyObject *__pyx_v_name = 0; PyObject *__pyx_v_kwargs = 0; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__init__ (wrapper)", 0); __pyx_v_kwargs = PyDict_New(); if (unlikely(!__pyx_v_kwargs)) return NULL; __Pyx_GOTREF(__pyx_v_kwargs); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_self,&__pyx_n_s_name,0}; PyObject* values[2] = {0,0}; values[1] = ((PyObject *)((PyObject*)__pyx_n_u_null)); if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); CYTHON_FALLTHROUGH; case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto 
__pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_self)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: if (kw_args > 0) { PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_name); if (value) { values[1] = value; kw_args--; } } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, __pyx_v_kwargs, values, pos_args, "__init__") < 0)) __PYX_ERR(0, 380, __pyx_L3_error) } } else { switch (PyTuple_GET_SIZE(__pyx_args)) { case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); CYTHON_FALLTHROUGH; case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); break; default: goto __pyx_L5_argtuple_error; } } __pyx_v_self = values[0]; __pyx_v_name = values[1]; } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("__init__", 0, 1, 2, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 380, __pyx_L3_error) __pyx_L3_error:; __Pyx_DECREF(__pyx_v_kwargs); __pyx_v_kwargs = 0; __Pyx_AddTraceback("borg.compress.Compressor.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_4borg_8compress_10Compressor___init__(__pyx_self, __pyx_v_self, __pyx_v_name, __pyx_v_kwargs); /* function exit code */ __Pyx_XDECREF(__pyx_v_kwargs); __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4borg_8compress_10Compressor___init__(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_self, PyObject *__pyx_v_name, PyObject *__pyx_v_kwargs) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__init__", 0); /* "borg/compress.pyx":381 * """ * def __init__(self, name='null', **kwargs): * self.params = kwargs # <<<<<<<<<<<<<< * self.compressor = get_compressor(name, **self.params) * */ if (__Pyx_PyObject_SetAttrStr(__pyx_v_self, __pyx_n_s_params, __pyx_v_kwargs) < 0) __PYX_ERR(0, 381, __pyx_L1_error) /* "borg/compress.pyx":382 * def __init__(self, name='null', **kwargs): * self.params = kwargs * self.compressor = get_compressor(name, **self.params) # <<<<<<<<<<<<<< * * def compress(self, data): */ __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_get_compressor); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 382, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 382, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_INCREF(__pyx_v_name); __Pyx_GIVEREF(__pyx_v_name); PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_v_name); __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_params); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 382, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); if (unlikely(__pyx_t_4 == Py_None)) { PyErr_SetString(PyExc_TypeError, "argument after ** must be a mapping, not NoneType"); __PYX_ERR(0, 382, __pyx_L1_error) } if (likely(PyDict_CheckExact(__pyx_t_4))) { __pyx_t_3 = PyDict_Copy(__pyx_t_4); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 382, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; } else { __pyx_t_3 = PyObject_CallFunctionObjArgs((PyObject*)&PyDict_Type, __pyx_t_4, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 382, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_4); 
__pyx_t_4 = 0; } __pyx_t_4 = __Pyx_PyObject_Call(__pyx_t_1, __pyx_t_2, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 382, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__Pyx_PyObject_SetAttrStr(__pyx_v_self, __pyx_n_s_compressor, __pyx_t_4) < 0) __PYX_ERR(0, 382, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; /* "borg/compress.pyx":380 * decompresses everything we can handle (autodetect) * """ * def __init__(self, name='null', **kwargs): # <<<<<<<<<<<<<< * self.params = kwargs * self.compressor = get_compressor(name, **self.params) */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_AddTraceback("borg.compress.Compressor.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "borg/compress.pyx":384 * self.compressor = get_compressor(name, **self.params) * * def compress(self, data): # <<<<<<<<<<<<<< * return self.compressor.compress(data) * */ /* Python wrapper */ static PyObject *__pyx_pw_4borg_8compress_10Compressor_3compress(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static PyMethodDef __pyx_mdef_4borg_8compress_10Compressor_3compress = {"compress", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_4borg_8compress_10Compressor_3compress, METH_VARARGS|METH_KEYWORDS, 0}; static PyObject *__pyx_pw_4borg_8compress_10Compressor_3compress(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyObject *__pyx_v_self = 0; PyObject *__pyx_v_data = 0; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("compress (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_self,&__pyx_n_s_data,0}; PyObject* values[2] = {0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); CYTHON_FALLTHROUGH; case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_self)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_data)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("compress", 1, 2, 2, 1); __PYX_ERR(0, 384, __pyx_L3_error) } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "compress") < 0)) __PYX_ERR(0, 384, __pyx_L3_error) } } else if (PyTuple_GET_SIZE(__pyx_args) != 2) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); } __pyx_v_self = values[0]; __pyx_v_data = values[1]; } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("compress", 1, 2, 2, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 384, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("borg.compress.Compressor.compress", __pyx_clineno, __pyx_lineno, __pyx_filename); 
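/* The module-level helpers generated in this region can be summarized at the Python level
 * as follows, reconstructed from the embedded "borg/compress.pyx" comments (a sketch only;
 * the tail of Compressor.detect() lies beyond this excerpt and is omitted):
 *
 *     COMPRESSOR_LIST = [LZ4, ZSTD, CNONE, ZLIB, LZMA, ]  # check fast stuff first
 *
 *     def get_compressor(name, **kwargs):
 *         cls = COMPRESSOR_TABLE[name]
 *         return cls(**kwargs)
 *
 *     # Compressor: "decompresses everything we can handle (autodetect)"
 *     class Compressor:
 *         def __init__(self, name='null', **kwargs):
 *             self.params = kwargs
 *             self.compressor = get_compressor(name, **self.params)
 *
 *         def compress(self, data):
 *             return self.compressor.compress(data)
 *
 *         def decompress(self, data):
 *             compressor_cls = self.detect(data)
 *             return compressor_cls(**self.params).decompress(data)
 *
 *         @staticmethod
 *         def detect(data):
 *             hdr = bytes(data[:2])  # detect() does not work with memoryview
 *             for cls in COMPRESSOR_LIST:
 *                 if cls.detect(hdr):
 *                     return cls
 */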
__Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_4borg_8compress_10Compressor_2compress(__pyx_self, __pyx_v_self, __pyx_v_data); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4borg_8compress_10Compressor_2compress(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_self, PyObject *__pyx_v_data) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("compress", 0); /* "borg/compress.pyx":385 * * def compress(self, data): * return self.compressor.compress(data) # <<<<<<<<<<<<<< * * def decompress(self, data): */ __Pyx_XDECREF(__pyx_r); __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_compressor); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 385, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_compress); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 385, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = NULL; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_3))) { __pyx_t_2 = PyMethod_GET_SELF(__pyx_t_3); if (likely(__pyx_t_2)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_3); __Pyx_INCREF(__pyx_t_2); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_3, function); } } __pyx_t_1 = (__pyx_t_2) ? __Pyx_PyObject_Call2Args(__pyx_t_3, __pyx_t_2, __pyx_v_data) : __Pyx_PyObject_CallOneArg(__pyx_t_3, __pyx_v_data); __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 385, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "borg/compress.pyx":384 * self.compressor = get_compressor(name, **self.params) * * def compress(self, data): # <<<<<<<<<<<<<< * return self.compressor.compress(data) * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("borg.compress.Compressor.compress", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "borg/compress.pyx":387 * return self.compressor.compress(data) * * def decompress(self, data): # <<<<<<<<<<<<<< * compressor_cls = self.detect(data) * return compressor_cls(**self.params).decompress(data) */ /* Python wrapper */ static PyObject *__pyx_pw_4borg_8compress_10Compressor_5decompress(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static PyMethodDef __pyx_mdef_4borg_8compress_10Compressor_5decompress = {"decompress", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_4borg_8compress_10Compressor_5decompress, METH_VARARGS|METH_KEYWORDS, 0}; static PyObject *__pyx_pw_4borg_8compress_10Compressor_5decompress(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyObject *__pyx_v_self = 0; PyObject *__pyx_v_data = 0; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("decompress (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_self,&__pyx_n_s_data,0}; PyObject* values[2] = {0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 2: 
values[1] = PyTuple_GET_ITEM(__pyx_args, 1); CYTHON_FALLTHROUGH; case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_self)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_data)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("decompress", 1, 2, 2, 1); __PYX_ERR(0, 387, __pyx_L3_error) } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "decompress") < 0)) __PYX_ERR(0, 387, __pyx_L3_error) } } else if (PyTuple_GET_SIZE(__pyx_args) != 2) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); } __pyx_v_self = values[0]; __pyx_v_data = values[1]; } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("decompress", 1, 2, 2, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 387, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("borg.compress.Compressor.decompress", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_4borg_8compress_10Compressor_4decompress(__pyx_self, __pyx_v_self, __pyx_v_data); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4borg_8compress_10Compressor_4decompress(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_self, PyObject *__pyx_v_data) { PyObject *__pyx_v_compressor_cls = NULL; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("decompress", 0); /* "borg/compress.pyx":388 * * def decompress(self, data): * compressor_cls = self.detect(data) # <<<<<<<<<<<<<< * return compressor_cls(**self.params).decompress(data) * */ __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_detect); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 388, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = NULL; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_2))) { __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_2); if (likely(__pyx_t_3)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2); __Pyx_INCREF(__pyx_t_3); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_2, function); } } __pyx_t_1 = (__pyx_t_3) ? 
__Pyx_PyObject_Call2Args(__pyx_t_2, __pyx_t_3, __pyx_v_data) : __Pyx_PyObject_CallOneArg(__pyx_t_2, __pyx_v_data); __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 388, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_v_compressor_cls = __pyx_t_1; __pyx_t_1 = 0; /* "borg/compress.pyx":389 * def decompress(self, data): * compressor_cls = self.detect(data) * return compressor_cls(**self.params).decompress(data) # <<<<<<<<<<<<<< * * @staticmethod */ __Pyx_XDECREF(__pyx_r); __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_params); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 389, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); if (unlikely(__pyx_t_3 == Py_None)) { PyErr_SetString(PyExc_TypeError, "argument after ** must be a mapping, not NoneType"); __PYX_ERR(0, 389, __pyx_L1_error) } if (likely(PyDict_CheckExact(__pyx_t_3))) { __pyx_t_2 = PyDict_Copy(__pyx_t_3); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 389, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; } else { __pyx_t_2 = PyObject_CallFunctionObjArgs((PyObject*)&PyDict_Type, __pyx_t_3, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 389, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; } __pyx_t_3 = __Pyx_PyObject_Call(__pyx_v_compressor_cls, __pyx_empty_tuple, __pyx_t_2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 389, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s_decompress); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 389, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = NULL; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_2))) { __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_2); if (likely(__pyx_t_3)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2); __Pyx_INCREF(__pyx_t_3); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_2, function); } } __pyx_t_1 = (__pyx_t_3) ? 
__Pyx_PyObject_Call2Args(__pyx_t_2, __pyx_t_3, __pyx_v_data) : __Pyx_PyObject_CallOneArg(__pyx_t_2, __pyx_v_data); __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 389, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "borg/compress.pyx":387 * return self.compressor.compress(data) * * def decompress(self, data): # <<<<<<<<<<<<<< * compressor_cls = self.detect(data) * return compressor_cls(**self.params).decompress(data) */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("borg.compress.Compressor.decompress", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_compressor_cls); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "borg/compress.pyx":392 * * @staticmethod * def detect(data): # <<<<<<<<<<<<<< * hdr = bytes(data[:2]) # detect() does not work with memoryview * for cls in COMPRESSOR_LIST: */ /* Python wrapper */ static PyObject *__pyx_pw_4borg_8compress_10Compressor_7detect(PyObject *__pyx_self, PyObject *__pyx_v_data); /*proto*/ static PyMethodDef __pyx_mdef_4borg_8compress_10Compressor_7detect = {"detect", (PyCFunction)__pyx_pw_4borg_8compress_10Compressor_7detect, METH_O, 0}; static PyObject *__pyx_pw_4borg_8compress_10Compressor_7detect(PyObject *__pyx_self, PyObject *__pyx_v_data) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("detect (wrapper)", 0); __pyx_r = __pyx_pf_4borg_8compress_10Compressor_6detect(__pyx_self, ((PyObject *)__pyx_v_data)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4borg_8compress_10Compressor_6detect(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_data) { PyObject *__pyx_v_hdr = NULL; PyObject *__pyx_v_cls = NULL; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; Py_ssize_t __pyx_t_3; PyObject *(*__pyx_t_4)(PyObject *); PyObject *__pyx_t_5 = NULL; PyObject *__pyx_t_6 = NULL; int __pyx_t_7; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("detect", 0); /* "borg/compress.pyx":393 * @staticmethod * def detect(data): * hdr = bytes(data[:2]) # detect() does not work with memoryview # <<<<<<<<<<<<<< * for cls in COMPRESSOR_LIST: * if cls.detect(hdr): */ __pyx_t_1 = __Pyx_PyObject_GetSlice(__pyx_v_data, 0, 2, NULL, NULL, &__pyx_slice__4, 0, 1, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 393, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __Pyx_PyObject_CallOneArg(((PyObject *)(&PyBytes_Type)), __pyx_t_1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 393, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_v_hdr = ((PyObject*)__pyx_t_2); __pyx_t_2 = 0; /* "borg/compress.pyx":394 * def detect(data): * hdr = bytes(data[:2]) # detect() does not work with memoryview * for cls in COMPRESSOR_LIST: # <<<<<<<<<<<<<< * if cls.detect(hdr): * return cls */ __Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_n_s_COMPRESSOR_LIST); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 394, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); if (likely(PyList_CheckExact(__pyx_t_2)) || PyTuple_CheckExact(__pyx_t_2)) { __pyx_t_1 = __pyx_t_2; __Pyx_INCREF(__pyx_t_1); __pyx_t_3 = 0; __pyx_t_4 = NULL; } else { __pyx_t_3 = -1; __pyx_t_1 = PyObject_GetIter(__pyx_t_2); if (unlikely(!__pyx_t_1)) 
__PYX_ERR(0, 394, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_4 = Py_TYPE(__pyx_t_1)->tp_iternext; if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 394, __pyx_L1_error) } __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; for (;;) { if (likely(!__pyx_t_4)) { if (likely(PyList_CheckExact(__pyx_t_1))) { if (__pyx_t_3 >= PyList_GET_SIZE(__pyx_t_1)) break; #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_2 = PyList_GET_ITEM(__pyx_t_1, __pyx_t_3); __Pyx_INCREF(__pyx_t_2); __pyx_t_3++; if (unlikely(0 < 0)) __PYX_ERR(0, 394, __pyx_L1_error) #else __pyx_t_2 = PySequence_ITEM(__pyx_t_1, __pyx_t_3); __pyx_t_3++; if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 394, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); #endif } else { if (__pyx_t_3 >= PyTuple_GET_SIZE(__pyx_t_1)) break; #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_2 = PyTuple_GET_ITEM(__pyx_t_1, __pyx_t_3); __Pyx_INCREF(__pyx_t_2); __pyx_t_3++; if (unlikely(0 < 0)) __PYX_ERR(0, 394, __pyx_L1_error) #else __pyx_t_2 = PySequence_ITEM(__pyx_t_1, __pyx_t_3); __pyx_t_3++; if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 394, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); #endif } } else { __pyx_t_2 = __pyx_t_4(__pyx_t_1); if (unlikely(!__pyx_t_2)) { PyObject* exc_type = PyErr_Occurred(); if (exc_type) { if (likely(__Pyx_PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear(); else __PYX_ERR(0, 394, __pyx_L1_error) } break; } __Pyx_GOTREF(__pyx_t_2); } __Pyx_XDECREF_SET(__pyx_v_cls, __pyx_t_2); __pyx_t_2 = 0; /* "borg/compress.pyx":395 * hdr = bytes(data[:2]) # detect() does not work with memoryview * for cls in COMPRESSOR_LIST: * if cls.detect(hdr): # <<<<<<<<<<<<<< * return cls * else: */ __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_cls, __pyx_n_s_detect); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 395, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_6 = NULL; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_5))) { __pyx_t_6 = PyMethod_GET_SELF(__pyx_t_5); if (likely(__pyx_t_6)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_5); __Pyx_INCREF(__pyx_t_6); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_5, function); } } __pyx_t_2 = (__pyx_t_6) ? 
__Pyx_PyObject_Call2Args(__pyx_t_5, __pyx_t_6, __pyx_v_hdr) : __Pyx_PyObject_CallOneArg(__pyx_t_5, __pyx_v_hdr); __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 395, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_2); if (unlikely(__pyx_t_7 < 0)) __PYX_ERR(0, 395, __pyx_L1_error) __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; if (__pyx_t_7) { /* "borg/compress.pyx":396 * for cls in COMPRESSOR_LIST: * if cls.detect(hdr): * return cls # <<<<<<<<<<<<<< * else: * raise ValueError('No decompressor for this data found: %r.', data[:2]) */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(__pyx_v_cls); __pyx_r = __pyx_v_cls; __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; goto __pyx_L0; /* "borg/compress.pyx":395 * hdr = bytes(data[:2]) # detect() does not work with memoryview * for cls in COMPRESSOR_LIST: * if cls.detect(hdr): # <<<<<<<<<<<<<< * return cls * else: */ } /* "borg/compress.pyx":394 * def detect(data): * hdr = bytes(data[:2]) # detect() does not work with memoryview * for cls in COMPRESSOR_LIST: # <<<<<<<<<<<<<< * if cls.detect(hdr): * return cls */ } /*else*/ { /* "borg/compress.pyx":398 * return cls * else: * raise ValueError('No decompressor for this data found: %r.', data[:2]) # <<<<<<<<<<<<<< * * */ __pyx_t_2 = __Pyx_PyObject_GetSlice(__pyx_v_data, 0, 2, NULL, NULL, &__pyx_slice__4, 0, 1, 1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 398, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_5 = PyTuple_New(2); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 398, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_INCREF(__pyx_kp_u_No_decompressor_for_this_data_fo); __Pyx_GIVEREF(__pyx_kp_u_No_decompressor_for_this_data_fo); PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_kp_u_No_decompressor_for_this_data_fo); __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_t_5, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 398, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_Raise(__pyx_t_2, 0, 0, 0); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __PYX_ERR(0, 398, __pyx_L1_error) } /* "borg/compress.pyx":394 * def detect(data): * hdr = bytes(data[:2]) # detect() does not work with memoryview * for cls in COMPRESSOR_LIST: # <<<<<<<<<<<<<< * if cls.detect(hdr): * return cls */ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "borg/compress.pyx":392 * * @staticmethod * def detect(data): # <<<<<<<<<<<<<< * hdr = bytes(data[:2]) # detect() does not work with memoryview * for cls in COMPRESSOR_LIST: */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_5); __Pyx_XDECREF(__pyx_t_6); __Pyx_AddTraceback("borg.compress.Compressor.detect", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_hdr); __Pyx_XDECREF(__pyx_v_cls); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "borg/compress.pyx":402 * * class CompressionSpec: * def __init__(self, s): # <<<<<<<<<<<<<< * values = s.split(',') * count = len(values) */ /* Python wrapper */ static PyObject *__pyx_pw_4borg_8compress_15CompressionSpec_1__init__(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static PyMethodDef __pyx_mdef_4borg_8compress_15CompressionSpec_1__init__ = {"__init__", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_4borg_8compress_15CompressionSpec_1__init__, METH_VARARGS|METH_KEYWORDS, 0}; static 
PyObject *__pyx_pw_4borg_8compress_15CompressionSpec_1__init__(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyObject *__pyx_v_self = 0; PyObject *__pyx_v_s = 0; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__init__ (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_self,&__pyx_n_s_s,0}; PyObject* values[2] = {0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); CYTHON_FALLTHROUGH; case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_self)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_s)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("__init__", 1, 2, 2, 1); __PYX_ERR(0, 402, __pyx_L3_error) } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__init__") < 0)) __PYX_ERR(0, 402, __pyx_L3_error) } } else if (PyTuple_GET_SIZE(__pyx_args) != 2) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); } __pyx_v_self = values[0]; __pyx_v_s = values[1]; } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("__init__", 1, 2, 2, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 402, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("borg.compress.CompressionSpec.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_4borg_8compress_15CompressionSpec___init__(__pyx_self, __pyx_v_self, __pyx_v_s); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4borg_8compress_15CompressionSpec___init__(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_self, PyObject *__pyx_v_s) { PyObject *__pyx_v_values = NULL; Py_ssize_t __pyx_v_count; PyObject *__pyx_v_level = NULL; PyObject *__pyx_v_compression = NULL; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; Py_ssize_t __pyx_t_4; int __pyx_t_5; int __pyx_t_6; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__init__", 0); /* "borg/compress.pyx":403 * class CompressionSpec: * def __init__(self, s): * values = s.split(',') # <<<<<<<<<<<<<< * count = len(values) * if count < 1: */ __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_v_s, __pyx_n_s_split); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 403, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = NULL; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_2))) { __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_2); if (likely(__pyx_t_3)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2); __Pyx_INCREF(__pyx_t_3); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_2, function); } } __pyx_t_1 = (__pyx_t_3) ? 
__Pyx_PyObject_Call2Args(__pyx_t_2, __pyx_t_3, __pyx_kp_u__5) : __Pyx_PyObject_CallOneArg(__pyx_t_2, __pyx_kp_u__5); __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 403, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_v_values = __pyx_t_1; __pyx_t_1 = 0; /* "borg/compress.pyx":404 * def __init__(self, s): * values = s.split(',') * count = len(values) # <<<<<<<<<<<<<< * if count < 1: * raise ValueError */ __pyx_t_4 = PyObject_Length(__pyx_v_values); if (unlikely(__pyx_t_4 == ((Py_ssize_t)-1))) __PYX_ERR(0, 404, __pyx_L1_error) __pyx_v_count = __pyx_t_4; /* "borg/compress.pyx":405 * values = s.split(',') * count = len(values) * if count < 1: # <<<<<<<<<<<<<< * raise ValueError * # --compression algo[,level] */ __pyx_t_5 = ((__pyx_v_count < 1) != 0); if (unlikely(__pyx_t_5)) { /* "borg/compress.pyx":406 * count = len(values) * if count < 1: * raise ValueError # <<<<<<<<<<<<<< * # --compression algo[,level] * self.name = values[0] */ __Pyx_Raise(__pyx_builtin_ValueError, 0, 0, 0); __PYX_ERR(0, 406, __pyx_L1_error) /* "borg/compress.pyx":405 * values = s.split(',') * count = len(values) * if count < 1: # <<<<<<<<<<<<<< * raise ValueError * # --compression algo[,level] */ } /* "borg/compress.pyx":408 * raise ValueError * # --compression algo[,level] * self.name = values[0] # <<<<<<<<<<<<<< * if self.name in ('none', 'lz4', ): * return */ __pyx_t_1 = __Pyx_GetItemInt(__pyx_v_values, 0, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 408, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); if (__Pyx_PyObject_SetAttrStr(__pyx_v_self, __pyx_n_s_name, __pyx_t_1) < 0) __PYX_ERR(0, 408, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "borg/compress.pyx":409 * # --compression algo[,level] * self.name = values[0] * if self.name in ('none', 'lz4', ): # <<<<<<<<<<<<<< * return * elif self.name in ('zlib', 'lzma', ): */ __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_name); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 409, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_6 = (__Pyx_PyUnicode_Equals(__pyx_t_1, __pyx_n_u_none, Py_EQ)); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(0, 409, __pyx_L1_error) if (!__pyx_t_6) { } else { __pyx_t_5 = __pyx_t_6; goto __pyx_L5_bool_binop_done; } __pyx_t_6 = (__Pyx_PyUnicode_Equals(__pyx_t_1, __pyx_n_u_lz4, Py_EQ)); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(0, 409, __pyx_L1_error) __pyx_t_5 = __pyx_t_6; __pyx_L5_bool_binop_done:; __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_6 = (__pyx_t_5 != 0); if (__pyx_t_6) { /* "borg/compress.pyx":410 * self.name = values[0] * if self.name in ('none', 'lz4', ): * return # <<<<<<<<<<<<<< * elif self.name in ('zlib', 'lzma', ): * if count < 2: */ __Pyx_XDECREF(__pyx_r); __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; /* "borg/compress.pyx":409 * # --compression algo[,level] * self.name = values[0] * if self.name in ('none', 'lz4', ): # <<<<<<<<<<<<<< * return * elif self.name in ('zlib', 'lzma', ): */ } /* "borg/compress.pyx":411 * if self.name in ('none', 'lz4', ): * return * elif self.name in ('zlib', 'lzma', ): # <<<<<<<<<<<<<< * if count < 2: * level = 6 # default compression level in py stdlib */ __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_name); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 411, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_5 = (__Pyx_PyUnicode_Equals(__pyx_t_1, __pyx_n_u_zlib, Py_EQ)); if (unlikely(__pyx_t_5 < 0)) __PYX_ERR(0, 411, __pyx_L1_error) if (!__pyx_t_5) { } else { __pyx_t_6 = 
__pyx_t_5; goto __pyx_L7_bool_binop_done; } __pyx_t_5 = (__Pyx_PyUnicode_Equals(__pyx_t_1, __pyx_n_u_lzma, Py_EQ)); if (unlikely(__pyx_t_5 < 0)) __PYX_ERR(0, 411, __pyx_L1_error) __pyx_t_6 = __pyx_t_5; __pyx_L7_bool_binop_done:; __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_5 = (__pyx_t_6 != 0); if (__pyx_t_5) { /* "borg/compress.pyx":412 * return * elif self.name in ('zlib', 'lzma', ): * if count < 2: # <<<<<<<<<<<<<< * level = 6 # default compression level in py stdlib * elif count == 2: */ __pyx_t_5 = ((__pyx_v_count < 2) != 0); if (__pyx_t_5) { /* "borg/compress.pyx":413 * elif self.name in ('zlib', 'lzma', ): * if count < 2: * level = 6 # default compression level in py stdlib # <<<<<<<<<<<<<< * elif count == 2: * level = int(values[1]) */ __Pyx_INCREF(__pyx_int_6); __pyx_v_level = __pyx_int_6; /* "borg/compress.pyx":412 * return * elif self.name in ('zlib', 'lzma', ): * if count < 2: # <<<<<<<<<<<<<< * level = 6 # default compression level in py stdlib * elif count == 2: */ goto __pyx_L9; } /* "borg/compress.pyx":414 * if count < 2: * level = 6 # default compression level in py stdlib * elif count == 2: # <<<<<<<<<<<<<< * level = int(values[1]) * if not 0 <= level <= 9: */ __pyx_t_5 = ((__pyx_v_count == 2) != 0); if (likely(__pyx_t_5)) { /* "borg/compress.pyx":415 * level = 6 # default compression level in py stdlib * elif count == 2: * level = int(values[1]) # <<<<<<<<<<<<<< * if not 0 <= level <= 9: * raise ValueError */ __pyx_t_1 = __Pyx_GetItemInt(__pyx_v_values, 1, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 415, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __Pyx_PyNumber_Int(__pyx_t_1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 415, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_v_level = __pyx_t_2; __pyx_t_2 = 0; /* "borg/compress.pyx":416 * elif count == 2: * level = int(values[1]) * if not 0 <= level <= 9: # <<<<<<<<<<<<<< * raise ValueError * else: */ __pyx_t_2 = PyObject_RichCompare(__pyx_int_0, __pyx_v_level, Py_LE); __Pyx_XGOTREF(__pyx_t_2); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 416, __pyx_L1_error) if (__Pyx_PyObject_IsTrue(__pyx_t_2)) { __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = PyObject_RichCompare(__pyx_v_level, __pyx_int_9, Py_LE); __Pyx_XGOTREF(__pyx_t_2); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 416, __pyx_L1_error) } __pyx_t_5 = __Pyx_PyObject_IsTrue(__pyx_t_2); if (unlikely(__pyx_t_5 < 0)) __PYX_ERR(0, 416, __pyx_L1_error) __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_6 = ((!__pyx_t_5) != 0); if (unlikely(__pyx_t_6)) { /* "borg/compress.pyx":417 * level = int(values[1]) * if not 0 <= level <= 9: * raise ValueError # <<<<<<<<<<<<<< * else: * raise ValueError */ __Pyx_Raise(__pyx_builtin_ValueError, 0, 0, 0); __PYX_ERR(0, 417, __pyx_L1_error) /* "borg/compress.pyx":416 * elif count == 2: * level = int(values[1]) * if not 0 <= level <= 9: # <<<<<<<<<<<<<< * raise ValueError * else: */ } /* "borg/compress.pyx":414 * if count < 2: * level = 6 # default compression level in py stdlib * elif count == 2: # <<<<<<<<<<<<<< * level = int(values[1]) * if not 0 <= level <= 9: */ goto __pyx_L9; } /* "borg/compress.pyx":419 * raise ValueError * else: * raise ValueError # <<<<<<<<<<<<<< * self.level = level * elif self.name in ('zstd', ): */ /*else*/ { __Pyx_Raise(__pyx_builtin_ValueError, 0, 0, 0); __PYX_ERR(0, 419, __pyx_L1_error) } __pyx_L9:; /* "borg/compress.pyx":420 * else: * raise ValueError * self.level = level # <<<<<<<<<<<<<< * elif self.name in ('zstd', ): * if count < 2: */ if 
(__Pyx_PyObject_SetAttrStr(__pyx_v_self, __pyx_n_s_level, __pyx_v_level) < 0) __PYX_ERR(0, 420, __pyx_L1_error) /* "borg/compress.pyx":411 * if self.name in ('none', 'lz4', ): * return * elif self.name in ('zlib', 'lzma', ): # <<<<<<<<<<<<<< * if count < 2: * level = 6 # default compression level in py stdlib */ goto __pyx_L4; } /* "borg/compress.pyx":421 * raise ValueError * self.level = level * elif self.name in ('zstd', ): # <<<<<<<<<<<<<< * if count < 2: * level = 3 # default compression level in zstd */ __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_name); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 421, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_6 = (__Pyx_PyUnicode_Equals(__pyx_t_2, __pyx_n_u_zstd, Py_EQ)); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(0, 421, __pyx_L1_error) __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_5 = (__pyx_t_6 != 0); if (__pyx_t_5) { /* "borg/compress.pyx":422 * self.level = level * elif self.name in ('zstd', ): * if count < 2: # <<<<<<<<<<<<<< * level = 3 # default compression level in zstd * elif count == 2: */ __pyx_t_5 = ((__pyx_v_count < 2) != 0); if (__pyx_t_5) { /* "borg/compress.pyx":423 * elif self.name in ('zstd', ): * if count < 2: * level = 3 # default compression level in zstd # <<<<<<<<<<<<<< * elif count == 2: * level = int(values[1]) */ __Pyx_INCREF(__pyx_int_3); __pyx_v_level = __pyx_int_3; /* "borg/compress.pyx":422 * self.level = level * elif self.name in ('zstd', ): * if count < 2: # <<<<<<<<<<<<<< * level = 3 # default compression level in zstd * elif count == 2: */ goto __pyx_L11; } /* "borg/compress.pyx":424 * if count < 2: * level = 3 # default compression level in zstd * elif count == 2: # <<<<<<<<<<<<<< * level = int(values[1]) * if not 1 <= level <= 22: */ __pyx_t_5 = ((__pyx_v_count == 2) != 0); if (likely(__pyx_t_5)) { /* "borg/compress.pyx":425 * level = 3 # default compression level in zstd * elif count == 2: * level = int(values[1]) # <<<<<<<<<<<<<< * if not 1 <= level <= 22: * raise ValueError */ __pyx_t_2 = __Pyx_GetItemInt(__pyx_v_values, 1, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 425, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_1 = __Pyx_PyNumber_Int(__pyx_t_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 425, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_v_level = __pyx_t_1; __pyx_t_1 = 0; /* "borg/compress.pyx":426 * elif count == 2: * level = int(values[1]) * if not 1 <= level <= 22: # <<<<<<<<<<<<<< * raise ValueError * else: */ __pyx_t_1 = PyObject_RichCompare(__pyx_int_1, __pyx_v_level, Py_LE); __Pyx_XGOTREF(__pyx_t_1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 426, __pyx_L1_error) if (__Pyx_PyObject_IsTrue(__pyx_t_1)) { __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = PyObject_RichCompare(__pyx_v_level, __pyx_int_22, Py_LE); __Pyx_XGOTREF(__pyx_t_1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 426, __pyx_L1_error) } __pyx_t_5 = __Pyx_PyObject_IsTrue(__pyx_t_1); if (unlikely(__pyx_t_5 < 0)) __PYX_ERR(0, 426, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_6 = ((!__pyx_t_5) != 0); if (unlikely(__pyx_t_6)) { /* "borg/compress.pyx":427 * level = int(values[1]) * if not 1 <= level <= 22: * raise ValueError # <<<<<<<<<<<<<< * else: * raise ValueError */ __Pyx_Raise(__pyx_builtin_ValueError, 0, 0, 0); __PYX_ERR(0, 427, __pyx_L1_error) /* "borg/compress.pyx":426 * elif count == 2: * level = int(values[1]) * if not 1 <= level <= 22: # <<<<<<<<<<<<<< * raise ValueError * else: */ } /* "borg/compress.pyx":424 * if count < 2: * level = 3 # 
default compression level in zstd * elif count == 2: # <<<<<<<<<<<<<< * level = int(values[1]) * if not 1 <= level <= 22: */ goto __pyx_L11; } /* "borg/compress.pyx":429 * raise ValueError * else: * raise ValueError # <<<<<<<<<<<<<< * self.level = level * elif self.name == 'auto': */ /*else*/ { __Pyx_Raise(__pyx_builtin_ValueError, 0, 0, 0); __PYX_ERR(0, 429, __pyx_L1_error) } __pyx_L11:; /* "borg/compress.pyx":430 * else: * raise ValueError * self.level = level # <<<<<<<<<<<<<< * elif self.name == 'auto': * if 2 <= count <= 3: */ if (__Pyx_PyObject_SetAttrStr(__pyx_v_self, __pyx_n_s_level, __pyx_v_level) < 0) __PYX_ERR(0, 430, __pyx_L1_error) /* "borg/compress.pyx":421 * raise ValueError * self.level = level * elif self.name in ('zstd', ): # <<<<<<<<<<<<<< * if count < 2: * level = 3 # default compression level in zstd */ goto __pyx_L4; } /* "borg/compress.pyx":431 * raise ValueError * self.level = level * elif self.name == 'auto': # <<<<<<<<<<<<<< * if 2 <= count <= 3: * compression = ','.join(values[1:]) */ __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_name); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 431, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_6 = (__Pyx_PyUnicode_Equals(__pyx_t_1, __pyx_n_u_auto, Py_EQ)); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(0, 431, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; if (likely(__pyx_t_6)) { /* "borg/compress.pyx":432 * self.level = level * elif self.name == 'auto': * if 2 <= count <= 3: # <<<<<<<<<<<<<< * compression = ','.join(values[1:]) * else: */ __pyx_t_6 = (2 <= __pyx_v_count); if (__pyx_t_6) { __pyx_t_6 = (__pyx_v_count <= 3); } __pyx_t_5 = (__pyx_t_6 != 0); if (likely(__pyx_t_5)) { /* "borg/compress.pyx":433 * elif self.name == 'auto': * if 2 <= count <= 3: * compression = ','.join(values[1:]) # <<<<<<<<<<<<<< * else: * raise ValueError */ __pyx_t_1 = __Pyx_PyObject_GetSlice(__pyx_v_values, 1, 0, NULL, NULL, &__pyx_slice__6, 1, 0, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 433, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = PyUnicode_Join(__pyx_kp_u__5, __pyx_t_1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 433, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_v_compression = ((PyObject*)__pyx_t_2); __pyx_t_2 = 0; /* "borg/compress.pyx":432 * self.level = level * elif self.name == 'auto': * if 2 <= count <= 3: # <<<<<<<<<<<<<< * compression = ','.join(values[1:]) * else: */ goto __pyx_L13; } /* "borg/compress.pyx":435 * compression = ','.join(values[1:]) * else: * raise ValueError # <<<<<<<<<<<<<< * self.inner = CompressionSpec(compression) * else: */ /*else*/ { __Pyx_Raise(__pyx_builtin_ValueError, 0, 0, 0); __PYX_ERR(0, 435, __pyx_L1_error) } __pyx_L13:; /* "borg/compress.pyx":436 * else: * raise ValueError * self.inner = CompressionSpec(compression) # <<<<<<<<<<<<<< * else: * raise ValueError */ __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_CompressionSpec); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 436, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_3 = NULL; if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_1))) { __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_1); if (likely(__pyx_t_3)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_1); __Pyx_INCREF(__pyx_t_3); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_1, function); } } __pyx_t_2 = (__pyx_t_3) ? 
__Pyx_PyObject_Call2Args(__pyx_t_1, __pyx_t_3, __pyx_v_compression) : __Pyx_PyObject_CallOneArg(__pyx_t_1, __pyx_v_compression); __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 436, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; if (__Pyx_PyObject_SetAttrStr(__pyx_v_self, __pyx_n_s_inner, __pyx_t_2) < 0) __PYX_ERR(0, 436, __pyx_L1_error) __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "borg/compress.pyx":431 * raise ValueError * self.level = level * elif self.name == 'auto': # <<<<<<<<<<<<<< * if 2 <= count <= 3: * compression = ','.join(values[1:]) */ goto __pyx_L4; } /* "borg/compress.pyx":438 * self.inner = CompressionSpec(compression) * else: * raise ValueError # <<<<<<<<<<<<<< * * @property */ /*else*/ { __Pyx_Raise(__pyx_builtin_ValueError, 0, 0, 0); __PYX_ERR(0, 438, __pyx_L1_error) } __pyx_L4:; /* "borg/compress.pyx":402 * * class CompressionSpec: * def __init__(self, s): # <<<<<<<<<<<<<< * values = s.split(',') * count = len(values) */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("borg.compress.CompressionSpec.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_values); __Pyx_XDECREF(__pyx_v_level); __Pyx_XDECREF(__pyx_v_compression); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "borg/compress.pyx":441 * * @property * def compressor(self): # <<<<<<<<<<<<<< * if self.name in ('none', 'lz4', ): * return get_compressor(self.name) */ /* Python wrapper */ static PyObject *__pyx_pw_4borg_8compress_15CompressionSpec_3compressor(PyObject *__pyx_self, PyObject *__pyx_v_self); /*proto*/ static PyMethodDef __pyx_mdef_4borg_8compress_15CompressionSpec_3compressor = {"compressor", (PyCFunction)__pyx_pw_4borg_8compress_15CompressionSpec_3compressor, METH_O, 0}; static PyObject *__pyx_pw_4borg_8compress_15CompressionSpec_3compressor(PyObject *__pyx_self, PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("compressor (wrapper)", 0); __pyx_r = __pyx_pf_4borg_8compress_15CompressionSpec_2compressor(__pyx_self, ((PyObject *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4borg_8compress_15CompressionSpec_2compressor(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_t_2; int __pyx_t_3; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; PyObject *__pyx_t_6 = NULL; PyObject *__pyx_t_7 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("compressor", 0); /* "borg/compress.pyx":442 * @property * def compressor(self): * if self.name in ('none', 'lz4', ): # <<<<<<<<<<<<<< * return get_compressor(self.name) * elif self.name in ('zlib', 'lzma', 'zstd', ): */ __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_name); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 442, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_3 = (__Pyx_PyUnicode_Equals(__pyx_t_1, __pyx_n_u_none, Py_EQ)); if (unlikely(__pyx_t_3 < 0)) __PYX_ERR(0, 442, __pyx_L1_error) if (!__pyx_t_3) { } else { __pyx_t_2 = __pyx_t_3; goto __pyx_L4_bool_binop_done; } __pyx_t_3 = (__Pyx_PyUnicode_Equals(__pyx_t_1, __pyx_n_u_lz4, Py_EQ)); if (unlikely(__pyx_t_3 < 0)) 
__PYX_ERR(0, 442, __pyx_L1_error) __pyx_t_2 = __pyx_t_3; __pyx_L4_bool_binop_done:; __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_3 = (__pyx_t_2 != 0); if (__pyx_t_3) { /* "borg/compress.pyx":443 * def compressor(self): * if self.name in ('none', 'lz4', ): * return get_compressor(self.name) # <<<<<<<<<<<<<< * elif self.name in ('zlib', 'lzma', 'zstd', ): * return get_compressor(self.name, level=self.level) */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_n_s_get_compressor); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 443, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_name); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 443, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_6 = NULL; if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_4))) { __pyx_t_6 = PyMethod_GET_SELF(__pyx_t_4); if (likely(__pyx_t_6)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_4); __Pyx_INCREF(__pyx_t_6); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_4, function); } } __pyx_t_1 = (__pyx_t_6) ? __Pyx_PyObject_Call2Args(__pyx_t_4, __pyx_t_6, __pyx_t_5) : __Pyx_PyObject_CallOneArg(__pyx_t_4, __pyx_t_5); __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 443, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "borg/compress.pyx":442 * @property * def compressor(self): * if self.name in ('none', 'lz4', ): # <<<<<<<<<<<<<< * return get_compressor(self.name) * elif self.name in ('zlib', 'lzma', 'zstd', ): */ } /* "borg/compress.pyx":444 * if self.name in ('none', 'lz4', ): * return get_compressor(self.name) * elif self.name in ('zlib', 'lzma', 'zstd', ): # <<<<<<<<<<<<<< * return get_compressor(self.name, level=self.level) * elif self.name == 'auto': */ __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_name); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 444, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = (__Pyx_PyUnicode_Equals(__pyx_t_1, __pyx_n_u_zlib, Py_EQ)); if (unlikely(__pyx_t_2 < 0)) __PYX_ERR(0, 444, __pyx_L1_error) if (!__pyx_t_2) { } else { __pyx_t_3 = __pyx_t_2; goto __pyx_L6_bool_binop_done; } __pyx_t_2 = (__Pyx_PyUnicode_Equals(__pyx_t_1, __pyx_n_u_lzma, Py_EQ)); if (unlikely(__pyx_t_2 < 0)) __PYX_ERR(0, 444, __pyx_L1_error) if (!__pyx_t_2) { } else { __pyx_t_3 = __pyx_t_2; goto __pyx_L6_bool_binop_done; } __pyx_t_2 = (__Pyx_PyUnicode_Equals(__pyx_t_1, __pyx_n_u_zstd, Py_EQ)); if (unlikely(__pyx_t_2 < 0)) __PYX_ERR(0, 444, __pyx_L1_error) __pyx_t_3 = __pyx_t_2; __pyx_L6_bool_binop_done:; __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_2 = (__pyx_t_3 != 0); if (__pyx_t_2) { /* "borg/compress.pyx":445 * return get_compressor(self.name) * elif self.name in ('zlib', 'lzma', 'zstd', ): * return get_compressor(self.name, level=self.level) # <<<<<<<<<<<<<< * elif self.name == 'auto': * return get_compressor(self.name, compressor=self.inner.compressor) */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_get_compressor); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 445, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_name); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 445, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_5 = PyTuple_New(1); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 445, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_4); __pyx_t_4 = 0; 
__pyx_t_4 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 445, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_level); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 445, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); if (PyDict_SetItem(__pyx_t_4, __pyx_n_s_level, __pyx_t_6) < 0) __PYX_ERR(0, 445, __pyx_L1_error) __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __pyx_t_6 = __Pyx_PyObject_Call(__pyx_t_1, __pyx_t_5, __pyx_t_4); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 445, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_r = __pyx_t_6; __pyx_t_6 = 0; goto __pyx_L0; /* "borg/compress.pyx":444 * if self.name in ('none', 'lz4', ): * return get_compressor(self.name) * elif self.name in ('zlib', 'lzma', 'zstd', ): # <<<<<<<<<<<<<< * return get_compressor(self.name, level=self.level) * elif self.name == 'auto': */ } /* "borg/compress.pyx":446 * elif self.name in ('zlib', 'lzma', 'zstd', ): * return get_compressor(self.name, level=self.level) * elif self.name == 'auto': # <<<<<<<<<<<<<< * return get_compressor(self.name, compressor=self.inner.compressor) */ __pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_name); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 446, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __pyx_t_2 = (__Pyx_PyUnicode_Equals(__pyx_t_6, __pyx_n_u_auto, Py_EQ)); if (unlikely(__pyx_t_2 < 0)) __PYX_ERR(0, 446, __pyx_L1_error) __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; if (__pyx_t_2) { /* "borg/compress.pyx":447 * return get_compressor(self.name, level=self.level) * elif self.name == 'auto': * return get_compressor(self.name, compressor=self.inner.compressor) # <<<<<<<<<<<<<< */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_n_s_get_compressor); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 447, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_name); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 447, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_5 = PyTuple_New(1); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 447, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 447, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_inner); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 447, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_compressor); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 447, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; if (PyDict_SetItem(__pyx_t_4, __pyx_n_s_compressor, __pyx_t_7) < 0) __PYX_ERR(0, 447, __pyx_L1_error) __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; __pyx_t_7 = __Pyx_PyObject_Call(__pyx_t_6, __pyx_t_5, __pyx_t_4); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 447, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_r = __pyx_t_7; __pyx_t_7 = 0; goto __pyx_L0; /* "borg/compress.pyx":446 * elif self.name in ('zlib', 'lzma', 'zstd', ): * return get_compressor(self.name, level=self.level) * elif self.name == 'auto': # <<<<<<<<<<<<<< * return get_compressor(self.name, compressor=self.inner.compressor) */ } /* "borg/compress.pyx":441 * * @property * def 
compressor(self): # <<<<<<<<<<<<<< * if self.name in ('none', 'lz4', ): * return get_compressor(self.name) */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_XDECREF(__pyx_t_6); __Pyx_XDECREF(__pyx_t_7); __Pyx_AddTraceback("borg.compress.CompressionSpec.compressor", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "(tree fragment)":1 * def __pyx_unpickle_CompressorBase(__pyx_type, long __pyx_checksum, __pyx_state): # <<<<<<<<<<<<<< * cdef object __pyx_PickleError * cdef object __pyx_result */ /* Python wrapper */ static PyObject *__pyx_pw_4borg_8compress_3__pyx_unpickle_CompressorBase(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static PyMethodDef __pyx_mdef_4borg_8compress_3__pyx_unpickle_CompressorBase = {"__pyx_unpickle_CompressorBase", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_4borg_8compress_3__pyx_unpickle_CompressorBase, METH_VARARGS|METH_KEYWORDS, 0}; static PyObject *__pyx_pw_4borg_8compress_3__pyx_unpickle_CompressorBase(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyObject *__pyx_v___pyx_type = 0; long __pyx_v___pyx_checksum; PyObject *__pyx_v___pyx_state = 0; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__pyx_unpickle_CompressorBase (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_pyx_type,&__pyx_n_s_pyx_checksum,&__pyx_n_s_pyx_state,0}; PyObject* values[3] = {0,0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); CYTHON_FALLTHROUGH; case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); CYTHON_FALLTHROUGH; case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_pyx_type)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_pyx_checksum)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("__pyx_unpickle_CompressorBase", 1, 3, 3, 1); __PYX_ERR(1, 1, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 2: if (likely((values[2] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_pyx_state)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("__pyx_unpickle_CompressorBase", 1, 3, 3, 2); __PYX_ERR(1, 1, __pyx_L3_error) } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__pyx_unpickle_CompressorBase") < 0)) __PYX_ERR(1, 1, __pyx_L3_error) } } else if (PyTuple_GET_SIZE(__pyx_args) != 3) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[2] = PyTuple_GET_ITEM(__pyx_args, 2); } __pyx_v___pyx_type = values[0]; __pyx_v___pyx_checksum = __Pyx_PyInt_As_long(values[1]); if (unlikely((__pyx_v___pyx_checksum == (long)-1) && PyErr_Occurred())) __PYX_ERR(1, 1, __pyx_L3_error) __pyx_v___pyx_state = values[2]; } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; 
__Pyx_RaiseArgtupleInvalid("__pyx_unpickle_CompressorBase", 1, 3, 3, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(1, 1, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("borg.compress.__pyx_unpickle_CompressorBase", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_4borg_8compress_2__pyx_unpickle_CompressorBase(__pyx_self, __pyx_v___pyx_type, __pyx_v___pyx_checksum, __pyx_v___pyx_state); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4borg_8compress_2__pyx_unpickle_CompressorBase(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v___pyx_type, long __pyx_v___pyx_checksum, PyObject *__pyx_v___pyx_state) { PyObject *__pyx_v___pyx_PickleError = 0; PyObject *__pyx_v___pyx_result = 0; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; int __pyx_t_6; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__pyx_unpickle_CompressorBase", 0); /* "(tree fragment)":4 * cdef object __pyx_PickleError * cdef object __pyx_result * if __pyx_checksum != 0xd41d8cd: # <<<<<<<<<<<<<< * from pickle import PickleError as __pyx_PickleError * raise __pyx_PickleError("Incompatible checksums (%s vs 0xd41d8cd = ())" % __pyx_checksum) */ __pyx_t_1 = ((__pyx_v___pyx_checksum != 0xd41d8cd) != 0); if (__pyx_t_1) { /* "(tree fragment)":5 * cdef object __pyx_result * if __pyx_checksum != 0xd41d8cd: * from pickle import PickleError as __pyx_PickleError # <<<<<<<<<<<<<< * raise __pyx_PickleError("Incompatible checksums (%s vs 0xd41d8cd = ())" % __pyx_checksum) * __pyx_result = CompressorBase.__new__(__pyx_type) */ __pyx_t_2 = PyList_New(1); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 5, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_INCREF(__pyx_n_s_PickleError); __Pyx_GIVEREF(__pyx_n_s_PickleError); PyList_SET_ITEM(__pyx_t_2, 0, __pyx_n_s_PickleError); __pyx_t_3 = __Pyx_Import(__pyx_n_s_pickle, __pyx_t_2, 0); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 5, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = __Pyx_ImportFrom(__pyx_t_3, __pyx_n_s_PickleError); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 5, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_INCREF(__pyx_t_2); __pyx_v___pyx_PickleError = __pyx_t_2; __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; /* "(tree fragment)":6 * if __pyx_checksum != 0xd41d8cd: * from pickle import PickleError as __pyx_PickleError * raise __pyx_PickleError("Incompatible checksums (%s vs 0xd41d8cd = ())" % __pyx_checksum) # <<<<<<<<<<<<<< * __pyx_result = CompressorBase.__new__(__pyx_type) * if __pyx_state is not None: */ __pyx_t_2 = __Pyx_PyInt_From_long(__pyx_v___pyx_checksum); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 6, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_4 = __Pyx_PyString_Format(__pyx_kp_s_Incompatible_checksums_s_vs_0xd4, __pyx_t_2); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 6, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_INCREF(__pyx_v___pyx_PickleError); __pyx_t_2 = __pyx_v___pyx_PickleError; __pyx_t_5 = NULL; if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_2))) { __pyx_t_5 = PyMethod_GET_SELF(__pyx_t_2); if (likely(__pyx_t_5)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2); __Pyx_INCREF(__pyx_t_5); __Pyx_INCREF(function); 
__Pyx_DECREF_SET(__pyx_t_2, function); } } __pyx_t_3 = (__pyx_t_5) ? __Pyx_PyObject_Call2Args(__pyx_t_2, __pyx_t_5, __pyx_t_4) : __Pyx_PyObject_CallOneArg(__pyx_t_2, __pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 6, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(1, 6, __pyx_L1_error) /* "(tree fragment)":4 * cdef object __pyx_PickleError * cdef object __pyx_result * if __pyx_checksum != 0xd41d8cd: # <<<<<<<<<<<<<< * from pickle import PickleError as __pyx_PickleError * raise __pyx_PickleError("Incompatible checksums (%s vs 0xd41d8cd = ())" % __pyx_checksum) */ } /* "(tree fragment)":7 * from pickle import PickleError as __pyx_PickleError * raise __pyx_PickleError("Incompatible checksums (%s vs 0xd41d8cd = ())" % __pyx_checksum) * __pyx_result = CompressorBase.__new__(__pyx_type) # <<<<<<<<<<<<<< * if __pyx_state is not None: * __pyx_unpickle_CompressorBase__set_state( __pyx_result, __pyx_state) */ __pyx_t_2 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_ptype_4borg_8compress_CompressorBase), __pyx_n_s_new); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 7, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_4 = NULL; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_2))) { __pyx_t_4 = PyMethod_GET_SELF(__pyx_t_2); if (likely(__pyx_t_4)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2); __Pyx_INCREF(__pyx_t_4); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_2, function); } } __pyx_t_3 = (__pyx_t_4) ? __Pyx_PyObject_Call2Args(__pyx_t_2, __pyx_t_4, __pyx_v___pyx_type) : __Pyx_PyObject_CallOneArg(__pyx_t_2, __pyx_v___pyx_type); __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 7, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_v___pyx_result = __pyx_t_3; __pyx_t_3 = 0; /* "(tree fragment)":8 * raise __pyx_PickleError("Incompatible checksums (%s vs 0xd41d8cd = ())" % __pyx_checksum) * __pyx_result = CompressorBase.__new__(__pyx_type) * if __pyx_state is not None: # <<<<<<<<<<<<<< * __pyx_unpickle_CompressorBase__set_state( __pyx_result, __pyx_state) * return __pyx_result */ __pyx_t_1 = (__pyx_v___pyx_state != Py_None); __pyx_t_6 = (__pyx_t_1 != 0); if (__pyx_t_6) { /* "(tree fragment)":9 * __pyx_result = CompressorBase.__new__(__pyx_type) * if __pyx_state is not None: * __pyx_unpickle_CompressorBase__set_state( __pyx_result, __pyx_state) # <<<<<<<<<<<<<< * return __pyx_result * cdef __pyx_unpickle_CompressorBase__set_state(CompressorBase __pyx_result, tuple __pyx_state): */ if (!(likely(PyTuple_CheckExact(__pyx_v___pyx_state))||((__pyx_v___pyx_state) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "tuple", Py_TYPE(__pyx_v___pyx_state)->tp_name), 0))) __PYX_ERR(1, 9, __pyx_L1_error) __pyx_t_3 = __pyx_f_4borg_8compress___pyx_unpickle_CompressorBase__set_state(((struct __pyx_obj_4borg_8compress_CompressorBase *)__pyx_v___pyx_result), ((PyObject*)__pyx_v___pyx_state)); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 9, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; /* "(tree fragment)":8 * raise __pyx_PickleError("Incompatible checksums (%s vs 0xd41d8cd = ())" % __pyx_checksum) * __pyx_result = CompressorBase.__new__(__pyx_type) * if __pyx_state is not None: # <<<<<<<<<<<<<< * __pyx_unpickle_CompressorBase__set_state( __pyx_result, __pyx_state) * return __pyx_result */ } /* 
"(tree fragment)":10 * if __pyx_state is not None: * __pyx_unpickle_CompressorBase__set_state( __pyx_result, __pyx_state) * return __pyx_result # <<<<<<<<<<<<<< * cdef __pyx_unpickle_CompressorBase__set_state(CompressorBase __pyx_result, tuple __pyx_state): * if len(__pyx_state) > 0 and hasattr(__pyx_result, '__dict__'): */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(__pyx_v___pyx_result); __pyx_r = __pyx_v___pyx_result; goto __pyx_L0; /* "(tree fragment)":1 * def __pyx_unpickle_CompressorBase(__pyx_type, long __pyx_checksum, __pyx_state): # <<<<<<<<<<<<<< * cdef object __pyx_PickleError * cdef object __pyx_result */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("borg.compress.__pyx_unpickle_CompressorBase", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v___pyx_PickleError); __Pyx_XDECREF(__pyx_v___pyx_result); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "(tree fragment)":11 * __pyx_unpickle_CompressorBase__set_state( __pyx_result, __pyx_state) * return __pyx_result * cdef __pyx_unpickle_CompressorBase__set_state(CompressorBase __pyx_result, tuple __pyx_state): # <<<<<<<<<<<<<< * if len(__pyx_state) > 0 and hasattr(__pyx_result, '__dict__'): * __pyx_result.__dict__.update(__pyx_state[0]) */ static PyObject *__pyx_f_4borg_8compress___pyx_unpickle_CompressorBase__set_state(struct __pyx_obj_4borg_8compress_CompressorBase *__pyx_v___pyx_result, PyObject *__pyx_v___pyx_state) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; Py_ssize_t __pyx_t_2; int __pyx_t_3; int __pyx_t_4; PyObject *__pyx_t_5 = NULL; PyObject *__pyx_t_6 = NULL; PyObject *__pyx_t_7 = NULL; PyObject *__pyx_t_8 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__pyx_unpickle_CompressorBase__set_state", 0); /* "(tree fragment)":12 * return __pyx_result * cdef __pyx_unpickle_CompressorBase__set_state(CompressorBase __pyx_result, tuple __pyx_state): * if len(__pyx_state) > 0 and hasattr(__pyx_result, '__dict__'): # <<<<<<<<<<<<<< * __pyx_result.__dict__.update(__pyx_state[0]) */ if (unlikely(__pyx_v___pyx_state == Py_None)) { PyErr_SetString(PyExc_TypeError, "object of type 'NoneType' has no len()"); __PYX_ERR(1, 12, __pyx_L1_error) } __pyx_t_2 = PyTuple_GET_SIZE(__pyx_v___pyx_state); if (unlikely(__pyx_t_2 == ((Py_ssize_t)-1))) __PYX_ERR(1, 12, __pyx_L1_error) __pyx_t_3 = ((__pyx_t_2 > 0) != 0); if (__pyx_t_3) { } else { __pyx_t_1 = __pyx_t_3; goto __pyx_L4_bool_binop_done; } __pyx_t_3 = __Pyx_HasAttr(((PyObject *)__pyx_v___pyx_result), __pyx_n_s_dict); if (unlikely(__pyx_t_3 == ((int)-1))) __PYX_ERR(1, 12, __pyx_L1_error) __pyx_t_4 = (__pyx_t_3 != 0); __pyx_t_1 = __pyx_t_4; __pyx_L4_bool_binop_done:; if (__pyx_t_1) { /* "(tree fragment)":13 * cdef __pyx_unpickle_CompressorBase__set_state(CompressorBase __pyx_result, tuple __pyx_state): * if len(__pyx_state) > 0 and hasattr(__pyx_result, '__dict__'): * __pyx_result.__dict__.update(__pyx_state[0]) # <<<<<<<<<<<<<< */ __pyx_t_6 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v___pyx_result), __pyx_n_s_dict); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 13, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_n_s_update); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 13, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; if 
(unlikely(__pyx_v___pyx_state == Py_None)) { PyErr_SetString(PyExc_TypeError, "'NoneType' object is not subscriptable"); __PYX_ERR(1, 13, __pyx_L1_error) } __pyx_t_6 = __Pyx_GetItemInt_Tuple(__pyx_v___pyx_state, 0, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 13, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __pyx_t_8 = NULL; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_7))) { __pyx_t_8 = PyMethod_GET_SELF(__pyx_t_7); if (likely(__pyx_t_8)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_7); __Pyx_INCREF(__pyx_t_8); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_7, function); } } __pyx_t_5 = (__pyx_t_8) ? __Pyx_PyObject_Call2Args(__pyx_t_7, __pyx_t_8, __pyx_t_6) : __Pyx_PyObject_CallOneArg(__pyx_t_7, __pyx_t_6); __Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0; __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 13, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; /* "(tree fragment)":12 * return __pyx_result * cdef __pyx_unpickle_CompressorBase__set_state(CompressorBase __pyx_result, tuple __pyx_state): * if len(__pyx_state) > 0 and hasattr(__pyx_result, '__dict__'): # <<<<<<<<<<<<<< * __pyx_result.__dict__.update(__pyx_state[0]) */ } /* "(tree fragment)":11 * __pyx_unpickle_CompressorBase__set_state( __pyx_result, __pyx_state) * return __pyx_result * cdef __pyx_unpickle_CompressorBase__set_state(CompressorBase __pyx_result, tuple __pyx_state): # <<<<<<<<<<<<<< * if len(__pyx_state) > 0 and hasattr(__pyx_result, '__dict__'): * __pyx_result.__dict__.update(__pyx_state[0]) */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_5); __Pyx_XDECREF(__pyx_t_6); __Pyx_XDECREF(__pyx_t_7); __Pyx_XDECREF(__pyx_t_8); __Pyx_AddTraceback("borg.compress.__pyx_unpickle_CompressorBase__set_state", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_tp_new_4borg_8compress_CompressorBase(PyTypeObject *t, CYTHON_UNUSED PyObject *a, CYTHON_UNUSED PyObject *k) { PyObject *o; if (likely((t->tp_flags & Py_TPFLAGS_IS_ABSTRACT) == 0)) { o = (*t->tp_alloc)(t, 0); } else { o = (PyObject *) PyBaseObject_Type.tp_new(t, __pyx_empty_tuple, 0); } if (unlikely(!o)) return 0; return o; } static void __pyx_tp_dealloc_4borg_8compress_CompressorBase(PyObject *o) { #if CYTHON_USE_TP_FINALIZE if (unlikely(PyType_HasFeature(Py_TYPE(o), Py_TPFLAGS_HAVE_FINALIZE) && Py_TYPE(o)->tp_finalize) && (!PyType_IS_GC(Py_TYPE(o)) || !_PyGC_FINALIZED(o))) { if (PyObject_CallFinalizerFromDealloc(o)) return; } #endif (*Py_TYPE(o)->tp_free)(o); } static PyMethodDef __pyx_methods_4borg_8compress_CompressorBase[] = { {"detect", (PyCFunction)__pyx_pw_4borg_8compress_14CompressorBase_1detect, METH_O, 0}, {"decide", (PyCFunction)__pyx_pw_4borg_8compress_14CompressorBase_5decide, METH_O, __pyx_doc_4borg_8compress_14CompressorBase_4decide}, {"compress", (PyCFunction)__pyx_pw_4borg_8compress_14CompressorBase_7compress, METH_O, __pyx_doc_4borg_8compress_14CompressorBase_6compress}, {"decompress", (PyCFunction)__pyx_pw_4borg_8compress_14CompressorBase_9decompress, METH_O, __pyx_doc_4borg_8compress_14CompressorBase_8decompress}, {"__reduce_cython__", (PyCFunction)__pyx_pw_4borg_8compress_14CompressorBase_11__reduce_cython__, METH_NOARGS, 0}, {"__setstate_cython__", 
(PyCFunction)__pyx_pw_4borg_8compress_14CompressorBase_13__setstate_cython__, METH_O, 0}, {0, 0, 0, 0} }; static PyTypeObject __pyx_type_4borg_8compress_CompressorBase = { PyVarObject_HEAD_INIT(0, 0) "borg.compress.CompressorBase", /*tp_name*/ sizeof(struct __pyx_obj_4borg_8compress_CompressorBase), /*tp_basicsize*/ 0, /*tp_itemsize*/ __pyx_tp_dealloc_4borg_8compress_CompressorBase, /*tp_dealloc*/ #if PY_VERSION_HEX < 0x030800b4 0, /*tp_print*/ #endif #if PY_VERSION_HEX >= 0x030800b4 0, /*tp_vectorcall_offset*/ #endif 0, /*tp_getattr*/ 0, /*tp_setattr*/ #if PY_MAJOR_VERSION < 3 0, /*tp_compare*/ #endif #if PY_MAJOR_VERSION >= 3 0, /*tp_as_async*/ #endif 0, /*tp_repr*/ 0, /*tp_as_number*/ 0, /*tp_as_sequence*/ 0, /*tp_as_mapping*/ 0, /*tp_hash*/ 0, /*tp_call*/ 0, /*tp_str*/ 0, /*tp_getattro*/ 0, /*tp_setattro*/ 0, /*tp_as_buffer*/ Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE, /*tp_flags*/ "\n base class for all (de)compression classes,\n also handles compression format auto detection and\n adding/stripping the ID header (which enable auto detection).\n ", /*tp_doc*/ 0, /*tp_traverse*/ 0, /*tp_clear*/ 0, /*tp_richcompare*/ 0, /*tp_weaklistoffset*/ 0, /*tp_iter*/ 0, /*tp_iternext*/ __pyx_methods_4borg_8compress_CompressorBase, /*tp_methods*/ 0, /*tp_members*/ 0, /*tp_getset*/ 0, /*tp_base*/ 0, /*tp_dict*/ 0, /*tp_descr_get*/ 0, /*tp_descr_set*/ 0, /*tp_dictoffset*/ __pyx_pw_4borg_8compress_14CompressorBase_3__init__, /*tp_init*/ 0, /*tp_alloc*/ __pyx_tp_new_4borg_8compress_CompressorBase, /*tp_new*/ 0, /*tp_free*/ 0, /*tp_is_gc*/ 0, /*tp_bases*/ 0, /*tp_mro*/ 0, /*tp_cache*/ 0, /*tp_subclasses*/ 0, /*tp_weaklist*/ 0, /*tp_del*/ 0, /*tp_version_tag*/ #if PY_VERSION_HEX >= 0x030400a1 0, /*tp_finalize*/ #endif #if PY_VERSION_HEX >= 0x030800b1 0, /*tp_vectorcall*/ #endif #if PY_VERSION_HEX >= 0x030800b4 && PY_VERSION_HEX < 0x03090000 0, /*tp_print*/ #endif }; static PyMethodDef __pyx_methods[] = { {0, 0, 0, 0} }; #if PY_MAJOR_VERSION >= 3 #if CYTHON_PEP489_MULTI_PHASE_INIT static PyObject* __pyx_pymod_create(PyObject *spec, PyModuleDef *def); /*proto*/ static int __pyx_pymod_exec_compress(PyObject* module); /*proto*/ static PyModuleDef_Slot __pyx_moduledef_slots[] = { {Py_mod_create, (void*)__pyx_pymod_create}, {Py_mod_exec, (void*)__pyx_pymod_exec_compress}, {0, NULL} }; #endif static struct PyModuleDef __pyx_moduledef = { PyModuleDef_HEAD_INIT, "compress", __pyx_k_borg_compress_Compression_is_ap, /* m_doc */ #if CYTHON_PEP489_MULTI_PHASE_INIT 0, /* m_size */ #else -1, /* m_size */ #endif __pyx_methods /* m_methods */, #if CYTHON_PEP489_MULTI_PHASE_INIT __pyx_moduledef_slots, /* m_slots */ #else NULL, /* m_reload */ #endif NULL, /* m_traverse */ NULL, /* m_clear */ NULL /* m_free */ }; #endif #ifndef CYTHON_SMALL_CODE #if defined(__clang__) #define CYTHON_SMALL_CODE #elif defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 3)) #define CYTHON_SMALL_CODE __attribute__((cold)) #else #define CYTHON_SMALL_CODE #endif #endif static __Pyx_StringTabEntry __pyx_string_tab[] = { {&__pyx_kp_u_1_1_06, __pyx_k_1_1_06, sizeof(__pyx_k_1_1_06), 0, 1, 0, 0}, {&__pyx_n_s_API_VERSION, __pyx_k_API_VERSION, sizeof(__pyx_k_API_VERSION), 0, 0, 1, 1}, {&__pyx_n_s_Auto, __pyx_k_Auto, sizeof(__pyx_k_Auto), 0, 0, 1, 1}, {&__pyx_n_s_Auto___init, __pyx_k_Auto___init, sizeof(__pyx_k_Auto___init), 0, 0, 1, 1}, {&__pyx_n_s_Auto__decide, __pyx_k_Auto__decide, sizeof(__pyx_k_Auto__decide), 0, 0, 1, 1}, {&__pyx_n_s_Auto_compress, 
__pyx_k_Auto_compress, sizeof(__pyx_k_Auto_compress), 0, 0, 1, 1}, {&__pyx_n_s_Auto_decide, __pyx_k_Auto_decide, sizeof(__pyx_k_Auto_decide), 0, 0, 1, 1}, {&__pyx_n_s_Auto_decompress, __pyx_k_Auto_decompress, sizeof(__pyx_k_Auto_decompress), 0, 0, 1, 1}, {&__pyx_n_s_Auto_detect, __pyx_k_Auto_detect, sizeof(__pyx_k_Auto_detect), 0, 0, 1, 1}, {&__pyx_n_s_Buffer, __pyx_k_Buffer, sizeof(__pyx_k_Buffer), 0, 0, 1, 1}, {&__pyx_n_s_CHECK_NONE, __pyx_k_CHECK_NONE, sizeof(__pyx_k_CHECK_NONE), 0, 0, 1, 1}, {&__pyx_n_s_CNONE, __pyx_k_CNONE, sizeof(__pyx_k_CNONE), 0, 0, 1, 1}, {&__pyx_n_s_CNONE_compress, __pyx_k_CNONE_compress, sizeof(__pyx_k_CNONE_compress), 0, 0, 1, 1}, {&__pyx_n_s_CNONE_decompress, __pyx_k_CNONE_decompress, sizeof(__pyx_k_CNONE_decompress), 0, 0, 1, 1}, {&__pyx_n_s_COMPRESSOR_LIST, __pyx_k_COMPRESSOR_LIST, sizeof(__pyx_k_COMPRESSOR_LIST), 0, 0, 1, 1}, {&__pyx_n_s_COMPRESSOR_TABLE, __pyx_k_COMPRESSOR_TABLE, sizeof(__pyx_k_COMPRESSOR_TABLE), 0, 0, 1, 1}, {&__pyx_n_s_CompressionSpec, __pyx_k_CompressionSpec, sizeof(__pyx_k_CompressionSpec), 0, 0, 1, 1}, {&__pyx_n_s_CompressionSpec___init, __pyx_k_CompressionSpec___init, sizeof(__pyx_k_CompressionSpec___init), 0, 0, 1, 1}, {&__pyx_n_s_CompressionSpec_compressor, __pyx_k_CompressionSpec_compressor, sizeof(__pyx_k_CompressionSpec_compressor), 0, 0, 1, 1}, {&__pyx_n_s_Compressor, __pyx_k_Compressor, sizeof(__pyx_k_Compressor), 0, 0, 1, 1}, {&__pyx_n_s_CompressorBase, __pyx_k_CompressorBase, sizeof(__pyx_k_CompressorBase), 0, 0, 1, 1}, {&__pyx_n_s_Compressor___init, __pyx_k_Compressor___init, sizeof(__pyx_k_Compressor___init), 0, 0, 1, 1}, {&__pyx_n_s_Compressor_compress, __pyx_k_Compressor_compress, sizeof(__pyx_k_Compressor_compress), 0, 0, 1, 1}, {&__pyx_n_s_Compressor_decompress, __pyx_k_Compressor_decompress, sizeof(__pyx_k_Compressor_decompress), 0, 0, 1, 1}, {&__pyx_n_s_Compressor_detect, __pyx_k_Compressor_detect, sizeof(__pyx_k_Compressor_detect), 0, 0, 1, 1}, {&__pyx_n_s_DecompressionError, __pyx_k_DecompressionError, sizeof(__pyx_k_DecompressionError), 0, 0, 1, 1}, {&__pyx_n_s_ID, __pyx_k_ID, sizeof(__pyx_k_ID), 0, 0, 1, 1}, {&__pyx_n_s_ImportError, __pyx_k_ImportError, sizeof(__pyx_k_ImportError), 0, 0, 1, 1}, {&__pyx_kp_s_Incompatible_checksums_s_vs_0xd4, __pyx_k_Incompatible_checksums_s_vs_0xd4, sizeof(__pyx_k_Incompatible_checksums_s_vs_0xd4), 0, 0, 1, 0}, {&__pyx_n_s_LZ4, __pyx_k_LZ4, sizeof(__pyx_k_LZ4), 0, 0, 1, 1}, {&__pyx_n_s_LZ4___init, __pyx_k_LZ4___init, sizeof(__pyx_k_LZ4___init), 0, 0, 1, 1}, {&__pyx_n_s_LZ4_compress, __pyx_k_LZ4_compress, sizeof(__pyx_k_LZ4_compress), 0, 0, 1, 1}, {&__pyx_n_s_LZ4_decompress, __pyx_k_LZ4_decompress, sizeof(__pyx_k_LZ4_decompress), 0, 0, 1, 1}, {&__pyx_n_s_LZMA, __pyx_k_LZMA, sizeof(__pyx_k_LZMA), 0, 0, 1, 1}, {&__pyx_n_s_LZMAError, __pyx_k_LZMAError, sizeof(__pyx_k_LZMAError), 0, 0, 1, 1}, {&__pyx_n_s_LZMA___init, __pyx_k_LZMA___init, sizeof(__pyx_k_LZMA___init), 0, 0, 1, 1}, {&__pyx_n_s_LZMA_compress, __pyx_k_LZMA_compress, sizeof(__pyx_k_LZMA_compress), 0, 0, 1, 1}, {&__pyx_n_s_LZMA_decompress, __pyx_k_LZMA_decompress, sizeof(__pyx_k_LZMA_decompress), 0, 0, 1, 1}, {&__pyx_n_s_MemoryError, __pyx_k_MemoryError, sizeof(__pyx_k_MemoryError), 0, 0, 1, 1}, {&__pyx_n_u_MemoryError, __pyx_k_MemoryError, sizeof(__pyx_k_MemoryError), 0, 1, 0, 1}, {&__pyx_kp_s_Meta_Compressor_that_decides_wh, __pyx_k_Meta_Compressor_that_decides_wh, sizeof(__pyx_k_Meta_Compressor_that_decides_wh), 0, 0, 1, 0}, {&__pyx_kp_u_No_decompressor_for_this_data_fo, __pyx_k_No_decompressor_for_this_data_fo, 
sizeof(__pyx_k_No_decompressor_for_this_data_fo), 0, 1, 0, 0}, {&__pyx_kp_u_No_lzma_support_found, __pyx_k_No_lzma_support_found, sizeof(__pyx_k_No_lzma_support_found), 0, 1, 0, 0}, {&__pyx_n_s_NotImplementedError, __pyx_k_NotImplementedError, sizeof(__pyx_k_NotImplementedError), 0, 0, 1, 1}, {&__pyx_n_s_PickleError, __pyx_k_PickleError, sizeof(__pyx_k_PickleError), 0, 0, 1, 1}, {&__pyx_n_s_ValueError, __pyx_k_ValueError, sizeof(__pyx_k_ValueError), 0, 0, 1, 1}, {&__pyx_n_s_ZLIB, __pyx_k_ZLIB, sizeof(__pyx_k_ZLIB), 0, 0, 1, 1}, {&__pyx_n_s_ZLIB___init, __pyx_k_ZLIB___init, sizeof(__pyx_k_ZLIB___init), 0, 0, 1, 1}, {&__pyx_n_s_ZLIB_compress, __pyx_k_ZLIB_compress, sizeof(__pyx_k_ZLIB_compress), 0, 0, 1, 1}, {&__pyx_n_s_ZLIB_decompress, __pyx_k_ZLIB_decompress, sizeof(__pyx_k_ZLIB_decompress), 0, 0, 1, 1}, {&__pyx_n_s_ZLIB_detect, __pyx_k_ZLIB_detect, sizeof(__pyx_k_ZLIB_detect), 0, 0, 1, 1}, {&__pyx_n_s_ZSTD, __pyx_k_ZSTD, sizeof(__pyx_k_ZSTD), 0, 0, 1, 1}, {&__pyx_n_s_ZSTD___init, __pyx_k_ZSTD___init, sizeof(__pyx_k_ZSTD___init), 0, 0, 1, 1}, {&__pyx_n_s_ZSTD_compress, __pyx_k_ZSTD_compress, sizeof(__pyx_k_ZSTD_compress), 0, 0, 1, 1}, {&__pyx_n_s_ZSTD_decompress, __pyx_k_ZSTD_decompress, sizeof(__pyx_k_ZSTD_decompress), 0, 0, 1, 1}, {&__pyx_kp_b__13, __pyx_k__13, sizeof(__pyx_k__13), 0, 0, 0, 0}, {&__pyx_kp_b__20, __pyx_k__20, sizeof(__pyx_k__20), 0, 0, 0, 0}, {&__pyx_kp_b__28, __pyx_k__28, sizeof(__pyx_k__28), 0, 0, 0, 0}, {&__pyx_kp_b__36, __pyx_k__36, sizeof(__pyx_k__36), 0, 0, 0, 0}, {&__pyx_kp_u__5, __pyx_k__5, sizeof(__pyx_k__5), 0, 1, 0, 0}, {&__pyx_kp_b__7, __pyx_k__7, sizeof(__pyx_k__7), 0, 0, 0, 0}, {&__pyx_kp_b__8, __pyx_k__8, sizeof(__pyx_k__8), 0, 0, 0, 0}, {&__pyx_n_u_auto, __pyx_k_auto, sizeof(__pyx_k_auto), 0, 1, 0, 1}, {&__pyx_n_u_baseclass, __pyx_k_baseclass, sizeof(__pyx_k_baseclass), 0, 1, 0, 1}, {&__pyx_n_s_borg_compress, __pyx_k_borg_compress, sizeof(__pyx_k_borg_compress), 0, 0, 1, 1}, {&__pyx_n_s_buf, __pyx_k_buf, sizeof(__pyx_k_buf), 0, 0, 1, 1}, {&__pyx_n_s_buffer, __pyx_k_buffer, sizeof(__pyx_k_buffer), 0, 0, 1, 1}, {&__pyx_n_s_check, __pyx_k_check, sizeof(__pyx_k_check), 0, 0, 1, 1}, {&__pyx_n_s_check_ok, __pyx_k_check_ok, sizeof(__pyx_k_check_ok), 0, 0, 1, 1}, {&__pyx_n_s_cline_in_traceback, __pyx_k_cline_in_traceback, sizeof(__pyx_k_cline_in_traceback), 0, 0, 1, 1}, {&__pyx_n_s_cls, __pyx_k_cls, sizeof(__pyx_k_cls), 0, 0, 1, 1}, {&__pyx_n_s_cmf, __pyx_k_cmf, sizeof(__pyx_k_cmf), 0, 0, 1, 1}, {&__pyx_n_s_compress, __pyx_k_compress, sizeof(__pyx_k_compress), 0, 0, 1, 1}, {&__pyx_kp_s_compresses_using_a_compressor_w, __pyx_k_compresses_using_a_compressor_w, sizeof(__pyx_k_compresses_using_a_compressor_w), 0, 0, 1, 0}, {&__pyx_n_s_compression, __pyx_k_compression, sizeof(__pyx_k_compression), 0, 0, 1, 1}, {&__pyx_n_s_compressor, __pyx_k_compressor, sizeof(__pyx_k_compressor), 0, 0, 1, 1}, {&__pyx_n_s_compressor_cls, __pyx_k_compressor_cls, sizeof(__pyx_k_compressor_cls), 0, 0, 1, 1}, {&__pyx_n_s_count, __pyx_k_count, sizeof(__pyx_k_count), 0, 0, 1, 1}, {&__pyx_n_s_data, __pyx_k_data, sizeof(__pyx_k_data), 0, 0, 1, 1}, {&__pyx_n_s_decide, __pyx_k_decide, sizeof(__pyx_k_decide), 0, 0, 1, 1}, {&__pyx_n_s_decide_2, __pyx_k_decide_2, sizeof(__pyx_k_decide_2), 0, 0, 1, 1}, {&__pyx_n_s_decompress, __pyx_k_decompress, sizeof(__pyx_k_decompress), 0, 0, 1, 1}, {&__pyx_n_s_dest, __pyx_k_dest, sizeof(__pyx_k_dest), 0, 0, 1, 1}, {&__pyx_n_s_detect, __pyx_k_detect, sizeof(__pyx_k_detect), 0, 0, 1, 1}, {&__pyx_n_s_dict, __pyx_k_dict, sizeof(__pyx_k_dict), 0, 0, 1, 1}, 
{&__pyx_n_s_doc, __pyx_k_doc, sizeof(__pyx_k_doc), 0, 0, 1, 1}, {&__pyx_n_s_e, __pyx_k_e, sizeof(__pyx_k_e), 0, 0, 1, 1}, {&__pyx_n_s_error, __pyx_k_error, sizeof(__pyx_k_error), 0, 0, 1, 1}, {&__pyx_n_s_exp_compressed_data, __pyx_k_exp_compressed_data, sizeof(__pyx_k_exp_compressed_data), 0, 0, 1, 1}, {&__pyx_n_s_flg, __pyx_k_flg, sizeof(__pyx_k_flg), 0, 0, 1, 1}, {&__pyx_n_s_get, __pyx_k_get, sizeof(__pyx_k_get), 0, 0, 1, 1}, {&__pyx_n_s_get_compressor, __pyx_k_get_compressor, sizeof(__pyx_k_get_compressor), 0, 0, 1, 1}, {&__pyx_n_s_getstate, __pyx_k_getstate, sizeof(__pyx_k_getstate), 0, 0, 1, 1}, {&__pyx_n_s_hdr, __pyx_k_hdr, sizeof(__pyx_k_hdr), 0, 0, 1, 1}, {&__pyx_n_s_helpers, __pyx_k_helpers, sizeof(__pyx_k_helpers), 0, 0, 1, 1}, {&__pyx_n_s_idata, __pyx_k_idata, sizeof(__pyx_k_idata), 0, 0, 1, 1}, {&__pyx_n_s_import, __pyx_k_import, sizeof(__pyx_k_import), 0, 0, 1, 1}, {&__pyx_n_s_init, __pyx_k_init, sizeof(__pyx_k_init), 0, 0, 1, 1}, {&__pyx_n_s_inner, __pyx_k_inner, sizeof(__pyx_k_inner), 0, 0, 1, 1}, {&__pyx_n_s_is_deflate, __pyx_k_is_deflate, sizeof(__pyx_k_is_deflate), 0, 0, 1, 1}, {&__pyx_n_s_isize, __pyx_k_isize, sizeof(__pyx_k_isize), 0, 0, 1, 1}, {&__pyx_n_s_kwargs, __pyx_k_kwargs, sizeof(__pyx_k_kwargs), 0, 0, 1, 1}, {&__pyx_n_s_level, __pyx_k_level, sizeof(__pyx_k_level), 0, 0, 1, 1}, {&__pyx_n_s_lz4, __pyx_k_lz4, sizeof(__pyx_k_lz4), 0, 0, 1, 1}, {&__pyx_n_u_lz4, __pyx_k_lz4, sizeof(__pyx_k_lz4), 0, 1, 0, 1}, {&__pyx_kp_u_lz4_compress_failed, __pyx_k_lz4_compress_failed, sizeof(__pyx_k_lz4_compress_failed), 0, 1, 0, 0}, {&__pyx_n_s_lz4_data, __pyx_k_lz4_data, sizeof(__pyx_k_lz4_data), 0, 0, 1, 1}, {&__pyx_kp_u_lz4_decompress_failed, __pyx_k_lz4_decompress_failed, sizeof(__pyx_k_lz4_decompress_failed), 0, 1, 0, 0}, {&__pyx_n_s_lzma, __pyx_k_lzma, sizeof(__pyx_k_lzma), 0, 0, 1, 1}, {&__pyx_n_u_lzma, __pyx_k_lzma, sizeof(__pyx_k_lzma), 0, 1, 0, 1}, {&__pyx_kp_s_lzma_compression_decompression, __pyx_k_lzma_compression_decompression, sizeof(__pyx_k_lzma_compression_decompression), 0, 0, 1, 0}, {&__pyx_n_s_main, __pyx_k_main, sizeof(__pyx_k_main), 0, 0, 1, 1}, {&__pyx_n_s_metaclass, __pyx_k_metaclass, sizeof(__pyx_k_metaclass), 0, 0, 1, 1}, {&__pyx_n_s_module, __pyx_k_module, sizeof(__pyx_k_module), 0, 0, 1, 1}, {&__pyx_n_s_name, __pyx_k_name, sizeof(__pyx_k_name), 0, 0, 1, 1}, {&__pyx_n_s_name_2, __pyx_k_name_2, sizeof(__pyx_k_name_2), 0, 0, 1, 1}, {&__pyx_n_s_new, __pyx_k_new, sizeof(__pyx_k_new), 0, 0, 1, 1}, {&__pyx_n_s_none, __pyx_k_none, sizeof(__pyx_k_none), 0, 0, 1, 1}, {&__pyx_n_u_none, __pyx_k_none, sizeof(__pyx_k_none), 0, 1, 0, 1}, {&__pyx_kp_s_none_no_compression_just_pass_t, __pyx_k_none_no_compression_just_pass_t, sizeof(__pyx_k_none_no_compression_just_pass_t), 0, 0, 1, 0}, {&__pyx_n_u_null, __pyx_k_null, sizeof(__pyx_k_null), 0, 1, 0, 1}, {&__pyx_n_s_osize, __pyx_k_osize, sizeof(__pyx_k_osize), 0, 0, 1, 1}, {&__pyx_n_s_params, __pyx_k_params, sizeof(__pyx_k_params), 0, 0, 1, 1}, {&__pyx_n_s_pickle, __pyx_k_pickle, sizeof(__pyx_k_pickle), 0, 0, 1, 1}, {&__pyx_n_s_prepare, __pyx_k_prepare, sizeof(__pyx_k_prepare), 0, 0, 1, 1}, {&__pyx_n_s_preset, __pyx_k_preset, sizeof(__pyx_k_preset), 0, 0, 1, 1}, {&__pyx_n_s_property, __pyx_k_property, sizeof(__pyx_k_property), 0, 0, 1, 1}, {&__pyx_n_s_pyx_PickleError, __pyx_k_pyx_PickleError, sizeof(__pyx_k_pyx_PickleError), 0, 0, 1, 1}, {&__pyx_n_s_pyx_checksum, __pyx_k_pyx_checksum, sizeof(__pyx_k_pyx_checksum), 0, 0, 1, 1}, {&__pyx_n_s_pyx_result, __pyx_k_pyx_result, sizeof(__pyx_k_pyx_result), 0, 0, 1, 1}, 
{&__pyx_n_s_pyx_state, __pyx_k_pyx_state, sizeof(__pyx_k_pyx_state), 0, 0, 1, 1}, {&__pyx_n_s_pyx_type, __pyx_k_pyx_type, sizeof(__pyx_k_pyx_type), 0, 0, 1, 1}, {&__pyx_n_s_pyx_unpickle_CompressorBase, __pyx_k_pyx_unpickle_CompressorBase, sizeof(__pyx_k_pyx_unpickle_CompressorBase), 0, 0, 1, 1}, {&__pyx_n_s_qualname, __pyx_k_qualname, sizeof(__pyx_k_qualname), 0, 0, 1, 1}, {&__pyx_n_s_ratio, __pyx_k_ratio, sizeof(__pyx_k_ratio), 0, 0, 1, 1}, {&__pyx_kp_s_raw_LZ4_compression_decompressi, __pyx_k_raw_LZ4_compression_decompressi, sizeof(__pyx_k_raw_LZ4_compression_decompressi), 0, 0, 1, 0}, {&__pyx_n_s_reduce, __pyx_k_reduce, sizeof(__pyx_k_reduce), 0, 0, 1, 1}, {&__pyx_n_s_reduce_cython, __pyx_k_reduce_cython, sizeof(__pyx_k_reduce_cython), 0, 0, 1, 1}, {&__pyx_n_s_reduce_ex, __pyx_k_reduce_ex, sizeof(__pyx_k_reduce_ex), 0, 0, 1, 1}, {&__pyx_n_s_rsize, __pyx_k_rsize, sizeof(__pyx_k_rsize), 0, 0, 1, 1}, {&__pyx_n_s_s, __pyx_k_s, sizeof(__pyx_k_s), 0, 0, 1, 1}, {&__pyx_n_s_self, __pyx_k_self, sizeof(__pyx_k_self), 0, 0, 1, 1}, {&__pyx_n_s_setstate, __pyx_k_setstate, sizeof(__pyx_k_setstate), 0, 0, 1, 1}, {&__pyx_n_s_setstate_cython, __pyx_k_setstate_cython, sizeof(__pyx_k_setstate_cython), 0, 0, 1, 1}, {&__pyx_n_s_size, __pyx_k_size, sizeof(__pyx_k_size), 0, 0, 1, 1}, {&__pyx_n_s_source, __pyx_k_source, sizeof(__pyx_k_source), 0, 0, 1, 1}, {&__pyx_n_s_split, __pyx_k_split, sizeof(__pyx_k_split), 0, 0, 1, 1}, {&__pyx_kp_s_src_borg_compress_pyx, __pyx_k_src_borg_compress_pyx, sizeof(__pyx_k_src_borg_compress_pyx), 0, 0, 1, 0}, {&__pyx_n_s_startswith, __pyx_k_startswith, sizeof(__pyx_k_startswith), 0, 0, 1, 1}, {&__pyx_n_s_staticmethod, __pyx_k_staticmethod, sizeof(__pyx_k_staticmethod), 0, 0, 1, 1}, {&__pyx_kp_s_stringsource, __pyx_k_stringsource, sizeof(__pyx_k_stringsource), 0, 0, 1, 0}, {&__pyx_n_s_super, __pyx_k_super, sizeof(__pyx_k_super), 0, 0, 1, 1}, {&__pyx_n_s_test, __pyx_k_test, sizeof(__pyx_k_test), 0, 0, 1, 1}, {&__pyx_n_s_uncompressed_data, __pyx_k_uncompressed_data, sizeof(__pyx_k_uncompressed_data), 0, 0, 1, 1}, {&__pyx_n_s_update, __pyx_k_update, sizeof(__pyx_k_update), 0, 0, 1, 1}, {&__pyx_n_s_values, __pyx_k_values, sizeof(__pyx_k_values), 0, 0, 1, 1}, {&__pyx_n_s_zlib, __pyx_k_zlib, sizeof(__pyx_k_zlib), 0, 0, 1, 1}, {&__pyx_n_u_zlib, __pyx_k_zlib, sizeof(__pyx_k_zlib), 0, 1, 0, 1}, {&__pyx_kp_s_zlib_compression_decompression, __pyx_k_zlib_compression_decompression, sizeof(__pyx_k_zlib_compression_decompression), 0, 0, 1, 0}, {&__pyx_n_u_zstd, __pyx_k_zstd, sizeof(__pyx_k_zstd), 0, 1, 0, 1}, {&__pyx_kp_u_zstd_compress_failed_s, __pyx_k_zstd_compress_failed_s, sizeof(__pyx_k_zstd_compress_failed_s), 0, 1, 0, 0}, {&__pyx_kp_s_zstd_compression_decompression_p, __pyx_k_zstd_compression_decompression_p, sizeof(__pyx_k_zstd_compression_decompression_p), 0, 0, 1, 0}, {&__pyx_kp_u_zstd_decompress_failed_s, __pyx_k_zstd_decompress_failed_s, sizeof(__pyx_k_zstd_decompress_failed_s), 0, 1, 0, 0}, {&__pyx_kp_u_zstd_decompress_failed_size_mism, __pyx_k_zstd_decompress_failed_size_mism, sizeof(__pyx_k_zstd_decompress_failed_size_mism), 0, 1, 0, 0}, {&__pyx_kp_u_zstd_get_size_failed_data_was_no, __pyx_k_zstd_get_size_failed_data_was_no, sizeof(__pyx_k_zstd_get_size_failed_data_was_no), 0, 1, 0, 0}, {&__pyx_kp_u_zstd_get_size_failed_original_si, __pyx_k_zstd_get_size_failed_original_si, sizeof(__pyx_k_zstd_get_size_failed_original_si), 0, 1, 0, 0}, {0, 0, 0, 0, 0, 0, 0} }; static CYTHON_SMALL_CODE int __Pyx_InitCachedBuiltins(void) { __pyx_builtin_ImportError = 
__Pyx_GetBuiltinName(__pyx_n_s_ImportError); if (!__pyx_builtin_ImportError) __PYX_ERR(0, 24, __pyx_L1_error) __pyx_builtin_staticmethod = __Pyx_GetBuiltinName(__pyx_n_s_staticmethod); if (!__pyx_builtin_staticmethod) __PYX_ERR(0, 391, __pyx_L1_error) __pyx_builtin_property = __Pyx_GetBuiltinName(__pyx_n_s_property); if (!__pyx_builtin_property) __PYX_ERR(0, 440, __pyx_L1_error) __pyx_builtin_super = __Pyx_GetBuiltinName(__pyx_n_s_super); if (!__pyx_builtin_super) __PYX_ERR(0, 109, __pyx_L1_error) __pyx_builtin_MemoryError = __Pyx_GetBuiltinName(__pyx_n_s_MemoryError); if (!__pyx_builtin_MemoryError) __PYX_ERR(0, 163, __pyx_L1_error) __pyx_builtin_ValueError = __Pyx_GetBuiltinName(__pyx_n_s_ValueError); if (!__pyx_builtin_ValueError) __PYX_ERR(0, 188, __pyx_L1_error) __pyx_builtin_NotImplementedError = __Pyx_GetBuiltinName(__pyx_n_s_NotImplementedError); if (!__pyx_builtin_NotImplementedError) __PYX_ERR(0, 352, __pyx_L1_error) return 0; __pyx_L1_error:; return -1; } static CYTHON_SMALL_CODE int __Pyx_InitCachedConstants(void) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__Pyx_InitCachedConstants", 0); /* "borg/compress.pyx":98 * """ * # strip ID bytes * return data[2:] # <<<<<<<<<<<<<< * * */ __pyx_slice_ = PySlice_New(__pyx_int_2, Py_None, Py_None); if (unlikely(!__pyx_slice_)) __PYX_ERR(0, 98, __pyx_L1_error) __Pyx_GOTREF(__pyx_slice_); __Pyx_GIVEREF(__pyx_slice_); /* "borg/compress.pyx":145 * osize = LZ4_compress_default(source, dest, isize, osize) * if not osize: * raise Exception('lz4 compress failed') # <<<<<<<<<<<<<< * return super().compress(dest[:osize]) * */ __pyx_tuple__2 = PyTuple_Pack(1, __pyx_kp_u_lz4_compress_failed); if (unlikely(!__pyx_tuple__2)) __PYX_ERR(0, 145, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__2); __Pyx_GIVEREF(__pyx_tuple__2); /* "borg/compress.pyx":188 * self.level = level * if lzma is None: * raise ValueError('No lzma support found.') # <<<<<<<<<<<<<< * * def compress(self, data): */ __pyx_tuple__3 = PyTuple_Pack(1, __pyx_kp_u_No_lzma_support_found); if (unlikely(!__pyx_tuple__3)) __PYX_ERR(0, 188, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__3); __Pyx_GIVEREF(__pyx_tuple__3); /* "borg/compress.pyx":271 * def detect(cls, data): * # matches misc. patterns 0x.8.. 
used by zlib * cmf, flg = data[:2] # <<<<<<<<<<<<<< * is_deflate = cmf & 0x0f == 8 * check_ok = (cmf * 256 + flg) % 31 == 0 */ __pyx_slice__4 = PySlice_New(Py_None, __pyx_int_2, Py_None); if (unlikely(!__pyx_slice__4)) __PYX_ERR(0, 271, __pyx_L1_error) __Pyx_GOTREF(__pyx_slice__4); __Pyx_GIVEREF(__pyx_slice__4); /* "borg/compress.pyx":433 * elif self.name == 'auto': * if 2 <= count <= 3: * compression = ','.join(values[1:]) # <<<<<<<<<<<<<< * else: * raise ValueError */ __pyx_slice__6 = PySlice_New(__pyx_int_1, Py_None, Py_None); if (unlikely(!__pyx_slice__6)) __PYX_ERR(0, 433, __pyx_L1_error) __Pyx_GOTREF(__pyx_slice__6); __Pyx_GIVEREF(__pyx_slice__6); /* "borg/compress.pyx":108 * name = 'none' * * def compress(self, data): # <<<<<<<<<<<<<< * return super().compress(data) * */ __pyx_tuple__9 = PyTuple_Pack(2, __pyx_n_s_self, __pyx_n_s_data); if (unlikely(!__pyx_tuple__9)) __PYX_ERR(0, 108, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__9); __Pyx_GIVEREF(__pyx_tuple__9); __pyx_codeobj__10 = (PyObject*)__Pyx_PyCode_New(2, 0, 2, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__9, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_src_borg_compress_pyx, __pyx_n_s_compress, 108, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__10)) __PYX_ERR(0, 108, __pyx_L1_error) /* "borg/compress.pyx":111 * return super().compress(data) * * def decompress(self, data): # <<<<<<<<<<<<<< * data = super().decompress(data) * if not isinstance(data, bytes): */ __pyx_tuple__11 = PyTuple_Pack(2, __pyx_n_s_self, __pyx_n_s_data); if (unlikely(!__pyx_tuple__11)) __PYX_ERR(0, 111, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__11); __Pyx_GIVEREF(__pyx_tuple__11); __pyx_codeobj__12 = (PyObject*)__Pyx_PyCode_New(2, 0, 2, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__11, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_src_borg_compress_pyx, __pyx_n_s_decompress, 111, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__12)) __PYX_ERR(0, 111, __pyx_L1_error) /* "borg/compress.pyx":130 * name = 'lz4' * * def __init__(self, **kwargs): # <<<<<<<<<<<<<< * pass * */ __pyx_tuple__14 = PyTuple_Pack(2, __pyx_n_s_self, __pyx_n_s_kwargs); if (unlikely(!__pyx_tuple__14)) __PYX_ERR(0, 130, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__14); __Pyx_GIVEREF(__pyx_tuple__14); __pyx_codeobj__15 = (PyObject*)__Pyx_PyCode_New(1, 0, 2, 0, CO_OPTIMIZED|CO_NEWLOCALS|CO_VARKEYWORDS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__14, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_src_borg_compress_pyx, __pyx_n_s_init, 130, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__15)) __PYX_ERR(0, 130, __pyx_L1_error) /* "borg/compress.pyx":133 * pass * * def compress(self, idata): # <<<<<<<<<<<<<< * if not isinstance(idata, bytes): * idata = bytes(idata) # code below does not work with memoryview */ __pyx_tuple__16 = PyTuple_Pack(7, __pyx_n_s_self, __pyx_n_s_idata, __pyx_n_s_isize, __pyx_n_s_osize, __pyx_n_s_source, __pyx_n_s_dest, __pyx_n_s_buf); if (unlikely(!__pyx_tuple__16)) __PYX_ERR(0, 133, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__16); __Pyx_GIVEREF(__pyx_tuple__16); __pyx_codeobj__17 = (PyObject*)__Pyx_PyCode_New(2, 0, 7, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__16, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_src_borg_compress_pyx, __pyx_n_s_compress, 133, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__17)) __PYX_ERR(0, 133, __pyx_L1_error) /* "borg/compress.pyx":148 * 
return super().compress(dest[:osize]) * * def decompress(self, idata): # <<<<<<<<<<<<<< * if not isinstance(idata, bytes): * idata = bytes(idata) # code below does not work with memoryview */ __pyx_tuple__18 = PyTuple_Pack(8, __pyx_n_s_self, __pyx_n_s_idata, __pyx_n_s_isize, __pyx_n_s_osize, __pyx_n_s_rsize, __pyx_n_s_source, __pyx_n_s_dest, __pyx_n_s_buf); if (unlikely(!__pyx_tuple__18)) __PYX_ERR(0, 148, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__18); __Pyx_GIVEREF(__pyx_tuple__18); __pyx_codeobj__19 = (PyObject*)__Pyx_PyCode_New(2, 0, 8, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__18, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_src_borg_compress_pyx, __pyx_n_s_decompress, 148, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__19)) __PYX_ERR(0, 148, __pyx_L1_error) /* "borg/compress.pyx":184 * name = 'lzma' * * def __init__(self, level=6, **kwargs): # <<<<<<<<<<<<<< * super().__init__(**kwargs) * self.level = level */ __pyx_tuple__21 = PyTuple_Pack(3, __pyx_n_s_self, __pyx_n_s_level, __pyx_n_s_kwargs); if (unlikely(!__pyx_tuple__21)) __PYX_ERR(0, 184, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__21); __Pyx_GIVEREF(__pyx_tuple__21); __pyx_codeobj__22 = (PyObject*)__Pyx_PyCode_New(2, 0, 3, 0, CO_OPTIMIZED|CO_NEWLOCALS|CO_VARKEYWORDS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__21, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_src_borg_compress_pyx, __pyx_n_s_init, 184, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__22)) __PYX_ERR(0, 184, __pyx_L1_error) __pyx_tuple__23 = PyTuple_Pack(1, ((PyObject *)__pyx_int_6)); if (unlikely(!__pyx_tuple__23)) __PYX_ERR(0, 184, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__23); __Pyx_GIVEREF(__pyx_tuple__23); /* "borg/compress.pyx":190 * raise ValueError('No lzma support found.') * * def compress(self, data): # <<<<<<<<<<<<<< * # we do not need integrity checks in lzma, we do that already * data = lzma.compress(data, preset=self.level, check=lzma.CHECK_NONE) */ __pyx_tuple__24 = PyTuple_Pack(2, __pyx_n_s_self, __pyx_n_s_data); if (unlikely(!__pyx_tuple__24)) __PYX_ERR(0, 190, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__24); __Pyx_GIVEREF(__pyx_tuple__24); __pyx_codeobj__25 = (PyObject*)__Pyx_PyCode_New(2, 0, 2, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__24, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_src_borg_compress_pyx, __pyx_n_s_compress, 190, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__25)) __PYX_ERR(0, 190, __pyx_L1_error) /* "borg/compress.pyx":195 * return super().compress(data) * * def decompress(self, data): # <<<<<<<<<<<<<< * data = super().decompress(data) * try: */ __pyx_tuple__26 = PyTuple_Pack(3, __pyx_n_s_self, __pyx_n_s_data, __pyx_n_s_e); if (unlikely(!__pyx_tuple__26)) __PYX_ERR(0, 195, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__26); __Pyx_GIVEREF(__pyx_tuple__26); __pyx_codeobj__27 = (PyObject*)__Pyx_PyCode_New(2, 0, 3, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__26, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_src_borg_compress_pyx, __pyx_n_s_decompress, 195, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__27)) __PYX_ERR(0, 195, __pyx_L1_error) /* "borg/compress.pyx":211 * name = 'zstd' * * def __init__(self, level=3, **kwargs): # <<<<<<<<<<<<<< * super().__init__(**kwargs) * self.level = level */ __pyx_tuple__29 = PyTuple_Pack(3, __pyx_n_s_self, __pyx_n_s_level, __pyx_n_s_kwargs); if (unlikely(!__pyx_tuple__29)) __PYX_ERR(0, 
211, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__29); __Pyx_GIVEREF(__pyx_tuple__29); __pyx_codeobj__30 = (PyObject*)__Pyx_PyCode_New(2, 0, 3, 0, CO_OPTIMIZED|CO_NEWLOCALS|CO_VARKEYWORDS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__29, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_src_borg_compress_pyx, __pyx_n_s_init, 211, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__30)) __PYX_ERR(0, 211, __pyx_L1_error) __pyx_tuple__31 = PyTuple_Pack(1, ((PyObject *)__pyx_int_3)); if (unlikely(!__pyx_tuple__31)) __PYX_ERR(0, 211, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__31); __Pyx_GIVEREF(__pyx_tuple__31); /* "borg/compress.pyx":215 * self.level = level * * def compress(self, idata): # <<<<<<<<<<<<<< * if not isinstance(idata, bytes): * idata = bytes(idata) # code below does not work with memoryview */ __pyx_tuple__32 = PyTuple_Pack(8, __pyx_n_s_self, __pyx_n_s_idata, __pyx_n_s_isize, __pyx_n_s_osize, __pyx_n_s_source, __pyx_n_s_dest, __pyx_n_s_level, __pyx_n_s_buf); if (unlikely(!__pyx_tuple__32)) __PYX_ERR(0, 215, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__32); __Pyx_GIVEREF(__pyx_tuple__32); __pyx_codeobj__33 = (PyObject*)__Pyx_PyCode_New(2, 0, 8, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__32, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_src_borg_compress_pyx, __pyx_n_s_compress, 215, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__33)) __PYX_ERR(0, 215, __pyx_L1_error) /* "borg/compress.pyx":232 * return super().compress(dest[:osize]) * * def decompress(self, idata): # <<<<<<<<<<<<<< * if not isinstance(idata, bytes): * idata = bytes(idata) # code below does not work with memoryview */ __pyx_tuple__34 = PyTuple_Pack(8, __pyx_n_s_self, __pyx_n_s_idata, __pyx_n_s_isize, __pyx_n_s_osize, __pyx_n_s_rsize, __pyx_n_s_source, __pyx_n_s_dest, __pyx_n_s_buf); if (unlikely(!__pyx_tuple__34)) __PYX_ERR(0, 232, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__34); __Pyx_GIVEREF(__pyx_tuple__34); __pyx_codeobj__35 = (PyObject*)__Pyx_PyCode_New(2, 0, 8, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__34, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_src_borg_compress_pyx, __pyx_n_s_decompress, 232, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__35)) __PYX_ERR(0, 232, __pyx_L1_error) /* "borg/compress.pyx":269 * * @classmethod * def detect(cls, data): # <<<<<<<<<<<<<< * # matches misc. patterns 0x.8.. 
used by zlib * cmf, flg = data[:2] */ __pyx_tuple__37 = PyTuple_Pack(6, __pyx_n_s_cls, __pyx_n_s_data, __pyx_n_s_cmf, __pyx_n_s_flg, __pyx_n_s_is_deflate, __pyx_n_s_check_ok); if (unlikely(!__pyx_tuple__37)) __PYX_ERR(0, 269, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__37); __Pyx_GIVEREF(__pyx_tuple__37); __pyx_codeobj__38 = (PyObject*)__Pyx_PyCode_New(2, 0, 6, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__37, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_src_borg_compress_pyx, __pyx_n_s_detect, 269, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__38)) __PYX_ERR(0, 269, __pyx_L1_error) /* "borg/compress.pyx":276 * return check_ok and is_deflate * * def __init__(self, level=6, **kwargs): # <<<<<<<<<<<<<< * super().__init__(**kwargs) * self.level = level */ __pyx_tuple__39 = PyTuple_Pack(3, __pyx_n_s_self, __pyx_n_s_level, __pyx_n_s_kwargs); if (unlikely(!__pyx_tuple__39)) __PYX_ERR(0, 276, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__39); __Pyx_GIVEREF(__pyx_tuple__39); __pyx_codeobj__40 = (PyObject*)__Pyx_PyCode_New(2, 0, 3, 0, CO_OPTIMIZED|CO_NEWLOCALS|CO_VARKEYWORDS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__39, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_src_borg_compress_pyx, __pyx_n_s_init, 276, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__40)) __PYX_ERR(0, 276, __pyx_L1_error) __pyx_tuple__41 = PyTuple_Pack(1, ((PyObject *)__pyx_int_6)); if (unlikely(!__pyx_tuple__41)) __PYX_ERR(0, 276, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__41); __Pyx_GIVEREF(__pyx_tuple__41); /* "borg/compress.pyx":280 * self.level = level * * def compress(self, data): # <<<<<<<<<<<<<< * # note: for compatibility no super call, do not add ID bytes * return zlib.compress(data, self.level) */ __pyx_tuple__42 = PyTuple_Pack(2, __pyx_n_s_self, __pyx_n_s_data); if (unlikely(!__pyx_tuple__42)) __PYX_ERR(0, 280, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__42); __Pyx_GIVEREF(__pyx_tuple__42); __pyx_codeobj__43 = (PyObject*)__Pyx_PyCode_New(2, 0, 2, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__42, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_src_borg_compress_pyx, __pyx_n_s_compress, 280, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__43)) __PYX_ERR(0, 280, __pyx_L1_error) /* "borg/compress.pyx":284 * return zlib.compress(data, self.level) * * def decompress(self, data): # <<<<<<<<<<<<<< * # note: for compatibility no super call, do not strip ID bytes * try: */ __pyx_tuple__44 = PyTuple_Pack(3, __pyx_n_s_self, __pyx_n_s_data, __pyx_n_s_e); if (unlikely(!__pyx_tuple__44)) __PYX_ERR(0, 284, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__44); __Pyx_GIVEREF(__pyx_tuple__44); __pyx_codeobj__45 = (PyObject*)__Pyx_PyCode_New(2, 0, 3, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__44, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_src_borg_compress_pyx, __pyx_n_s_decompress, 284, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__45)) __PYX_ERR(0, 284, __pyx_L1_error) /* "borg/compress.pyx":303 * name = 'auto' * * def __init__(self, compressor): # <<<<<<<<<<<<<< * super().__init__() * self.compressor = compressor */ __pyx_tuple__46 = PyTuple_Pack(2, __pyx_n_s_self, __pyx_n_s_compressor); if (unlikely(!__pyx_tuple__46)) __PYX_ERR(0, 303, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__46); __Pyx_GIVEREF(__pyx_tuple__46); __pyx_codeobj__47 = (PyObject*)__Pyx_PyCode_New(2, 0, 2, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, 
__pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__46, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_src_borg_compress_pyx, __pyx_n_s_init, 303, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__47)) __PYX_ERR(0, 303, __pyx_L1_error) /* "borg/compress.pyx":309 * self.none = get_compressor('none') * * def _decide(self, data): # <<<<<<<<<<<<<< * """ * Decides what to do with *data*. Returns (compressor, lz4_data). */ __pyx_tuple__48 = PyTuple_Pack(4, __pyx_n_s_self, __pyx_n_s_data, __pyx_n_s_lz4_data, __pyx_n_s_ratio); if (unlikely(!__pyx_tuple__48)) __PYX_ERR(0, 309, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__48); __Pyx_GIVEREF(__pyx_tuple__48); __pyx_codeobj__49 = (PyObject*)__Pyx_PyCode_New(2, 0, 4, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__48, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_src_borg_compress_pyx, __pyx_n_s_decide, 309, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__49)) __PYX_ERR(0, 309, __pyx_L1_error) /* "borg/compress.pyx":326 * return self.none, None * * def decide(self, data): # <<<<<<<<<<<<<< * return self._decide(data)[0] * */ __pyx_tuple__50 = PyTuple_Pack(2, __pyx_n_s_self, __pyx_n_s_data); if (unlikely(!__pyx_tuple__50)) __PYX_ERR(0, 326, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__50); __Pyx_GIVEREF(__pyx_tuple__50); __pyx_codeobj__51 = (PyObject*)__Pyx_PyCode_New(2, 0, 2, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__50, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_src_borg_compress_pyx, __pyx_n_s_decide_2, 326, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__51)) __PYX_ERR(0, 326, __pyx_L1_error) /* "borg/compress.pyx":329 * return self._decide(data)[0] * * def compress(self, data): # <<<<<<<<<<<<<< * compressor, lz4_data = self._decide(data) * if compressor is self.lz4: */ __pyx_tuple__52 = PyTuple_Pack(7, __pyx_n_s_self, __pyx_n_s_data, __pyx_n_s_compressor, __pyx_n_s_lz4_data, __pyx_n_s_uncompressed_data, __pyx_n_s_exp_compressed_data, __pyx_n_s_ratio); if (unlikely(!__pyx_tuple__52)) __PYX_ERR(0, 329, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__52); __Pyx_GIVEREF(__pyx_tuple__52); __pyx_codeobj__53 = (PyObject*)__Pyx_PyCode_New(2, 0, 7, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__52, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_src_borg_compress_pyx, __pyx_n_s_compress, 329, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__53)) __PYX_ERR(0, 329, __pyx_L1_error) /* "borg/compress.pyx":351 * return lz4_data * * def decompress(self, data): # <<<<<<<<<<<<<< * raise NotImplementedError * */ __pyx_tuple__54 = PyTuple_Pack(2, __pyx_n_s_self, __pyx_n_s_data); if (unlikely(!__pyx_tuple__54)) __PYX_ERR(0, 351, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__54); __Pyx_GIVEREF(__pyx_tuple__54); __pyx_codeobj__55 = (PyObject*)__Pyx_PyCode_New(2, 0, 2, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__54, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_src_borg_compress_pyx, __pyx_n_s_decompress, 351, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__55)) __PYX_ERR(0, 351, __pyx_L1_error) /* "borg/compress.pyx":354 * raise NotImplementedError * * def detect(cls, data): # <<<<<<<<<<<<<< * raise NotImplementedError * */ __pyx_tuple__56 = PyTuple_Pack(2, __pyx_n_s_cls, __pyx_n_s_data); if (unlikely(!__pyx_tuple__56)) __PYX_ERR(0, 354, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__56); __Pyx_GIVEREF(__pyx_tuple__56); __pyx_codeobj__57 = 
(PyObject*)__Pyx_PyCode_New(2, 0, 2, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__56, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_src_borg_compress_pyx, __pyx_n_s_detect, 354, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__57)) __PYX_ERR(0, 354, __pyx_L1_error) /* "borg/compress.pyx":370 * COMPRESSOR_LIST = [LZ4, ZSTD, CNONE, ZLIB, LZMA, ] # check fast stuff first * * def get_compressor(name, **kwargs): # <<<<<<<<<<<<<< * cls = COMPRESSOR_TABLE[name] * return cls(**kwargs) */ __pyx_tuple__58 = PyTuple_Pack(3, __pyx_n_s_name, __pyx_n_s_kwargs, __pyx_n_s_cls); if (unlikely(!__pyx_tuple__58)) __PYX_ERR(0, 370, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__58); __Pyx_GIVEREF(__pyx_tuple__58); __pyx_codeobj__59 = (PyObject*)__Pyx_PyCode_New(1, 0, 3, 0, CO_OPTIMIZED|CO_NEWLOCALS|CO_VARKEYWORDS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__58, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_src_borg_compress_pyx, __pyx_n_s_get_compressor, 370, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__59)) __PYX_ERR(0, 370, __pyx_L1_error) /* "borg/compress.pyx":380 * decompresses everything we can handle (autodetect) * """ * def __init__(self, name='null', **kwargs): # <<<<<<<<<<<<<< * self.params = kwargs * self.compressor = get_compressor(name, **self.params) */ __pyx_tuple__60 = PyTuple_Pack(3, __pyx_n_s_self, __pyx_n_s_name, __pyx_n_s_kwargs); if (unlikely(!__pyx_tuple__60)) __PYX_ERR(0, 380, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__60); __Pyx_GIVEREF(__pyx_tuple__60); __pyx_codeobj__61 = (PyObject*)__Pyx_PyCode_New(2, 0, 3, 0, CO_OPTIMIZED|CO_NEWLOCALS|CO_VARKEYWORDS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__60, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_src_borg_compress_pyx, __pyx_n_s_init, 380, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__61)) __PYX_ERR(0, 380, __pyx_L1_error) __pyx_tuple__62 = PyTuple_Pack(1, ((PyObject*)__pyx_n_u_null)); if (unlikely(!__pyx_tuple__62)) __PYX_ERR(0, 380, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__62); __Pyx_GIVEREF(__pyx_tuple__62); /* "borg/compress.pyx":384 * self.compressor = get_compressor(name, **self.params) * * def compress(self, data): # <<<<<<<<<<<<<< * return self.compressor.compress(data) * */ __pyx_tuple__63 = PyTuple_Pack(2, __pyx_n_s_self, __pyx_n_s_data); if (unlikely(!__pyx_tuple__63)) __PYX_ERR(0, 384, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__63); __Pyx_GIVEREF(__pyx_tuple__63); __pyx_codeobj__64 = (PyObject*)__Pyx_PyCode_New(2, 0, 2, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__63, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_src_borg_compress_pyx, __pyx_n_s_compress, 384, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__64)) __PYX_ERR(0, 384, __pyx_L1_error) /* "borg/compress.pyx":387 * return self.compressor.compress(data) * * def decompress(self, data): # <<<<<<<<<<<<<< * compressor_cls = self.detect(data) * return compressor_cls(**self.params).decompress(data) */ __pyx_tuple__65 = PyTuple_Pack(3, __pyx_n_s_self, __pyx_n_s_data, __pyx_n_s_compressor_cls); if (unlikely(!__pyx_tuple__65)) __PYX_ERR(0, 387, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__65); __Pyx_GIVEREF(__pyx_tuple__65); __pyx_codeobj__66 = (PyObject*)__Pyx_PyCode_New(2, 0, 3, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__65, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_src_borg_compress_pyx, __pyx_n_s_decompress, 387, __pyx_empty_bytes); if 
(unlikely(!__pyx_codeobj__66)) __PYX_ERR(0, 387, __pyx_L1_error) /* "borg/compress.pyx":392 * * @staticmethod * def detect(data): # <<<<<<<<<<<<<< * hdr = bytes(data[:2]) # detect() does not work with memoryview * for cls in COMPRESSOR_LIST: */ __pyx_tuple__67 = PyTuple_Pack(3, __pyx_n_s_data, __pyx_n_s_hdr, __pyx_n_s_cls); if (unlikely(!__pyx_tuple__67)) __PYX_ERR(0, 392, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__67); __Pyx_GIVEREF(__pyx_tuple__67); __pyx_codeobj__68 = (PyObject*)__Pyx_PyCode_New(1, 0, 3, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__67, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_src_borg_compress_pyx, __pyx_n_s_detect, 392, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__68)) __PYX_ERR(0, 392, __pyx_L1_error) /* "borg/compress.pyx":402 * * class CompressionSpec: * def __init__(self, s): # <<<<<<<<<<<<<< * values = s.split(',') * count = len(values) */ __pyx_tuple__69 = PyTuple_Pack(6, __pyx_n_s_self, __pyx_n_s_s, __pyx_n_s_values, __pyx_n_s_count, __pyx_n_s_level, __pyx_n_s_compression); if (unlikely(!__pyx_tuple__69)) __PYX_ERR(0, 402, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__69); __Pyx_GIVEREF(__pyx_tuple__69); __pyx_codeobj__70 = (PyObject*)__Pyx_PyCode_New(2, 0, 6, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__69, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_src_borg_compress_pyx, __pyx_n_s_init, 402, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__70)) __PYX_ERR(0, 402, __pyx_L1_error) /* "borg/compress.pyx":441 * * @property * def compressor(self): # <<<<<<<<<<<<<< * if self.name in ('none', 'lz4', ): * return get_compressor(self.name) */ __pyx_tuple__71 = PyTuple_Pack(1, __pyx_n_s_self); if (unlikely(!__pyx_tuple__71)) __PYX_ERR(0, 441, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__71); __Pyx_GIVEREF(__pyx_tuple__71); __pyx_codeobj__72 = (PyObject*)__Pyx_PyCode_New(1, 0, 1, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__71, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_src_borg_compress_pyx, __pyx_n_s_compressor, 441, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__72)) __PYX_ERR(0, 441, __pyx_L1_error) /* "(tree fragment)":1 * def __pyx_unpickle_CompressorBase(__pyx_type, long __pyx_checksum, __pyx_state): # <<<<<<<<<<<<<< * cdef object __pyx_PickleError * cdef object __pyx_result */ __pyx_tuple__73 = PyTuple_Pack(5, __pyx_n_s_pyx_type, __pyx_n_s_pyx_checksum, __pyx_n_s_pyx_state, __pyx_n_s_pyx_PickleError, __pyx_n_s_pyx_result); if (unlikely(!__pyx_tuple__73)) __PYX_ERR(1, 1, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__73); __Pyx_GIVEREF(__pyx_tuple__73); __pyx_codeobj__74 = (PyObject*)__Pyx_PyCode_New(3, 0, 5, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__73, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_stringsource, __pyx_n_s_pyx_unpickle_CompressorBase, 1, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__74)) __PYX_ERR(1, 1, __pyx_L1_error) __Pyx_RefNannyFinishContext(); return 0; __pyx_L1_error:; __Pyx_RefNannyFinishContext(); return -1; } static CYTHON_SMALL_CODE int __Pyx_InitGlobals(void) { if (__Pyx_InitStrings(__pyx_string_tab) < 0) __PYX_ERR(0, 1, __pyx_L1_error); __pyx_int_0 = PyInt_FromLong(0); if (unlikely(!__pyx_int_0)) __PYX_ERR(0, 1, __pyx_L1_error) __pyx_int_1 = PyInt_FromLong(1); if (unlikely(!__pyx_int_1)) __PYX_ERR(0, 1, __pyx_L1_error) __pyx_int_2 = PyInt_FromLong(2); if (unlikely(!__pyx_int_2)) __PYX_ERR(0, 1, 
__pyx_L1_error) __pyx_int_3 = PyInt_FromLong(3); if (unlikely(!__pyx_int_3)) __PYX_ERR(0, 1, __pyx_L1_error) __pyx_int_6 = PyInt_FromLong(6); if (unlikely(!__pyx_int_6)) __PYX_ERR(0, 1, __pyx_L1_error) __pyx_int_8 = PyInt_FromLong(8); if (unlikely(!__pyx_int_8)) __PYX_ERR(0, 1, __pyx_L1_error) __pyx_int_9 = PyInt_FromLong(9); if (unlikely(!__pyx_int_9)) __PYX_ERR(0, 1, __pyx_L1_error) __pyx_int_15 = PyInt_FromLong(15); if (unlikely(!__pyx_int_15)) __PYX_ERR(0, 1, __pyx_L1_error) __pyx_int_22 = PyInt_FromLong(22); if (unlikely(!__pyx_int_22)) __PYX_ERR(0, 1, __pyx_L1_error) __pyx_int_31 = PyInt_FromLong(31); if (unlikely(!__pyx_int_31)) __PYX_ERR(0, 1, __pyx_L1_error) __pyx_int_256 = PyInt_FromLong(256); if (unlikely(!__pyx_int_256)) __PYX_ERR(0, 1, __pyx_L1_error) __pyx_int_222419149 = PyInt_FromLong(222419149L); if (unlikely(!__pyx_int_222419149)) __PYX_ERR(0, 1, __pyx_L1_error) return 0; __pyx_L1_error:; return -1; } static CYTHON_SMALL_CODE int __Pyx_modinit_global_init_code(void); /*proto*/ static CYTHON_SMALL_CODE int __Pyx_modinit_variable_export_code(void); /*proto*/ static CYTHON_SMALL_CODE int __Pyx_modinit_function_export_code(void); /*proto*/ static CYTHON_SMALL_CODE int __Pyx_modinit_type_init_code(void); /*proto*/ static CYTHON_SMALL_CODE int __Pyx_modinit_type_import_code(void); /*proto*/ static CYTHON_SMALL_CODE int __Pyx_modinit_variable_import_code(void); /*proto*/ static CYTHON_SMALL_CODE int __Pyx_modinit_function_import_code(void); /*proto*/ static int __Pyx_modinit_global_init_code(void) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__Pyx_modinit_global_init_code", 0); /*--- Global init code ---*/ __Pyx_RefNannyFinishContext(); return 0; } static int __Pyx_modinit_variable_export_code(void) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__Pyx_modinit_variable_export_code", 0); /*--- Variable export code ---*/ __Pyx_RefNannyFinishContext(); return 0; } static int __Pyx_modinit_function_export_code(void) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__Pyx_modinit_function_export_code", 0); /*--- Function export code ---*/ __Pyx_RefNannyFinishContext(); return 0; } static int __Pyx_modinit_type_init_code(void) { __Pyx_RefNannyDeclarations int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__Pyx_modinit_type_init_code", 0); /*--- Type init code ---*/ if (PyType_Ready(&__pyx_type_4borg_8compress_CompressorBase) < 0) __PYX_ERR(0, 52, __pyx_L1_error) #if PY_VERSION_HEX < 0x030800B1 __pyx_type_4borg_8compress_CompressorBase.tp_print = 0; #endif if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_type_4borg_8compress_CompressorBase.tp_dictoffset && __pyx_type_4borg_8compress_CompressorBase.tp_getattro == PyObject_GenericGetAttr)) { __pyx_type_4borg_8compress_CompressorBase.tp_getattro = __Pyx_PyObject_GenericGetAttr; } if (PyObject_SetAttr(__pyx_m, __pyx_n_s_CompressorBase, (PyObject *)&__pyx_type_4borg_8compress_CompressorBase) < 0) __PYX_ERR(0, 52, __pyx_L1_error) if (__Pyx_setup_reduce((PyObject*)&__pyx_type_4borg_8compress_CompressorBase) < 0) __PYX_ERR(0, 52, __pyx_L1_error) __pyx_ptype_4borg_8compress_CompressorBase = &__pyx_type_4borg_8compress_CompressorBase; __Pyx_RefNannyFinishContext(); return 0; __pyx_L1_error:; __Pyx_RefNannyFinishContext(); return -1; } static int __Pyx_modinit_type_import_code(void) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__Pyx_modinit_type_import_code", 0); /*--- Type import code ---*/ __Pyx_RefNannyFinishContext(); 
return 0; } static int __Pyx_modinit_variable_import_code(void) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__Pyx_modinit_variable_import_code", 0); /*--- Variable import code ---*/ __Pyx_RefNannyFinishContext(); return 0; } static int __Pyx_modinit_function_import_code(void) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__Pyx_modinit_function_import_code", 0); /*--- Function import code ---*/ __Pyx_RefNannyFinishContext(); return 0; } #ifndef CYTHON_NO_PYINIT_EXPORT #define __Pyx_PyMODINIT_FUNC PyMODINIT_FUNC #elif PY_MAJOR_VERSION < 3 #ifdef __cplusplus #define __Pyx_PyMODINIT_FUNC extern "C" void #else #define __Pyx_PyMODINIT_FUNC void #endif #else #ifdef __cplusplus #define __Pyx_PyMODINIT_FUNC extern "C" PyObject * #else #define __Pyx_PyMODINIT_FUNC PyObject * #endif #endif #if PY_MAJOR_VERSION < 3 __Pyx_PyMODINIT_FUNC initcompress(void) CYTHON_SMALL_CODE; /*proto*/ __Pyx_PyMODINIT_FUNC initcompress(void) #else __Pyx_PyMODINIT_FUNC PyInit_compress(void) CYTHON_SMALL_CODE; /*proto*/ __Pyx_PyMODINIT_FUNC PyInit_compress(void) #if CYTHON_PEP489_MULTI_PHASE_INIT { return PyModuleDef_Init(&__pyx_moduledef); } static CYTHON_SMALL_CODE int __Pyx_check_single_interpreter(void) { #if PY_VERSION_HEX >= 0x030700A1 static PY_INT64_T main_interpreter_id = -1; PY_INT64_T current_id = PyInterpreterState_GetID(PyThreadState_Get()->interp); if (main_interpreter_id == -1) { main_interpreter_id = current_id; return (unlikely(current_id == -1)) ? -1 : 0; } else if (unlikely(main_interpreter_id != current_id)) #else static PyInterpreterState *main_interpreter = NULL; PyInterpreterState *current_interpreter = PyThreadState_Get()->interp; if (!main_interpreter) { main_interpreter = current_interpreter; } else if (unlikely(main_interpreter != current_interpreter)) #endif { PyErr_SetString( PyExc_ImportError, "Interpreter change detected - this module can only be loaded into one interpreter per process."); return -1; } return 0; } static CYTHON_SMALL_CODE int __Pyx_copy_spec_to_module(PyObject *spec, PyObject *moddict, const char* from_name, const char* to_name, int allow_none) { PyObject *value = PyObject_GetAttrString(spec, from_name); int result = 0; if (likely(value)) { if (allow_none || value != Py_None) { result = PyDict_SetItemString(moddict, to_name, value); } Py_DECREF(value); } else if (PyErr_ExceptionMatches(PyExc_AttributeError)) { PyErr_Clear(); } else { result = -1; } return result; } static CYTHON_SMALL_CODE PyObject* __pyx_pymod_create(PyObject *spec, CYTHON_UNUSED PyModuleDef *def) { PyObject *module = NULL, *moddict, *modname; if (__Pyx_check_single_interpreter()) return NULL; if (__pyx_m) return __Pyx_NewRef(__pyx_m); modname = PyObject_GetAttrString(spec, "name"); if (unlikely(!modname)) goto bad; module = PyModule_NewObject(modname); Py_DECREF(modname); if (unlikely(!module)) goto bad; moddict = PyModule_GetDict(module); if (unlikely(!moddict)) goto bad; if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "loader", "__loader__", 1) < 0)) goto bad; if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "origin", "__file__", 1) < 0)) goto bad; if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "parent", "__package__", 1) < 0)) goto bad; if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "submodule_search_locations", "__path__", 0) < 0)) goto bad; return module; bad: Py_XDECREF(module); return NULL; } static CYTHON_SMALL_CODE int __pyx_pymod_exec_compress(PyObject *__pyx_pyinit_module) #endif #endif { PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; 
PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; int __pyx_t_5; PyObject *__pyx_t_6 = NULL; PyObject *__pyx_t_7 = NULL; PyObject *__pyx_t_8 = NULL; PyObject *__pyx_t_9 = NULL; PyObject *__pyx_t_10 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannyDeclarations #if CYTHON_PEP489_MULTI_PHASE_INIT if (__pyx_m) { if (__pyx_m == __pyx_pyinit_module) return 0; PyErr_SetString(PyExc_RuntimeError, "Module 'compress' has already been imported. Re-initialisation is not supported."); return -1; } #elif PY_MAJOR_VERSION >= 3 if (__pyx_m) return __Pyx_NewRef(__pyx_m); #endif #if CYTHON_REFNANNY __Pyx_RefNanny = __Pyx_RefNannyImportAPI("refnanny"); if (!__Pyx_RefNanny) { PyErr_Clear(); __Pyx_RefNanny = __Pyx_RefNannyImportAPI("Cython.Runtime.refnanny"); if (!__Pyx_RefNanny) Py_FatalError("failed to import 'refnanny' module"); } #endif __Pyx_RefNannySetupContext("__Pyx_PyMODINIT_FUNC PyInit_compress(void)", 0); if (__Pyx_check_binary_version() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #ifdef __Pxy_PyFrame_Initialize_Offsets __Pxy_PyFrame_Initialize_Offsets(); #endif __pyx_empty_tuple = PyTuple_New(0); if (unlikely(!__pyx_empty_tuple)) __PYX_ERR(0, 1, __pyx_L1_error) __pyx_empty_bytes = PyBytes_FromStringAndSize("", 0); if (unlikely(!__pyx_empty_bytes)) __PYX_ERR(0, 1, __pyx_L1_error) __pyx_empty_unicode = PyUnicode_FromStringAndSize("", 0); if (unlikely(!__pyx_empty_unicode)) __PYX_ERR(0, 1, __pyx_L1_error) #ifdef __Pyx_CyFunction_USED if (__pyx_CyFunction_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif #ifdef __Pyx_FusedFunction_USED if (__pyx_FusedFunction_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif #ifdef __Pyx_Coroutine_USED if (__pyx_Coroutine_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif #ifdef __Pyx_Generator_USED if (__pyx_Generator_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif #ifdef __Pyx_AsyncGen_USED if (__pyx_AsyncGen_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif #ifdef __Pyx_StopAsyncIteration_USED if (__pyx_StopAsyncIteration_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif /*--- Library function declarations ---*/ /*--- Threads initialization code ---*/ #if defined(__PYX_FORCE_INIT_THREADS) && __PYX_FORCE_INIT_THREADS #ifdef WITH_THREAD /* Python build with threading support? */ PyEval_InitThreads(); #endif #endif /*--- Module creation code ---*/ #if CYTHON_PEP489_MULTI_PHASE_INIT __pyx_m = __pyx_pyinit_module; Py_INCREF(__pyx_m); #else #if PY_MAJOR_VERSION < 3 __pyx_m = Py_InitModule4("compress", __pyx_methods, __pyx_k_borg_compress_Compression_is_ap, 0, PYTHON_API_VERSION); Py_XINCREF(__pyx_m); #else __pyx_m = PyModule_Create(&__pyx_moduledef); #endif if (unlikely(!__pyx_m)) __PYX_ERR(0, 1, __pyx_L1_error) #endif __pyx_d = PyModule_GetDict(__pyx_m); if (unlikely(!__pyx_d)) __PYX_ERR(0, 1, __pyx_L1_error) Py_INCREF(__pyx_d); __pyx_b = PyImport_AddModule(__Pyx_BUILTIN_MODULE_NAME); if (unlikely(!__pyx_b)) __PYX_ERR(0, 1, __pyx_L1_error) Py_INCREF(__pyx_b); __pyx_cython_runtime = PyImport_AddModule((char *) "cython_runtime"); if (unlikely(!__pyx_cython_runtime)) __PYX_ERR(0, 1, __pyx_L1_error) Py_INCREF(__pyx_cython_runtime); if (PyObject_SetAttrString(__pyx_m, "__builtins__", __pyx_b) < 0) __PYX_ERR(0, 1, __pyx_L1_error); /*--- Initialize various global constants etc. 
---*/ if (__Pyx_InitGlobals() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #if PY_MAJOR_VERSION < 3 && (__PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT) if (__Pyx_init_sys_getdefaultencoding_params() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif if (__pyx_module_is_main_borg__compress) { if (PyObject_SetAttr(__pyx_m, __pyx_n_s_name_2, __pyx_n_s_main) < 0) __PYX_ERR(0, 1, __pyx_L1_error) } #if PY_MAJOR_VERSION >= 3 { PyObject *modules = PyImport_GetModuleDict(); if (unlikely(!modules)) __PYX_ERR(0, 1, __pyx_L1_error) if (!PyDict_GetItemString(modules, "borg.compress")) { if (unlikely(PyDict_SetItemString(modules, "borg.compress", __pyx_m) < 0)) __PYX_ERR(0, 1, __pyx_L1_error) } } #endif /*--- Builtin init code ---*/ if (__Pyx_InitCachedBuiltins() < 0) __PYX_ERR(0, 1, __pyx_L1_error) /*--- Constants init code ---*/ if (__Pyx_InitCachedConstants() < 0) __PYX_ERR(0, 1, __pyx_L1_error) /*--- Global type/function init code ---*/ (void)__Pyx_modinit_global_init_code(); (void)__Pyx_modinit_variable_export_code(); (void)__Pyx_modinit_function_export_code(); if (unlikely(__Pyx_modinit_type_init_code() < 0)) __PYX_ERR(0, 1, __pyx_L1_error) (void)__Pyx_modinit_type_import_code(); (void)__Pyx_modinit_variable_import_code(); (void)__Pyx_modinit_function_import_code(); /*--- Execution code ---*/ #if defined(__Pyx_Generator_USED) || defined(__Pyx_Coroutine_USED) if (__Pyx_patch_abc() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif /* "borg/compress.pyx":20 * """ * * import zlib # <<<<<<<<<<<<<< * * try: */ __pyx_t_1 = __Pyx_Import(__pyx_n_s_zlib, 0, 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 20, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); if (PyDict_SetItem(__pyx_d, __pyx_n_s_zlib, __pyx_t_1) < 0) __PYX_ERR(0, 20, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "borg/compress.pyx":22 * import zlib * * try: # <<<<<<<<<<<<<< * import lzma * except ImportError: */ { __Pyx_PyThreadState_declare __Pyx_PyThreadState_assign __Pyx_ExceptionSave(&__pyx_t_2, &__pyx_t_3, &__pyx_t_4); __Pyx_XGOTREF(__pyx_t_2); __Pyx_XGOTREF(__pyx_t_3); __Pyx_XGOTREF(__pyx_t_4); /*try:*/ { /* "borg/compress.pyx":23 * * try: * import lzma # <<<<<<<<<<<<<< * except ImportError: * lzma = None */ __pyx_t_1 = __Pyx_Import(__pyx_n_s_lzma, 0, 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 23, __pyx_L2_error) __Pyx_GOTREF(__pyx_t_1); if (PyDict_SetItem(__pyx_d, __pyx_n_s_lzma, __pyx_t_1) < 0) __PYX_ERR(0, 23, __pyx_L2_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "borg/compress.pyx":22 * import zlib * * try: # <<<<<<<<<<<<<< * import lzma * except ImportError: */ } __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; goto __pyx_L7_try_end; __pyx_L2_error:; __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0; /* "borg/compress.pyx":24 * try: * import lzma * except ImportError: # <<<<<<<<<<<<<< * lzma = None * */ __pyx_t_5 = __Pyx_PyErr_ExceptionMatches(__pyx_builtin_ImportError); if (__pyx_t_5) { __Pyx_AddTraceback("borg.compress", __pyx_clineno, __pyx_lineno, __pyx_filename); if (__Pyx_GetException(&__pyx_t_1, &__pyx_t_6, &__pyx_t_7) < 0) __PYX_ERR(0, 24, __pyx_L4_except_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_GOTREF(__pyx_t_6); __Pyx_GOTREF(__pyx_t_7); /* "borg/compress.pyx":25 * import lzma * except ImportError: * lzma = None # <<<<<<<<<<<<<< * * */ if (PyDict_SetItem(__pyx_d, __pyx_n_s_lzma, Py_None) < 0) __PYX_ERR(0, 25, __pyx_L4_except_error) __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_XDECREF(__pyx_t_7); 
__pyx_t_7 = 0; goto __pyx_L3_exception_handled; } goto __pyx_L4_except_error; __pyx_L4_except_error:; /* "borg/compress.pyx":22 * import zlib * * try: # <<<<<<<<<<<<<< * import lzma * except ImportError: */ __Pyx_XGIVEREF(__pyx_t_2); __Pyx_XGIVEREF(__pyx_t_3); __Pyx_XGIVEREF(__pyx_t_4); __Pyx_ExceptionReset(__pyx_t_2, __pyx_t_3, __pyx_t_4); goto __pyx_L1_error; __pyx_L3_exception_handled:; __Pyx_XGIVEREF(__pyx_t_2); __Pyx_XGIVEREF(__pyx_t_3); __Pyx_XGIVEREF(__pyx_t_4); __Pyx_ExceptionReset(__pyx_t_2, __pyx_t_3, __pyx_t_4); __pyx_L7_try_end:; } /* "borg/compress.pyx":28 * * * from .helpers import Buffer, DecompressionError # <<<<<<<<<<<<<< * * API_VERSION = '1.1_06' */ __pyx_t_7 = PyList_New(2); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 28, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_INCREF(__pyx_n_s_Buffer); __Pyx_GIVEREF(__pyx_n_s_Buffer); PyList_SET_ITEM(__pyx_t_7, 0, __pyx_n_s_Buffer); __Pyx_INCREF(__pyx_n_s_DecompressionError); __Pyx_GIVEREF(__pyx_n_s_DecompressionError); PyList_SET_ITEM(__pyx_t_7, 1, __pyx_n_s_DecompressionError); __pyx_t_6 = __Pyx_Import(__pyx_n_s_helpers, __pyx_t_7, 1); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 28, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; __pyx_t_7 = __Pyx_ImportFrom(__pyx_t_6, __pyx_n_s_Buffer); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 28, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); if (PyDict_SetItem(__pyx_d, __pyx_n_s_Buffer, __pyx_t_7) < 0) __PYX_ERR(0, 28, __pyx_L1_error) __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; __pyx_t_7 = __Pyx_ImportFrom(__pyx_t_6, __pyx_n_s_DecompressionError); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 28, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); if (PyDict_SetItem(__pyx_d, __pyx_n_s_DecompressionError, __pyx_t_7) < 0) __PYX_ERR(0, 28, __pyx_L1_error) __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; /* "borg/compress.pyx":30 * from .helpers import Buffer, DecompressionError * * API_VERSION = '1.1_06' # <<<<<<<<<<<<<< * * cdef extern from "algorithms/lz4-libselect.h": */ if (PyDict_SetItem(__pyx_d, __pyx_n_s_API_VERSION, __pyx_kp_u_1_1_06) < 0) __PYX_ERR(0, 30, __pyx_L1_error) /* "borg/compress.pyx":49 * * * buffer = Buffer(bytearray, size=0) # <<<<<<<<<<<<<< * * */ __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_n_s_Buffer); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 49, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __pyx_t_7 = PyTuple_New(1); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 49, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_INCREF(((PyObject *)(&PyByteArray_Type))); __Pyx_GIVEREF(((PyObject *)(&PyByteArray_Type))); PyTuple_SET_ITEM(__pyx_t_7, 0, ((PyObject *)(&PyByteArray_Type))); __pyx_t_1 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 49, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); if (PyDict_SetItem(__pyx_t_1, __pyx_n_s_size, __pyx_int_0) < 0) __PYX_ERR(0, 49, __pyx_L1_error) __pyx_t_8 = __Pyx_PyObject_Call(__pyx_t_6, __pyx_t_7, __pyx_t_1); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 49, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_8); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; if (PyDict_SetItem(__pyx_d, __pyx_n_s_buffer, __pyx_t_8) < 0) __PYX_ERR(0, 49, __pyx_L1_error) __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; /* "borg/compress.pyx":58 * adding/stripping the ID header (which enable auto detection). 
* """ * ID = b'\xFF\xFF' # reserved and not used # <<<<<<<<<<<<<< * # overwrite with a unique 2-bytes bytestring in child classes * name = 'baseclass' */ if (PyDict_SetItem((PyObject *)__pyx_ptype_4borg_8compress_CompressorBase->tp_dict, __pyx_n_s_ID, __pyx_kp_b__7) < 0) __PYX_ERR(0, 58, __pyx_L1_error) PyType_Modified(__pyx_ptype_4borg_8compress_CompressorBase); /* "borg/compress.pyx":60 * ID = b'\xFF\xFF' # reserved and not used * # overwrite with a unique 2-bytes bytestring in child classes * name = 'baseclass' # <<<<<<<<<<<<<< * * @classmethod */ if (PyDict_SetItem((PyObject *)__pyx_ptype_4borg_8compress_CompressorBase->tp_dict, __pyx_n_s_name, __pyx_n_u_baseclass) < 0) __PYX_ERR(0, 60, __pyx_L1_error) PyType_Modified(__pyx_ptype_4borg_8compress_CompressorBase); /* "borg/compress.pyx":63 * * @classmethod * def detect(cls, data): # <<<<<<<<<<<<<< * return data.startswith(cls.ID) * */ __Pyx_GetNameInClass(__pyx_t_8, (PyObject *)__pyx_ptype_4borg_8compress_CompressorBase, __pyx_n_s_detect); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 63, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_8); /* "borg/compress.pyx":62 * name = 'baseclass' * * @classmethod # <<<<<<<<<<<<<< * def detect(cls, data): * return data.startswith(cls.ID) */ __pyx_t_1 = __Pyx_Method_ClassMethod(__pyx_t_8); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 62, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; if (PyDict_SetItem((PyObject *)__pyx_ptype_4borg_8compress_CompressorBase->tp_dict, __pyx_n_s_detect, __pyx_t_1) < 0) __PYX_ERR(0, 63, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; PyType_Modified(__pyx_ptype_4borg_8compress_CompressorBase); /* "borg/compress.pyx":101 * * * class CNONE(CompressorBase): # <<<<<<<<<<<<<< * """ * none - no compression, just pass through data */ __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 101, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_INCREF(((PyObject *)__pyx_ptype_4borg_8compress_CompressorBase)); __Pyx_GIVEREF(((PyObject *)__pyx_ptype_4borg_8compress_CompressorBase)); PyTuple_SET_ITEM(__pyx_t_1, 0, ((PyObject *)__pyx_ptype_4borg_8compress_CompressorBase)); __pyx_t_8 = __Pyx_CalculateMetaclass(NULL, __pyx_t_1); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 101, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_8); __pyx_t_7 = __Pyx_Py3MetaclassPrepare(__pyx_t_8, __pyx_t_1, __pyx_n_s_CNONE, __pyx_n_s_CNONE, (PyObject *) NULL, __pyx_n_s_borg_compress, __pyx_kp_s_none_no_compression_just_pass_t); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 101, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __pyx_t_6 = PyList_New(0); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 101, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); /* "borg/compress.pyx":105 * none - no compression, just pass through data * """ * ID = b'\x00\x00' # <<<<<<<<<<<<<< * name = 'none' * */ if (__Pyx_SetNameInClass(__pyx_t_7, __pyx_n_s_ID, __pyx_kp_b__8) < 0) __PYX_ERR(0, 105, __pyx_L1_error) /* "borg/compress.pyx":106 * """ * ID = b'\x00\x00' * name = 'none' # <<<<<<<<<<<<<< * * def compress(self, data): */ if (__Pyx_SetNameInClass(__pyx_t_7, __pyx_n_s_name, __pyx_n_u_none) < 0) __PYX_ERR(0, 106, __pyx_L1_error) /* "borg/compress.pyx":108 * name = 'none' * * def compress(self, data): # <<<<<<<<<<<<<< * return super().compress(data) * */ __pyx_t_9 = __Pyx_CyFunction_New(&__pyx_mdef_4borg_8compress_5CNONE_1compress, 0, __pyx_n_s_CNONE_compress, NULL, __pyx_n_s_borg_compress, __pyx_d, ((PyObject *)__pyx_codeobj__10)); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 108, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_INCREF(__pyx_t_9); 
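/* Note on the chunk format, taken from the class attributes echoed above and below:
   every compressor marks its output with a 2-byte ID header, and the base class
   detect() simply does data.startswith(cls.ID).  IDs defined in this module:
   b'\xFF\xFF' reserved (CompressorBase), b'\x00\x00' none, b'\x01\x00' lz4,
   b'\x02\x00' lzma, b'\x03\x00' zstd; zlib is the exception - its nominal ID
   b'\x08\x00' is unused and ZLIB.detect() instead matches the 0x.8.. bit patterns
   of a raw zlib stream (hence the "avoid all 0x.8.. IDs elsewhere" remark). */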
PyList_Append(__pyx_t_6, __pyx_t_9); __Pyx_GIVEREF(__pyx_t_9); if (__Pyx_SetNameInClass(__pyx_t_7, __pyx_n_s_compress, __pyx_t_9) < 0) __PYX_ERR(0, 108, __pyx_L1_error) __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; /* "borg/compress.pyx":111 * return super().compress(data) * * def decompress(self, data): # <<<<<<<<<<<<<< * data = super().decompress(data) * if not isinstance(data, bytes): */ __pyx_t_9 = __Pyx_CyFunction_New(&__pyx_mdef_4borg_8compress_5CNONE_3decompress, 0, __pyx_n_s_CNONE_decompress, NULL, __pyx_n_s_borg_compress, __pyx_d, ((PyObject *)__pyx_codeobj__12)); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 111, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_INCREF(__pyx_t_9); PyList_Append(__pyx_t_6, __pyx_t_9); __Pyx_GIVEREF(__pyx_t_9); if (__Pyx_SetNameInClass(__pyx_t_7, __pyx_n_s_decompress, __pyx_t_9) < 0) __PYX_ERR(0, 111, __pyx_L1_error) __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; /* "borg/compress.pyx":101 * * * class CNONE(CompressorBase): # <<<<<<<<<<<<<< * """ * none - no compression, just pass through data */ __pyx_t_9 = __Pyx_Py3ClassCreate(__pyx_t_8, __pyx_n_s_CNONE, __pyx_t_1, __pyx_t_7, NULL, 0, 0); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 101, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); if (__Pyx_CyFunction_InitClassCell(__pyx_t_6, __pyx_t_9) < 0) __PYX_ERR(0, 101, __pyx_L1_error) __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; if (PyDict_SetItem(__pyx_d, __pyx_n_s_CNONE, __pyx_t_9) < 0) __PYX_ERR(0, 101, __pyx_L1_error) __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "borg/compress.pyx":118 * * * class LZ4(CompressorBase): # <<<<<<<<<<<<<< * """ * raw LZ4 compression / decompression (liblz4). */ __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 118, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_INCREF(((PyObject *)__pyx_ptype_4borg_8compress_CompressorBase)); __Pyx_GIVEREF(((PyObject *)__pyx_ptype_4borg_8compress_CompressorBase)); PyTuple_SET_ITEM(__pyx_t_1, 0, ((PyObject *)__pyx_ptype_4borg_8compress_CompressorBase)); __pyx_t_8 = __Pyx_CalculateMetaclass(NULL, __pyx_t_1); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 118, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_8); __pyx_t_7 = __Pyx_Py3MetaclassPrepare(__pyx_t_8, __pyx_t_1, __pyx_n_s_LZ4, __pyx_n_s_LZ4, (PyObject *) NULL, __pyx_n_s_borg_compress, __pyx_kp_s_raw_LZ4_compression_decompressi); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 118, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __pyx_t_9 = PyList_New(0); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 118, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); /* "borg/compress.pyx":127 * - uses safe lz4 methods that never go beyond the end of the output buffer * """ * ID = b'\x01\x00' # <<<<<<<<<<<<<< * name = 'lz4' * */ if (__Pyx_SetNameInClass(__pyx_t_7, __pyx_n_s_ID, __pyx_kp_b__13) < 0) __PYX_ERR(0, 127, __pyx_L1_error) /* "borg/compress.pyx":128 * """ * ID = b'\x01\x00' * name = 'lz4' # <<<<<<<<<<<<<< * * def __init__(self, **kwargs): */ if (__Pyx_SetNameInClass(__pyx_t_7, __pyx_n_s_name, __pyx_n_u_lz4) < 0) __PYX_ERR(0, 128, __pyx_L1_error) /* "borg/compress.pyx":130 * name = 'lz4' * * def __init__(self, **kwargs): # <<<<<<<<<<<<<< * pass * */ __pyx_t_6 = __Pyx_CyFunction_New(&__pyx_mdef_4borg_8compress_3LZ4_1__init__, 0, __pyx_n_s_LZ4___init, NULL, __pyx_n_s_borg_compress, __pyx_d, ((PyObject *)__pyx_codeobj__15)); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 130, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); if (__Pyx_SetNameInClass(__pyx_t_7, __pyx_n_s_init, __pyx_t_6) < 0) __PYX_ERR(0, 130, 
__pyx_L1_error) __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; /* "borg/compress.pyx":133 * pass * * def compress(self, idata): # <<<<<<<<<<<<<< * if not isinstance(idata, bytes): * idata = bytes(idata) # code below does not work with memoryview */ __pyx_t_6 = __Pyx_CyFunction_New(&__pyx_mdef_4borg_8compress_3LZ4_3compress, 0, __pyx_n_s_LZ4_compress, NULL, __pyx_n_s_borg_compress, __pyx_d, ((PyObject *)__pyx_codeobj__17)); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 133, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_INCREF(__pyx_t_6); PyList_Append(__pyx_t_9, __pyx_t_6); __Pyx_GIVEREF(__pyx_t_6); if (__Pyx_SetNameInClass(__pyx_t_7, __pyx_n_s_compress, __pyx_t_6) < 0) __PYX_ERR(0, 133, __pyx_L1_error) __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; /* "borg/compress.pyx":148 * return super().compress(dest[:osize]) * * def decompress(self, idata): # <<<<<<<<<<<<<< * if not isinstance(idata, bytes): * idata = bytes(idata) # code below does not work with memoryview */ __pyx_t_6 = __Pyx_CyFunction_New(&__pyx_mdef_4borg_8compress_3LZ4_5decompress, 0, __pyx_n_s_LZ4_decompress, NULL, __pyx_n_s_borg_compress, __pyx_d, ((PyObject *)__pyx_codeobj__19)); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 148, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_INCREF(__pyx_t_6); PyList_Append(__pyx_t_9, __pyx_t_6); __Pyx_GIVEREF(__pyx_t_6); if (__Pyx_SetNameInClass(__pyx_t_7, __pyx_n_s_decompress, __pyx_t_6) < 0) __PYX_ERR(0, 148, __pyx_L1_error) __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; /* "borg/compress.pyx":118 * * * class LZ4(CompressorBase): # <<<<<<<<<<<<<< * """ * raw LZ4 compression / decompression (liblz4). */ __pyx_t_6 = __Pyx_Py3ClassCreate(__pyx_t_8, __pyx_n_s_LZ4, __pyx_t_1, __pyx_t_7, NULL, 0, 0); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 118, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); if (__Pyx_CyFunction_InitClassCell(__pyx_t_9, __pyx_t_6) < 0) __PYX_ERR(0, 118, __pyx_L1_error) __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; if (PyDict_SetItem(__pyx_d, __pyx_n_s_LZ4, __pyx_t_6) < 0) __PYX_ERR(0, 118, __pyx_L1_error) __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "borg/compress.pyx":177 * * * class LZMA(CompressorBase): # <<<<<<<<<<<<<< * """ * lzma compression / decompression */ __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 177, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_INCREF(((PyObject *)__pyx_ptype_4borg_8compress_CompressorBase)); __Pyx_GIVEREF(((PyObject *)__pyx_ptype_4borg_8compress_CompressorBase)); PyTuple_SET_ITEM(__pyx_t_1, 0, ((PyObject *)__pyx_ptype_4borg_8compress_CompressorBase)); __pyx_t_8 = __Pyx_CalculateMetaclass(NULL, __pyx_t_1); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 177, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_8); __pyx_t_7 = __Pyx_Py3MetaclassPrepare(__pyx_t_8, __pyx_t_1, __pyx_n_s_LZMA, __pyx_n_s_LZMA, (PyObject *) NULL, __pyx_n_s_borg_compress, __pyx_kp_s_lzma_compression_decompression); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 177, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __pyx_t_6 = PyList_New(0); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 177, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); /* "borg/compress.pyx":181 * lzma compression / decompression * """ * ID = b'\x02\x00' # <<<<<<<<<<<<<< * name = 'lzma' * */ if (__Pyx_SetNameInClass(__pyx_t_7, __pyx_n_s_ID, __pyx_kp_b__20) < 0) __PYX_ERR(0, 181, __pyx_L1_error) /* "borg/compress.pyx":182 * """ * ID = b'\x02\x00' * name = 'lzma' # <<<<<<<<<<<<<< * * def __init__(self, level=6, **kwargs): */ if (__Pyx_SetNameInClass(__pyx_t_7, 
__pyx_n_s_name, __pyx_n_u_lzma) < 0) __PYX_ERR(0, 182, __pyx_L1_error) /* "borg/compress.pyx":184 * name = 'lzma' * * def __init__(self, level=6, **kwargs): # <<<<<<<<<<<<<< * super().__init__(**kwargs) * self.level = level */ __pyx_t_9 = __Pyx_CyFunction_New(&__pyx_mdef_4borg_8compress_4LZMA_1__init__, 0, __pyx_n_s_LZMA___init, NULL, __pyx_n_s_borg_compress, __pyx_d, ((PyObject *)__pyx_codeobj__22)); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 184, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_INCREF(__pyx_t_9); PyList_Append(__pyx_t_6, __pyx_t_9); __Pyx_GIVEREF(__pyx_t_9); __Pyx_CyFunction_SetDefaultsTuple(__pyx_t_9, __pyx_tuple__23); if (__Pyx_SetNameInClass(__pyx_t_7, __pyx_n_s_init, __pyx_t_9) < 0) __PYX_ERR(0, 184, __pyx_L1_error) __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; /* "borg/compress.pyx":190 * raise ValueError('No lzma support found.') * * def compress(self, data): # <<<<<<<<<<<<<< * # we do not need integrity checks in lzma, we do that already * data = lzma.compress(data, preset=self.level, check=lzma.CHECK_NONE) */ __pyx_t_9 = __Pyx_CyFunction_New(&__pyx_mdef_4borg_8compress_4LZMA_3compress, 0, __pyx_n_s_LZMA_compress, NULL, __pyx_n_s_borg_compress, __pyx_d, ((PyObject *)__pyx_codeobj__25)); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 190, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_INCREF(__pyx_t_9); PyList_Append(__pyx_t_6, __pyx_t_9); __Pyx_GIVEREF(__pyx_t_9); if (__Pyx_SetNameInClass(__pyx_t_7, __pyx_n_s_compress, __pyx_t_9) < 0) __PYX_ERR(0, 190, __pyx_L1_error) __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; /* "borg/compress.pyx":195 * return super().compress(data) * * def decompress(self, data): # <<<<<<<<<<<<<< * data = super().decompress(data) * try: */ __pyx_t_9 = __Pyx_CyFunction_New(&__pyx_mdef_4borg_8compress_4LZMA_5decompress, 0, __pyx_n_s_LZMA_decompress, NULL, __pyx_n_s_borg_compress, __pyx_d, ((PyObject *)__pyx_codeobj__27)); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 195, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_INCREF(__pyx_t_9); PyList_Append(__pyx_t_6, __pyx_t_9); __Pyx_GIVEREF(__pyx_t_9); if (__Pyx_SetNameInClass(__pyx_t_7, __pyx_n_s_decompress, __pyx_t_9) < 0) __PYX_ERR(0, 195, __pyx_L1_error) __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; /* "borg/compress.pyx":177 * * * class LZMA(CompressorBase): # <<<<<<<<<<<<<< * """ * lzma compression / decompression */ __pyx_t_9 = __Pyx_Py3ClassCreate(__pyx_t_8, __pyx_n_s_LZMA, __pyx_t_1, __pyx_t_7, NULL, 0, 0); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 177, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); if (__Pyx_CyFunction_InitClassCell(__pyx_t_6, __pyx_t_9) < 0) __PYX_ERR(0, 177, __pyx_L1_error) __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; if (PyDict_SetItem(__pyx_d, __pyx_n_s_LZMA, __pyx_t_9) < 0) __PYX_ERR(0, 177, __pyx_L1_error) __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "borg/compress.pyx":203 * * * class ZSTD(CompressorBase): # <<<<<<<<<<<<<< * """zstd compression / decompression (pypi: zstandard, gh: python-zstandard)""" * # This is a NOT THREAD SAFE implementation. 
*/ __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 203, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_INCREF(((PyObject *)__pyx_ptype_4borg_8compress_CompressorBase)); __Pyx_GIVEREF(((PyObject *)__pyx_ptype_4borg_8compress_CompressorBase)); PyTuple_SET_ITEM(__pyx_t_1, 0, ((PyObject *)__pyx_ptype_4borg_8compress_CompressorBase)); __pyx_t_8 = __Pyx_CalculateMetaclass(NULL, __pyx_t_1); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 203, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_8); __pyx_t_7 = __Pyx_Py3MetaclassPrepare(__pyx_t_8, __pyx_t_1, __pyx_n_s_ZSTD, __pyx_n_s_ZSTD, (PyObject *) NULL, __pyx_n_s_borg_compress, __pyx_kp_s_zstd_compression_decompression_p); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 203, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __pyx_t_9 = PyList_New(0); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 203, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); /* "borg/compress.pyx":208 * # Only ONE python context must to be created at a time. * # It should work flawlessly as long as borg will call ONLY ONE compression job at time. * ID = b'\x03\x00' # <<<<<<<<<<<<<< * name = 'zstd' * */ if (__Pyx_SetNameInClass(__pyx_t_7, __pyx_n_s_ID, __pyx_kp_b__28) < 0) __PYX_ERR(0, 208, __pyx_L1_error) /* "borg/compress.pyx":209 * # It should work flawlessly as long as borg will call ONLY ONE compression job at time. * ID = b'\x03\x00' * name = 'zstd' # <<<<<<<<<<<<<< * * def __init__(self, level=3, **kwargs): */ if (__Pyx_SetNameInClass(__pyx_t_7, __pyx_n_s_name, __pyx_n_u_zstd) < 0) __PYX_ERR(0, 209, __pyx_L1_error) /* "borg/compress.pyx":211 * name = 'zstd' * * def __init__(self, level=3, **kwargs): # <<<<<<<<<<<<<< * super().__init__(**kwargs) * self.level = level */ __pyx_t_6 = __Pyx_CyFunction_New(&__pyx_mdef_4borg_8compress_4ZSTD_1__init__, 0, __pyx_n_s_ZSTD___init, NULL, __pyx_n_s_borg_compress, __pyx_d, ((PyObject *)__pyx_codeobj__30)); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 211, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_INCREF(__pyx_t_6); PyList_Append(__pyx_t_9, __pyx_t_6); __Pyx_GIVEREF(__pyx_t_6); __Pyx_CyFunction_SetDefaultsTuple(__pyx_t_6, __pyx_tuple__31); if (__Pyx_SetNameInClass(__pyx_t_7, __pyx_n_s_init, __pyx_t_6) < 0) __PYX_ERR(0, 211, __pyx_L1_error) __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; /* "borg/compress.pyx":215 * self.level = level * * def compress(self, idata): # <<<<<<<<<<<<<< * if not isinstance(idata, bytes): * idata = bytes(idata) # code below does not work with memoryview */ __pyx_t_6 = __Pyx_CyFunction_New(&__pyx_mdef_4borg_8compress_4ZSTD_3compress, 0, __pyx_n_s_ZSTD_compress, NULL, __pyx_n_s_borg_compress, __pyx_d, ((PyObject *)__pyx_codeobj__33)); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 215, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_INCREF(__pyx_t_6); PyList_Append(__pyx_t_9, __pyx_t_6); __Pyx_GIVEREF(__pyx_t_6); if (__Pyx_SetNameInClass(__pyx_t_7, __pyx_n_s_compress, __pyx_t_6) < 0) __PYX_ERR(0, 215, __pyx_L1_error) __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; /* "borg/compress.pyx":232 * return super().compress(dest[:osize]) * * def decompress(self, idata): # <<<<<<<<<<<<<< * if not isinstance(idata, bytes): * idata = bytes(idata) # code below does not work with memoryview */ __pyx_t_6 = __Pyx_CyFunction_New(&__pyx_mdef_4borg_8compress_4ZSTD_5decompress, 0, __pyx_n_s_ZSTD_decompress, NULL, __pyx_n_s_borg_compress, __pyx_d, ((PyObject *)__pyx_codeobj__35)); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 232, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_INCREF(__pyx_t_6); PyList_Append(__pyx_t_9, __pyx_t_6); __Pyx_GIVEREF(__pyx_t_6); if (__Pyx_SetNameInClass(__pyx_t_7, 
__pyx_n_s_decompress, __pyx_t_6) < 0) __PYX_ERR(0, 232, __pyx_L1_error) __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; /* "borg/compress.pyx":203 * * * class ZSTD(CompressorBase): # <<<<<<<<<<<<<< * """zstd compression / decompression (pypi: zstandard, gh: python-zstandard)""" * # This is a NOT THREAD SAFE implementation. */ __pyx_t_6 = __Pyx_Py3ClassCreate(__pyx_t_8, __pyx_n_s_ZSTD, __pyx_t_1, __pyx_t_7, NULL, 0, 0); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 203, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); if (__Pyx_CyFunction_InitClassCell(__pyx_t_9, __pyx_t_6) < 0) __PYX_ERR(0, 203, __pyx_L1_error) __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; if (PyDict_SetItem(__pyx_d, __pyx_n_s_ZSTD, __pyx_t_6) < 0) __PYX_ERR(0, 203, __pyx_L1_error) __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "borg/compress.pyx":260 * * * class ZLIB(CompressorBase): # <<<<<<<<<<<<<< * """ * zlib compression / decompression (python stdlib) */ __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 260, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_INCREF(((PyObject *)__pyx_ptype_4borg_8compress_CompressorBase)); __Pyx_GIVEREF(((PyObject *)__pyx_ptype_4borg_8compress_CompressorBase)); PyTuple_SET_ITEM(__pyx_t_1, 0, ((PyObject *)__pyx_ptype_4borg_8compress_CompressorBase)); __pyx_t_8 = __Pyx_CalculateMetaclass(NULL, __pyx_t_1); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 260, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_8); __pyx_t_7 = __Pyx_Py3MetaclassPrepare(__pyx_t_8, __pyx_t_1, __pyx_n_s_ZLIB, __pyx_n_s_ZLIB, (PyObject *) NULL, __pyx_n_s_borg_compress, __pyx_kp_s_zlib_compression_decompression); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 260, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __pyx_t_6 = PyList_New(0); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 260, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); /* "borg/compress.pyx":264 * zlib compression / decompression (python stdlib) * """ * ID = b'\x08\x00' # not used here, see detect() # <<<<<<<<<<<<<< * # avoid all 0x.8.. IDs elsewhere! * name = 'zlib' */ if (__Pyx_SetNameInClass(__pyx_t_7, __pyx_n_s_ID, __pyx_kp_b__36) < 0) __PYX_ERR(0, 264, __pyx_L1_error) /* "borg/compress.pyx":266 * ID = b'\x08\x00' # not used here, see detect() * # avoid all 0x.8.. IDs elsewhere! * name = 'zlib' # <<<<<<<<<<<<<< * * @classmethod */ if (__Pyx_SetNameInClass(__pyx_t_7, __pyx_n_s_name, __pyx_n_u_zlib) < 0) __PYX_ERR(0, 266, __pyx_L1_error) /* "borg/compress.pyx":269 * * @classmethod * def detect(cls, data): # <<<<<<<<<<<<<< * # matches misc. patterns 0x.8.. used by zlib * cmf, flg = data[:2] */ __pyx_t_9 = __Pyx_CyFunction_New(&__pyx_mdef_4borg_8compress_4ZLIB_1detect, __Pyx_CYFUNCTION_CLASSMETHOD, __pyx_n_s_ZLIB_detect, NULL, __pyx_n_s_borg_compress, __pyx_d, ((PyObject *)__pyx_codeobj__38)); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 269, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); /* "borg/compress.pyx":268 * name = 'zlib' * * @classmethod # <<<<<<<<<<<<<< * def detect(cls, data): * # matches misc. patterns 0x.8.. 
used by zlib */ __pyx_t_10 = __Pyx_Method_ClassMethod(__pyx_t_9); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 268, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_10); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; if (__Pyx_SetNameInClass(__pyx_t_7, __pyx_n_s_detect, __pyx_t_10) < 0) __PYX_ERR(0, 269, __pyx_L1_error) __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; /* "borg/compress.pyx":276 * return check_ok and is_deflate * * def __init__(self, level=6, **kwargs): # <<<<<<<<<<<<<< * super().__init__(**kwargs) * self.level = level */ __pyx_t_10 = __Pyx_CyFunction_New(&__pyx_mdef_4borg_8compress_4ZLIB_3__init__, 0, __pyx_n_s_ZLIB___init, NULL, __pyx_n_s_borg_compress, __pyx_d, ((PyObject *)__pyx_codeobj__40)); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 276, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_10); __Pyx_INCREF(__pyx_t_10); PyList_Append(__pyx_t_6, __pyx_t_10); __Pyx_GIVEREF(__pyx_t_10); __Pyx_CyFunction_SetDefaultsTuple(__pyx_t_10, __pyx_tuple__41); if (__Pyx_SetNameInClass(__pyx_t_7, __pyx_n_s_init, __pyx_t_10) < 0) __PYX_ERR(0, 276, __pyx_L1_error) __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; /* "borg/compress.pyx":280 * self.level = level * * def compress(self, data): # <<<<<<<<<<<<<< * # note: for compatibility no super call, do not add ID bytes * return zlib.compress(data, self.level) */ __pyx_t_10 = __Pyx_CyFunction_New(&__pyx_mdef_4borg_8compress_4ZLIB_5compress, 0, __pyx_n_s_ZLIB_compress, NULL, __pyx_n_s_borg_compress, __pyx_d, ((PyObject *)__pyx_codeobj__43)); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 280, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_10); if (__Pyx_SetNameInClass(__pyx_t_7, __pyx_n_s_compress, __pyx_t_10) < 0) __PYX_ERR(0, 280, __pyx_L1_error) __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; /* "borg/compress.pyx":284 * return zlib.compress(data, self.level) * * def decompress(self, data): # <<<<<<<<<<<<<< * # note: for compatibility no super call, do not strip ID bytes * try: */ __pyx_t_10 = __Pyx_CyFunction_New(&__pyx_mdef_4borg_8compress_4ZLIB_7decompress, 0, __pyx_n_s_ZLIB_decompress, NULL, __pyx_n_s_borg_compress, __pyx_d, ((PyObject *)__pyx_codeobj__45)); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 284, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_10); if (__Pyx_SetNameInClass(__pyx_t_7, __pyx_n_s_decompress, __pyx_t_10) < 0) __PYX_ERR(0, 284, __pyx_L1_error) __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; /* "borg/compress.pyx":260 * * * class ZLIB(CompressorBase): # <<<<<<<<<<<<<< * """ * zlib compression / decompression (python stdlib) */ __pyx_t_10 = __Pyx_Py3ClassCreate(__pyx_t_8, __pyx_n_s_ZLIB, __pyx_t_1, __pyx_t_7, NULL, 0, 0); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 260, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_10); if (__Pyx_CyFunction_InitClassCell(__pyx_t_6, __pyx_t_10) < 0) __PYX_ERR(0, 260, __pyx_L1_error) __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; if (PyDict_SetItem(__pyx_d, __pyx_n_s_ZLIB, __pyx_t_10) < 0) __PYX_ERR(0, 260, __pyx_L1_error) __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "borg/compress.pyx":292 * * * class Auto(CompressorBase): # <<<<<<<<<<<<<< * """ * Meta-Compressor that decides which compression to use based on LZ4's ratio. 
*/ __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 292, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_INCREF(((PyObject *)__pyx_ptype_4borg_8compress_CompressorBase)); __Pyx_GIVEREF(((PyObject *)__pyx_ptype_4borg_8compress_CompressorBase)); PyTuple_SET_ITEM(__pyx_t_1, 0, ((PyObject *)__pyx_ptype_4borg_8compress_CompressorBase)); __pyx_t_8 = __Pyx_CalculateMetaclass(NULL, __pyx_t_1); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 292, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_8); __pyx_t_7 = __Pyx_Py3MetaclassPrepare(__pyx_t_8, __pyx_t_1, __pyx_n_s_Auto, __pyx_n_s_Auto, (PyObject *) NULL, __pyx_n_s_borg_compress, __pyx_kp_s_Meta_Compressor_that_decides_wh); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 292, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __pyx_t_10 = PyList_New(0); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 292, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_10); /* "borg/compress.pyx":300 * """ * * ID = None # <<<<<<<<<<<<<< * name = 'auto' * */ if (__Pyx_SetNameInClass(__pyx_t_7, __pyx_n_s_ID, Py_None) < 0) __PYX_ERR(0, 300, __pyx_L1_error) /* "borg/compress.pyx":301 * * ID = None * name = 'auto' # <<<<<<<<<<<<<< * * def __init__(self, compressor): */ if (__Pyx_SetNameInClass(__pyx_t_7, __pyx_n_s_name, __pyx_n_u_auto) < 0) __PYX_ERR(0, 301, __pyx_L1_error) /* "borg/compress.pyx":303 * name = 'auto' * * def __init__(self, compressor): # <<<<<<<<<<<<<< * super().__init__() * self.compressor = compressor */ __pyx_t_6 = __Pyx_CyFunction_New(&__pyx_mdef_4borg_8compress_4Auto_1__init__, 0, __pyx_n_s_Auto___init, NULL, __pyx_n_s_borg_compress, __pyx_d, ((PyObject *)__pyx_codeobj__47)); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 303, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_INCREF(__pyx_t_6); PyList_Append(__pyx_t_10, __pyx_t_6); __Pyx_GIVEREF(__pyx_t_6); if (__Pyx_SetNameInClass(__pyx_t_7, __pyx_n_s_init, __pyx_t_6) < 0) __PYX_ERR(0, 303, __pyx_L1_error) __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; /* "borg/compress.pyx":309 * self.none = get_compressor('none') * * def _decide(self, data): # <<<<<<<<<<<<<< * """ * Decides what to do with *data*. Returns (compressor, lz4_data). 
*/ __pyx_t_6 = __Pyx_CyFunction_New(&__pyx_mdef_4borg_8compress_4Auto_3_decide, 0, __pyx_n_s_Auto__decide, NULL, __pyx_n_s_borg_compress, __pyx_d, ((PyObject *)__pyx_codeobj__49)); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 309, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); if (__Pyx_SetNameInClass(__pyx_t_7, __pyx_n_s_decide, __pyx_t_6) < 0) __PYX_ERR(0, 309, __pyx_L1_error) __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; /* "borg/compress.pyx":326 * return self.none, None * * def decide(self, data): # <<<<<<<<<<<<<< * return self._decide(data)[0] * */ __pyx_t_6 = __Pyx_CyFunction_New(&__pyx_mdef_4borg_8compress_4Auto_5decide, 0, __pyx_n_s_Auto_decide, NULL, __pyx_n_s_borg_compress, __pyx_d, ((PyObject *)__pyx_codeobj__51)); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 326, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); if (__Pyx_SetNameInClass(__pyx_t_7, __pyx_n_s_decide_2, __pyx_t_6) < 0) __PYX_ERR(0, 326, __pyx_L1_error) __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; /* "borg/compress.pyx":329 * return self._decide(data)[0] * * def compress(self, data): # <<<<<<<<<<<<<< * compressor, lz4_data = self._decide(data) * if compressor is self.lz4: */ __pyx_t_6 = __Pyx_CyFunction_New(&__pyx_mdef_4borg_8compress_4Auto_7compress, 0, __pyx_n_s_Auto_compress, NULL, __pyx_n_s_borg_compress, __pyx_d, ((PyObject *)__pyx_codeobj__53)); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 329, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); if (__Pyx_SetNameInClass(__pyx_t_7, __pyx_n_s_compress, __pyx_t_6) < 0) __PYX_ERR(0, 329, __pyx_L1_error) __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; /* "borg/compress.pyx":351 * return lz4_data * * def decompress(self, data): # <<<<<<<<<<<<<< * raise NotImplementedError * */ __pyx_t_6 = __Pyx_CyFunction_New(&__pyx_mdef_4borg_8compress_4Auto_9decompress, 0, __pyx_n_s_Auto_decompress, NULL, __pyx_n_s_borg_compress, __pyx_d, ((PyObject *)__pyx_codeobj__55)); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 351, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); if (__Pyx_SetNameInClass(__pyx_t_7, __pyx_n_s_decompress, __pyx_t_6) < 0) __PYX_ERR(0, 351, __pyx_L1_error) __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; /* "borg/compress.pyx":354 * raise NotImplementedError * * def detect(cls, data): # <<<<<<<<<<<<<< * raise NotImplementedError * */ __pyx_t_6 = __Pyx_CyFunction_New(&__pyx_mdef_4borg_8compress_4Auto_11detect, 0, __pyx_n_s_Auto_detect, NULL, __pyx_n_s_borg_compress, __pyx_d, ((PyObject *)__pyx_codeobj__57)); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 354, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); if (__Pyx_SetNameInClass(__pyx_t_7, __pyx_n_s_detect, __pyx_t_6) < 0) __PYX_ERR(0, 354, __pyx_L1_error) __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; /* "borg/compress.pyx":292 * * * class Auto(CompressorBase): # <<<<<<<<<<<<<< * """ * Meta-Compressor that decides which compression to use based on LZ4's ratio. 
*/ __pyx_t_6 = __Pyx_Py3ClassCreate(__pyx_t_8, __pyx_n_s_Auto, __pyx_t_1, __pyx_t_7, NULL, 0, 0); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 292, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); if (__Pyx_CyFunction_InitClassCell(__pyx_t_10, __pyx_t_6) < 0) __PYX_ERR(0, 292, __pyx_L1_error) __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; if (PyDict_SetItem(__pyx_d, __pyx_n_s_Auto, __pyx_t_6) < 0) __PYX_ERR(0, 292, __pyx_L1_error) __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "borg/compress.pyx":360 * # Maps valid compressor names to their class * COMPRESSOR_TABLE = { * CNONE.name: CNONE, # <<<<<<<<<<<<<< * LZ4.name: LZ4, * ZLIB.name: ZLIB, */ __pyx_t_1 = __Pyx_PyDict_NewPresized(6); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 360, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_GetModuleGlobalName(__pyx_t_8, __pyx_n_s_CNONE); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 360, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_8); __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_8, __pyx_n_s_name); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 360, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; __Pyx_GetModuleGlobalName(__pyx_t_8, __pyx_n_s_CNONE); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 360, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_8); if (PyDict_SetItem(__pyx_t_1, __pyx_t_7, __pyx_t_8) < 0) __PYX_ERR(0, 360, __pyx_L1_error) __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; /* "borg/compress.pyx":361 * COMPRESSOR_TABLE = { * CNONE.name: CNONE, * LZ4.name: LZ4, # <<<<<<<<<<<<<< * ZLIB.name: ZLIB, * LZMA.name: LZMA, */ __Pyx_GetModuleGlobalName(__pyx_t_8, __pyx_n_s_LZ4); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 361, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_8); __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_8, __pyx_n_s_name); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 361, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; __Pyx_GetModuleGlobalName(__pyx_t_8, __pyx_n_s_LZ4); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 361, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_8); if (PyDict_SetItem(__pyx_t_1, __pyx_t_7, __pyx_t_8) < 0) __PYX_ERR(0, 360, __pyx_L1_error) __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; /* "borg/compress.pyx":362 * CNONE.name: CNONE, * LZ4.name: LZ4, * ZLIB.name: ZLIB, # <<<<<<<<<<<<<< * LZMA.name: LZMA, * Auto.name: Auto, */ __Pyx_GetModuleGlobalName(__pyx_t_8, __pyx_n_s_ZLIB); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 362, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_8); __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_8, __pyx_n_s_name); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 362, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; __Pyx_GetModuleGlobalName(__pyx_t_8, __pyx_n_s_ZLIB); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 362, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_8); if (PyDict_SetItem(__pyx_t_1, __pyx_t_7, __pyx_t_8) < 0) __PYX_ERR(0, 360, __pyx_L1_error) __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; /* "borg/compress.pyx":363 * LZ4.name: LZ4, * ZLIB.name: ZLIB, * LZMA.name: LZMA, # <<<<<<<<<<<<<< * Auto.name: Auto, * ZSTD.name: ZSTD, */ __Pyx_GetModuleGlobalName(__pyx_t_8, __pyx_n_s_LZMA); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 363, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_8); __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_8, __pyx_n_s_name); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 363, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; 
__Pyx_GetModuleGlobalName(__pyx_t_8, __pyx_n_s_LZMA); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 363, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_8); if (PyDict_SetItem(__pyx_t_1, __pyx_t_7, __pyx_t_8) < 0) __PYX_ERR(0, 360, __pyx_L1_error) __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; /* "borg/compress.pyx":364 * ZLIB.name: ZLIB, * LZMA.name: LZMA, * Auto.name: Auto, # <<<<<<<<<<<<<< * ZSTD.name: ZSTD, * } */ __Pyx_GetModuleGlobalName(__pyx_t_8, __pyx_n_s_Auto); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 364, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_8); __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_8, __pyx_n_s_name); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 364, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; __Pyx_GetModuleGlobalName(__pyx_t_8, __pyx_n_s_Auto); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 364, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_8); if (PyDict_SetItem(__pyx_t_1, __pyx_t_7, __pyx_t_8) < 0) __PYX_ERR(0, 360, __pyx_L1_error) __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; /* "borg/compress.pyx":365 * LZMA.name: LZMA, * Auto.name: Auto, * ZSTD.name: ZSTD, # <<<<<<<<<<<<<< * } * # List of possible compression types. Does not include Auto, since it is a meta-Compressor. */ __Pyx_GetModuleGlobalName(__pyx_t_8, __pyx_n_s_ZSTD); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 365, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_8); __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_8, __pyx_n_s_name); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 365, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; __Pyx_GetModuleGlobalName(__pyx_t_8, __pyx_n_s_ZSTD); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 365, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_8); if (PyDict_SetItem(__pyx_t_1, __pyx_t_7, __pyx_t_8) < 0) __PYX_ERR(0, 360, __pyx_L1_error) __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; if (PyDict_SetItem(__pyx_d, __pyx_n_s_COMPRESSOR_TABLE, __pyx_t_1) < 0) __PYX_ERR(0, 359, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "borg/compress.pyx":368 * } * # List of possible compression types. Does not include Auto, since it is a meta-Compressor. 
* COMPRESSOR_LIST = [LZ4, ZSTD, CNONE, ZLIB, LZMA, ] # check fast stuff first # <<<<<<<<<<<<<< * * def get_compressor(name, **kwargs): */ __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_LZ4); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 368, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_GetModuleGlobalName(__pyx_t_8, __pyx_n_s_ZSTD); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 368, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_8); __Pyx_GetModuleGlobalName(__pyx_t_7, __pyx_n_s_CNONE); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 368, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_n_s_ZLIB); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 368, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_GetModuleGlobalName(__pyx_t_10, __pyx_n_s_LZMA); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 368, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_10); __pyx_t_9 = PyList_New(5); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 368, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_GIVEREF(__pyx_t_1); PyList_SET_ITEM(__pyx_t_9, 0, __pyx_t_1); __Pyx_GIVEREF(__pyx_t_8); PyList_SET_ITEM(__pyx_t_9, 1, __pyx_t_8); __Pyx_GIVEREF(__pyx_t_7); PyList_SET_ITEM(__pyx_t_9, 2, __pyx_t_7); __Pyx_GIVEREF(__pyx_t_6); PyList_SET_ITEM(__pyx_t_9, 3, __pyx_t_6); __Pyx_GIVEREF(__pyx_t_10); PyList_SET_ITEM(__pyx_t_9, 4, __pyx_t_10); __pyx_t_1 = 0; __pyx_t_8 = 0; __pyx_t_7 = 0; __pyx_t_6 = 0; __pyx_t_10 = 0; if (PyDict_SetItem(__pyx_d, __pyx_n_s_COMPRESSOR_LIST, __pyx_t_9) < 0) __PYX_ERR(0, 368, __pyx_L1_error) __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; /* "borg/compress.pyx":370 * COMPRESSOR_LIST = [LZ4, ZSTD, CNONE, ZLIB, LZMA, ] # check fast stuff first * * def get_compressor(name, **kwargs): # <<<<<<<<<<<<<< * cls = COMPRESSOR_TABLE[name] * return cls(**kwargs) */ __pyx_t_9 = PyCFunction_NewEx(&__pyx_mdef_4borg_8compress_1get_compressor, NULL, __pyx_n_s_borg_compress); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 370, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); if (PyDict_SetItem(__pyx_d, __pyx_n_s_get_compressor, __pyx_t_9) < 0) __PYX_ERR(0, 370, __pyx_L1_error) __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; /* "borg/compress.pyx":375 * * * class Compressor: # <<<<<<<<<<<<<< * """ * compresses using a compressor with given name and parameters */ __pyx_t_9 = __Pyx_Py3MetaclassPrepare((PyObject *) NULL, __pyx_empty_tuple, __pyx_n_s_Compressor, __pyx_n_s_Compressor, (PyObject *) NULL, __pyx_n_s_borg_compress, __pyx_kp_s_compresses_using_a_compressor_w); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 375, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); /* "borg/compress.pyx":380 * decompresses everything we can handle (autodetect) * """ * def __init__(self, name='null', **kwargs): # <<<<<<<<<<<<<< * self.params = kwargs * self.compressor = get_compressor(name, **self.params) */ __pyx_t_10 = __Pyx_CyFunction_New(&__pyx_mdef_4borg_8compress_10Compressor_1__init__, 0, __pyx_n_s_Compressor___init, NULL, __pyx_n_s_borg_compress, __pyx_d, ((PyObject *)__pyx_codeobj__61)); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 380, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_10); __Pyx_CyFunction_SetDefaultsTuple(__pyx_t_10, __pyx_tuple__62); if (__Pyx_SetNameInClass(__pyx_t_9, __pyx_n_s_init, __pyx_t_10) < 0) __PYX_ERR(0, 380, __pyx_L1_error) __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; /* "borg/compress.pyx":384 * self.compressor = get_compressor(name, **self.params) * * def compress(self, data): # <<<<<<<<<<<<<< * return self.compressor.compress(data) * */ __pyx_t_10 = __Pyx_CyFunction_New(&__pyx_mdef_4borg_8compress_10Compressor_3compress, 0, __pyx_n_s_Compressor_compress, NULL, __pyx_n_s_borg_compress, __pyx_d, 
((PyObject *)__pyx_codeobj__64)); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 384, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_10); if (__Pyx_SetNameInClass(__pyx_t_9, __pyx_n_s_compress, __pyx_t_10) < 0) __PYX_ERR(0, 384, __pyx_L1_error) __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; /* "borg/compress.pyx":387 * return self.compressor.compress(data) * * def decompress(self, data): # <<<<<<<<<<<<<< * compressor_cls = self.detect(data) * return compressor_cls(**self.params).decompress(data) */ __pyx_t_10 = __Pyx_CyFunction_New(&__pyx_mdef_4borg_8compress_10Compressor_5decompress, 0, __pyx_n_s_Compressor_decompress, NULL, __pyx_n_s_borg_compress, __pyx_d, ((PyObject *)__pyx_codeobj__66)); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 387, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_10); if (__Pyx_SetNameInClass(__pyx_t_9, __pyx_n_s_decompress, __pyx_t_10) < 0) __PYX_ERR(0, 387, __pyx_L1_error) __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; /* "borg/compress.pyx":392 * * @staticmethod * def detect(data): # <<<<<<<<<<<<<< * hdr = bytes(data[:2]) # detect() does not work with memoryview * for cls in COMPRESSOR_LIST: */ __pyx_t_10 = __Pyx_CyFunction_New(&__pyx_mdef_4borg_8compress_10Compressor_7detect, __Pyx_CYFUNCTION_STATICMETHOD, __pyx_n_s_Compressor_detect, NULL, __pyx_n_s_borg_compress, __pyx_d, ((PyObject *)__pyx_codeobj__68)); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 392, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_10); /* "borg/compress.pyx":391 * return compressor_cls(**self.params).decompress(data) * * @staticmethod # <<<<<<<<<<<<<< * def detect(data): * hdr = bytes(data[:2]) # detect() does not work with memoryview */ __pyx_t_6 = __Pyx_PyObject_CallOneArg(__pyx_builtin_staticmethod, __pyx_t_10); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 391, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; if (__Pyx_SetNameInClass(__pyx_t_9, __pyx_n_s_detect, __pyx_t_6) < 0) __PYX_ERR(0, 392, __pyx_L1_error) __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; /* "borg/compress.pyx":375 * * * class Compressor: # <<<<<<<<<<<<<< * """ * compresses using a compressor with given name and parameters */ __pyx_t_6 = __Pyx_Py3ClassCreate(((PyObject*)&__Pyx_DefaultClassType), __pyx_n_s_Compressor, __pyx_empty_tuple, __pyx_t_9, NULL, 0, 0); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 375, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); if (PyDict_SetItem(__pyx_d, __pyx_n_s_Compressor, __pyx_t_6) < 0) __PYX_ERR(0, 375, __pyx_L1_error) __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; /* "borg/compress.pyx":401 * * * class CompressionSpec: # <<<<<<<<<<<<<< * def __init__(self, s): * values = s.split(',') */ __pyx_t_9 = __Pyx_Py3MetaclassPrepare((PyObject *) NULL, __pyx_empty_tuple, __pyx_n_s_CompressionSpec, __pyx_n_s_CompressionSpec, (PyObject *) NULL, __pyx_n_s_borg_compress, (PyObject *) NULL); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 401, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); /* "borg/compress.pyx":402 * * class CompressionSpec: * def __init__(self, s): # <<<<<<<<<<<<<< * values = s.split(',') * count = len(values) */ __pyx_t_6 = __Pyx_CyFunction_New(&__pyx_mdef_4borg_8compress_15CompressionSpec_1__init__, 0, __pyx_n_s_CompressionSpec___init, NULL, __pyx_n_s_borg_compress, __pyx_d, ((PyObject *)__pyx_codeobj__70)); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 402, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); if (__Pyx_SetNameInClass(__pyx_t_9, __pyx_n_s_init, __pyx_t_6) < 0) __PYX_ERR(0, 402, __pyx_L1_error) __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; /* "borg/compress.pyx":441 * * @property * def compressor(self): # 
<<<<<<<<<<<<<< * if self.name in ('none', 'lz4', ): * return get_compressor(self.name) */ __pyx_t_6 = __Pyx_CyFunction_New(&__pyx_mdef_4borg_8compress_15CompressionSpec_3compressor, 0, __pyx_n_s_CompressionSpec_compressor, NULL, __pyx_n_s_borg_compress, __pyx_d, ((PyObject *)__pyx_codeobj__72)); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 441, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); /* "borg/compress.pyx":440 * raise ValueError * * @property # <<<<<<<<<<<<<< * def compressor(self): * if self.name in ('none', 'lz4', ): */ __pyx_t_10 = __Pyx_PyObject_CallOneArg(__pyx_builtin_property, __pyx_t_6); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 440, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_10); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; if (__Pyx_SetNameInClass(__pyx_t_9, __pyx_n_s_compressor, __pyx_t_10) < 0) __PYX_ERR(0, 441, __pyx_L1_error) __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; /* "borg/compress.pyx":401 * * * class CompressionSpec: # <<<<<<<<<<<<<< * def __init__(self, s): * values = s.split(',') */ __pyx_t_10 = __Pyx_Py3ClassCreate(((PyObject*)&__Pyx_DefaultClassType), __pyx_n_s_CompressionSpec, __pyx_empty_tuple, __pyx_t_9, NULL, 0, 0); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 401, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_10); if (PyDict_SetItem(__pyx_d, __pyx_n_s_CompressionSpec, __pyx_t_10) < 0) __PYX_ERR(0, 401, __pyx_L1_error) __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; /* "(tree fragment)":1 * def __pyx_unpickle_CompressorBase(__pyx_type, long __pyx_checksum, __pyx_state): # <<<<<<<<<<<<<< * cdef object __pyx_PickleError * cdef object __pyx_result */ __pyx_t_9 = PyCFunction_NewEx(&__pyx_mdef_4borg_8compress_3__pyx_unpickle_CompressorBase, NULL, __pyx_n_s_borg_compress); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 1, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); if (PyDict_SetItem(__pyx_d, __pyx_n_s_pyx_unpickle_CompressorBase, __pyx_t_9) < 0) __PYX_ERR(1, 1, __pyx_L1_error) __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; /* "borg/compress.pyx":1 * # cython: language_level=3 # <<<<<<<<<<<<<< * * """ */ __pyx_t_9 = __Pyx_PyDict_NewPresized(0); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 1, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); if (PyDict_SetItem(__pyx_d, __pyx_n_s_test, __pyx_t_9) < 0) __PYX_ERR(0, 1, __pyx_L1_error) __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; /*--- Wrapped vars code ---*/ goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_6); __Pyx_XDECREF(__pyx_t_7); __Pyx_XDECREF(__pyx_t_8); __Pyx_XDECREF(__pyx_t_9); __Pyx_XDECREF(__pyx_t_10); if (__pyx_m) { if (__pyx_d) { __Pyx_AddTraceback("init borg.compress", __pyx_clineno, __pyx_lineno, __pyx_filename); } Py_CLEAR(__pyx_m); } else if (!PyErr_Occurred()) { PyErr_SetString(PyExc_ImportError, "init borg.compress"); } __pyx_L0:; __Pyx_RefNannyFinishContext(); #if CYTHON_PEP489_MULTI_PHASE_INIT return (__pyx_m != NULL) ? 
0 : -1; #elif PY_MAJOR_VERSION >= 3 return __pyx_m; #else return; #endif } /* --- Runtime support code --- */ /* Refnanny */ #if CYTHON_REFNANNY static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname) { PyObject *m = NULL, *p = NULL; void *r = NULL; m = PyImport_ImportModule(modname); if (!m) goto end; p = PyObject_GetAttrString(m, "RefNannyAPI"); if (!p) goto end; r = PyLong_AsVoidPtr(p); end: Py_XDECREF(p); Py_XDECREF(m); return (__Pyx_RefNannyAPIStruct *)r; } #endif /* PyObjectGetAttrStr */ #if CYTHON_USE_TYPE_SLOTS static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStr(PyObject* obj, PyObject* attr_name) { PyTypeObject* tp = Py_TYPE(obj); if (likely(tp->tp_getattro)) return tp->tp_getattro(obj, attr_name); #if PY_MAJOR_VERSION < 3 if (likely(tp->tp_getattr)) return tp->tp_getattr(obj, PyString_AS_STRING(attr_name)); #endif return PyObject_GetAttr(obj, attr_name); } #endif /* GetBuiltinName */ static PyObject *__Pyx_GetBuiltinName(PyObject *name) { PyObject* result = __Pyx_PyObject_GetAttrStr(__pyx_b, name); if (unlikely(!result)) { PyErr_Format(PyExc_NameError, #if PY_MAJOR_VERSION >= 3 "name '%U' is not defined", name); #else "name '%.200s' is not defined", PyString_AS_STRING(name)); #endif } return result; } /* PyCFunctionFastCall */ #if CYTHON_FAST_PYCCALL static CYTHON_INLINE PyObject * __Pyx_PyCFunction_FastCall(PyObject *func_obj, PyObject **args, Py_ssize_t nargs) { PyCFunctionObject *func = (PyCFunctionObject*)func_obj; PyCFunction meth = PyCFunction_GET_FUNCTION(func); PyObject *self = PyCFunction_GET_SELF(func); int flags = PyCFunction_GET_FLAGS(func); assert(PyCFunction_Check(func)); assert(METH_FASTCALL == (flags & ~(METH_CLASS | METH_STATIC | METH_COEXIST | METH_KEYWORDS | METH_STACKLESS))); assert(nargs >= 0); assert(nargs == 0 || args != NULL); /* _PyCFunction_FastCallDict() must not be called with an exception set, because it may clear it (directly or indirectly) and so the caller loses its exception */ assert(!PyErr_Occurred()); if ((PY_VERSION_HEX < 0x030700A0) || unlikely(flags & METH_KEYWORDS)) { return (*((__Pyx_PyCFunctionFastWithKeywords)(void*)meth)) (self, args, nargs, NULL); } else { return (*((__Pyx_PyCFunctionFast)(void*)meth)) (self, args, nargs); } } #endif /* PyFunctionFastCall */ #if CYTHON_FAST_PYCALL static PyObject* __Pyx_PyFunction_FastCallNoKw(PyCodeObject *co, PyObject **args, Py_ssize_t na, PyObject *globals) { PyFrameObject *f; PyThreadState *tstate = __Pyx_PyThreadState_Current; PyObject **fastlocals; Py_ssize_t i; PyObject *result; assert(globals != NULL); /* XXX Perhaps we should create a specialized PyFrame_New() that doesn't take locals, but does take builtins without sanity checking them. 
*/ assert(tstate != NULL); f = PyFrame_New(tstate, co, globals, NULL); if (f == NULL) { return NULL; } fastlocals = __Pyx_PyFrame_GetLocalsplus(f); for (i = 0; i < na; i++) { Py_INCREF(*args); fastlocals[i] = *args++; } result = PyEval_EvalFrameEx(f,0); ++tstate->recursion_depth; Py_DECREF(f); --tstate->recursion_depth; return result; } #if 1 || PY_VERSION_HEX < 0x030600B1 static PyObject *__Pyx_PyFunction_FastCallDict(PyObject *func, PyObject **args, Py_ssize_t nargs, PyObject *kwargs) { PyCodeObject *co = (PyCodeObject *)PyFunction_GET_CODE(func); PyObject *globals = PyFunction_GET_GLOBALS(func); PyObject *argdefs = PyFunction_GET_DEFAULTS(func); PyObject *closure; #if PY_MAJOR_VERSION >= 3 PyObject *kwdefs; #endif PyObject *kwtuple, **k; PyObject **d; Py_ssize_t nd; Py_ssize_t nk; PyObject *result; assert(kwargs == NULL || PyDict_Check(kwargs)); nk = kwargs ? PyDict_Size(kwargs) : 0; if (Py_EnterRecursiveCall((char*)" while calling a Python object")) { return NULL; } if ( #if PY_MAJOR_VERSION >= 3 co->co_kwonlyargcount == 0 && #endif likely(kwargs == NULL || nk == 0) && co->co_flags == (CO_OPTIMIZED | CO_NEWLOCALS | CO_NOFREE)) { if (argdefs == NULL && co->co_argcount == nargs) { result = __Pyx_PyFunction_FastCallNoKw(co, args, nargs, globals); goto done; } else if (nargs == 0 && argdefs != NULL && co->co_argcount == Py_SIZE(argdefs)) { /* function called with no arguments, but all parameters have a default value: use default values as arguments .*/ args = &PyTuple_GET_ITEM(argdefs, 0); result =__Pyx_PyFunction_FastCallNoKw(co, args, Py_SIZE(argdefs), globals); goto done; } } if (kwargs != NULL) { Py_ssize_t pos, i; kwtuple = PyTuple_New(2 * nk); if (kwtuple == NULL) { result = NULL; goto done; } k = &PyTuple_GET_ITEM(kwtuple, 0); pos = i = 0; while (PyDict_Next(kwargs, &pos, &k[i], &k[i+1])) { Py_INCREF(k[i]); Py_INCREF(k[i+1]); i += 2; } nk = i / 2; } else { kwtuple = NULL; k = NULL; } closure = PyFunction_GET_CLOSURE(func); #if PY_MAJOR_VERSION >= 3 kwdefs = PyFunction_GET_KW_DEFAULTS(func); #endif if (argdefs != NULL) { d = &PyTuple_GET_ITEM(argdefs, 0); nd = Py_SIZE(argdefs); } else { d = NULL; nd = 0; } #if PY_MAJOR_VERSION >= 3 result = PyEval_EvalCodeEx((PyObject*)co, globals, (PyObject *)NULL, args, (int)nargs, k, (int)nk, d, (int)nd, kwdefs, closure); #else result = PyEval_EvalCodeEx(co, globals, (PyObject *)NULL, args, (int)nargs, k, (int)nk, d, (int)nd, closure); #endif Py_XDECREF(kwtuple); done: Py_LeaveRecursiveCall(); return result; } #endif #endif /* PyObjectCall */ #if CYTHON_COMPILING_IN_CPYTHON static CYTHON_INLINE PyObject* __Pyx_PyObject_Call(PyObject *func, PyObject *arg, PyObject *kw) { PyObject *result; ternaryfunc call = func->ob_type->tp_call; if (unlikely(!call)) return PyObject_Call(func, arg, kw); if (unlikely(Py_EnterRecursiveCall((char*)" while calling a Python object"))) return NULL; result = (*call)(func, arg, kw); Py_LeaveRecursiveCall(); if (unlikely(!result) && unlikely(!PyErr_Occurred())) { PyErr_SetString( PyExc_SystemError, "NULL result without error in PyObject_Call"); } return result; } #endif /* PyObjectCall2Args */ static CYTHON_UNUSED PyObject* __Pyx_PyObject_Call2Args(PyObject* function, PyObject* arg1, PyObject* arg2) { PyObject *args, *result = NULL; #if CYTHON_FAST_PYCALL if (PyFunction_Check(function)) { PyObject *args[2] = {arg1, arg2}; return __Pyx_PyFunction_FastCall(function, args, 2); } #endif #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(function)) { PyObject *args[2] = {arg1, arg2}; return 
__Pyx_PyCFunction_FastCall(function, args, 2); } #endif args = PyTuple_New(2); if (unlikely(!args)) goto done; Py_INCREF(arg1); PyTuple_SET_ITEM(args, 0, arg1); Py_INCREF(arg2); PyTuple_SET_ITEM(args, 1, arg2); Py_INCREF(function); result = __Pyx_PyObject_Call(function, args, NULL); Py_DECREF(args); Py_DECREF(function); done: return result; } /* PyObjectCallMethO */ #if CYTHON_COMPILING_IN_CPYTHON static CYTHON_INLINE PyObject* __Pyx_PyObject_CallMethO(PyObject *func, PyObject *arg) { PyObject *self, *result; PyCFunction cfunc; cfunc = PyCFunction_GET_FUNCTION(func); self = PyCFunction_GET_SELF(func); if (unlikely(Py_EnterRecursiveCall((char*)" while calling a Python object"))) return NULL; result = cfunc(self, arg); Py_LeaveRecursiveCall(); if (unlikely(!result) && unlikely(!PyErr_Occurred())) { PyErr_SetString( PyExc_SystemError, "NULL result without error in PyObject_Call"); } return result; } #endif /* PyObjectCallOneArg */ #if CYTHON_COMPILING_IN_CPYTHON static PyObject* __Pyx__PyObject_CallOneArg(PyObject *func, PyObject *arg) { PyObject *result; PyObject *args = PyTuple_New(1); if (unlikely(!args)) return NULL; Py_INCREF(arg); PyTuple_SET_ITEM(args, 0, arg); result = __Pyx_PyObject_Call(func, args, NULL); Py_DECREF(args); return result; } static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg) { #if CYTHON_FAST_PYCALL if (PyFunction_Check(func)) { return __Pyx_PyFunction_FastCall(func, &arg, 1); } #endif if (likely(PyCFunction_Check(func))) { if (likely(PyCFunction_GET_FLAGS(func) & METH_O)) { return __Pyx_PyObject_CallMethO(func, arg); #if CYTHON_FAST_PYCCALL } else if (PyCFunction_GET_FLAGS(func) & METH_FASTCALL) { return __Pyx_PyCFunction_FastCall(func, &arg, 1); #endif } } return __Pyx__PyObject_CallOneArg(func, arg); } #else static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg) { PyObject *result; PyObject *args = PyTuple_Pack(1, arg); if (unlikely(!args)) return NULL; result = __Pyx_PyObject_Call(func, args, NULL); Py_DECREF(args); return result; } #endif /* RaiseArgTupleInvalid */ static void __Pyx_RaiseArgtupleInvalid( const char* func_name, int exact, Py_ssize_t num_min, Py_ssize_t num_max, Py_ssize_t num_found) { Py_ssize_t num_expected; const char *more_or_less; if (num_found < num_min) { num_expected = num_min; more_or_less = "at least"; } else { num_expected = num_max; more_or_less = "at most"; } if (exact) { more_or_less = "exactly"; } PyErr_Format(PyExc_TypeError, "%.200s() takes %.8s %" CYTHON_FORMAT_SSIZE_T "d positional argument%.1s (%" CYTHON_FORMAT_SSIZE_T "d given)", func_name, more_or_less, num_expected, (num_expected == 1) ?
"" : "s", num_found); } /* KeywordStringCheck */ static int __Pyx_CheckKeywordStrings( PyObject *kwdict, const char* function_name, int kw_allowed) { PyObject* key = 0; Py_ssize_t pos = 0; #if CYTHON_COMPILING_IN_PYPY if (!kw_allowed && PyDict_Next(kwdict, &pos, &key, 0)) goto invalid_keyword; return 1; #else while (PyDict_Next(kwdict, &pos, &key, 0)) { #if PY_MAJOR_VERSION < 3 if (unlikely(!PyString_Check(key))) #endif if (unlikely(!PyUnicode_Check(key))) goto invalid_keyword_type; } if ((!kw_allowed) && unlikely(key)) goto invalid_keyword; return 1; invalid_keyword_type: PyErr_Format(PyExc_TypeError, "%.200s() keywords must be strings", function_name); return 0; #endif invalid_keyword: PyErr_Format(PyExc_TypeError, #if PY_MAJOR_VERSION < 3 "%.200s() got an unexpected keyword argument '%.200s'", function_name, PyString_AsString(key)); #else "%s() got an unexpected keyword argument '%U'", function_name, key); #endif return 0; } /* SliceObject */ static CYTHON_INLINE PyObject* __Pyx_PyObject_GetSlice(PyObject* obj, Py_ssize_t cstart, Py_ssize_t cstop, PyObject** _py_start, PyObject** _py_stop, PyObject** _py_slice, int has_cstart, int has_cstop, CYTHON_UNUSED int wraparound) { #if CYTHON_USE_TYPE_SLOTS PyMappingMethods* mp; #if PY_MAJOR_VERSION < 3 PySequenceMethods* ms = Py_TYPE(obj)->tp_as_sequence; if (likely(ms && ms->sq_slice)) { if (!has_cstart) { if (_py_start && (*_py_start != Py_None)) { cstart = __Pyx_PyIndex_AsSsize_t(*_py_start); if ((cstart == (Py_ssize_t)-1) && PyErr_Occurred()) goto bad; } else cstart = 0; } if (!has_cstop) { if (_py_stop && (*_py_stop != Py_None)) { cstop = __Pyx_PyIndex_AsSsize_t(*_py_stop); if ((cstop == (Py_ssize_t)-1) && PyErr_Occurred()) goto bad; } else cstop = PY_SSIZE_T_MAX; } if (wraparound && unlikely((cstart < 0) | (cstop < 0)) && likely(ms->sq_length)) { Py_ssize_t l = ms->sq_length(obj); if (likely(l >= 0)) { if (cstop < 0) { cstop += l; if (cstop < 0) cstop = 0; } if (cstart < 0) { cstart += l; if (cstart < 0) cstart = 0; } } else { if (!PyErr_ExceptionMatches(PyExc_OverflowError)) goto bad; PyErr_Clear(); } } return ms->sq_slice(obj, cstart, cstop); } #endif mp = Py_TYPE(obj)->tp_as_mapping; if (likely(mp && mp->mp_subscript)) #endif { PyObject* result; PyObject *py_slice, *py_start, *py_stop; if (_py_slice) { py_slice = *_py_slice; } else { PyObject* owned_start = NULL; PyObject* owned_stop = NULL; if (_py_start) { py_start = *_py_start; } else { if (has_cstart) { owned_start = py_start = PyInt_FromSsize_t(cstart); if (unlikely(!py_start)) goto bad; } else py_start = Py_None; } if (_py_stop) { py_stop = *_py_stop; } else { if (has_cstop) { owned_stop = py_stop = PyInt_FromSsize_t(cstop); if (unlikely(!py_stop)) { Py_XDECREF(owned_start); goto bad; } } else py_stop = Py_None; } py_slice = PySlice_New(py_start, py_stop, Py_None); Py_XDECREF(owned_start); Py_XDECREF(owned_stop); if (unlikely(!py_slice)) goto bad; } #if CYTHON_USE_TYPE_SLOTS result = mp->mp_subscript(obj, py_slice); #else result = PyObject_GetItem(obj, py_slice); #endif if (!_py_slice) { Py_DECREF(py_slice); } return result; } PyErr_Format(PyExc_TypeError, "'%.200s' object is unsliceable", Py_TYPE(obj)->tp_name); bad: return NULL; } /* PyErrExceptionMatches */ #if CYTHON_FAST_THREAD_STATE static int __Pyx_PyErr_ExceptionMatchesTuple(PyObject *exc_type, PyObject *tuple) { Py_ssize_t i, n; n = PyTuple_GET_SIZE(tuple); #if PY_MAJOR_VERSION >= 3 for (i=0; i<n; i++) { if (exc_type == PyTuple_GET_ITEM(tuple, i)) return 1; } #endif for (i=0; i<n; i++) { if (__Pyx_PyErr_GivenExceptionMatches(exc_type, PyTuple_GET_ITEM(tuple, i))) return 1; } return 0; } static CYTHON_INLINE int __Pyx_PyErr_ExceptionMatchesInState(PyThreadState* tstate, PyObject* err) { PyObject *exc_type = tstate->curexc_type; if (exc_type == err) return 1; if (unlikely(!exc_type)) return 0; if (unlikely(PyTuple_Check(err))) return
__Pyx_PyErr_ExceptionMatchesTuple(exc_type, err); return __Pyx_PyErr_GivenExceptionMatches(exc_type, err); } #endif /* PyErrFetchRestore */ #if CYTHON_FAST_THREAD_STATE static CYTHON_INLINE void __Pyx_ErrRestoreInState(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb) { PyObject *tmp_type, *tmp_value, *tmp_tb; tmp_type = tstate->curexc_type; tmp_value = tstate->curexc_value; tmp_tb = tstate->curexc_traceback; tstate->curexc_type = type; tstate->curexc_value = value; tstate->curexc_traceback = tb; Py_XDECREF(tmp_type); Py_XDECREF(tmp_value); Py_XDECREF(tmp_tb); } static CYTHON_INLINE void __Pyx_ErrFetchInState(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) { *type = tstate->curexc_type; *value = tstate->curexc_value; *tb = tstate->curexc_traceback; tstate->curexc_type = 0; tstate->curexc_value = 0; tstate->curexc_traceback = 0; } #endif /* GetAttr */ static CYTHON_INLINE PyObject *__Pyx_GetAttr(PyObject *o, PyObject *n) { #if CYTHON_USE_TYPE_SLOTS #if PY_MAJOR_VERSION >= 3 if (likely(PyUnicode_Check(n))) #else if (likely(PyString_Check(n))) #endif return __Pyx_PyObject_GetAttrStr(o, n); #endif return PyObject_GetAttr(o, n); } /* GetAttr3 */ static PyObject *__Pyx_GetAttr3Default(PyObject *d) { __Pyx_PyThreadState_declare __Pyx_PyThreadState_assign if (unlikely(!__Pyx_PyErr_ExceptionMatches(PyExc_AttributeError))) return NULL; __Pyx_PyErr_Clear(); Py_INCREF(d); return d; } static CYTHON_INLINE PyObject *__Pyx_GetAttr3(PyObject *o, PyObject *n, PyObject *d) { PyObject *r = __Pyx_GetAttr(o, n); return (likely(r)) ? r : __Pyx_GetAttr3Default(d); } /* PyDictVersioning */ #if CYTHON_USE_DICT_VERSIONS && CYTHON_USE_TYPE_SLOTS static CYTHON_INLINE PY_UINT64_T __Pyx_get_tp_dict_version(PyObject *obj) { PyObject *dict = Py_TYPE(obj)->tp_dict; return likely(dict) ? __PYX_GET_DICT_VERSION(dict) : 0; } static CYTHON_INLINE PY_UINT64_T __Pyx_get_object_dict_version(PyObject *obj) { PyObject **dictptr = NULL; Py_ssize_t offset = Py_TYPE(obj)->tp_dictoffset; if (offset) { #if CYTHON_COMPILING_IN_CPYTHON dictptr = (likely(offset > 0)) ? (PyObject **) ((char *)obj + offset) : _PyObject_GetDictPtr(obj); #else dictptr = _PyObject_GetDictPtr(obj); #endif } return (dictptr && *dictptr) ? 
__PYX_GET_DICT_VERSION(*dictptr) : 0; } static CYTHON_INLINE int __Pyx_object_dict_version_matches(PyObject* obj, PY_UINT64_T tp_dict_version, PY_UINT64_T obj_dict_version) { PyObject *dict = Py_TYPE(obj)->tp_dict; if (unlikely(!dict) || unlikely(tp_dict_version != __PYX_GET_DICT_VERSION(dict))) return 0; return obj_dict_version == __Pyx_get_object_dict_version(obj); } #endif /* GetModuleGlobalName */ #if CYTHON_USE_DICT_VERSIONS static PyObject *__Pyx__GetModuleGlobalName(PyObject *name, PY_UINT64_T *dict_version, PyObject **dict_cached_value) #else static CYTHON_INLINE PyObject *__Pyx__GetModuleGlobalName(PyObject *name) #endif { PyObject *result; #if !CYTHON_AVOID_BORROWED_REFS #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030500A1 result = _PyDict_GetItem_KnownHash(__pyx_d, name, ((PyASCIIObject *) name)->hash); __PYX_UPDATE_DICT_CACHE(__pyx_d, result, *dict_cached_value, *dict_version) if (likely(result)) { return __Pyx_NewRef(result); } else if (unlikely(PyErr_Occurred())) { return NULL; } #else result = PyDict_GetItem(__pyx_d, name); __PYX_UPDATE_DICT_CACHE(__pyx_d, result, *dict_cached_value, *dict_version) if (likely(result)) { return __Pyx_NewRef(result); } #endif #else result = PyObject_GetItem(__pyx_d, name); __PYX_UPDATE_DICT_CACHE(__pyx_d, result, *dict_cached_value, *dict_version) if (likely(result)) { return __Pyx_NewRef(result); } PyErr_Clear(); #endif return __Pyx_GetBuiltinName(name); } /* RaiseDoubleKeywords */ static void __Pyx_RaiseDoubleKeywordsError( const char* func_name, PyObject* kw_name) { PyErr_Format(PyExc_TypeError, #if PY_MAJOR_VERSION >= 3 "%s() got multiple values for keyword argument '%U'", func_name, kw_name); #else "%s() got multiple values for keyword argument '%s'", func_name, PyString_AsString(kw_name)); #endif } /* ParseKeywords */ static int __Pyx_ParseOptionalKeywords( PyObject *kwds, PyObject **argnames[], PyObject *kwds2, PyObject *values[], Py_ssize_t num_pos_args, const char* function_name) { PyObject *key = 0, *value = 0; Py_ssize_t pos = 0; PyObject*** name; PyObject*** first_kw_arg = argnames + num_pos_args; while (PyDict_Next(kwds, &pos, &key, &value)) { name = first_kw_arg; while (*name && (**name != key)) name++; if (*name) { values[name-argnames] = value; continue; } name = first_kw_arg; #if PY_MAJOR_VERSION < 3 if (likely(PyString_Check(key))) { while (*name) { if ((CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**name) == PyString_GET_SIZE(key)) && _PyString_Eq(**name, key)) { values[name-argnames] = value; break; } name++; } if (*name) continue; else { PyObject*** argname = argnames; while (argname != first_kw_arg) { if ((**argname == key) || ( (CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**argname) == PyString_GET_SIZE(key)) && _PyString_Eq(**argname, key))) { goto arg_passed_twice; } argname++; } } } else #endif if (likely(PyUnicode_Check(key))) { while (*name) { int cmp = (**name == key) ? 0 : #if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3 (__Pyx_PyUnicode_GET_LENGTH(**name) != __Pyx_PyUnicode_GET_LENGTH(key)) ? 1 : #endif PyUnicode_Compare(**name, key); if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad; if (cmp == 0) { values[name-argnames] = value; break; } name++; } if (*name) continue; else { PyObject*** argname = argnames; while (argname != first_kw_arg) { int cmp = (**argname == key) ? 0 : #if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3 (__Pyx_PyUnicode_GET_LENGTH(**argname) != __Pyx_PyUnicode_GET_LENGTH(key)) ? 
1 : #endif PyUnicode_Compare(**argname, key); if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad; if (cmp == 0) goto arg_passed_twice; argname++; } } } else goto invalid_keyword_type; if (kwds2) { if (unlikely(PyDict_SetItem(kwds2, key, value))) goto bad; } else { goto invalid_keyword; } } return 0; arg_passed_twice: __Pyx_RaiseDoubleKeywordsError(function_name, key); goto bad; invalid_keyword_type: PyErr_Format(PyExc_TypeError, "%.200s() keywords must be strings", function_name); goto bad; invalid_keyword: PyErr_Format(PyExc_TypeError, #if PY_MAJOR_VERSION < 3 "%.200s() got an unexpected keyword argument '%.200s'", function_name, PyString_AsString(key)); #else "%s() got an unexpected keyword argument '%U'", function_name, key); #endif bad: return -1; } /* RaiseException */ #if PY_MAJOR_VERSION < 3 static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, CYTHON_UNUSED PyObject *cause) { __Pyx_PyThreadState_declare Py_XINCREF(type); if (!value || value == Py_None) value = NULL; else Py_INCREF(value); if (!tb || tb == Py_None) tb = NULL; else { Py_INCREF(tb); if (!PyTraceBack_Check(tb)) { PyErr_SetString(PyExc_TypeError, "raise: arg 3 must be a traceback or None"); goto raise_error; } } if (PyType_Check(type)) { #if CYTHON_COMPILING_IN_PYPY if (!value) { Py_INCREF(Py_None); value = Py_None; } #endif PyErr_NormalizeException(&type, &value, &tb); } else { if (value) { PyErr_SetString(PyExc_TypeError, "instance exception may not have a separate value"); goto raise_error; } value = type; type = (PyObject*) Py_TYPE(type); Py_INCREF(type); if (!PyType_IsSubtype((PyTypeObject *)type, (PyTypeObject *)PyExc_BaseException)) { PyErr_SetString(PyExc_TypeError, "raise: exception class must be a subclass of BaseException"); goto raise_error; } } __Pyx_PyThreadState_assign __Pyx_ErrRestore(type, value, tb); return; raise_error: Py_XDECREF(value); Py_XDECREF(type); Py_XDECREF(tb); return; } #else static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause) { PyObject* owned_instance = NULL; if (tb == Py_None) { tb = 0; } else if (tb && !PyTraceBack_Check(tb)) { PyErr_SetString(PyExc_TypeError, "raise: arg 3 must be a traceback or None"); goto bad; } if (value == Py_None) value = 0; if (PyExceptionInstance_Check(type)) { if (value) { PyErr_SetString(PyExc_TypeError, "instance exception may not have a separate value"); goto bad; } value = type; type = (PyObject*) Py_TYPE(value); } else if (PyExceptionClass_Check(type)) { PyObject *instance_class = NULL; if (value && PyExceptionInstance_Check(value)) { instance_class = (PyObject*) Py_TYPE(value); if (instance_class != type) { int is_subclass = PyObject_IsSubclass(instance_class, type); if (!is_subclass) { instance_class = NULL; } else if (unlikely(is_subclass == -1)) { goto bad; } else { type = instance_class; } } } if (!instance_class) { PyObject *args; if (!value) args = PyTuple_New(0); else if (PyTuple_Check(value)) { Py_INCREF(value); args = value; } else args = PyTuple_Pack(1, value); if (!args) goto bad; owned_instance = PyObject_Call(type, args, NULL); Py_DECREF(args); if (!owned_instance) goto bad; value = owned_instance; if (!PyExceptionInstance_Check(value)) { PyErr_Format(PyExc_TypeError, "calling %R should have returned an instance of " "BaseException, not %R", type, Py_TYPE(value)); goto bad; } } } else { PyErr_SetString(PyExc_TypeError, "raise: exception class must be a subclass of BaseException"); goto bad; } if (cause) { PyObject *fixed_cause; if (cause == Py_None) { fixed_cause = NULL; } else if 
(PyExceptionClass_Check(cause)) { fixed_cause = PyObject_CallObject(cause, NULL); if (fixed_cause == NULL) goto bad; } else if (PyExceptionInstance_Check(cause)) { fixed_cause = cause; Py_INCREF(fixed_cause); } else { PyErr_SetString(PyExc_TypeError, "exception causes must derive from " "BaseException"); goto bad; } PyException_SetCause(value, fixed_cause); } PyErr_SetObject(type, value); if (tb) { #if CYTHON_COMPILING_IN_PYPY PyObject *tmp_type, *tmp_value, *tmp_tb; PyErr_Fetch(&tmp_type, &tmp_value, &tmp_tb); Py_INCREF(tb); PyErr_Restore(tmp_type, tmp_value, tb); Py_XDECREF(tmp_tb); #else PyThreadState *tstate = __Pyx_PyThreadState_Current; PyObject* tmp_tb = tstate->curexc_traceback; if (tb != tmp_tb) { Py_INCREF(tb); tstate->curexc_traceback = tb; Py_XDECREF(tmp_tb); } #endif } bad: Py_XDECREF(owned_instance); return; } #endif /* PyIntFromDouble */ #if PY_MAJOR_VERSION < 3 static CYTHON_INLINE PyObject* __Pyx_PyInt_FromDouble(double value) { if (value >= (double)LONG_MIN && value <= (double)LONG_MAX) { return PyInt_FromLong((long)value); } return PyLong_FromDouble(value); } #endif /* GetTopmostException */ #if CYTHON_USE_EXC_INFO_STACK static _PyErr_StackItem * __Pyx_PyErr_GetTopmostException(PyThreadState *tstate) { _PyErr_StackItem *exc_info = tstate->exc_info; while ((exc_info->exc_type == NULL || exc_info->exc_type == Py_None) && exc_info->previous_item != NULL) { exc_info = exc_info->previous_item; } return exc_info; } #endif /* SaveResetException */ #if CYTHON_FAST_THREAD_STATE static CYTHON_INLINE void __Pyx__ExceptionSave(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) { #if CYTHON_USE_EXC_INFO_STACK _PyErr_StackItem *exc_info = __Pyx_PyErr_GetTopmostException(tstate); *type = exc_info->exc_type; *value = exc_info->exc_value; *tb = exc_info->exc_traceback; #else *type = tstate->exc_type; *value = tstate->exc_value; *tb = tstate->exc_traceback; #endif Py_XINCREF(*type); Py_XINCREF(*value); Py_XINCREF(*tb); } static CYTHON_INLINE void __Pyx__ExceptionReset(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb) { PyObject *tmp_type, *tmp_value, *tmp_tb; #if CYTHON_USE_EXC_INFO_STACK _PyErr_StackItem *exc_info = tstate->exc_info; tmp_type = exc_info->exc_type; tmp_value = exc_info->exc_value; tmp_tb = exc_info->exc_traceback; exc_info->exc_type = type; exc_info->exc_value = value; exc_info->exc_traceback = tb; #else tmp_type = tstate->exc_type; tmp_value = tstate->exc_value; tmp_tb = tstate->exc_traceback; tstate->exc_type = type; tstate->exc_value = value; tstate->exc_traceback = tb; #endif Py_XDECREF(tmp_type); Py_XDECREF(tmp_value); Py_XDECREF(tmp_tb); } #endif /* GetException */ #if CYTHON_FAST_THREAD_STATE static int __Pyx__GetException(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) #else static int __Pyx_GetException(PyObject **type, PyObject **value, PyObject **tb) #endif { PyObject *local_type, *local_value, *local_tb; #if CYTHON_FAST_THREAD_STATE PyObject *tmp_type, *tmp_value, *tmp_tb; local_type = tstate->curexc_type; local_value = tstate->curexc_value; local_tb = tstate->curexc_traceback; tstate->curexc_type = 0; tstate->curexc_value = 0; tstate->curexc_traceback = 0; #else PyErr_Fetch(&local_type, &local_value, &local_tb); #endif PyErr_NormalizeException(&local_type, &local_value, &local_tb); #if CYTHON_FAST_THREAD_STATE if (unlikely(tstate->curexc_type)) #else if (unlikely(PyErr_Occurred())) #endif goto bad; #if PY_MAJOR_VERSION >= 3 if (local_tb) { if (unlikely(PyException_SetTraceback(local_value, 
local_tb) < 0)) goto bad; } #endif Py_XINCREF(local_tb); Py_XINCREF(local_type); Py_XINCREF(local_value); *type = local_type; *value = local_value; *tb = local_tb; #if CYTHON_FAST_THREAD_STATE #if CYTHON_USE_EXC_INFO_STACK { _PyErr_StackItem *exc_info = tstate->exc_info; tmp_type = exc_info->exc_type; tmp_value = exc_info->exc_value; tmp_tb = exc_info->exc_traceback; exc_info->exc_type = local_type; exc_info->exc_value = local_value; exc_info->exc_traceback = local_tb; } #else tmp_type = tstate->exc_type; tmp_value = tstate->exc_value; tmp_tb = tstate->exc_traceback; tstate->exc_type = local_type; tstate->exc_value = local_value; tstate->exc_traceback = local_tb; #endif Py_XDECREF(tmp_type); Py_XDECREF(tmp_value); Py_XDECREF(tmp_tb); #else PyErr_SetExcInfo(local_type, local_value, local_tb); #endif return 0; bad: *type = 0; *value = 0; *tb = 0; Py_XDECREF(local_type); Py_XDECREF(local_value); Py_XDECREF(local_tb); return -1; } /* PyObjectSetAttrStr */ #if CYTHON_USE_TYPE_SLOTS static CYTHON_INLINE int __Pyx_PyObject_SetAttrStr(PyObject* obj, PyObject* attr_name, PyObject* value) { PyTypeObject* tp = Py_TYPE(obj); if (likely(tp->tp_setattro)) return tp->tp_setattro(obj, attr_name, value); #if PY_MAJOR_VERSION < 3 if (likely(tp->tp_setattr)) return tp->tp_setattr(obj, PyString_AS_STRING(attr_name), value); #endif return PyObject_SetAttr(obj, attr_name, value); } #endif /* FastTypeChecks */ #if CYTHON_COMPILING_IN_CPYTHON static int __Pyx_InBases(PyTypeObject *a, PyTypeObject *b) { while (a) { a = a->tp_base; if (a == b) return 1; } return b == &PyBaseObject_Type; } static CYTHON_INLINE int __Pyx_IsSubtype(PyTypeObject *a, PyTypeObject *b) { PyObject *mro; if (a == b) return 1; mro = a->tp_mro; if (likely(mro)) { Py_ssize_t i, n; n = PyTuple_GET_SIZE(mro); for (i = 0; i < n; i++) { if (PyTuple_GET_ITEM(mro, i) == (PyObject *)b) return 1; } return 0; } return __Pyx_InBases(a, b); } #if PY_MAJOR_VERSION == 2 static int __Pyx_inner_PyErr_GivenExceptionMatches2(PyObject *err, PyObject* exc_type1, PyObject* exc_type2) { PyObject *exception, *value, *tb; int res; __Pyx_PyThreadState_declare __Pyx_PyThreadState_assign __Pyx_ErrFetch(&exception, &value, &tb); res = exc_type1 ? PyObject_IsSubclass(err, exc_type1) : 0; if (unlikely(res == -1)) { PyErr_WriteUnraisable(err); res = 0; } if (!res) { res = PyObject_IsSubclass(err, exc_type2); if (unlikely(res == -1)) { PyErr_WriteUnraisable(err); res = 0; } } __Pyx_ErrRestore(exception, value, tb); return res; } #else static CYTHON_INLINE int __Pyx_inner_PyErr_GivenExceptionMatches2(PyObject *err, PyObject* exc_type1, PyObject *exc_type2) { int res = exc_type1 ? 
__Pyx_IsSubtype((PyTypeObject*)err, (PyTypeObject*)exc_type1) : 0; if (!res) { res = __Pyx_IsSubtype((PyTypeObject*)err, (PyTypeObject*)exc_type2); } return res; } #endif static int __Pyx_PyErr_GivenExceptionMatchesTuple(PyObject *exc_type, PyObject *tuple) { Py_ssize_t i, n; assert(PyExceptionClass_Check(exc_type)); n = PyTuple_GET_SIZE(tuple); #if PY_MAJOR_VERSION >= 3 for (i=0; i<n; i++) { if (exc_type == PyTuple_GET_ITEM(tuple, i)) return 1; } #endif for (i=0; i<n; i++) { PyObject *t = PyTuple_GET_ITEM(tuple, i); #if PY_MAJOR_VERSION < 3 if (likely(exc_type == t)) return 1; #endif if (likely(PyExceptionClass_Check(t))) { if (__Pyx_inner_PyErr_GivenExceptionMatches2(exc_type, NULL, t)) return 1; } else { } } return 0; } static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches(PyObject *err, PyObject* exc_type) { if (likely(err == exc_type)) return 1; if (likely(PyExceptionClass_Check(err))) { if (likely(PyExceptionClass_Check(exc_type))) { return __Pyx_inner_PyErr_GivenExceptionMatches2(err, NULL, exc_type); } else if (likely(PyTuple_Check(exc_type))) { return __Pyx_PyErr_GivenExceptionMatchesTuple(err, exc_type); } else { } } return PyErr_GivenExceptionMatches(err, exc_type); } static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches2(PyObject *err, PyObject *exc_type1, PyObject *exc_type2) { assert(PyExceptionClass_Check(exc_type1)); assert(PyExceptionClass_Check(exc_type2)); if (likely(err == exc_type1 || err == exc_type2)) return 1; if (likely(PyExceptionClass_Check(err))) { return __Pyx_inner_PyErr_GivenExceptionMatches2(err, exc_type1, exc_type2); } return (PyErr_GivenExceptionMatches(err, exc_type1) || PyErr_GivenExceptionMatches(err, exc_type2)); } #endif /* SwapException */ #if CYTHON_FAST_THREAD_STATE static CYTHON_INLINE void __Pyx__ExceptionSwap(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) { PyObject *tmp_type, *tmp_value, *tmp_tb; #if CYTHON_USE_EXC_INFO_STACK _PyErr_StackItem *exc_info = tstate->exc_info; tmp_type = exc_info->exc_type; tmp_value = exc_info->exc_value; tmp_tb = exc_info->exc_traceback; exc_info->exc_type = *type; exc_info->exc_value = *value; exc_info->exc_traceback = *tb; #else tmp_type = tstate->exc_type; tmp_value = tstate->exc_value; tmp_tb = tstate->exc_traceback; tstate->exc_type = *type; tstate->exc_value = *value; tstate->exc_traceback = *tb; #endif *type = tmp_type; *value = tmp_value; *tb = tmp_tb; } #else static CYTHON_INLINE void __Pyx_ExceptionSwap(PyObject **type, PyObject **value, PyObject **tb) { PyObject *tmp_type, *tmp_value, *tmp_tb; PyErr_GetExcInfo(&tmp_type, &tmp_value, &tmp_tb); PyErr_SetExcInfo(*type, *value, *tb); *type = tmp_type; *value = tmp_value; *tb = tmp_tb; } #endif /* RaiseTooManyValuesToUnpack */ static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected) { PyErr_Format(PyExc_ValueError, "too many values to unpack (expected %" CYTHON_FORMAT_SSIZE_T "d)", expected); } /* RaiseNeedMoreValuesToUnpack */ static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index) { PyErr_Format(PyExc_ValueError, "need more than %" CYTHON_FORMAT_SSIZE_T "d value%.1s to unpack", index, (index == 1) ? "" : "s"); } /* IterFinish */ static CYTHON_INLINE int __Pyx_IterFinish(void) { #if CYTHON_FAST_THREAD_STATE PyThreadState *tstate = __Pyx_PyThreadState_Current; PyObject* exc_type = tstate->curexc_type; if (unlikely(exc_type)) { if (likely(__Pyx_PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) { PyObject *exc_value, *exc_tb; exc_value = tstate->curexc_value; exc_tb = tstate->curexc_traceback; tstate->curexc_type = 0; tstate->curexc_value = 0; tstate->curexc_traceback = 0; Py_DECREF(exc_type); Py_XDECREF(exc_value); Py_XDECREF(exc_tb); return 0; } else { return -1; } } return 0; #else if (unlikely(PyErr_Occurred())) { if (likely(PyErr_ExceptionMatches(PyExc_StopIteration))) { PyErr_Clear(); return 0; } else { return -1; } } return 0; #endif } /* UnpackItemEndCheck */ static int __Pyx_IternextUnpackEndCheck(PyObject *retval, Py_ssize_t expected) { if (unlikely(retval)) { Py_DECREF(retval); __Pyx_RaiseTooManyValuesError(expected); return -1; } else { return __Pyx_IterFinish(); } return 0; } /* PyIntCompare */ static CYTHON_INLINE PyObject* __Pyx_PyInt_EqObjC(PyObject *op1, PyObject *op2, CYTHON_UNUSED long intval, CYTHON_UNUSED long inplace) { if (op1 == op2) { Py_RETURN_TRUE; } #if PY_MAJOR_VERSION < 3 if (likely(PyInt_CheckExact(op1))) { const long b = intval; long a = PyInt_AS_LONG(op1); if (a == b) Py_RETURN_TRUE; else Py_RETURN_FALSE; } #endif #if CYTHON_USE_PYLONG_INTERNALS if (likely(PyLong_CheckExact(op1))) { int unequal; unsigned long uintval; Py_ssize_t size = Py_SIZE(op1); const digit* digits = ((PyLongObject*)op1)->ob_digit; if (intval == 0) { if (size == 0) Py_RETURN_TRUE; else Py_RETURN_FALSE; } else if (intval < 0) { if (size >= 0) Py_RETURN_FALSE; intval = -intval; size = -size; } else { if (size <= 0) Py_RETURN_FALSE; } uintval = (unsigned long) intval; #if PyLong_SHIFT * 4 < SIZEOF_LONG*8 if (uintval >> (PyLong_SHIFT * 4)) { unequal = (size != 5) || (digits[0] != (uintval & (unsigned long) PyLong_MASK)) | (digits[1] !=
((uintval >> (1 * PyLong_SHIFT)) & (unsigned long) PyLong_MASK)) | (digits[2] != ((uintval >> (2 * PyLong_SHIFT)) & (unsigned long) PyLong_MASK)) | (digits[3] != ((uintval >> (3 * PyLong_SHIFT)) & (unsigned long) PyLong_MASK)) | (digits[4] != ((uintval >> (4 * PyLong_SHIFT)) & (unsigned long) PyLong_MASK)); } else #endif #if PyLong_SHIFT * 3 < SIZEOF_LONG*8 if (uintval >> (PyLong_SHIFT * 3)) { unequal = (size != 4) || (digits[0] != (uintval & (unsigned long) PyLong_MASK)) | (digits[1] != ((uintval >> (1 * PyLong_SHIFT)) & (unsigned long) PyLong_MASK)) | (digits[2] != ((uintval >> (2 * PyLong_SHIFT)) & (unsigned long) PyLong_MASK)) | (digits[3] != ((uintval >> (3 * PyLong_SHIFT)) & (unsigned long) PyLong_MASK)); } else #endif #if PyLong_SHIFT * 2 < SIZEOF_LONG*8 if (uintval >> (PyLong_SHIFT * 2)) { unequal = (size != 3) || (digits[0] != (uintval & (unsigned long) PyLong_MASK)) | (digits[1] != ((uintval >> (1 * PyLong_SHIFT)) & (unsigned long) PyLong_MASK)) | (digits[2] != ((uintval >> (2 * PyLong_SHIFT)) & (unsigned long) PyLong_MASK)); } else #endif #if PyLong_SHIFT * 1 < SIZEOF_LONG*8 if (uintval >> (PyLong_SHIFT * 1)) { unequal = (size != 2) || (digits[0] != (uintval & (unsigned long) PyLong_MASK)) | (digits[1] != ((uintval >> (1 * PyLong_SHIFT)) & (unsigned long) PyLong_MASK)); } else #endif unequal = (size != 1) || (((unsigned long) digits[0]) != (uintval & (unsigned long) PyLong_MASK)); if (unequal == 0) Py_RETURN_TRUE; else Py_RETURN_FALSE; } #endif if (PyFloat_CheckExact(op1)) { const long b = intval; double a = PyFloat_AS_DOUBLE(op1); if ((double)a == (double)b) Py_RETURN_TRUE; else Py_RETURN_FALSE; } return ( PyObject_RichCompare(op1, op2, Py_EQ)); } /* PyIntBinop */ #if !CYTHON_COMPILING_IN_PYPY static PyObject* __Pyx_PyInt_AndObjC(PyObject *op1, PyObject *op2, CYTHON_UNUSED long intval, int inplace, int zerodivision_check) { (void)inplace; (void)zerodivision_check; #if PY_MAJOR_VERSION < 3 if (likely(PyInt_CheckExact(op1))) { const long b = intval; long a = PyInt_AS_LONG(op1); return PyInt_FromLong(a & b); } #endif #if CYTHON_USE_PYLONG_INTERNALS if (likely(PyLong_CheckExact(op1))) { const long b = intval; long a, x; #ifdef HAVE_LONG_LONG const PY_LONG_LONG llb = intval; PY_LONG_LONG lla, llx; #endif const digit* digits = ((PyLongObject*)op1)->ob_digit; const Py_ssize_t size = Py_SIZE(op1); if (likely(__Pyx_sst_abs(size) <= 1)) { a = likely(size) ? 
digits[0] : 0; if (size == -1) a = -a; } else { switch (size) { case -2: if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { a = -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); break; #ifdef HAVE_LONG_LONG } else if (8 * sizeof(PY_LONG_LONG) - 1 > 2 * PyLong_SHIFT) { lla = -(PY_LONG_LONG) (((((unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); goto long_long; #endif } CYTHON_FALLTHROUGH; case 2: if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { a = (long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); break; #ifdef HAVE_LONG_LONG } else if (8 * sizeof(PY_LONG_LONG) - 1 > 2 * PyLong_SHIFT) { lla = (PY_LONG_LONG) (((((unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); goto long_long; #endif } CYTHON_FALLTHROUGH; case -3: if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { a = -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); break; #ifdef HAVE_LONG_LONG } else if (8 * sizeof(PY_LONG_LONG) - 1 > 3 * PyLong_SHIFT) { lla = -(PY_LONG_LONG) (((((((unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); goto long_long; #endif } CYTHON_FALLTHROUGH; case 3: if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { a = (long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); break; #ifdef HAVE_LONG_LONG } else if (8 * sizeof(PY_LONG_LONG) - 1 > 3 * PyLong_SHIFT) { lla = (PY_LONG_LONG) (((((((unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); goto long_long; #endif } CYTHON_FALLTHROUGH; case -4: if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) { a = -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); break; #ifdef HAVE_LONG_LONG } else if (8 * sizeof(PY_LONG_LONG) - 1 > 4 * PyLong_SHIFT) { lla = -(PY_LONG_LONG) (((((((((unsigned PY_LONG_LONG)digits[3]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); goto long_long; #endif } CYTHON_FALLTHROUGH; case 4: if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) { a = (long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); break; #ifdef HAVE_LONG_LONG } else if (8 * sizeof(PY_LONG_LONG) - 1 > 4 * PyLong_SHIFT) { lla = (PY_LONG_LONG) (((((((((unsigned PY_LONG_LONG)digits[3]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); goto long_long; #endif } CYTHON_FALLTHROUGH; default: return PyLong_Type.tp_as_number->nb_and(op1, op2); } } x = a & b; return PyLong_FromLong(x); #ifdef HAVE_LONG_LONG long_long: llx = lla & llb; return PyLong_FromLongLong(llx); #endif } #endif return (inplace ? 
PyNumber_InPlaceAnd : PyNumber_And)(op1, op2); } #endif /* PyIntBinop */ #if !CYTHON_COMPILING_IN_PYPY #if PY_MAJOR_VERSION < 3 || CYTHON_USE_PYLONG_INTERNALS #define __Pyx_PyInt_RemainderObjC_ZeroDivisionError(operand)\ if (unlikely(zerodivision_check && ((operand) == 0))) {\ PyErr_SetString(PyExc_ZeroDivisionError, "integer division or modulo by zero");\ return NULL;\ } #endif static PyObject* __Pyx_PyInt_RemainderObjC(PyObject *op1, PyObject *op2, CYTHON_UNUSED long intval, int inplace, int zerodivision_check) { (void)inplace; (void)zerodivision_check; #if PY_MAJOR_VERSION < 3 if (likely(PyInt_CheckExact(op1))) { const long b = intval; long x; long a = PyInt_AS_LONG(op1); __Pyx_PyInt_RemainderObjC_ZeroDivisionError(b) x = a % b; x += ((x != 0) & ((x ^ b) < 0)) * b; return PyInt_FromLong(x); } #endif #if CYTHON_USE_PYLONG_INTERNALS if (likely(PyLong_CheckExact(op1))) { const long b = intval; long a, x; #ifdef HAVE_LONG_LONG const PY_LONG_LONG llb = intval; PY_LONG_LONG lla, llx; #endif const digit* digits = ((PyLongObject*)op1)->ob_digit; const Py_ssize_t size = Py_SIZE(op1); if (likely(__Pyx_sst_abs(size) <= 1)) { a = likely(size) ? digits[0] : 0; if (size == -1) a = -a; } else { switch (size) { case -2: if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { a = -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); break; #ifdef HAVE_LONG_LONG } else if (8 * sizeof(PY_LONG_LONG) - 1 > 2 * PyLong_SHIFT) { lla = -(PY_LONG_LONG) (((((unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); goto long_long; #endif } CYTHON_FALLTHROUGH; case 2: if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { a = (long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); break; #ifdef HAVE_LONG_LONG } else if (8 * sizeof(PY_LONG_LONG) - 1 > 2 * PyLong_SHIFT) { lla = (PY_LONG_LONG) (((((unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); goto long_long; #endif } CYTHON_FALLTHROUGH; case -3: if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { a = -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); break; #ifdef HAVE_LONG_LONG } else if (8 * sizeof(PY_LONG_LONG) - 1 > 3 * PyLong_SHIFT) { lla = -(PY_LONG_LONG) (((((((unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); goto long_long; #endif } CYTHON_FALLTHROUGH; case 3: if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { a = (long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); break; #ifdef HAVE_LONG_LONG } else if (8 * sizeof(PY_LONG_LONG) - 1 > 3 * PyLong_SHIFT) { lla = (PY_LONG_LONG) (((((((unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); goto long_long; #endif } CYTHON_FALLTHROUGH; case -4: if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) { a = -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); break; #ifdef HAVE_LONG_LONG } else if (8 * sizeof(PY_LONG_LONG) - 1 > 4 * PyLong_SHIFT) { lla = -(PY_LONG_LONG) (((((((((unsigned PY_LONG_LONG)digits[3]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); 
goto long_long; #endif } CYTHON_FALLTHROUGH; case 4: if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) { a = (long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); break; #ifdef HAVE_LONG_LONG } else if (8 * sizeof(PY_LONG_LONG) - 1 > 4 * PyLong_SHIFT) { lla = (PY_LONG_LONG) (((((((((unsigned PY_LONG_LONG)digits[3]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); goto long_long; #endif } CYTHON_FALLTHROUGH; default: return PyLong_Type.tp_as_number->nb_remainder(op1, op2); } } __Pyx_PyInt_RemainderObjC_ZeroDivisionError(b) x = a % b; x += ((x != 0) & ((x ^ b) < 0)) * b; return PyLong_FromLong(x); #ifdef HAVE_LONG_LONG long_long: llx = lla % llb; llx += ((llx != 0) & ((llx ^ llb) < 0)) * llb; return PyLong_FromLongLong(llx); #endif } #endif return (inplace ? PyNumber_InPlaceRemainder : PyNumber_Remainder)(op1, op2); } #endif /* PyObjectCallNoArg */ #if CYTHON_COMPILING_IN_CPYTHON static CYTHON_INLINE PyObject* __Pyx_PyObject_CallNoArg(PyObject *func) { #if CYTHON_FAST_PYCALL if (PyFunction_Check(func)) { return __Pyx_PyFunction_FastCall(func, NULL, 0); } #endif #ifdef __Pyx_CyFunction_USED if (likely(PyCFunction_Check(func) || __Pyx_CyFunction_Check(func))) #else if (likely(PyCFunction_Check(func))) #endif { if (likely(PyCFunction_GET_FLAGS(func) & METH_NOARGS)) { return __Pyx_PyObject_CallMethO(func, NULL); } } return __Pyx_PyObject_Call(func, __pyx_empty_tuple, NULL); } #endif /* GetItemInt */ static PyObject *__Pyx_GetItemInt_Generic(PyObject *o, PyObject* j) { PyObject *r; if (!j) return NULL; r = PyObject_GetItem(o, j); Py_DECREF(j); return r; } static CYTHON_INLINE PyObject *__Pyx_GetItemInt_List_Fast(PyObject *o, Py_ssize_t i, CYTHON_NCP_UNUSED int wraparound, CYTHON_NCP_UNUSED int boundscheck) { #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS Py_ssize_t wrapped_i = i; if (wraparound & unlikely(i < 0)) { wrapped_i += PyList_GET_SIZE(o); } if ((!boundscheck) || likely(__Pyx_is_valid_index(wrapped_i, PyList_GET_SIZE(o)))) { PyObject *r = PyList_GET_ITEM(o, wrapped_i); Py_INCREF(r); return r; } return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i)); #else return PySequence_GetItem(o, i); #endif } static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Tuple_Fast(PyObject *o, Py_ssize_t i, CYTHON_NCP_UNUSED int wraparound, CYTHON_NCP_UNUSED int boundscheck) { #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS Py_ssize_t wrapped_i = i; if (wraparound & unlikely(i < 0)) { wrapped_i += PyTuple_GET_SIZE(o); } if ((!boundscheck) || likely(__Pyx_is_valid_index(wrapped_i, PyTuple_GET_SIZE(o)))) { PyObject *r = PyTuple_GET_ITEM(o, wrapped_i); Py_INCREF(r); return r; } return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i)); #else return PySequence_GetItem(o, i); #endif } static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Fast(PyObject *o, Py_ssize_t i, int is_list, CYTHON_NCP_UNUSED int wraparound, CYTHON_NCP_UNUSED int boundscheck) { #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS && CYTHON_USE_TYPE_SLOTS if (is_list || PyList_CheckExact(o)) { Py_ssize_t n = ((!wraparound) | likely(i >= 0)) ? 
i : i + PyList_GET_SIZE(o); if ((!boundscheck) || (likely(__Pyx_is_valid_index(n, PyList_GET_SIZE(o))))) { PyObject *r = PyList_GET_ITEM(o, n); Py_INCREF(r); return r; } } else if (PyTuple_CheckExact(o)) { Py_ssize_t n = ((!wraparound) | likely(i >= 0)) ? i : i + PyTuple_GET_SIZE(o); if ((!boundscheck) || likely(__Pyx_is_valid_index(n, PyTuple_GET_SIZE(o)))) { PyObject *r = PyTuple_GET_ITEM(o, n); Py_INCREF(r); return r; } } else { PySequenceMethods *m = Py_TYPE(o)->tp_as_sequence; if (likely(m && m->sq_item)) { if (wraparound && unlikely(i < 0) && likely(m->sq_length)) { Py_ssize_t l = m->sq_length(o); if (likely(l >= 0)) { i += l; } else { if (!PyErr_ExceptionMatches(PyExc_OverflowError)) return NULL; PyErr_Clear(); } } return m->sq_item(o, i); } } #else if (is_list || PySequence_Check(o)) { return PySequence_GetItem(o, i); } #endif return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i)); } /* ObjectGetItem */ #if CYTHON_USE_TYPE_SLOTS static PyObject *__Pyx_PyObject_GetIndex(PyObject *obj, PyObject* index) { PyObject *runerr; Py_ssize_t key_value; PySequenceMethods *m = Py_TYPE(obj)->tp_as_sequence; if (unlikely(!(m && m->sq_item))) { PyErr_Format(PyExc_TypeError, "'%.200s' object is not subscriptable", Py_TYPE(obj)->tp_name); return NULL; } key_value = __Pyx_PyIndex_AsSsize_t(index); if (likely(key_value != -1 || !(runerr = PyErr_Occurred()))) { return __Pyx_GetItemInt_Fast(obj, key_value, 0, 1, 1); } if (PyErr_GivenExceptionMatches(runerr, PyExc_OverflowError)) { PyErr_Clear(); PyErr_Format(PyExc_IndexError, "cannot fit '%.200s' into an index-sized integer", Py_TYPE(index)->tp_name); } return NULL; } static PyObject *__Pyx_PyObject_GetItem(PyObject *obj, PyObject* key) { PyMappingMethods *m = Py_TYPE(obj)->tp_as_mapping; if (likely(m && m->mp_subscript)) { return m->mp_subscript(obj, key); } return __Pyx_PyObject_GetIndex(obj, key); } #endif /* BytesEquals */ static CYTHON_INLINE int __Pyx_PyBytes_Equals(PyObject* s1, PyObject* s2, int equals) { #if CYTHON_COMPILING_IN_PYPY return PyObject_RichCompareBool(s1, s2, equals); #else if (s1 == s2) { return (equals == Py_EQ); } else if (PyBytes_CheckExact(s1) & PyBytes_CheckExact(s2)) { const char *ps1, *ps2; Py_ssize_t length = PyBytes_GET_SIZE(s1); if (length != PyBytes_GET_SIZE(s2)) return (equals == Py_NE); ps1 = PyBytes_AS_STRING(s1); ps2 = PyBytes_AS_STRING(s2); if (ps1[0] != ps2[0]) { return (equals == Py_NE); } else if (length == 1) { return (equals == Py_EQ); } else { int result; #if CYTHON_USE_UNICODE_INTERNALS Py_hash_t hash1, hash2; hash1 = ((PyBytesObject*)s1)->ob_shash; hash2 = ((PyBytesObject*)s2)->ob_shash; if (hash1 != hash2 && hash1 != -1 && hash2 != -1) { return (equals == Py_NE); } #endif result = memcmp(ps1, ps2, (size_t)length); return (equals == Py_EQ) ? 
(result == 0) : (result != 0); } } else if ((s1 == Py_None) & PyBytes_CheckExact(s2)) { return (equals == Py_NE); } else if ((s2 == Py_None) & PyBytes_CheckExact(s1)) { return (equals == Py_NE); } else { int result; PyObject* py_result = PyObject_RichCompare(s1, s2, equals); if (!py_result) return -1; result = __Pyx_PyObject_IsTrue(py_result); Py_DECREF(py_result); return result; } #endif } /* UnicodeEquals */ static CYTHON_INLINE int __Pyx_PyUnicode_Equals(PyObject* s1, PyObject* s2, int equals) { #if CYTHON_COMPILING_IN_PYPY return PyObject_RichCompareBool(s1, s2, equals); #else #if PY_MAJOR_VERSION < 3 PyObject* owned_ref = NULL; #endif int s1_is_unicode, s2_is_unicode; if (s1 == s2) { goto return_eq; } s1_is_unicode = PyUnicode_CheckExact(s1); s2_is_unicode = PyUnicode_CheckExact(s2); #if PY_MAJOR_VERSION < 3 if ((s1_is_unicode & (!s2_is_unicode)) && PyString_CheckExact(s2)) { owned_ref = PyUnicode_FromObject(s2); if (unlikely(!owned_ref)) return -1; s2 = owned_ref; s2_is_unicode = 1; } else if ((s2_is_unicode & (!s1_is_unicode)) && PyString_CheckExact(s1)) { owned_ref = PyUnicode_FromObject(s1); if (unlikely(!owned_ref)) return -1; s1 = owned_ref; s1_is_unicode = 1; } else if (((!s2_is_unicode) & (!s1_is_unicode))) { return __Pyx_PyBytes_Equals(s1, s2, equals); } #endif if (s1_is_unicode & s2_is_unicode) { Py_ssize_t length; int kind; void *data1, *data2; if (unlikely(__Pyx_PyUnicode_READY(s1) < 0) || unlikely(__Pyx_PyUnicode_READY(s2) < 0)) return -1; length = __Pyx_PyUnicode_GET_LENGTH(s1); if (length != __Pyx_PyUnicode_GET_LENGTH(s2)) { goto return_ne; } #if CYTHON_USE_UNICODE_INTERNALS { Py_hash_t hash1, hash2; #if CYTHON_PEP393_ENABLED hash1 = ((PyASCIIObject*)s1)->hash; hash2 = ((PyASCIIObject*)s2)->hash; #else hash1 = ((PyUnicodeObject*)s1)->hash; hash2 = ((PyUnicodeObject*)s2)->hash; #endif if (hash1 != hash2 && hash1 != -1 && hash2 != -1) { goto return_ne; } } #endif kind = __Pyx_PyUnicode_KIND(s1); if (kind != __Pyx_PyUnicode_KIND(s2)) { goto return_ne; } data1 = __Pyx_PyUnicode_DATA(s1); data2 = __Pyx_PyUnicode_DATA(s2); if (__Pyx_PyUnicode_READ(kind, data1, 0) != __Pyx_PyUnicode_READ(kind, data2, 0)) { goto return_ne; } else if (length == 1) { goto return_eq; } else { int result = memcmp(data1, data2, (size_t)(length * kind)); #if PY_MAJOR_VERSION < 3 Py_XDECREF(owned_ref); #endif return (equals == Py_EQ) ? 
(result == 0) : (result != 0); } } else if ((s1 == Py_None) & s2_is_unicode) { goto return_ne; } else if ((s2 == Py_None) & s1_is_unicode) { goto return_ne; } else { int result; PyObject* py_result = PyObject_RichCompare(s1, s2, equals); #if PY_MAJOR_VERSION < 3 Py_XDECREF(owned_ref); #endif if (!py_result) return -1; result = __Pyx_PyObject_IsTrue(py_result); Py_DECREF(py_result); return result; } return_eq: #if PY_MAJOR_VERSION < 3 Py_XDECREF(owned_ref); #endif return (equals == Py_EQ); return_ne: #if PY_MAJOR_VERSION < 3 Py_XDECREF(owned_ref); #endif return (equals == Py_NE); #endif } /* Import */ static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, int level) { PyObject *empty_list = 0; PyObject *module = 0; PyObject *global_dict = 0; PyObject *empty_dict = 0; PyObject *list; #if PY_MAJOR_VERSION < 3 PyObject *py_import; py_import = __Pyx_PyObject_GetAttrStr(__pyx_b, __pyx_n_s_import); if (!py_import) goto bad; #endif if (from_list) list = from_list; else { empty_list = PyList_New(0); if (!empty_list) goto bad; list = empty_list; } global_dict = PyModule_GetDict(__pyx_m); if (!global_dict) goto bad; empty_dict = PyDict_New(); if (!empty_dict) goto bad; { #if PY_MAJOR_VERSION >= 3 if (level == -1) { if ((1) && (strchr(__Pyx_MODULE_NAME, '.'))) { module = PyImport_ImportModuleLevelObject( name, global_dict, empty_dict, list, 1); if (!module) { if (!PyErr_ExceptionMatches(PyExc_ImportError)) goto bad; PyErr_Clear(); } } level = 0; } #endif if (!module) { #if PY_MAJOR_VERSION < 3 PyObject *py_level = PyInt_FromLong(level); if (!py_level) goto bad; module = PyObject_CallFunctionObjArgs(py_import, name, global_dict, empty_dict, list, py_level, (PyObject *)NULL); Py_DECREF(py_level); #else module = PyImport_ImportModuleLevelObject( name, global_dict, empty_dict, list, level); #endif } } bad: #if PY_MAJOR_VERSION < 3 Py_XDECREF(py_import); #endif Py_XDECREF(empty_list); Py_XDECREF(empty_dict); return module; } /* ImportFrom */ static PyObject* __Pyx_ImportFrom(PyObject* module, PyObject* name) { PyObject* value = __Pyx_PyObject_GetAttrStr(module, name); if (unlikely(!value) && PyErr_ExceptionMatches(PyExc_AttributeError)) { PyErr_Format(PyExc_ImportError, #if PY_MAJOR_VERSION < 3 "cannot import name %.230s", PyString_AS_STRING(name)); #else "cannot import name %S", name); #endif } return value; } /* HasAttr */ static CYTHON_INLINE int __Pyx_HasAttr(PyObject *o, PyObject *n) { PyObject *r; if (unlikely(!__Pyx_PyBaseString_Check(n))) { PyErr_SetString(PyExc_TypeError, "hasattr(): attribute name must be string"); return -1; } r = __Pyx_GetAttr(o, n); if (unlikely(!r)) { PyErr_Clear(); return 0; } else { Py_DECREF(r); return 1; } } /* PyObject_GenericGetAttrNoDict */ #if CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP && PY_VERSION_HEX < 0x03070000 static PyObject *__Pyx_RaiseGenericGetAttributeError(PyTypeObject *tp, PyObject *attr_name) { PyErr_Format(PyExc_AttributeError, #if PY_MAJOR_VERSION >= 3 "'%.50s' object has no attribute '%U'", tp->tp_name, attr_name); #else "'%.50s' object has no attribute '%.400s'", tp->tp_name, PyString_AS_STRING(attr_name)); #endif return NULL; } static CYTHON_INLINE PyObject* __Pyx_PyObject_GenericGetAttrNoDict(PyObject* obj, PyObject* attr_name) { PyObject *descr; PyTypeObject *tp = Py_TYPE(obj); if (unlikely(!PyString_Check(attr_name))) { return PyObject_GenericGetAttr(obj, attr_name); } assert(!tp->tp_dictoffset); descr = _PyType_Lookup(tp, attr_name); if (unlikely(!descr)) { return __Pyx_RaiseGenericGetAttributeError(tp, attr_name); } 
Py_INCREF(descr); #if PY_MAJOR_VERSION < 3 if (likely(PyType_HasFeature(Py_TYPE(descr), Py_TPFLAGS_HAVE_CLASS))) #endif { descrgetfunc f = Py_TYPE(descr)->tp_descr_get; if (unlikely(f)) { PyObject *res = f(descr, obj, (PyObject *)tp); Py_DECREF(descr); return res; } } return descr; } #endif /* PyObject_GenericGetAttr */ #if CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP && PY_VERSION_HEX < 0x03070000 static PyObject* __Pyx_PyObject_GenericGetAttr(PyObject* obj, PyObject* attr_name) { if (unlikely(Py_TYPE(obj)->tp_dictoffset)) { return PyObject_GenericGetAttr(obj, attr_name); } return __Pyx_PyObject_GenericGetAttrNoDict(obj, attr_name); } #endif /* PyObjectGetAttrStrNoError */ static void __Pyx_PyObject_GetAttrStr_ClearAttributeError(void) { __Pyx_PyThreadState_declare __Pyx_PyThreadState_assign if (likely(__Pyx_PyErr_ExceptionMatches(PyExc_AttributeError))) __Pyx_PyErr_Clear(); } static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStrNoError(PyObject* obj, PyObject* attr_name) { PyObject *result; #if CYTHON_COMPILING_IN_CPYTHON && CYTHON_USE_TYPE_SLOTS && PY_VERSION_HEX >= 0x030700B1 PyTypeObject* tp = Py_TYPE(obj); if (likely(tp->tp_getattro == PyObject_GenericGetAttr)) { return _PyObject_GenericGetAttrWithDict(obj, attr_name, NULL, 1); } #endif result = __Pyx_PyObject_GetAttrStr(obj, attr_name); if (unlikely(!result)) { __Pyx_PyObject_GetAttrStr_ClearAttributeError(); } return result; } /* SetupReduce */ static int __Pyx_setup_reduce_is_named(PyObject* meth, PyObject* name) { int ret; PyObject *name_attr; name_attr = __Pyx_PyObject_GetAttrStr(meth, __pyx_n_s_name_2); if (likely(name_attr)) { ret = PyObject_RichCompareBool(name_attr, name, Py_EQ); } else { ret = -1; } if (unlikely(ret < 0)) { PyErr_Clear(); ret = 0; } Py_XDECREF(name_attr); return ret; } static int __Pyx_setup_reduce(PyObject* type_obj) { int ret = 0; PyObject *object_reduce = NULL; PyObject *object_reduce_ex = NULL; PyObject *reduce = NULL; PyObject *reduce_ex = NULL; PyObject *reduce_cython = NULL; PyObject *setstate = NULL; PyObject *setstate_cython = NULL; #if CYTHON_USE_PYTYPE_LOOKUP if (_PyType_Lookup((PyTypeObject*)type_obj, __pyx_n_s_getstate)) goto __PYX_GOOD; #else if (PyObject_HasAttr(type_obj, __pyx_n_s_getstate)) goto __PYX_GOOD; #endif #if CYTHON_USE_PYTYPE_LOOKUP object_reduce_ex = _PyType_Lookup(&PyBaseObject_Type, __pyx_n_s_reduce_ex); if (!object_reduce_ex) goto __PYX_BAD; #else object_reduce_ex = __Pyx_PyObject_GetAttrStr((PyObject*)&PyBaseObject_Type, __pyx_n_s_reduce_ex); if (!object_reduce_ex) goto __PYX_BAD; #endif reduce_ex = __Pyx_PyObject_GetAttrStr(type_obj, __pyx_n_s_reduce_ex); if (unlikely(!reduce_ex)) goto __PYX_BAD; if (reduce_ex == object_reduce_ex) { #if CYTHON_USE_PYTYPE_LOOKUP object_reduce = _PyType_Lookup(&PyBaseObject_Type, __pyx_n_s_reduce); if (!object_reduce) goto __PYX_BAD; #else object_reduce = __Pyx_PyObject_GetAttrStr((PyObject*)&PyBaseObject_Type, __pyx_n_s_reduce); if (!object_reduce) goto __PYX_BAD; #endif reduce = __Pyx_PyObject_GetAttrStr(type_obj, __pyx_n_s_reduce); if (unlikely(!reduce)) goto __PYX_BAD; if (reduce == object_reduce || __Pyx_setup_reduce_is_named(reduce, __pyx_n_s_reduce_cython)) { reduce_cython = __Pyx_PyObject_GetAttrStrNoError(type_obj, __pyx_n_s_reduce_cython); if (likely(reduce_cython)) { ret = PyDict_SetItem(((PyTypeObject*)type_obj)->tp_dict, __pyx_n_s_reduce, reduce_cython); if (unlikely(ret < 0)) goto __PYX_BAD; ret = PyDict_DelItem(((PyTypeObject*)type_obj)->tp_dict, __pyx_n_s_reduce_cython); if (unlikely(ret < 0)) goto __PYX_BAD; } else if 
(reduce == object_reduce || PyErr_Occurred()) { goto __PYX_BAD; } setstate = __Pyx_PyObject_GetAttrStr(type_obj, __pyx_n_s_setstate); if (!setstate) PyErr_Clear(); if (!setstate || __Pyx_setup_reduce_is_named(setstate, __pyx_n_s_setstate_cython)) { setstate_cython = __Pyx_PyObject_GetAttrStrNoError(type_obj, __pyx_n_s_setstate_cython); if (likely(setstate_cython)) { ret = PyDict_SetItem(((PyTypeObject*)type_obj)->tp_dict, __pyx_n_s_setstate, setstate_cython); if (unlikely(ret < 0)) goto __PYX_BAD; ret = PyDict_DelItem(((PyTypeObject*)type_obj)->tp_dict, __pyx_n_s_setstate_cython); if (unlikely(ret < 0)) goto __PYX_BAD; } else if (!setstate || PyErr_Occurred()) { goto __PYX_BAD; } } PyType_Modified((PyTypeObject*)type_obj); } } goto __PYX_GOOD; __PYX_BAD: if (!PyErr_Occurred()) PyErr_Format(PyExc_RuntimeError, "Unable to initialize pickling for %s", ((PyTypeObject*)type_obj)->tp_name); ret = -1; __PYX_GOOD: #if !CYTHON_USE_PYTYPE_LOOKUP Py_XDECREF(object_reduce); Py_XDECREF(object_reduce_ex); #endif Py_XDECREF(reduce); Py_XDECREF(reduce_ex); Py_XDECREF(reduce_cython); Py_XDECREF(setstate); Py_XDECREF(setstate_cython); return ret; } /* ClassMethod */ static PyObject* __Pyx_Method_ClassMethod(PyObject *method) { #if CYTHON_COMPILING_IN_PYPY && PYPY_VERSION_NUM <= 0x05080000 if (PyObject_TypeCheck(method, &PyWrapperDescr_Type)) { return PyClassMethod_New(method); } #else #if CYTHON_COMPILING_IN_PYSTON || CYTHON_COMPILING_IN_PYPY if (PyMethodDescr_Check(method)) #else #if PY_MAJOR_VERSION == 2 static PyTypeObject *methoddescr_type = NULL; if (methoddescr_type == NULL) { PyObject *meth = PyObject_GetAttrString((PyObject*)&PyList_Type, "append"); if (!meth) return NULL; methoddescr_type = Py_TYPE(meth); Py_DECREF(meth); } #else PyTypeObject *methoddescr_type = &PyMethodDescr_Type; #endif if (__Pyx_TypeCheck(method, methoddescr_type)) #endif { PyMethodDescrObject *descr = (PyMethodDescrObject *)method; #if PY_VERSION_HEX < 0x03020000 PyTypeObject *d_type = descr->d_type; #else PyTypeObject *d_type = descr->d_common.d_type; #endif return PyDescr_NewClassMethod(d_type, descr->d_method); } #endif else if (PyMethod_Check(method)) { return PyClassMethod_New(PyMethod_GET_FUNCTION(method)); } else { return PyClassMethod_New(method); } } /* GetNameInClass */ static PyObject *__Pyx_GetGlobalNameAfterAttributeLookup(PyObject *name) { PyObject *result; __Pyx_PyThreadState_declare __Pyx_PyThreadState_assign if (unlikely(!__Pyx_PyErr_ExceptionMatches(PyExc_AttributeError))) return NULL; __Pyx_PyErr_Clear(); __Pyx_GetModuleGlobalNameUncached(result, name); return result; } static PyObject *__Pyx__GetNameInClass(PyObject *nmspace, PyObject *name) { PyObject *result; result = __Pyx_PyObject_GetAttrStr(nmspace, name); if (!result) { result = __Pyx_GetGlobalNameAfterAttributeLookup(name); } return result; } /* CalculateMetaclass */ static PyObject *__Pyx_CalculateMetaclass(PyTypeObject *metaclass, PyObject *bases) { Py_ssize_t i, nbases = PyTuple_GET_SIZE(bases); for (i=0; i < nbases; i++) { PyTypeObject *tmptype; PyObject *tmp = PyTuple_GET_ITEM(bases, i); tmptype = Py_TYPE(tmp); #if PY_MAJOR_VERSION < 3 if (tmptype == &PyClass_Type) continue; #endif if (!metaclass) { metaclass = tmptype; continue; } if (PyType_IsSubtype(metaclass, tmptype)) continue; if (PyType_IsSubtype(tmptype, metaclass)) { metaclass = tmptype; continue; } PyErr_SetString(PyExc_TypeError, "metaclass conflict: " "the metaclass of a derived class " "must be a (non-strict) subclass " "of the metaclasses of all its bases"); return NULL; } if 
(!metaclass) { #if PY_MAJOR_VERSION < 3 metaclass = &PyClass_Type; #else metaclass = &PyType_Type; #endif } Py_INCREF((PyObject*) metaclass); return (PyObject*) metaclass; } /* FetchCommonType */ static PyTypeObject* __Pyx_FetchCommonType(PyTypeObject* type) { PyObject* fake_module; PyTypeObject* cached_type = NULL; fake_module = PyImport_AddModule((char*) "_cython_" CYTHON_ABI); if (!fake_module) return NULL; Py_INCREF(fake_module); cached_type = (PyTypeObject*) PyObject_GetAttrString(fake_module, type->tp_name); if (cached_type) { if (!PyType_Check((PyObject*)cached_type)) { PyErr_Format(PyExc_TypeError, "Shared Cython type %.200s is not a type object", type->tp_name); goto bad; } if (cached_type->tp_basicsize != type->tp_basicsize) { PyErr_Format(PyExc_TypeError, "Shared Cython type %.200s has the wrong size, try recompiling", type->tp_name); goto bad; } } else { if (!PyErr_ExceptionMatches(PyExc_AttributeError)) goto bad; PyErr_Clear(); if (PyType_Ready(type) < 0) goto bad; if (PyObject_SetAttrString(fake_module, type->tp_name, (PyObject*) type) < 0) goto bad; Py_INCREF(type); cached_type = type; } done: Py_DECREF(fake_module); return cached_type; bad: Py_XDECREF(cached_type); cached_type = NULL; goto done; } /* CythonFunctionShared */ #include <structmember.h> static PyObject * __Pyx_CyFunction_get_doc(__pyx_CyFunctionObject *op, CYTHON_UNUSED void *closure) { if (unlikely(op->func_doc == NULL)) { if (op->func.m_ml->ml_doc) { #if PY_MAJOR_VERSION >= 3 op->func_doc = PyUnicode_FromString(op->func.m_ml->ml_doc); #else op->func_doc = PyString_FromString(op->func.m_ml->ml_doc); #endif if (unlikely(op->func_doc == NULL)) return NULL; } else { Py_INCREF(Py_None); return Py_None; } } Py_INCREF(op->func_doc); return op->func_doc; } static int __Pyx_CyFunction_set_doc(__pyx_CyFunctionObject *op, PyObject *value, CYTHON_UNUSED void *context) { PyObject *tmp = op->func_doc; if (value == NULL) { value = Py_None; } Py_INCREF(value); op->func_doc = value; Py_XDECREF(tmp); return 0; } static PyObject * __Pyx_CyFunction_get_name(__pyx_CyFunctionObject *op, CYTHON_UNUSED void *context) { if (unlikely(op->func_name == NULL)) { #if PY_MAJOR_VERSION >= 3 op->func_name = PyUnicode_InternFromString(op->func.m_ml->ml_name); #else op->func_name = PyString_InternFromString(op->func.m_ml->ml_name); #endif if (unlikely(op->func_name == NULL)) return NULL; } Py_INCREF(op->func_name); return op->func_name; } static int __Pyx_CyFunction_set_name(__pyx_CyFunctionObject *op, PyObject *value, CYTHON_UNUSED void *context) { PyObject *tmp; #if PY_MAJOR_VERSION >= 3 if (unlikely(value == NULL || !PyUnicode_Check(value))) #else if (unlikely(value == NULL || !PyString_Check(value))) #endif { PyErr_SetString(PyExc_TypeError, "__name__ must be set to a string object"); return -1; } tmp = op->func_name; Py_INCREF(value); op->func_name = value; Py_XDECREF(tmp); return 0; } static PyObject * __Pyx_CyFunction_get_qualname(__pyx_CyFunctionObject *op, CYTHON_UNUSED void *context) { Py_INCREF(op->func_qualname); return op->func_qualname; } static int __Pyx_CyFunction_set_qualname(__pyx_CyFunctionObject *op, PyObject *value, CYTHON_UNUSED void *context) { PyObject *tmp; #if PY_MAJOR_VERSION >= 3 if (unlikely(value == NULL || !PyUnicode_Check(value))) #else if (unlikely(value == NULL || !PyString_Check(value))) #endif { PyErr_SetString(PyExc_TypeError, "__qualname__ must be set to a string object"); return -1; } tmp = op->func_qualname; Py_INCREF(value); op->func_qualname = value; Py_XDECREF(tmp); return 0; } static PyObject *
__Pyx_CyFunction_get_self(__pyx_CyFunctionObject *m, CYTHON_UNUSED void *closure) { PyObject *self; self = m->func_closure; if (self == NULL) self = Py_None; Py_INCREF(self); return self; } static PyObject * __Pyx_CyFunction_get_dict(__pyx_CyFunctionObject *op, CYTHON_UNUSED void *context) { if (unlikely(op->func_dict == NULL)) { op->func_dict = PyDict_New(); if (unlikely(op->func_dict == NULL)) return NULL; } Py_INCREF(op->func_dict); return op->func_dict; } static int __Pyx_CyFunction_set_dict(__pyx_CyFunctionObject *op, PyObject *value, CYTHON_UNUSED void *context) { PyObject *tmp; if (unlikely(value == NULL)) { PyErr_SetString(PyExc_TypeError, "function's dictionary may not be deleted"); return -1; } if (unlikely(!PyDict_Check(value))) { PyErr_SetString(PyExc_TypeError, "setting function's dictionary to a non-dict"); return -1; } tmp = op->func_dict; Py_INCREF(value); op->func_dict = value; Py_XDECREF(tmp); return 0; } static PyObject * __Pyx_CyFunction_get_globals(__pyx_CyFunctionObject *op, CYTHON_UNUSED void *context) { Py_INCREF(op->func_globals); return op->func_globals; } static PyObject * __Pyx_CyFunction_get_closure(CYTHON_UNUSED __pyx_CyFunctionObject *op, CYTHON_UNUSED void *context) { Py_INCREF(Py_None); return Py_None; } static PyObject * __Pyx_CyFunction_get_code(__pyx_CyFunctionObject *op, CYTHON_UNUSED void *context) { PyObject* result = (op->func_code) ? op->func_code : Py_None; Py_INCREF(result); return result; } static int __Pyx_CyFunction_init_defaults(__pyx_CyFunctionObject *op) { int result = 0; PyObject *res = op->defaults_getter((PyObject *) op); if (unlikely(!res)) return -1; #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS op->defaults_tuple = PyTuple_GET_ITEM(res, 0); Py_INCREF(op->defaults_tuple); op->defaults_kwdict = PyTuple_GET_ITEM(res, 1); Py_INCREF(op->defaults_kwdict); #else op->defaults_tuple = PySequence_ITEM(res, 0); if (unlikely(!op->defaults_tuple)) result = -1; else { op->defaults_kwdict = PySequence_ITEM(res, 1); if (unlikely(!op->defaults_kwdict)) result = -1; } #endif Py_DECREF(res); return result; } static int __Pyx_CyFunction_set_defaults(__pyx_CyFunctionObject *op, PyObject* value, CYTHON_UNUSED void *context) { PyObject* tmp; if (!value) { value = Py_None; } else if (value != Py_None && !PyTuple_Check(value)) { PyErr_SetString(PyExc_TypeError, "__defaults__ must be set to a tuple object"); return -1; } Py_INCREF(value); tmp = op->defaults_tuple; op->defaults_tuple = value; Py_XDECREF(tmp); return 0; } static PyObject * __Pyx_CyFunction_get_defaults(__pyx_CyFunctionObject *op, CYTHON_UNUSED void *context) { PyObject* result = op->defaults_tuple; if (unlikely(!result)) { if (op->defaults_getter) { if (__Pyx_CyFunction_init_defaults(op) < 0) return NULL; result = op->defaults_tuple; } else { result = Py_None; } } Py_INCREF(result); return result; } static int __Pyx_CyFunction_set_kwdefaults(__pyx_CyFunctionObject *op, PyObject* value, CYTHON_UNUSED void *context) { PyObject* tmp; if (!value) { value = Py_None; } else if (value != Py_None && !PyDict_Check(value)) { PyErr_SetString(PyExc_TypeError, "__kwdefaults__ must be set to a dict object"); return -1; } Py_INCREF(value); tmp = op->defaults_kwdict; op->defaults_kwdict = value; Py_XDECREF(tmp); return 0; } static PyObject * __Pyx_CyFunction_get_kwdefaults(__pyx_CyFunctionObject *op, CYTHON_UNUSED void *context) { PyObject* result = op->defaults_kwdict; if (unlikely(!result)) { if (op->defaults_getter) { if (__Pyx_CyFunction_init_defaults(op) < 0) return NULL; result = 
op->defaults_kwdict; } else { result = Py_None; } } Py_INCREF(result); return result; } static int __Pyx_CyFunction_set_annotations(__pyx_CyFunctionObject *op, PyObject* value, CYTHON_UNUSED void *context) { PyObject* tmp; if (!value || value == Py_None) { value = NULL; } else if (!PyDict_Check(value)) { PyErr_SetString(PyExc_TypeError, "__annotations__ must be set to a dict object"); return -1; } Py_XINCREF(value); tmp = op->func_annotations; op->func_annotations = value; Py_XDECREF(tmp); return 0; } static PyObject * __Pyx_CyFunction_get_annotations(__pyx_CyFunctionObject *op, CYTHON_UNUSED void *context) { PyObject* result = op->func_annotations; if (unlikely(!result)) { result = PyDict_New(); if (unlikely(!result)) return NULL; op->func_annotations = result; } Py_INCREF(result); return result; } static PyGetSetDef __pyx_CyFunction_getsets[] = { {(char *) "func_doc", (getter)__Pyx_CyFunction_get_doc, (setter)__Pyx_CyFunction_set_doc, 0, 0}, {(char *) "__doc__", (getter)__Pyx_CyFunction_get_doc, (setter)__Pyx_CyFunction_set_doc, 0, 0}, {(char *) "func_name", (getter)__Pyx_CyFunction_get_name, (setter)__Pyx_CyFunction_set_name, 0, 0}, {(char *) "__name__", (getter)__Pyx_CyFunction_get_name, (setter)__Pyx_CyFunction_set_name, 0, 0}, {(char *) "__qualname__", (getter)__Pyx_CyFunction_get_qualname, (setter)__Pyx_CyFunction_set_qualname, 0, 0}, {(char *) "__self__", (getter)__Pyx_CyFunction_get_self, 0, 0, 0}, {(char *) "func_dict", (getter)__Pyx_CyFunction_get_dict, (setter)__Pyx_CyFunction_set_dict, 0, 0}, {(char *) "__dict__", (getter)__Pyx_CyFunction_get_dict, (setter)__Pyx_CyFunction_set_dict, 0, 0}, {(char *) "func_globals", (getter)__Pyx_CyFunction_get_globals, 0, 0, 0}, {(char *) "__globals__", (getter)__Pyx_CyFunction_get_globals, 0, 0, 0}, {(char *) "func_closure", (getter)__Pyx_CyFunction_get_closure, 0, 0, 0}, {(char *) "__closure__", (getter)__Pyx_CyFunction_get_closure, 0, 0, 0}, {(char *) "func_code", (getter)__Pyx_CyFunction_get_code, 0, 0, 0}, {(char *) "__code__", (getter)__Pyx_CyFunction_get_code, 0, 0, 0}, {(char *) "func_defaults", (getter)__Pyx_CyFunction_get_defaults, (setter)__Pyx_CyFunction_set_defaults, 0, 0}, {(char *) "__defaults__", (getter)__Pyx_CyFunction_get_defaults, (setter)__Pyx_CyFunction_set_defaults, 0, 0}, {(char *) "__kwdefaults__", (getter)__Pyx_CyFunction_get_kwdefaults, (setter)__Pyx_CyFunction_set_kwdefaults, 0, 0}, {(char *) "__annotations__", (getter)__Pyx_CyFunction_get_annotations, (setter)__Pyx_CyFunction_set_annotations, 0, 0}, {0, 0, 0, 0, 0} }; static PyMemberDef __pyx_CyFunction_members[] = { {(char *) "__module__", T_OBJECT, offsetof(PyCFunctionObject, m_module), PY_WRITE_RESTRICTED, 0}, {0, 0, 0, 0, 0} }; static PyObject * __Pyx_CyFunction_reduce(__pyx_CyFunctionObject *m, CYTHON_UNUSED PyObject *args) { #if PY_MAJOR_VERSION >= 3 return PyUnicode_FromString(m->func.m_ml->ml_name); #else return PyString_FromString(m->func.m_ml->ml_name); #endif } static PyMethodDef __pyx_CyFunction_methods[] = { {"__reduce__", (PyCFunction)__Pyx_CyFunction_reduce, METH_VARARGS, 0}, {0, 0, 0, 0} }; #if PY_VERSION_HEX < 0x030500A0 #define __Pyx_CyFunction_weakreflist(cyfunc) ((cyfunc)->func_weakreflist) #else #define __Pyx_CyFunction_weakreflist(cyfunc) ((cyfunc)->func.m_weakreflist) #endif static PyObject *__Pyx_CyFunction_Init(__pyx_CyFunctionObject *op, PyMethodDef *ml, int flags, PyObject* qualname, PyObject *closure, PyObject *module, PyObject* globals, PyObject* code) { if (unlikely(op == NULL)) return NULL; op->flags = flags; 
__Pyx_CyFunction_weakreflist(op) = NULL; op->func.m_ml = ml; op->func.m_self = (PyObject *) op; Py_XINCREF(closure); op->func_closure = closure; Py_XINCREF(module); op->func.m_module = module; op->func_dict = NULL; op->func_name = NULL; Py_INCREF(qualname); op->func_qualname = qualname; op->func_doc = NULL; op->func_classobj = NULL; op->func_globals = globals; Py_INCREF(op->func_globals); Py_XINCREF(code); op->func_code = code; op->defaults_pyobjects = 0; op->defaults_size = 0; op->defaults = NULL; op->defaults_tuple = NULL; op->defaults_kwdict = NULL; op->defaults_getter = NULL; op->func_annotations = NULL; return (PyObject *) op; } static int __Pyx_CyFunction_clear(__pyx_CyFunctionObject *m) { Py_CLEAR(m->func_closure); Py_CLEAR(m->func.m_module); Py_CLEAR(m->func_dict); Py_CLEAR(m->func_name); Py_CLEAR(m->func_qualname); Py_CLEAR(m->func_doc); Py_CLEAR(m->func_globals); Py_CLEAR(m->func_code); Py_CLEAR(m->func_classobj); Py_CLEAR(m->defaults_tuple); Py_CLEAR(m->defaults_kwdict); Py_CLEAR(m->func_annotations); if (m->defaults) { PyObject **pydefaults = __Pyx_CyFunction_Defaults(PyObject *, m); int i; for (i = 0; i < m->defaults_pyobjects; i++) Py_XDECREF(pydefaults[i]); PyObject_Free(m->defaults); m->defaults = NULL; } return 0; } static void __Pyx__CyFunction_dealloc(__pyx_CyFunctionObject *m) { if (__Pyx_CyFunction_weakreflist(m) != NULL) PyObject_ClearWeakRefs((PyObject *) m); __Pyx_CyFunction_clear(m); PyObject_GC_Del(m); } static void __Pyx_CyFunction_dealloc(__pyx_CyFunctionObject *m) { PyObject_GC_UnTrack(m); __Pyx__CyFunction_dealloc(m); } static int __Pyx_CyFunction_traverse(__pyx_CyFunctionObject *m, visitproc visit, void *arg) { Py_VISIT(m->func_closure); Py_VISIT(m->func.m_module); Py_VISIT(m->func_dict); Py_VISIT(m->func_name); Py_VISIT(m->func_qualname); Py_VISIT(m->func_doc); Py_VISIT(m->func_globals); Py_VISIT(m->func_code); Py_VISIT(m->func_classobj); Py_VISIT(m->defaults_tuple); Py_VISIT(m->defaults_kwdict); if (m->defaults) { PyObject **pydefaults = __Pyx_CyFunction_Defaults(PyObject *, m); int i; for (i = 0; i < m->defaults_pyobjects; i++) Py_VISIT(pydefaults[i]); } return 0; } static PyObject *__Pyx_CyFunction_descr_get(PyObject *func, PyObject *obj, PyObject *type) { #if PY_MAJOR_VERSION < 3 __pyx_CyFunctionObject *m = (__pyx_CyFunctionObject *) func; if (m->flags & __Pyx_CYFUNCTION_STATICMETHOD) { Py_INCREF(func); return func; } if (m->flags & __Pyx_CYFUNCTION_CLASSMETHOD) { if (type == NULL) type = (PyObject *)(Py_TYPE(obj)); return __Pyx_PyMethod_New(func, type, (PyObject *)(Py_TYPE(type))); } if (obj == Py_None) obj = NULL; #endif return __Pyx_PyMethod_New(func, obj, type); } static PyObject* __Pyx_CyFunction_repr(__pyx_CyFunctionObject *op) { #if PY_MAJOR_VERSION >= 3 return PyUnicode_FromFormat("", op->func_qualname, (void *)op); #else return PyString_FromFormat("", PyString_AsString(op->func_qualname), (void *)op); #endif } static PyObject * __Pyx_CyFunction_CallMethod(PyObject *func, PyObject *self, PyObject *arg, PyObject *kw) { PyCFunctionObject* f = (PyCFunctionObject*)func; PyCFunction meth = f->m_ml->ml_meth; Py_ssize_t size; switch (f->m_ml->ml_flags & (METH_VARARGS | METH_KEYWORDS | METH_NOARGS | METH_O)) { case METH_VARARGS: if (likely(kw == NULL || PyDict_Size(kw) == 0)) return (*meth)(self, arg); break; case METH_VARARGS | METH_KEYWORDS: return (*(PyCFunctionWithKeywords)(void*)meth)(self, arg, kw); case METH_NOARGS: if (likely(kw == NULL || PyDict_Size(kw) == 0)) { size = PyTuple_GET_SIZE(arg); if (likely(size == 0)) return (*meth)(self, NULL); 
PyErr_Format(PyExc_TypeError, "%.200s() takes no arguments (%" CYTHON_FORMAT_SSIZE_T "d given)", f->m_ml->ml_name, size); return NULL; } break; case METH_O: if (likely(kw == NULL || PyDict_Size(kw) == 0)) { size = PyTuple_GET_SIZE(arg); if (likely(size == 1)) { PyObject *result, *arg0; #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS arg0 = PyTuple_GET_ITEM(arg, 0); #else arg0 = PySequence_ITEM(arg, 0); if (unlikely(!arg0)) return NULL; #endif result = (*meth)(self, arg0); #if !(CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS) Py_DECREF(arg0); #endif return result; } PyErr_Format(PyExc_TypeError, "%.200s() takes exactly one argument (%" CYTHON_FORMAT_SSIZE_T "d given)", f->m_ml->ml_name, size); return NULL; } break; default: PyErr_SetString(PyExc_SystemError, "Bad call flags in " "__Pyx_CyFunction_Call. METH_OLDARGS is no " "longer supported!"); return NULL; } PyErr_Format(PyExc_TypeError, "%.200s() takes no keyword arguments", f->m_ml->ml_name); return NULL; } static CYTHON_INLINE PyObject *__Pyx_CyFunction_Call(PyObject *func, PyObject *arg, PyObject *kw) { return __Pyx_CyFunction_CallMethod(func, ((PyCFunctionObject*)func)->m_self, arg, kw); } static PyObject *__Pyx_CyFunction_CallAsMethod(PyObject *func, PyObject *args, PyObject *kw) { PyObject *result; __pyx_CyFunctionObject *cyfunc = (__pyx_CyFunctionObject *) func; if ((cyfunc->flags & __Pyx_CYFUNCTION_CCLASS) && !(cyfunc->flags & __Pyx_CYFUNCTION_STATICMETHOD)) { Py_ssize_t argc; PyObject *new_args; PyObject *self; argc = PyTuple_GET_SIZE(args); new_args = PyTuple_GetSlice(args, 1, argc); if (unlikely(!new_args)) return NULL; self = PyTuple_GetItem(args, 0); if (unlikely(!self)) { Py_DECREF(new_args); return NULL; } result = __Pyx_CyFunction_CallMethod(func, self, new_args, kw); Py_DECREF(new_args); } else { result = __Pyx_CyFunction_Call(func, args, kw); } return result; } static PyTypeObject __pyx_CyFunctionType_type = { PyVarObject_HEAD_INIT(0, 0) "cython_function_or_method", sizeof(__pyx_CyFunctionObject), 0, (destructor) __Pyx_CyFunction_dealloc, 0, 0, 0, #if PY_MAJOR_VERSION < 3 0, #else 0, #endif (reprfunc) __Pyx_CyFunction_repr, 0, 0, 0, 0, __Pyx_CyFunction_CallAsMethod, 0, 0, 0, 0, Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_GC, 0, (traverseproc) __Pyx_CyFunction_traverse, (inquiry) __Pyx_CyFunction_clear, 0, #if PY_VERSION_HEX < 0x030500A0 offsetof(__pyx_CyFunctionObject, func_weakreflist), #else offsetof(PyCFunctionObject, m_weakreflist), #endif 0, 0, __pyx_CyFunction_methods, __pyx_CyFunction_members, __pyx_CyFunction_getsets, 0, 0, __Pyx_CyFunction_descr_get, 0, offsetof(__pyx_CyFunctionObject, func_dict), 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, #if PY_VERSION_HEX >= 0x030400a1 0, #endif #if PY_VERSION_HEX >= 0x030800b1 0, #endif #if PY_VERSION_HEX >= 0x030800b4 && PY_VERSION_HEX < 0x03090000 0, #endif }; static int __pyx_CyFunction_init(void) { __pyx_CyFunctionType = __Pyx_FetchCommonType(&__pyx_CyFunctionType_type); if (unlikely(__pyx_CyFunctionType == NULL)) { return -1; } return 0; } static CYTHON_INLINE void *__Pyx_CyFunction_InitDefaults(PyObject *func, size_t size, int pyobjects) { __pyx_CyFunctionObject *m = (__pyx_CyFunctionObject *) func; m->defaults = PyObject_Malloc(size); if (unlikely(!m->defaults)) return PyErr_NoMemory(); memset(m->defaults, 0, size); m->defaults_pyobjects = pyobjects; m->defaults_size = size; return m->defaults; } static CYTHON_INLINE void __Pyx_CyFunction_SetDefaultsTuple(PyObject *func, PyObject *tuple) { __pyx_CyFunctionObject *m = (__pyx_CyFunctionObject *) func; 
m->defaults_tuple = tuple; Py_INCREF(tuple); } static CYTHON_INLINE void __Pyx_CyFunction_SetDefaultsKwDict(PyObject *func, PyObject *dict) { __pyx_CyFunctionObject *m = (__pyx_CyFunctionObject *) func; m->defaults_kwdict = dict; Py_INCREF(dict); } static CYTHON_INLINE void __Pyx_CyFunction_SetAnnotationsDict(PyObject *func, PyObject *dict) { __pyx_CyFunctionObject *m = (__pyx_CyFunctionObject *) func; m->func_annotations = dict; Py_INCREF(dict); } /* CythonFunction */ static PyObject *__Pyx_CyFunction_New(PyMethodDef *ml, int flags, PyObject* qualname, PyObject *closure, PyObject *module, PyObject* globals, PyObject* code) { PyObject *op = __Pyx_CyFunction_Init( PyObject_GC_New(__pyx_CyFunctionObject, __pyx_CyFunctionType), ml, flags, qualname, closure, module, globals, code ); if (likely(op)) { PyObject_GC_Track(op); } return op; } /* Py3ClassCreate */ static PyObject *__Pyx_Py3MetaclassPrepare(PyObject *metaclass, PyObject *bases, PyObject *name, PyObject *qualname, PyObject *mkw, PyObject *modname, PyObject *doc) { PyObject *ns; if (metaclass) { PyObject *prep = __Pyx_PyObject_GetAttrStr(metaclass, __pyx_n_s_prepare); if (prep) { PyObject *pargs = PyTuple_Pack(2, name, bases); if (unlikely(!pargs)) { Py_DECREF(prep); return NULL; } ns = PyObject_Call(prep, pargs, mkw); Py_DECREF(prep); Py_DECREF(pargs); } else { if (unlikely(!PyErr_ExceptionMatches(PyExc_AttributeError))) return NULL; PyErr_Clear(); ns = PyDict_New(); } } else { ns = PyDict_New(); } if (unlikely(!ns)) return NULL; if (unlikely(PyObject_SetItem(ns, __pyx_n_s_module, modname) < 0)) goto bad; if (unlikely(PyObject_SetItem(ns, __pyx_n_s_qualname, qualname) < 0)) goto bad; if (unlikely(doc && PyObject_SetItem(ns, __pyx_n_s_doc, doc) < 0)) goto bad; return ns; bad: Py_DECREF(ns); return NULL; } static PyObject *__Pyx_Py3ClassCreate(PyObject *metaclass, PyObject *name, PyObject *bases, PyObject *dict, PyObject *mkw, int calculate_metaclass, int allow_py2_metaclass) { PyObject *result, *margs; PyObject *owned_metaclass = NULL; if (allow_py2_metaclass) { owned_metaclass = PyObject_GetItem(dict, __pyx_n_s_metaclass); if (owned_metaclass) { metaclass = owned_metaclass; } else if (likely(PyErr_ExceptionMatches(PyExc_KeyError))) { PyErr_Clear(); } else { return NULL; } } if (calculate_metaclass && (!metaclass || PyType_Check(metaclass))) { metaclass = __Pyx_CalculateMetaclass((PyTypeObject*) metaclass, bases); Py_XDECREF(owned_metaclass); if (unlikely(!metaclass)) return NULL; owned_metaclass = metaclass; } margs = PyTuple_Pack(3, name, bases, dict); if (unlikely(!margs)) { result = NULL; } else { result = PyObject_Call(metaclass, margs, mkw); Py_DECREF(margs); } Py_XDECREF(owned_metaclass); return result; } /* CyFunctionClassCell */ static int __Pyx_CyFunction_InitClassCell(PyObject *cyfunctions, PyObject *classobj) { Py_ssize_t i, count = PyList_GET_SIZE(cyfunctions); for (i = 0; i < count; i++) { __pyx_CyFunctionObject *m = (__pyx_CyFunctionObject *) #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS PyList_GET_ITEM(cyfunctions, i); #else PySequence_ITEM(cyfunctions, i); if (unlikely(!m)) return -1; #endif Py_INCREF(classobj); m->func_classobj = classobj; #if !(CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS) Py_DECREF((PyObject*)m); #endif } return 0; } /* CLineInTraceback */ #ifndef CYTHON_CLINE_IN_TRACEBACK static int __Pyx_CLineForTraceback(CYTHON_NCP_UNUSED PyThreadState *tstate, int c_line) { PyObject *use_cline; PyObject *ptype, *pvalue, *ptraceback; #if CYTHON_COMPILING_IN_CPYTHON PyObject 
**cython_runtime_dict; #endif if (unlikely(!__pyx_cython_runtime)) { return c_line; } __Pyx_ErrFetchInState(tstate, &ptype, &pvalue, &ptraceback); #if CYTHON_COMPILING_IN_CPYTHON cython_runtime_dict = _PyObject_GetDictPtr(__pyx_cython_runtime); if (likely(cython_runtime_dict)) { __PYX_PY_DICT_LOOKUP_IF_MODIFIED( use_cline, *cython_runtime_dict, __Pyx_PyDict_GetItemStr(*cython_runtime_dict, __pyx_n_s_cline_in_traceback)) } else #endif { PyObject *use_cline_obj = __Pyx_PyObject_GetAttrStr(__pyx_cython_runtime, __pyx_n_s_cline_in_traceback); if (use_cline_obj) { use_cline = PyObject_Not(use_cline_obj) ? Py_False : Py_True; Py_DECREF(use_cline_obj); } else { PyErr_Clear(); use_cline = NULL; } } if (!use_cline) { c_line = 0; PyObject_SetAttr(__pyx_cython_runtime, __pyx_n_s_cline_in_traceback, Py_False); } else if (use_cline == Py_False || (use_cline != Py_True && PyObject_Not(use_cline) != 0)) { c_line = 0; } __Pyx_ErrRestoreInState(tstate, ptype, pvalue, ptraceback); return c_line; } #endif /* CodeObjectCache */ static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line) { int start = 0, mid = 0, end = count - 1; if (end >= 0 && code_line > entries[end].code_line) { return count; } while (start < end) { mid = start + (end - start) / 2; if (code_line < entries[mid].code_line) { end = mid; } else if (code_line > entries[mid].code_line) { start = mid + 1; } else { return mid; } } if (code_line <= entries[mid].code_line) { return mid; } else { return mid + 1; } } static PyCodeObject *__pyx_find_code_object(int code_line) { PyCodeObject* code_object; int pos; if (unlikely(!code_line) || unlikely(!__pyx_code_cache.entries)) { return NULL; } pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line); if (unlikely(pos >= __pyx_code_cache.count) || unlikely(__pyx_code_cache.entries[pos].code_line != code_line)) { return NULL; } code_object = __pyx_code_cache.entries[pos].code_object; Py_INCREF(code_object); return code_object; } static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object) { int pos, i; __Pyx_CodeObjectCacheEntry* entries = __pyx_code_cache.entries; if (unlikely(!code_line)) { return; } if (unlikely(!entries)) { entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Malloc(64*sizeof(__Pyx_CodeObjectCacheEntry)); if (likely(entries)) { __pyx_code_cache.entries = entries; __pyx_code_cache.max_count = 64; __pyx_code_cache.count = 1; entries[0].code_line = code_line; entries[0].code_object = code_object; Py_INCREF(code_object); } return; } pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line); if ((pos < __pyx_code_cache.count) && unlikely(__pyx_code_cache.entries[pos].code_line == code_line)) { PyCodeObject* tmp = entries[pos].code_object; entries[pos].code_object = code_object; Py_DECREF(tmp); return; } if (__pyx_code_cache.count == __pyx_code_cache.max_count) { int new_max = __pyx_code_cache.max_count + 64; entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Realloc( __pyx_code_cache.entries, ((size_t)new_max) * sizeof(__Pyx_CodeObjectCacheEntry)); if (unlikely(!entries)) { return; } __pyx_code_cache.entries = entries; __pyx_code_cache.max_count = new_max; } for (i=__pyx_code_cache.count; i>pos; i--) { entries[i] = entries[i-1]; } entries[pos].code_line = code_line; entries[pos].code_object = code_object; __pyx_code_cache.count++; Py_INCREF(code_object); } /* AddTraceback */ #include "compile.h" #include "frameobject.h" #include "traceback.h" static PyCodeObject* 
__Pyx_CreateCodeObjectForTraceback( const char *funcname, int c_line, int py_line, const char *filename) { PyCodeObject *py_code = 0; PyObject *py_srcfile = 0; PyObject *py_funcname = 0; #if PY_MAJOR_VERSION < 3 py_srcfile = PyString_FromString(filename); #else py_srcfile = PyUnicode_FromString(filename); #endif if (!py_srcfile) goto bad; if (c_line) { #if PY_MAJOR_VERSION < 3 py_funcname = PyString_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line); #else py_funcname = PyUnicode_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line); #endif } else { #if PY_MAJOR_VERSION < 3 py_funcname = PyString_FromString(funcname); #else py_funcname = PyUnicode_FromString(funcname); #endif } if (!py_funcname) goto bad; py_code = __Pyx_PyCode_New( 0, 0, 0, 0, 0, __pyx_empty_bytes, /*PyObject *code,*/ __pyx_empty_tuple, /*PyObject *consts,*/ __pyx_empty_tuple, /*PyObject *names,*/ __pyx_empty_tuple, /*PyObject *varnames,*/ __pyx_empty_tuple, /*PyObject *freevars,*/ __pyx_empty_tuple, /*PyObject *cellvars,*/ py_srcfile, /*PyObject *filename,*/ py_funcname, /*PyObject *name,*/ py_line, __pyx_empty_bytes /*PyObject *lnotab*/ ); Py_DECREF(py_srcfile); Py_DECREF(py_funcname); return py_code; bad: Py_XDECREF(py_srcfile); Py_XDECREF(py_funcname); return NULL; } static void __Pyx_AddTraceback(const char *funcname, int c_line, int py_line, const char *filename) { PyCodeObject *py_code = 0; PyFrameObject *py_frame = 0; PyThreadState *tstate = __Pyx_PyThreadState_Current; if (c_line) { c_line = __Pyx_CLineForTraceback(tstate, c_line); } py_code = __pyx_find_code_object(c_line ? -c_line : py_line); if (!py_code) { py_code = __Pyx_CreateCodeObjectForTraceback( funcname, c_line, py_line, filename); if (!py_code) goto bad; __pyx_insert_code_object(c_line ? -c_line : py_line, py_code); } py_frame = PyFrame_New( tstate, /*PyThreadState *tstate,*/ py_code, /*PyCodeObject *code,*/ __pyx_d, /*PyObject *globals,*/ 0 /*PyObject *locals*/ ); if (!py_frame) goto bad; __Pyx_PyFrame_SetLineNumber(py_frame, py_line); PyTraceBack_Here(py_frame); bad: Py_XDECREF(py_code); Py_XDECREF(py_frame); } /* CIntFromPyVerify */ #define __PYX_VERIFY_RETURN_INT(target_type, func_type, func_value)\ __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, 0) #define __PYX_VERIFY_RETURN_INT_EXC(target_type, func_type, func_value)\ __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, 1) #define __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, exc)\ {\ func_type value = func_value;\ if (sizeof(target_type) < sizeof(func_type)) {\ if (unlikely(value != (func_type) (target_type) value)) {\ func_type zero = 0;\ if (exc && unlikely(value == (func_type)-1 && PyErr_Occurred()))\ return (target_type) -1;\ if (is_unsigned && unlikely(value < zero))\ goto raise_neg_overflow;\ else\ goto raise_overflow;\ }\ }\ return (target_type) value;\ } /* CIntToPy */ static CYTHON_INLINE PyObject* __Pyx_PyInt_From_int(int value) { const int neg_one = (int) ((int) 0 - (int) 1), const_zero = (int) 0; const int is_unsigned = neg_one > const_zero; if (is_unsigned) { if (sizeof(int) < sizeof(long)) { return PyInt_FromLong((long) value); } else if (sizeof(int) <= sizeof(unsigned long)) { return PyLong_FromUnsignedLong((unsigned long) value); #ifdef HAVE_LONG_LONG } else if (sizeof(int) <= sizeof(unsigned PY_LONG_LONG)) { return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value); #endif } } else { if (sizeof(int) <= sizeof(long)) { return PyInt_FromLong((long) value); #ifdef HAVE_LONG_LONG } else if (sizeof(int) <= 
sizeof(PY_LONG_LONG)) { return PyLong_FromLongLong((PY_LONG_LONG) value); #endif } } { int one = 1; int little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&value; return _PyLong_FromByteArray(bytes, sizeof(int), little, !is_unsigned); } } /* CIntToPy */ static CYTHON_INLINE PyObject* __Pyx_PyInt_From_long(long value) { const long neg_one = (long) ((long) 0 - (long) 1), const_zero = (long) 0; const int is_unsigned = neg_one > const_zero; if (is_unsigned) { if (sizeof(long) < sizeof(long)) { return PyInt_FromLong((long) value); } else if (sizeof(long) <= sizeof(unsigned long)) { return PyLong_FromUnsignedLong((unsigned long) value); #ifdef HAVE_LONG_LONG } else if (sizeof(long) <= sizeof(unsigned PY_LONG_LONG)) { return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value); #endif } } else { if (sizeof(long) <= sizeof(long)) { return PyInt_FromLong((long) value); #ifdef HAVE_LONG_LONG } else if (sizeof(long) <= sizeof(PY_LONG_LONG)) { return PyLong_FromLongLong((PY_LONG_LONG) value); #endif } } { int one = 1; int little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&value; return _PyLong_FromByteArray(bytes, sizeof(long), little, !is_unsigned); } } /* CIntToPy */ static CYTHON_INLINE PyObject* __Pyx_PyInt_From_unsigned_PY_LONG_LONG(unsigned PY_LONG_LONG value) { const unsigned PY_LONG_LONG neg_one = (unsigned PY_LONG_LONG) ((unsigned PY_LONG_LONG) 0 - (unsigned PY_LONG_LONG) 1), const_zero = (unsigned PY_LONG_LONG) 0; const int is_unsigned = neg_one > const_zero; if (is_unsigned) { if (sizeof(unsigned PY_LONG_LONG) < sizeof(long)) { return PyInt_FromLong((long) value); } else if (sizeof(unsigned PY_LONG_LONG) <= sizeof(unsigned long)) { return PyLong_FromUnsignedLong((unsigned long) value); #ifdef HAVE_LONG_LONG } else if (sizeof(unsigned PY_LONG_LONG) <= sizeof(unsigned PY_LONG_LONG)) { return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value); #endif } } else { if (sizeof(unsigned PY_LONG_LONG) <= sizeof(long)) { return PyInt_FromLong((long) value); #ifdef HAVE_LONG_LONG } else if (sizeof(unsigned PY_LONG_LONG) <= sizeof(PY_LONG_LONG)) { return PyLong_FromLongLong((PY_LONG_LONG) value); #endif } } { int one = 1; int little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&value; return _PyLong_FromByteArray(bytes, sizeof(unsigned PY_LONG_LONG), little, !is_unsigned); } } /* CIntFromPy */ static CYTHON_INLINE long __Pyx_PyInt_As_long(PyObject *x) { const long neg_one = (long) ((long) 0 - (long) 1), const_zero = (long) 0; const int is_unsigned = neg_one > const_zero; #if PY_MAJOR_VERSION < 3 if (likely(PyInt_Check(x))) { if (sizeof(long) < sizeof(long)) { __PYX_VERIFY_RETURN_INT(long, long, PyInt_AS_LONG(x)) } else { long val = PyInt_AS_LONG(x); if (is_unsigned && unlikely(val < 0)) { goto raise_neg_overflow; } return (long) val; } } else #endif if (likely(PyLong_Check(x))) { if (is_unsigned) { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) { case 0: return (long) 0; case 1: __PYX_VERIFY_RETURN_INT(long, digit, digits[0]) case 2: if (8 * sizeof(long) > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) >= 2 * PyLong_SHIFT) { return (long) (((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0])); } } break; case 3: if (8 * sizeof(long) > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned 
long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) >= 3 * PyLong_SHIFT) { return (long) (((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])); } } break; case 4: if (8 * sizeof(long) > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) >= 4 * PyLong_SHIFT) { return (long) (((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])); } } break; } #endif #if CYTHON_COMPILING_IN_CPYTHON if (unlikely(Py_SIZE(x) < 0)) { goto raise_neg_overflow; } #else { int result = PyObject_RichCompareBool(x, Py_False, Py_LT); if (unlikely(result < 0)) return (long) -1; if (unlikely(result == 1)) goto raise_neg_overflow; } #endif if (sizeof(long) <= sizeof(unsigned long)) { __PYX_VERIFY_RETURN_INT_EXC(long, unsigned long, PyLong_AsUnsignedLong(x)) #ifdef HAVE_LONG_LONG } else if (sizeof(long) <= sizeof(unsigned PY_LONG_LONG)) { __PYX_VERIFY_RETURN_INT_EXC(long, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x)) #endif } } else { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) { case 0: return (long) 0; case -1: __PYX_VERIFY_RETURN_INT(long, sdigit, (sdigit) (-(sdigit)digits[0])) case 1: __PYX_VERIFY_RETURN_INT(long, digit, +digits[0]) case -2: if (8 * sizeof(long) - 1 > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { return (long) (((long)-1)*(((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); } } break; case 2: if (8 * sizeof(long) > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { return (long) ((((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); } } break; case -3: if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { return (long) (((long)-1)*(((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); } } break; case 3: if (8 * sizeof(long) > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { return (long) ((((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); } } break; case -4: if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, long, 
-(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) { return (long) (((long)-1)*(((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); } } break; case 4: if (8 * sizeof(long) > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) { return (long) ((((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); } } break; } #endif if (sizeof(long) <= sizeof(long)) { __PYX_VERIFY_RETURN_INT_EXC(long, long, PyLong_AsLong(x)) #ifdef HAVE_LONG_LONG } else if (sizeof(long) <= sizeof(PY_LONG_LONG)) { __PYX_VERIFY_RETURN_INT_EXC(long, PY_LONG_LONG, PyLong_AsLongLong(x)) #endif } } { #if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray) PyErr_SetString(PyExc_RuntimeError, "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers"); #else long val; PyObject *v = __Pyx_PyNumber_IntOrLong(x); #if PY_MAJOR_VERSION < 3 if (likely(v) && !PyLong_Check(v)) { PyObject *tmp = v; v = PyNumber_Long(tmp); Py_DECREF(tmp); } #endif if (likely(v)) { int one = 1; int is_little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&val; int ret = _PyLong_AsByteArray((PyLongObject *)v, bytes, sizeof(val), is_little, !is_unsigned); Py_DECREF(v); if (likely(!ret)) return val; } #endif return (long) -1; } } else { long val; PyObject *tmp = __Pyx_PyNumber_IntOrLong(x); if (!tmp) return (long) -1; val = __Pyx_PyInt_As_long(tmp); Py_DECREF(tmp); return val; } raise_overflow: PyErr_SetString(PyExc_OverflowError, "value too large to convert to long"); return (long) -1; raise_neg_overflow: PyErr_SetString(PyExc_OverflowError, "can't convert negative value to long"); return (long) -1; } /* CIntFromPy */ static CYTHON_INLINE int __Pyx_PyInt_As_int(PyObject *x) { const int neg_one = (int) ((int) 0 - (int) 1), const_zero = (int) 0; const int is_unsigned = neg_one > const_zero; #if PY_MAJOR_VERSION < 3 if (likely(PyInt_Check(x))) { if (sizeof(int) < sizeof(long)) { __PYX_VERIFY_RETURN_INT(int, long, PyInt_AS_LONG(x)) } else { long val = PyInt_AS_LONG(x); if (is_unsigned && unlikely(val < 0)) { goto raise_neg_overflow; } return (int) val; } } else #endif if (likely(PyLong_Check(x))) { if (is_unsigned) { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) { case 0: return (int) 0; case 1: __PYX_VERIFY_RETURN_INT(int, digit, digits[0]) case 2: if (8 * sizeof(int) > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) >= 2 * PyLong_SHIFT) { return (int) (((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0])); } } break; case 3: if (8 * sizeof(int) > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned 
long)digits[0]))) } else if (8 * sizeof(int) >= 3 * PyLong_SHIFT) { return (int) (((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])); } } break; case 4: if (8 * sizeof(int) > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) >= 4 * PyLong_SHIFT) { return (int) (((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])); } } break; } #endif #if CYTHON_COMPILING_IN_CPYTHON if (unlikely(Py_SIZE(x) < 0)) { goto raise_neg_overflow; } #else { int result = PyObject_RichCompareBool(x, Py_False, Py_LT); if (unlikely(result < 0)) return (int) -1; if (unlikely(result == 1)) goto raise_neg_overflow; } #endif if (sizeof(int) <= sizeof(unsigned long)) { __PYX_VERIFY_RETURN_INT_EXC(int, unsigned long, PyLong_AsUnsignedLong(x)) #ifdef HAVE_LONG_LONG } else if (sizeof(int) <= sizeof(unsigned PY_LONG_LONG)) { __PYX_VERIFY_RETURN_INT_EXC(int, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x)) #endif } } else { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) { case 0: return (int) 0; case -1: __PYX_VERIFY_RETURN_INT(int, sdigit, (sdigit) (-(sdigit)digits[0])) case 1: __PYX_VERIFY_RETURN_INT(int, digit, +digits[0]) case -2: if (8 * sizeof(int) - 1 > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) { return (int) (((int)-1)*(((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); } } break; case 2: if (8 * sizeof(int) > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) { return (int) ((((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); } } break; case -3: if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) { return (int) (((int)-1)*(((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); } } break; case 3: if (8 * sizeof(int) > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) { return (int) ((((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); } } break; case -4: if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) - 1 > 4 * PyLong_SHIFT) { 
return (int) (((int)-1)*(((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); } } break; case 4: if (8 * sizeof(int) > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) - 1 > 4 * PyLong_SHIFT) { return (int) ((((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); } } break; } #endif if (sizeof(int) <= sizeof(long)) { __PYX_VERIFY_RETURN_INT_EXC(int, long, PyLong_AsLong(x)) #ifdef HAVE_LONG_LONG } else if (sizeof(int) <= sizeof(PY_LONG_LONG)) { __PYX_VERIFY_RETURN_INT_EXC(int, PY_LONG_LONG, PyLong_AsLongLong(x)) #endif } } { #if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray) PyErr_SetString(PyExc_RuntimeError, "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers"); #else int val; PyObject *v = __Pyx_PyNumber_IntOrLong(x); #if PY_MAJOR_VERSION < 3 if (likely(v) && !PyLong_Check(v)) { PyObject *tmp = v; v = PyNumber_Long(tmp); Py_DECREF(tmp); } #endif if (likely(v)) { int one = 1; int is_little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&val; int ret = _PyLong_AsByteArray((PyLongObject *)v, bytes, sizeof(val), is_little, !is_unsigned); Py_DECREF(v); if (likely(!ret)) return val; } #endif return (int) -1; } } else { int val; PyObject *tmp = __Pyx_PyNumber_IntOrLong(x); if (!tmp) return (int) -1; val = __Pyx_PyInt_As_int(tmp); Py_DECREF(tmp); return val; } raise_overflow: PyErr_SetString(PyExc_OverflowError, "value too large to convert to int"); return (int) -1; raise_neg_overflow: PyErr_SetString(PyExc_OverflowError, "can't convert negative value to int"); return (int) -1; } /* CheckBinaryVersion */ static int __Pyx_check_binary_version(void) { char ctversion[4], rtversion[4]; PyOS_snprintf(ctversion, 4, "%d.%d", PY_MAJOR_VERSION, PY_MINOR_VERSION); PyOS_snprintf(rtversion, 4, "%s", Py_GetVersion()); if (ctversion[0] != rtversion[0] || ctversion[2] != rtversion[2]) { char message[200]; PyOS_snprintf(message, sizeof(message), "compiletime version %s of module '%.100s' " "does not match runtime version %s", ctversion, __Pyx_MODULE_NAME, rtversion); return PyErr_WarnEx(NULL, message, 1); } return 0; } /* InitStrings */ static int __Pyx_InitStrings(__Pyx_StringTabEntry *t) { while (t->p) { #if PY_MAJOR_VERSION < 3 if (t->is_unicode) { *t->p = PyUnicode_DecodeUTF8(t->s, t->n - 1, NULL); } else if (t->intern) { *t->p = PyString_InternFromString(t->s); } else { *t->p = PyString_FromStringAndSize(t->s, t->n - 1); } #else if (t->is_unicode | t->is_str) { if (t->intern) { *t->p = PyUnicode_InternFromString(t->s); } else if (t->encoding) { *t->p = PyUnicode_Decode(t->s, t->n - 1, t->encoding, NULL); } else { *t->p = PyUnicode_FromStringAndSize(t->s, t->n - 1); } } else { *t->p = PyBytes_FromStringAndSize(t->s, t->n - 1); } #endif if (!*t->p) return -1; if (PyObject_Hash(*t->p) == -1) return -1; ++t; } return 0; } static CYTHON_INLINE PyObject* __Pyx_PyUnicode_FromString(const char* c_str) { return __Pyx_PyUnicode_FromStringAndSize(c_str, (Py_ssize_t)strlen(c_str)); } static CYTHON_INLINE const char* __Pyx_PyObject_AsString(PyObject* o) { Py_ssize_t ignore; return __Pyx_PyObject_AsStringAndSize(o, &ignore); } #if 
__PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT #if !CYTHON_PEP393_ENABLED static const char* __Pyx_PyUnicode_AsStringAndSize(PyObject* o, Py_ssize_t *length) { char* defenc_c; PyObject* defenc = _PyUnicode_AsDefaultEncodedString(o, NULL); if (!defenc) return NULL; defenc_c = PyBytes_AS_STRING(defenc); #if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII { char* end = defenc_c + PyBytes_GET_SIZE(defenc); char* c; for (c = defenc_c; c < end; c++) { if ((unsigned char) (*c) >= 128) { PyUnicode_AsASCIIString(o); return NULL; } } } #endif *length = PyBytes_GET_SIZE(defenc); return defenc_c; } #else static CYTHON_INLINE const char* __Pyx_PyUnicode_AsStringAndSize(PyObject* o, Py_ssize_t *length) { if (unlikely(__Pyx_PyUnicode_READY(o) == -1)) return NULL; #if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII if (likely(PyUnicode_IS_ASCII(o))) { *length = PyUnicode_GET_LENGTH(o); return PyUnicode_AsUTF8(o); } else { PyUnicode_AsASCIIString(o); return NULL; } #else return PyUnicode_AsUTF8AndSize(o, length); #endif } #endif #endif static CYTHON_INLINE const char* __Pyx_PyObject_AsStringAndSize(PyObject* o, Py_ssize_t *length) { #if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT if ( #if PY_MAJOR_VERSION < 3 && __PYX_DEFAULT_STRING_ENCODING_IS_ASCII __Pyx_sys_getdefaultencoding_not_ascii && #endif PyUnicode_Check(o)) { return __Pyx_PyUnicode_AsStringAndSize(o, length); } else #endif #if (!CYTHON_COMPILING_IN_PYPY) || (defined(PyByteArray_AS_STRING) && defined(PyByteArray_GET_SIZE)) if (PyByteArray_Check(o)) { *length = PyByteArray_GET_SIZE(o); return PyByteArray_AS_STRING(o); } else #endif { char* result; int r = PyBytes_AsStringAndSize(o, &result, length); if (unlikely(r < 0)) { return NULL; } else { return result; } } } static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject* x) { int is_true = x == Py_True; if (is_true | (x == Py_False) | (x == Py_None)) return is_true; else return PyObject_IsTrue(x); } static CYTHON_INLINE int __Pyx_PyObject_IsTrueAndDecref(PyObject* x) { int retval; if (unlikely(!x)) return -1; retval = __Pyx_PyObject_IsTrue(x); Py_DECREF(x); return retval; } static PyObject* __Pyx_PyNumber_IntOrLongWrongResultType(PyObject* result, const char* type_name) { #if PY_MAJOR_VERSION >= 3 if (PyLong_Check(result)) { if (PyErr_WarnFormat(PyExc_DeprecationWarning, 1, "__int__ returned non-int (type %.200s). 
" "The ability to return an instance of a strict subclass of int " "is deprecated, and may be removed in a future version of Python.", Py_TYPE(result)->tp_name)) { Py_DECREF(result); return NULL; } return result; } #endif PyErr_Format(PyExc_TypeError, "__%.4s__ returned non-%.4s (type %.200s)", type_name, type_name, Py_TYPE(result)->tp_name); Py_DECREF(result); return NULL; } static CYTHON_INLINE PyObject* __Pyx_PyNumber_IntOrLong(PyObject* x) { #if CYTHON_USE_TYPE_SLOTS PyNumberMethods *m; #endif const char *name = NULL; PyObject *res = NULL; #if PY_MAJOR_VERSION < 3 if (likely(PyInt_Check(x) || PyLong_Check(x))) #else if (likely(PyLong_Check(x))) #endif return __Pyx_NewRef(x); #if CYTHON_USE_TYPE_SLOTS m = Py_TYPE(x)->tp_as_number; #if PY_MAJOR_VERSION < 3 if (m && m->nb_int) { name = "int"; res = m->nb_int(x); } else if (m && m->nb_long) { name = "long"; res = m->nb_long(x); } #else if (likely(m && m->nb_int)) { name = "int"; res = m->nb_int(x); } #endif #else if (!PyBytes_CheckExact(x) && !PyUnicode_CheckExact(x)) { res = PyNumber_Int(x); } #endif if (likely(res)) { #if PY_MAJOR_VERSION < 3 if (unlikely(!PyInt_Check(res) && !PyLong_Check(res))) { #else if (unlikely(!PyLong_CheckExact(res))) { #endif return __Pyx_PyNumber_IntOrLongWrongResultType(res, name); } } else if (!PyErr_Occurred()) { PyErr_SetString(PyExc_TypeError, "an integer is required"); } return res; } static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject* b) { Py_ssize_t ival; PyObject *x; #if PY_MAJOR_VERSION < 3 if (likely(PyInt_CheckExact(b))) { if (sizeof(Py_ssize_t) >= sizeof(long)) return PyInt_AS_LONG(b); else return PyInt_AsSsize_t(b); } #endif if (likely(PyLong_CheckExact(b))) { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)b)->ob_digit; const Py_ssize_t size = Py_SIZE(b); if (likely(__Pyx_sst_abs(size) <= 1)) { ival = likely(size) ? digits[0] : 0; if (size == -1) ival = -ival; return ival; } else { switch (size) { case 2: if (8 * sizeof(Py_ssize_t) > 2 * PyLong_SHIFT) { return (Py_ssize_t) (((((size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); } break; case -2: if (8 * sizeof(Py_ssize_t) > 2 * PyLong_SHIFT) { return -(Py_ssize_t) (((((size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); } break; case 3: if (8 * sizeof(Py_ssize_t) > 3 * PyLong_SHIFT) { return (Py_ssize_t) (((((((size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); } break; case -3: if (8 * sizeof(Py_ssize_t) > 3 * PyLong_SHIFT) { return -(Py_ssize_t) (((((((size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); } break; case 4: if (8 * sizeof(Py_ssize_t) > 4 * PyLong_SHIFT) { return (Py_ssize_t) (((((((((size_t)digits[3]) << PyLong_SHIFT) | (size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); } break; case -4: if (8 * sizeof(Py_ssize_t) > 4 * PyLong_SHIFT) { return -(Py_ssize_t) (((((((((size_t)digits[3]) << PyLong_SHIFT) | (size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); } break; } } #endif return PyLong_AsSsize_t(b); } x = PyNumber_Index(b); if (!x) return -1; ival = PyInt_AsSsize_t(x); Py_DECREF(x); return ival; } static CYTHON_INLINE PyObject * __Pyx_PyBool_FromLong(long b) { return b ? 
__Pyx_NewRef(Py_True) : __Pyx_NewRef(Py_False); } static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t ival) { return PyInt_FromSize_t(ival); } #endif /* Py_PYTHON_H */ borgbackup-1.1.15/src/borg/algorithms/0000755000175000017500000000000013771325773017604 5ustar useruser00000000000000borgbackup-1.1.15/src/borg/algorithms/zstd-libselect.h0000644000175000017500000000012213771325506022672 0ustar useruser00000000000000#ifdef BORG_USE_LIBZSTD #include #else #include "zstd/lib/zstd.h" #endif borgbackup-1.1.15/src/borg/algorithms/__init__.py0000644000175000017500000000044413771325506021711 0ustar useruser00000000000000""" borg.algorithms =============== This package is intended for hash and checksum functions. Ideally these would be sourced from existing libraries, but are frequently not available yet (blake2), are available but in poor form (crc32) or don't really make sense as a library (xxHash). """ borgbackup-1.1.15/src/borg/algorithms/checksums.c0000644000175000017500000072233313771325772021746 0ustar useruser00000000000000/* Generated by Cython 0.29.21 */ #define PY_SSIZE_T_CLEAN #include "Python.h" #ifndef Py_PYTHON_H #error Python headers needed to compile C extensions, please install development version of Python. #elif PY_VERSION_HEX < 0x02060000 || (0x03000000 <= PY_VERSION_HEX && PY_VERSION_HEX < 0x03030000) #error Cython requires Python 2.6+ or Python 3.3+. #else #define CYTHON_ABI "0_29_21" #define CYTHON_HEX_VERSION 0x001D15F0 #define CYTHON_FUTURE_DIVISION 1 #include #ifndef offsetof #define offsetof(type, member) ( (size_t) & ((type*)0) -> member ) #endif #if !defined(WIN32) && !defined(MS_WINDOWS) #ifndef __stdcall #define __stdcall #endif #ifndef __cdecl #define __cdecl #endif #ifndef __fastcall #define __fastcall #endif #endif #ifndef DL_IMPORT #define DL_IMPORT(t) t #endif #ifndef DL_EXPORT #define DL_EXPORT(t) t #endif #define __PYX_COMMA , #ifndef HAVE_LONG_LONG #if PY_VERSION_HEX >= 0x02070000 #define HAVE_LONG_LONG #endif #endif #ifndef PY_LONG_LONG #define PY_LONG_LONG LONG_LONG #endif #ifndef Py_HUGE_VAL #define Py_HUGE_VAL HUGE_VAL #endif #ifdef PYPY_VERSION #define CYTHON_COMPILING_IN_PYPY 1 #define CYTHON_COMPILING_IN_PYSTON 0 #define CYTHON_COMPILING_IN_CPYTHON 0 #undef CYTHON_USE_TYPE_SLOTS #define CYTHON_USE_TYPE_SLOTS 0 #undef CYTHON_USE_PYTYPE_LOOKUP #define CYTHON_USE_PYTYPE_LOOKUP 0 #if PY_VERSION_HEX < 0x03050000 #undef CYTHON_USE_ASYNC_SLOTS #define CYTHON_USE_ASYNC_SLOTS 0 #elif !defined(CYTHON_USE_ASYNC_SLOTS) #define CYTHON_USE_ASYNC_SLOTS 1 #endif #undef CYTHON_USE_PYLIST_INTERNALS #define CYTHON_USE_PYLIST_INTERNALS 0 #undef CYTHON_USE_UNICODE_INTERNALS #define CYTHON_USE_UNICODE_INTERNALS 0 #undef CYTHON_USE_UNICODE_WRITER #define CYTHON_USE_UNICODE_WRITER 0 #undef CYTHON_USE_PYLONG_INTERNALS #define CYTHON_USE_PYLONG_INTERNALS 0 #undef CYTHON_AVOID_BORROWED_REFS #define CYTHON_AVOID_BORROWED_REFS 1 #undef CYTHON_ASSUME_SAFE_MACROS #define CYTHON_ASSUME_SAFE_MACROS 0 #undef CYTHON_UNPACK_METHODS #define CYTHON_UNPACK_METHODS 0 #undef CYTHON_FAST_THREAD_STATE #define CYTHON_FAST_THREAD_STATE 0 #undef CYTHON_FAST_PYCALL #define CYTHON_FAST_PYCALL 0 #undef CYTHON_PEP489_MULTI_PHASE_INIT #define CYTHON_PEP489_MULTI_PHASE_INIT 0 #undef CYTHON_USE_TP_FINALIZE #define CYTHON_USE_TP_FINALIZE 0 #undef CYTHON_USE_DICT_VERSIONS #define CYTHON_USE_DICT_VERSIONS 0 #undef CYTHON_USE_EXC_INFO_STACK #define CYTHON_USE_EXC_INFO_STACK 0 #elif defined(PYSTON_VERSION) #define CYTHON_COMPILING_IN_PYPY 0 #define CYTHON_COMPILING_IN_PYSTON 1 #define 
CYTHON_COMPILING_IN_CPYTHON 0 #ifndef CYTHON_USE_TYPE_SLOTS #define CYTHON_USE_TYPE_SLOTS 1 #endif #undef CYTHON_USE_PYTYPE_LOOKUP #define CYTHON_USE_PYTYPE_LOOKUP 0 #undef CYTHON_USE_ASYNC_SLOTS #define CYTHON_USE_ASYNC_SLOTS 0 #undef CYTHON_USE_PYLIST_INTERNALS #define CYTHON_USE_PYLIST_INTERNALS 0 #ifndef CYTHON_USE_UNICODE_INTERNALS #define CYTHON_USE_UNICODE_INTERNALS 1 #endif #undef CYTHON_USE_UNICODE_WRITER #define CYTHON_USE_UNICODE_WRITER 0 #undef CYTHON_USE_PYLONG_INTERNALS #define CYTHON_USE_PYLONG_INTERNALS 0 #ifndef CYTHON_AVOID_BORROWED_REFS #define CYTHON_AVOID_BORROWED_REFS 0 #endif #ifndef CYTHON_ASSUME_SAFE_MACROS #define CYTHON_ASSUME_SAFE_MACROS 1 #endif #ifndef CYTHON_UNPACK_METHODS #define CYTHON_UNPACK_METHODS 1 #endif #undef CYTHON_FAST_THREAD_STATE #define CYTHON_FAST_THREAD_STATE 0 #undef CYTHON_FAST_PYCALL #define CYTHON_FAST_PYCALL 0 #undef CYTHON_PEP489_MULTI_PHASE_INIT #define CYTHON_PEP489_MULTI_PHASE_INIT 0 #undef CYTHON_USE_TP_FINALIZE #define CYTHON_USE_TP_FINALIZE 0 #undef CYTHON_USE_DICT_VERSIONS #define CYTHON_USE_DICT_VERSIONS 0 #undef CYTHON_USE_EXC_INFO_STACK #define CYTHON_USE_EXC_INFO_STACK 0 #else #define CYTHON_COMPILING_IN_PYPY 0 #define CYTHON_COMPILING_IN_PYSTON 0 #define CYTHON_COMPILING_IN_CPYTHON 1 #ifndef CYTHON_USE_TYPE_SLOTS #define CYTHON_USE_TYPE_SLOTS 1 #endif #if PY_VERSION_HEX < 0x02070000 #undef CYTHON_USE_PYTYPE_LOOKUP #define CYTHON_USE_PYTYPE_LOOKUP 0 #elif !defined(CYTHON_USE_PYTYPE_LOOKUP) #define CYTHON_USE_PYTYPE_LOOKUP 1 #endif #if PY_MAJOR_VERSION < 3 #undef CYTHON_USE_ASYNC_SLOTS #define CYTHON_USE_ASYNC_SLOTS 0 #elif !defined(CYTHON_USE_ASYNC_SLOTS) #define CYTHON_USE_ASYNC_SLOTS 1 #endif #if PY_VERSION_HEX < 0x02070000 #undef CYTHON_USE_PYLONG_INTERNALS #define CYTHON_USE_PYLONG_INTERNALS 0 #elif !defined(CYTHON_USE_PYLONG_INTERNALS) #define CYTHON_USE_PYLONG_INTERNALS 1 #endif #ifndef CYTHON_USE_PYLIST_INTERNALS #define CYTHON_USE_PYLIST_INTERNALS 1 #endif #ifndef CYTHON_USE_UNICODE_INTERNALS #define CYTHON_USE_UNICODE_INTERNALS 1 #endif #if PY_VERSION_HEX < 0x030300F0 #undef CYTHON_USE_UNICODE_WRITER #define CYTHON_USE_UNICODE_WRITER 0 #elif !defined(CYTHON_USE_UNICODE_WRITER) #define CYTHON_USE_UNICODE_WRITER 1 #endif #ifndef CYTHON_AVOID_BORROWED_REFS #define CYTHON_AVOID_BORROWED_REFS 0 #endif #ifndef CYTHON_ASSUME_SAFE_MACROS #define CYTHON_ASSUME_SAFE_MACROS 1 #endif #ifndef CYTHON_UNPACK_METHODS #define CYTHON_UNPACK_METHODS 1 #endif #ifndef CYTHON_FAST_THREAD_STATE #define CYTHON_FAST_THREAD_STATE 1 #endif #ifndef CYTHON_FAST_PYCALL #define CYTHON_FAST_PYCALL 1 #endif #ifndef CYTHON_PEP489_MULTI_PHASE_INIT #define CYTHON_PEP489_MULTI_PHASE_INIT (PY_VERSION_HEX >= 0x03050000) #endif #ifndef CYTHON_USE_TP_FINALIZE #define CYTHON_USE_TP_FINALIZE (PY_VERSION_HEX >= 0x030400a1) #endif #ifndef CYTHON_USE_DICT_VERSIONS #define CYTHON_USE_DICT_VERSIONS (PY_VERSION_HEX >= 0x030600B1) #endif #ifndef CYTHON_USE_EXC_INFO_STACK #define CYTHON_USE_EXC_INFO_STACK (PY_VERSION_HEX >= 0x030700A3) #endif #endif #if !defined(CYTHON_FAST_PYCCALL) #define CYTHON_FAST_PYCCALL (CYTHON_FAST_PYCALL && PY_VERSION_HEX >= 0x030600B1) #endif #if CYTHON_USE_PYLONG_INTERNALS #include "longintrepr.h" #undef SHIFT #undef BASE #undef MASK #ifdef SIZEOF_VOID_P enum { __pyx_check_sizeof_voidp = 1 / (int)(SIZEOF_VOID_P == sizeof(void*)) }; #endif #endif #ifndef __has_attribute #define __has_attribute(x) 0 #endif #ifndef __has_cpp_attribute #define __has_cpp_attribute(x) 0 #endif #ifndef CYTHON_RESTRICT #if defined(__GNUC__) #define CYTHON_RESTRICT 
__restrict__ #elif defined(_MSC_VER) && _MSC_VER >= 1400 #define CYTHON_RESTRICT __restrict #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L #define CYTHON_RESTRICT restrict #else #define CYTHON_RESTRICT #endif #endif #ifndef CYTHON_UNUSED # if defined(__GNUC__) # if !(defined(__cplusplus)) || (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4)) # define CYTHON_UNUSED __attribute__ ((__unused__)) # else # define CYTHON_UNUSED # endif # elif defined(__ICC) || (defined(__INTEL_COMPILER) && !defined(_MSC_VER)) # define CYTHON_UNUSED __attribute__ ((__unused__)) # else # define CYTHON_UNUSED # endif #endif #ifndef CYTHON_MAYBE_UNUSED_VAR # if defined(__cplusplus) template void CYTHON_MAYBE_UNUSED_VAR( const T& ) { } # else # define CYTHON_MAYBE_UNUSED_VAR(x) (void)(x) # endif #endif #ifndef CYTHON_NCP_UNUSED # if CYTHON_COMPILING_IN_CPYTHON # define CYTHON_NCP_UNUSED # else # define CYTHON_NCP_UNUSED CYTHON_UNUSED # endif #endif #define __Pyx_void_to_None(void_result) ((void)(void_result), Py_INCREF(Py_None), Py_None) #ifdef _MSC_VER #ifndef _MSC_STDINT_H_ #if _MSC_VER < 1300 typedef unsigned char uint8_t; typedef unsigned int uint32_t; #else typedef unsigned __int8 uint8_t; typedef unsigned __int32 uint32_t; #endif #endif #else #include #endif #ifndef CYTHON_FALLTHROUGH #if defined(__cplusplus) && __cplusplus >= 201103L #if __has_cpp_attribute(fallthrough) #define CYTHON_FALLTHROUGH [[fallthrough]] #elif __has_cpp_attribute(clang::fallthrough) #define CYTHON_FALLTHROUGH [[clang::fallthrough]] #elif __has_cpp_attribute(gnu::fallthrough) #define CYTHON_FALLTHROUGH [[gnu::fallthrough]] #endif #endif #ifndef CYTHON_FALLTHROUGH #if __has_attribute(fallthrough) #define CYTHON_FALLTHROUGH __attribute__((fallthrough)) #else #define CYTHON_FALLTHROUGH #endif #endif #if defined(__clang__ ) && defined(__apple_build_version__) #if __apple_build_version__ < 7000000 #undef CYTHON_FALLTHROUGH #define CYTHON_FALLTHROUGH #endif #endif #endif #ifndef CYTHON_INLINE #if defined(__clang__) #define CYTHON_INLINE __inline__ __attribute__ ((__unused__)) #elif defined(__GNUC__) #define CYTHON_INLINE __inline__ #elif defined(_MSC_VER) #define CYTHON_INLINE __inline #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L #define CYTHON_INLINE inline #else #define CYTHON_INLINE #endif #endif #if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX < 0x02070600 && !defined(Py_OptimizeFlag) #define Py_OptimizeFlag 0 #endif #define __PYX_BUILD_PY_SSIZE_T "n" #define CYTHON_FORMAT_SSIZE_T "z" #if PY_MAJOR_VERSION < 3 #define __Pyx_BUILTIN_MODULE_NAME "__builtin__" #define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\ PyCode_New(a+k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) #define __Pyx_DefaultClassType PyClass_Type #else #define __Pyx_BUILTIN_MODULE_NAME "builtins" #if PY_VERSION_HEX >= 0x030800A4 && PY_VERSION_HEX < 0x030800B2 #define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\ PyCode_New(a, 0, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) #else #define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\ PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) #endif #define __Pyx_DefaultClassType PyType_Type #endif #ifndef Py_TPFLAGS_CHECKTYPES #define Py_TPFLAGS_CHECKTYPES 0 #endif #ifndef Py_TPFLAGS_HAVE_INDEX #define Py_TPFLAGS_HAVE_INDEX 0 #endif #ifndef Py_TPFLAGS_HAVE_NEWBUFFER #define Py_TPFLAGS_HAVE_NEWBUFFER 0 #endif #ifndef Py_TPFLAGS_HAVE_FINALIZE 
#define Py_TPFLAGS_HAVE_FINALIZE 0 #endif #ifndef METH_STACKLESS #define METH_STACKLESS 0 #endif #if PY_VERSION_HEX <= 0x030700A3 || !defined(METH_FASTCALL) #ifndef METH_FASTCALL #define METH_FASTCALL 0x80 #endif typedef PyObject *(*__Pyx_PyCFunctionFast) (PyObject *self, PyObject *const *args, Py_ssize_t nargs); typedef PyObject *(*__Pyx_PyCFunctionFastWithKeywords) (PyObject *self, PyObject *const *args, Py_ssize_t nargs, PyObject *kwnames); #else #define __Pyx_PyCFunctionFast _PyCFunctionFast #define __Pyx_PyCFunctionFastWithKeywords _PyCFunctionFastWithKeywords #endif #if CYTHON_FAST_PYCCALL #define __Pyx_PyFastCFunction_Check(func)\ ((PyCFunction_Check(func) && (METH_FASTCALL == (PyCFunction_GET_FLAGS(func) & ~(METH_CLASS | METH_STATIC | METH_COEXIST | METH_KEYWORDS | METH_STACKLESS))))) #else #define __Pyx_PyFastCFunction_Check(func) 0 #endif #if CYTHON_COMPILING_IN_PYPY && !defined(PyObject_Malloc) #define PyObject_Malloc(s) PyMem_Malloc(s) #define PyObject_Free(p) PyMem_Free(p) #define PyObject_Realloc(p) PyMem_Realloc(p) #endif #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX < 0x030400A1 #define PyMem_RawMalloc(n) PyMem_Malloc(n) #define PyMem_RawRealloc(p, n) PyMem_Realloc(p, n) #define PyMem_RawFree(p) PyMem_Free(p) #endif #if CYTHON_COMPILING_IN_PYSTON #define __Pyx_PyCode_HasFreeVars(co) PyCode_HasFreeVars(co) #define __Pyx_PyFrame_SetLineNumber(frame, lineno) PyFrame_SetLineNumber(frame, lineno) #else #define __Pyx_PyCode_HasFreeVars(co) (PyCode_GetNumFree(co) > 0) #define __Pyx_PyFrame_SetLineNumber(frame, lineno) (frame)->f_lineno = (lineno) #endif #if !CYTHON_FAST_THREAD_STATE || PY_VERSION_HEX < 0x02070000 #define __Pyx_PyThreadState_Current PyThreadState_GET() #elif PY_VERSION_HEX >= 0x03060000 #define __Pyx_PyThreadState_Current _PyThreadState_UncheckedGet() #elif PY_VERSION_HEX >= 0x03000000 #define __Pyx_PyThreadState_Current PyThreadState_GET() #else #define __Pyx_PyThreadState_Current _PyThreadState_Current #endif #if PY_VERSION_HEX < 0x030700A2 && !defined(PyThread_tss_create) && !defined(Py_tss_NEEDS_INIT) #include "pythread.h" #define Py_tss_NEEDS_INIT 0 typedef int Py_tss_t; static CYTHON_INLINE int PyThread_tss_create(Py_tss_t *key) { *key = PyThread_create_key(); return 0; } static CYTHON_INLINE Py_tss_t * PyThread_tss_alloc(void) { Py_tss_t *key = (Py_tss_t *)PyObject_Malloc(sizeof(Py_tss_t)); *key = Py_tss_NEEDS_INIT; return key; } static CYTHON_INLINE void PyThread_tss_free(Py_tss_t *key) { PyObject_Free(key); } static CYTHON_INLINE int PyThread_tss_is_created(Py_tss_t *key) { return *key != Py_tss_NEEDS_INIT; } static CYTHON_INLINE void PyThread_tss_delete(Py_tss_t *key) { PyThread_delete_key(*key); *key = Py_tss_NEEDS_INIT; } static CYTHON_INLINE int PyThread_tss_set(Py_tss_t *key, void *value) { return PyThread_set_key_value(*key, value); } static CYTHON_INLINE void * PyThread_tss_get(Py_tss_t *key) { return PyThread_get_key_value(*key); } #endif #if CYTHON_COMPILING_IN_CPYTHON || defined(_PyDict_NewPresized) #define __Pyx_PyDict_NewPresized(n) ((n <= 8) ? 
PyDict_New() : _PyDict_NewPresized(n)) #else #define __Pyx_PyDict_NewPresized(n) PyDict_New() #endif #if PY_MAJOR_VERSION >= 3 || CYTHON_FUTURE_DIVISION #define __Pyx_PyNumber_Divide(x,y) PyNumber_TrueDivide(x,y) #define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceTrueDivide(x,y) #else #define __Pyx_PyNumber_Divide(x,y) PyNumber_Divide(x,y) #define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceDivide(x,y) #endif #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030500A1 && CYTHON_USE_UNICODE_INTERNALS #define __Pyx_PyDict_GetItemStr(dict, name) _PyDict_GetItem_KnownHash(dict, name, ((PyASCIIObject *) name)->hash) #else #define __Pyx_PyDict_GetItemStr(dict, name) PyDict_GetItem(dict, name) #endif #if PY_VERSION_HEX > 0x03030000 && defined(PyUnicode_KIND) #define CYTHON_PEP393_ENABLED 1 #define __Pyx_PyUnicode_READY(op) (likely(PyUnicode_IS_READY(op)) ?\ 0 : _PyUnicode_Ready((PyObject *)(op))) #define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_LENGTH(u) #define __Pyx_PyUnicode_READ_CHAR(u, i) PyUnicode_READ_CHAR(u, i) #define __Pyx_PyUnicode_MAX_CHAR_VALUE(u) PyUnicode_MAX_CHAR_VALUE(u) #define __Pyx_PyUnicode_KIND(u) PyUnicode_KIND(u) #define __Pyx_PyUnicode_DATA(u) PyUnicode_DATA(u) #define __Pyx_PyUnicode_READ(k, d, i) PyUnicode_READ(k, d, i) #define __Pyx_PyUnicode_WRITE(k, d, i, ch) PyUnicode_WRITE(k, d, i, ch) #if defined(PyUnicode_IS_READY) && defined(PyUnicode_GET_SIZE) #define __Pyx_PyUnicode_IS_TRUE(u) (0 != (likely(PyUnicode_IS_READY(u)) ? PyUnicode_GET_LENGTH(u) : PyUnicode_GET_SIZE(u))) #else #define __Pyx_PyUnicode_IS_TRUE(u) (0 != PyUnicode_GET_LENGTH(u)) #endif #else #define CYTHON_PEP393_ENABLED 0 #define PyUnicode_1BYTE_KIND 1 #define PyUnicode_2BYTE_KIND 2 #define PyUnicode_4BYTE_KIND 4 #define __Pyx_PyUnicode_READY(op) (0) #define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_SIZE(u) #define __Pyx_PyUnicode_READ_CHAR(u, i) ((Py_UCS4)(PyUnicode_AS_UNICODE(u)[i])) #define __Pyx_PyUnicode_MAX_CHAR_VALUE(u) ((sizeof(Py_UNICODE) == 2) ? 65535 : 1114111) #define __Pyx_PyUnicode_KIND(u) (sizeof(Py_UNICODE)) #define __Pyx_PyUnicode_DATA(u) ((void*)PyUnicode_AS_UNICODE(u)) #define __Pyx_PyUnicode_READ(k, d, i) ((void)(k), (Py_UCS4)(((Py_UNICODE*)d)[i])) #define __Pyx_PyUnicode_WRITE(k, d, i, ch) (((void)(k)), ((Py_UNICODE*)d)[i] = ch) #define __Pyx_PyUnicode_IS_TRUE(u) (0 != PyUnicode_GET_SIZE(u)) #endif #if CYTHON_COMPILING_IN_PYPY #define __Pyx_PyUnicode_Concat(a, b) PyNumber_Add(a, b) #define __Pyx_PyUnicode_ConcatSafe(a, b) PyNumber_Add(a, b) #else #define __Pyx_PyUnicode_Concat(a, b) PyUnicode_Concat(a, b) #define __Pyx_PyUnicode_ConcatSafe(a, b) ((unlikely((a) == Py_None) || unlikely((b) == Py_None)) ?\ PyNumber_Add(a, b) : __Pyx_PyUnicode_Concat(a, b)) #endif #if CYTHON_COMPILING_IN_PYPY && !defined(PyUnicode_Contains) #define PyUnicode_Contains(u, s) PySequence_Contains(u, s) #endif #if CYTHON_COMPILING_IN_PYPY && !defined(PyByteArray_Check) #define PyByteArray_Check(obj) PyObject_TypeCheck(obj, &PyByteArray_Type) #endif #if CYTHON_COMPILING_IN_PYPY && !defined(PyObject_Format) #define PyObject_Format(obj, fmt) PyObject_CallMethod(obj, "__format__", "O", fmt) #endif #define __Pyx_PyString_FormatSafe(a, b) ((unlikely((a) == Py_None || (PyString_Check(b) && !PyString_CheckExact(b)))) ? PyNumber_Remainder(a, b) : __Pyx_PyString_Format(a, b)) #define __Pyx_PyUnicode_FormatSafe(a, b) ((unlikely((a) == Py_None || (PyUnicode_Check(b) && !PyUnicode_CheckExact(b)))) ? 
PyNumber_Remainder(a, b) : PyUnicode_Format(a, b)) #if PY_MAJOR_VERSION >= 3 #define __Pyx_PyString_Format(a, b) PyUnicode_Format(a, b) #else #define __Pyx_PyString_Format(a, b) PyString_Format(a, b) #endif #if PY_MAJOR_VERSION < 3 && !defined(PyObject_ASCII) #define PyObject_ASCII(o) PyObject_Repr(o) #endif #if PY_MAJOR_VERSION >= 3 #define PyBaseString_Type PyUnicode_Type #define PyStringObject PyUnicodeObject #define PyString_Type PyUnicode_Type #define PyString_Check PyUnicode_Check #define PyString_CheckExact PyUnicode_CheckExact #ifndef PyObject_Unicode #define PyObject_Unicode PyObject_Str #endif #endif #if PY_MAJOR_VERSION >= 3 #define __Pyx_PyBaseString_Check(obj) PyUnicode_Check(obj) #define __Pyx_PyBaseString_CheckExact(obj) PyUnicode_CheckExact(obj) #else #define __Pyx_PyBaseString_Check(obj) (PyString_Check(obj) || PyUnicode_Check(obj)) #define __Pyx_PyBaseString_CheckExact(obj) (PyString_CheckExact(obj) || PyUnicode_CheckExact(obj)) #endif #ifndef PySet_CheckExact #define PySet_CheckExact(obj) (Py_TYPE(obj) == &PySet_Type) #endif #if PY_VERSION_HEX >= 0x030900A4 #define __Pyx_SET_REFCNT(obj, refcnt) Py_SET_REFCNT(obj, refcnt) #define __Pyx_SET_SIZE(obj, size) Py_SET_SIZE(obj, size) #else #define __Pyx_SET_REFCNT(obj, refcnt) Py_REFCNT(obj) = (refcnt) #define __Pyx_SET_SIZE(obj, size) Py_SIZE(obj) = (size) #endif #if CYTHON_ASSUME_SAFE_MACROS #define __Pyx_PySequence_SIZE(seq) Py_SIZE(seq) #else #define __Pyx_PySequence_SIZE(seq) PySequence_Size(seq) #endif #if PY_MAJOR_VERSION >= 3 #define PyIntObject PyLongObject #define PyInt_Type PyLong_Type #define PyInt_Check(op) PyLong_Check(op) #define PyInt_CheckExact(op) PyLong_CheckExact(op) #define PyInt_FromString PyLong_FromString #define PyInt_FromUnicode PyLong_FromUnicode #define PyInt_FromLong PyLong_FromLong #define PyInt_FromSize_t PyLong_FromSize_t #define PyInt_FromSsize_t PyLong_FromSsize_t #define PyInt_AsLong PyLong_AsLong #define PyInt_AS_LONG PyLong_AS_LONG #define PyInt_AsSsize_t PyLong_AsSsize_t #define PyInt_AsUnsignedLongMask PyLong_AsUnsignedLongMask #define PyInt_AsUnsignedLongLongMask PyLong_AsUnsignedLongLongMask #define PyNumber_Int PyNumber_Long #endif #if PY_MAJOR_VERSION >= 3 #define PyBoolObject PyLongObject #endif #if PY_MAJOR_VERSION >= 3 && CYTHON_COMPILING_IN_PYPY #ifndef PyUnicode_InternFromString #define PyUnicode_InternFromString(s) PyUnicode_FromString(s) #endif #endif #if PY_VERSION_HEX < 0x030200A4 typedef long Py_hash_t; #define __Pyx_PyInt_FromHash_t PyInt_FromLong #define __Pyx_PyInt_AsHash_t PyInt_AsLong #else #define __Pyx_PyInt_FromHash_t PyInt_FromSsize_t #define __Pyx_PyInt_AsHash_t PyInt_AsSsize_t #endif #if PY_MAJOR_VERSION >= 3 #define __Pyx_PyMethod_New(func, self, klass) ((self) ? 
((void)(klass), PyMethod_New(func, self)) : __Pyx_NewRef(func)) #else #define __Pyx_PyMethod_New(func, self, klass) PyMethod_New(func, self, klass) #endif #if CYTHON_USE_ASYNC_SLOTS #if PY_VERSION_HEX >= 0x030500B1 #define __Pyx_PyAsyncMethodsStruct PyAsyncMethods #define __Pyx_PyType_AsAsync(obj) (Py_TYPE(obj)->tp_as_async) #else #define __Pyx_PyType_AsAsync(obj) ((__Pyx_PyAsyncMethodsStruct*) (Py_TYPE(obj)->tp_reserved)) #endif #else #define __Pyx_PyType_AsAsync(obj) NULL #endif #ifndef __Pyx_PyAsyncMethodsStruct typedef struct { unaryfunc am_await; unaryfunc am_aiter; unaryfunc am_anext; } __Pyx_PyAsyncMethodsStruct; #endif #if defined(WIN32) || defined(MS_WINDOWS) #define _USE_MATH_DEFINES #endif #include #ifdef NAN #define __PYX_NAN() ((float) NAN) #else static CYTHON_INLINE float __PYX_NAN() { float value; memset(&value, 0xFF, sizeof(value)); return value; } #endif #if defined(__CYGWIN__) && defined(_LDBL_EQ_DBL) #define __Pyx_truncl trunc #else #define __Pyx_truncl truncl #endif #define __PYX_MARK_ERR_POS(f_index, lineno) \ { __pyx_filename = __pyx_f[f_index]; (void)__pyx_filename; __pyx_lineno = lineno; (void)__pyx_lineno; __pyx_clineno = __LINE__; (void)__pyx_clineno; } #define __PYX_ERR(f_index, lineno, Ln_error) \ { __PYX_MARK_ERR_POS(f_index, lineno) goto Ln_error; } #ifndef __PYX_EXTERN_C #ifdef __cplusplus #define __PYX_EXTERN_C extern "C" #else #define __PYX_EXTERN_C extern #endif #endif #define __PYX_HAVE__borg__algorithms__checksums #define __PYX_HAVE_API__borg__algorithms__checksums /* Early includes */ #include #include #include #include "crc32_dispatch.c" #include "../algorithms/xxhash-libselect.h" #ifdef _OPENMP #include #endif /* _OPENMP */ #if defined(PYREX_WITHOUT_ASSERTIONS) && !defined(CYTHON_WITHOUT_ASSERTIONS) #define CYTHON_WITHOUT_ASSERTIONS #endif typedef struct {PyObject **p; const char *s; const Py_ssize_t n; const char* encoding; const char is_unicode; const char is_str; const char intern; } __Pyx_StringTabEntry; #define __PYX_DEFAULT_STRING_ENCODING_IS_ASCII 0 #define __PYX_DEFAULT_STRING_ENCODING_IS_UTF8 0 #define __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT (PY_MAJOR_VERSION >= 3 && __PYX_DEFAULT_STRING_ENCODING_IS_UTF8) #define __PYX_DEFAULT_STRING_ENCODING "" #define __Pyx_PyObject_FromString __Pyx_PyBytes_FromString #define __Pyx_PyObject_FromStringAndSize __Pyx_PyBytes_FromStringAndSize #define __Pyx_uchar_cast(c) ((unsigned char)c) #define __Pyx_long_cast(x) ((long)x) #define __Pyx_fits_Py_ssize_t(v, type, is_signed) (\ (sizeof(type) < sizeof(Py_ssize_t)) ||\ (sizeof(type) > sizeof(Py_ssize_t) &&\ likely(v < (type)PY_SSIZE_T_MAX ||\ v == (type)PY_SSIZE_T_MAX) &&\ (!is_signed || likely(v > (type)PY_SSIZE_T_MIN ||\ v == (type)PY_SSIZE_T_MIN))) ||\ (sizeof(type) == sizeof(Py_ssize_t) &&\ (is_signed || likely(v < (type)PY_SSIZE_T_MAX ||\ v == (type)PY_SSIZE_T_MAX))) ) static CYTHON_INLINE int __Pyx_is_valid_index(Py_ssize_t i, Py_ssize_t limit) { return (size_t) i < (size_t) limit; } #if defined (__cplusplus) && __cplusplus >= 201103L #include #define __Pyx_sst_abs(value) std::abs(value) #elif SIZEOF_INT >= SIZEOF_SIZE_T #define __Pyx_sst_abs(value) abs(value) #elif SIZEOF_LONG >= SIZEOF_SIZE_T #define __Pyx_sst_abs(value) labs(value) #elif defined (_MSC_VER) #define __Pyx_sst_abs(value) ((Py_ssize_t)_abs64(value)) #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L #define __Pyx_sst_abs(value) llabs(value) #elif defined (__GNUC__) #define __Pyx_sst_abs(value) __builtin_llabs(value) #else #define __Pyx_sst_abs(value) ((value<0) ? 
-value : value) #endif static CYTHON_INLINE const char* __Pyx_PyObject_AsString(PyObject*); static CYTHON_INLINE const char* __Pyx_PyObject_AsStringAndSize(PyObject*, Py_ssize_t* length); #define __Pyx_PyByteArray_FromString(s) PyByteArray_FromStringAndSize((const char*)s, strlen((const char*)s)) #define __Pyx_PyByteArray_FromStringAndSize(s, l) PyByteArray_FromStringAndSize((const char*)s, l) #define __Pyx_PyBytes_FromString PyBytes_FromString #define __Pyx_PyBytes_FromStringAndSize PyBytes_FromStringAndSize static CYTHON_INLINE PyObject* __Pyx_PyUnicode_FromString(const char*); #if PY_MAJOR_VERSION < 3 #define __Pyx_PyStr_FromString __Pyx_PyBytes_FromString #define __Pyx_PyStr_FromStringAndSize __Pyx_PyBytes_FromStringAndSize #else #define __Pyx_PyStr_FromString __Pyx_PyUnicode_FromString #define __Pyx_PyStr_FromStringAndSize __Pyx_PyUnicode_FromStringAndSize #endif #define __Pyx_PyBytes_AsWritableString(s) ((char*) PyBytes_AS_STRING(s)) #define __Pyx_PyBytes_AsWritableSString(s) ((signed char*) PyBytes_AS_STRING(s)) #define __Pyx_PyBytes_AsWritableUString(s) ((unsigned char*) PyBytes_AS_STRING(s)) #define __Pyx_PyBytes_AsString(s) ((const char*) PyBytes_AS_STRING(s)) #define __Pyx_PyBytes_AsSString(s) ((const signed char*) PyBytes_AS_STRING(s)) #define __Pyx_PyBytes_AsUString(s) ((const unsigned char*) PyBytes_AS_STRING(s)) #define __Pyx_PyObject_AsWritableString(s) ((char*) __Pyx_PyObject_AsString(s)) #define __Pyx_PyObject_AsWritableSString(s) ((signed char*) __Pyx_PyObject_AsString(s)) #define __Pyx_PyObject_AsWritableUString(s) ((unsigned char*) __Pyx_PyObject_AsString(s)) #define __Pyx_PyObject_AsSString(s) ((const signed char*) __Pyx_PyObject_AsString(s)) #define __Pyx_PyObject_AsUString(s) ((const unsigned char*) __Pyx_PyObject_AsString(s)) #define __Pyx_PyObject_FromCString(s) __Pyx_PyObject_FromString((const char*)s) #define __Pyx_PyBytes_FromCString(s) __Pyx_PyBytes_FromString((const char*)s) #define __Pyx_PyByteArray_FromCString(s) __Pyx_PyByteArray_FromString((const char*)s) #define __Pyx_PyStr_FromCString(s) __Pyx_PyStr_FromString((const char*)s) #define __Pyx_PyUnicode_FromCString(s) __Pyx_PyUnicode_FromString((const char*)s) static CYTHON_INLINE size_t __Pyx_Py_UNICODE_strlen(const Py_UNICODE *u) { const Py_UNICODE *u_end = u; while (*u_end++) ; return (size_t)(u_end - u - 1); } #define __Pyx_PyUnicode_FromUnicode(u) PyUnicode_FromUnicode(u, __Pyx_Py_UNICODE_strlen(u)) #define __Pyx_PyUnicode_FromUnicodeAndLength PyUnicode_FromUnicode #define __Pyx_PyUnicode_AsUnicode PyUnicode_AsUnicode #define __Pyx_NewRef(obj) (Py_INCREF(obj), obj) #define __Pyx_Owned_Py_None(b) __Pyx_NewRef(Py_None) static CYTHON_INLINE PyObject * __Pyx_PyBool_FromLong(long b); static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject*); static CYTHON_INLINE int __Pyx_PyObject_IsTrueAndDecref(PyObject*); static CYTHON_INLINE PyObject* __Pyx_PyNumber_IntOrLong(PyObject* x); #define __Pyx_PySequence_Tuple(obj)\ (likely(PyTuple_CheckExact(obj)) ? __Pyx_NewRef(obj) : PySequence_Tuple(obj)) static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject*); static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t); #if CYTHON_ASSUME_SAFE_MACROS #define __pyx_PyFloat_AsDouble(x) (PyFloat_CheckExact(x) ? PyFloat_AS_DOUBLE(x) : PyFloat_AsDouble(x)) #else #define __pyx_PyFloat_AsDouble(x) PyFloat_AsDouble(x) #endif #define __pyx_PyFloat_AsFloat(x) ((float) __pyx_PyFloat_AsDouble(x)) #if PY_MAJOR_VERSION >= 3 #define __Pyx_PyNumber_Int(x) (PyLong_CheckExact(x) ? 
__Pyx_NewRef(x) : PyNumber_Long(x)) #else #define __Pyx_PyNumber_Int(x) (PyInt_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Int(x)) #endif #define __Pyx_PyNumber_Float(x) (PyFloat_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Float(x)) #if PY_MAJOR_VERSION < 3 && __PYX_DEFAULT_STRING_ENCODING_IS_ASCII static int __Pyx_sys_getdefaultencoding_not_ascii; static int __Pyx_init_sys_getdefaultencoding_params(void) { PyObject* sys; PyObject* default_encoding = NULL; PyObject* ascii_chars_u = NULL; PyObject* ascii_chars_b = NULL; const char* default_encoding_c; sys = PyImport_ImportModule("sys"); if (!sys) goto bad; default_encoding = PyObject_CallMethod(sys, (char*) "getdefaultencoding", NULL); Py_DECREF(sys); if (!default_encoding) goto bad; default_encoding_c = PyBytes_AsString(default_encoding); if (!default_encoding_c) goto bad; if (strcmp(default_encoding_c, "ascii") == 0) { __Pyx_sys_getdefaultencoding_not_ascii = 0; } else { char ascii_chars[128]; int c; for (c = 0; c < 128; c++) { ascii_chars[c] = c; } __Pyx_sys_getdefaultencoding_not_ascii = 1; ascii_chars_u = PyUnicode_DecodeASCII(ascii_chars, 128, NULL); if (!ascii_chars_u) goto bad; ascii_chars_b = PyUnicode_AsEncodedString(ascii_chars_u, default_encoding_c, NULL); if (!ascii_chars_b || !PyBytes_Check(ascii_chars_b) || memcmp(ascii_chars, PyBytes_AS_STRING(ascii_chars_b), 128) != 0) { PyErr_Format( PyExc_ValueError, "This module compiled with c_string_encoding=ascii, but default encoding '%.200s' is not a superset of ascii.", default_encoding_c); goto bad; } Py_DECREF(ascii_chars_u); Py_DECREF(ascii_chars_b); } Py_DECREF(default_encoding); return 0; bad: Py_XDECREF(default_encoding); Py_XDECREF(ascii_chars_u); Py_XDECREF(ascii_chars_b); return -1; } #endif #if __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT && PY_MAJOR_VERSION >= 3 #define __Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_DecodeUTF8(c_str, size, NULL) #else #define __Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_Decode(c_str, size, __PYX_DEFAULT_STRING_ENCODING, NULL) #if __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT static char* __PYX_DEFAULT_STRING_ENCODING; static int __Pyx_init_sys_getdefaultencoding_params(void) { PyObject* sys; PyObject* default_encoding = NULL; char* default_encoding_c; sys = PyImport_ImportModule("sys"); if (!sys) goto bad; default_encoding = PyObject_CallMethod(sys, (char*) (const char*) "getdefaultencoding", NULL); Py_DECREF(sys); if (!default_encoding) goto bad; default_encoding_c = PyBytes_AsString(default_encoding); if (!default_encoding_c) goto bad; __PYX_DEFAULT_STRING_ENCODING = (char*) malloc(strlen(default_encoding_c) + 1); if (!__PYX_DEFAULT_STRING_ENCODING) goto bad; strcpy(__PYX_DEFAULT_STRING_ENCODING, default_encoding_c); Py_DECREF(default_encoding); return 0; bad: Py_XDECREF(default_encoding); return -1; } #endif #endif /* Test for GCC > 2.95 */ #if defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))) #define likely(x) __builtin_expect(!!(x), 1) #define unlikely(x) __builtin_expect(!!(x), 0) #else /* !__GNUC__ or GCC < 2.95 */ #define likely(x) (x) #define unlikely(x) (x) #endif /* __GNUC__ */ static CYTHON_INLINE void __Pyx_pretend_to_initialize(void* ptr) { (void)ptr; } static PyObject *__pyx_m = NULL; static PyObject *__pyx_d; static PyObject *__pyx_b; static PyObject *__pyx_cython_runtime = NULL; static PyObject *__pyx_empty_tuple; static PyObject *__pyx_empty_bytes; static PyObject *__pyx_empty_unicode; static int __pyx_lineno; static int __pyx_clineno = 0; static const char * __pyx_cfilenm= 
__FILE__; static const char *__pyx_filename; static const char *__pyx_f[] = { "stringsource", "src/borg/algorithms/checksums.pyx", "type.pxd", }; /*--- Type declarations ---*/ struct __pyx_obj_4borg_10algorithms_9checksums_StreamingXXH64; /* "borg/algorithms/checksums.pyx":84 * * * cdef class StreamingXXH64: # <<<<<<<<<<<<<< * cdef XXH64_state_t state * */ struct __pyx_obj_4borg_10algorithms_9checksums_StreamingXXH64 { PyObject_HEAD XXH64_state_t state; }; /* --- Runtime support code (head) --- */ /* Refnanny.proto */ #ifndef CYTHON_REFNANNY #define CYTHON_REFNANNY 0 #endif #if CYTHON_REFNANNY typedef struct { void (*INCREF)(void*, PyObject*, int); void (*DECREF)(void*, PyObject*, int); void (*GOTREF)(void*, PyObject*, int); void (*GIVEREF)(void*, PyObject*, int); void* (*SetupContext)(const char*, int, const char*); void (*FinishContext)(void**); } __Pyx_RefNannyAPIStruct; static __Pyx_RefNannyAPIStruct *__Pyx_RefNanny = NULL; static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname); #define __Pyx_RefNannyDeclarations void *__pyx_refnanny = NULL; #ifdef WITH_THREAD #define __Pyx_RefNannySetupContext(name, acquire_gil)\ if (acquire_gil) {\ PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();\ __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__);\ PyGILState_Release(__pyx_gilstate_save);\ } else {\ __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__);\ } #else #define __Pyx_RefNannySetupContext(name, acquire_gil)\ __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__) #endif #define __Pyx_RefNannyFinishContext()\ __Pyx_RefNanny->FinishContext(&__pyx_refnanny) #define __Pyx_INCREF(r) __Pyx_RefNanny->INCREF(__pyx_refnanny, (PyObject *)(r), __LINE__) #define __Pyx_DECREF(r) __Pyx_RefNanny->DECREF(__pyx_refnanny, (PyObject *)(r), __LINE__) #define __Pyx_GOTREF(r) __Pyx_RefNanny->GOTREF(__pyx_refnanny, (PyObject *)(r), __LINE__) #define __Pyx_GIVEREF(r) __Pyx_RefNanny->GIVEREF(__pyx_refnanny, (PyObject *)(r), __LINE__) #define __Pyx_XINCREF(r) do { if((r) != NULL) {__Pyx_INCREF(r); }} while(0) #define __Pyx_XDECREF(r) do { if((r) != NULL) {__Pyx_DECREF(r); }} while(0) #define __Pyx_XGOTREF(r) do { if((r) != NULL) {__Pyx_GOTREF(r); }} while(0) #define __Pyx_XGIVEREF(r) do { if((r) != NULL) {__Pyx_GIVEREF(r);}} while(0) #else #define __Pyx_RefNannyDeclarations #define __Pyx_RefNannySetupContext(name, acquire_gil) #define __Pyx_RefNannyFinishContext() #define __Pyx_INCREF(r) Py_INCREF(r) #define __Pyx_DECREF(r) Py_DECREF(r) #define __Pyx_GOTREF(r) #define __Pyx_GIVEREF(r) #define __Pyx_XINCREF(r) Py_XINCREF(r) #define __Pyx_XDECREF(r) Py_XDECREF(r) #define __Pyx_XGOTREF(r) #define __Pyx_XGIVEREF(r) #endif #define __Pyx_XDECREF_SET(r, v) do {\ PyObject *tmp = (PyObject *) r;\ r = v; __Pyx_XDECREF(tmp);\ } while (0) #define __Pyx_DECREF_SET(r, v) do {\ PyObject *tmp = (PyObject *) r;\ r = v; __Pyx_DECREF(tmp);\ } while (0) #define __Pyx_CLEAR(r) do { PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);} while(0) #define __Pyx_XCLEAR(r) do { if((r) != NULL) {PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);}} while(0) /* PyObjectGetAttrStr.proto */ #if CYTHON_USE_TYPE_SLOTS static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStr(PyObject* obj, PyObject* attr_name); #else #define __Pyx_PyObject_GetAttrStr(o,n) PyObject_GetAttr(o,n) #endif /* GetBuiltinName.proto */ static PyObject *__Pyx_GetBuiltinName(PyObject *name); /* RaiseDoubleKeywords.proto */ static void 
__Pyx_RaiseDoubleKeywordsError(const char* func_name, PyObject* kw_name); /* ParseKeywords.proto */ static int __Pyx_ParseOptionalKeywords(PyObject *kwds, PyObject **argnames[],\ PyObject *kwds2, PyObject *values[], Py_ssize_t num_pos_args,\ const char* function_name); /* RaiseArgTupleInvalid.proto */ static void __Pyx_RaiseArgtupleInvalid(const char* func_name, int exact, Py_ssize_t num_min, Py_ssize_t num_max, Py_ssize_t num_found); /* PyThreadStateGet.proto */ #if CYTHON_FAST_THREAD_STATE #define __Pyx_PyThreadState_declare PyThreadState *__pyx_tstate; #define __Pyx_PyThreadState_assign __pyx_tstate = __Pyx_PyThreadState_Current; #define __Pyx_PyErr_Occurred() __pyx_tstate->curexc_type #else #define __Pyx_PyThreadState_declare #define __Pyx_PyThreadState_assign #define __Pyx_PyErr_Occurred() PyErr_Occurred() #endif /* PyErrFetchRestore.proto */ #if CYTHON_FAST_THREAD_STATE #define __Pyx_PyErr_Clear() __Pyx_ErrRestore(NULL, NULL, NULL) #define __Pyx_ErrRestoreWithState(type, value, tb) __Pyx_ErrRestoreInState(PyThreadState_GET(), type, value, tb) #define __Pyx_ErrFetchWithState(type, value, tb) __Pyx_ErrFetchInState(PyThreadState_GET(), type, value, tb) #define __Pyx_ErrRestore(type, value, tb) __Pyx_ErrRestoreInState(__pyx_tstate, type, value, tb) #define __Pyx_ErrFetch(type, value, tb) __Pyx_ErrFetchInState(__pyx_tstate, type, value, tb) static CYTHON_INLINE void __Pyx_ErrRestoreInState(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb); static CYTHON_INLINE void __Pyx_ErrFetchInState(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb); #if CYTHON_COMPILING_IN_CPYTHON #define __Pyx_PyErr_SetNone(exc) (Py_INCREF(exc), __Pyx_ErrRestore((exc), NULL, NULL)) #else #define __Pyx_PyErr_SetNone(exc) PyErr_SetNone(exc) #endif #else #define __Pyx_PyErr_Clear() PyErr_Clear() #define __Pyx_PyErr_SetNone(exc) PyErr_SetNone(exc) #define __Pyx_ErrRestoreWithState(type, value, tb) PyErr_Restore(type, value, tb) #define __Pyx_ErrFetchWithState(type, value, tb) PyErr_Fetch(type, value, tb) #define __Pyx_ErrRestoreInState(tstate, type, value, tb) PyErr_Restore(type, value, tb) #define __Pyx_ErrFetchInState(tstate, type, value, tb) PyErr_Fetch(type, value, tb) #define __Pyx_ErrRestore(type, value, tb) PyErr_Restore(type, value, tb) #define __Pyx_ErrFetch(type, value, tb) PyErr_Fetch(type, value, tb) #endif /* GetException.proto */ #if CYTHON_FAST_THREAD_STATE #define __Pyx_GetException(type, value, tb) __Pyx__GetException(__pyx_tstate, type, value, tb) static int __Pyx__GetException(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb); #else static int __Pyx_GetException(PyObject **type, PyObject **value, PyObject **tb); #endif /* SwapException.proto */ #if CYTHON_FAST_THREAD_STATE #define __Pyx_ExceptionSwap(type, value, tb) __Pyx__ExceptionSwap(__pyx_tstate, type, value, tb) static CYTHON_INLINE void __Pyx__ExceptionSwap(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb); #else static CYTHON_INLINE void __Pyx_ExceptionSwap(PyObject **type, PyObject **value, PyObject **tb); #endif /* GetTopmostException.proto */ #if CYTHON_USE_EXC_INFO_STACK static _PyErr_StackItem * __Pyx_PyErr_GetTopmostException(PyThreadState *tstate); #endif /* SaveResetException.proto */ #if CYTHON_FAST_THREAD_STATE #define __Pyx_ExceptionSave(type, value, tb) __Pyx__ExceptionSave(__pyx_tstate, type, value, tb) static CYTHON_INLINE void __Pyx__ExceptionSave(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb); #define 
__Pyx_ExceptionReset(type, value, tb) __Pyx__ExceptionReset(__pyx_tstate, type, value, tb) static CYTHON_INLINE void __Pyx__ExceptionReset(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb); #else #define __Pyx_ExceptionSave(type, value, tb) PyErr_GetExcInfo(type, value, tb) #define __Pyx_ExceptionReset(type, value, tb) PyErr_SetExcInfo(type, value, tb) #endif /* PyObjectCall.proto */ #if CYTHON_COMPILING_IN_CPYTHON static CYTHON_INLINE PyObject* __Pyx_PyObject_Call(PyObject *func, PyObject *arg, PyObject *kw); #else #define __Pyx_PyObject_Call(func, arg, kw) PyObject_Call(func, arg, kw) #endif /* RaiseException.proto */ static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause); /* PyDictVersioning.proto */ #if CYTHON_USE_DICT_VERSIONS && CYTHON_USE_TYPE_SLOTS #define __PYX_DICT_VERSION_INIT ((PY_UINT64_T) -1) #define __PYX_GET_DICT_VERSION(dict) (((PyDictObject*)(dict))->ma_version_tag) #define __PYX_UPDATE_DICT_CACHE(dict, value, cache_var, version_var)\ (version_var) = __PYX_GET_DICT_VERSION(dict);\ (cache_var) = (value); #define __PYX_PY_DICT_LOOKUP_IF_MODIFIED(VAR, DICT, LOOKUP) {\ static PY_UINT64_T __pyx_dict_version = 0;\ static PyObject *__pyx_dict_cached_value = NULL;\ if (likely(__PYX_GET_DICT_VERSION(DICT) == __pyx_dict_version)) {\ (VAR) = __pyx_dict_cached_value;\ } else {\ (VAR) = __pyx_dict_cached_value = (LOOKUP);\ __pyx_dict_version = __PYX_GET_DICT_VERSION(DICT);\ }\ } static CYTHON_INLINE PY_UINT64_T __Pyx_get_tp_dict_version(PyObject *obj); static CYTHON_INLINE PY_UINT64_T __Pyx_get_object_dict_version(PyObject *obj); static CYTHON_INLINE int __Pyx_object_dict_version_matches(PyObject* obj, PY_UINT64_T tp_dict_version, PY_UINT64_T obj_dict_version); #else #define __PYX_GET_DICT_VERSION(dict) (0) #define __PYX_UPDATE_DICT_CACHE(dict, value, cache_var, version_var) #define __PYX_PY_DICT_LOOKUP_IF_MODIFIED(VAR, DICT, LOOKUP) (VAR) = (LOOKUP); #endif /* GetModuleGlobalName.proto */ #if CYTHON_USE_DICT_VERSIONS #define __Pyx_GetModuleGlobalName(var, name) {\ static PY_UINT64_T __pyx_dict_version = 0;\ static PyObject *__pyx_dict_cached_value = NULL;\ (var) = (likely(__pyx_dict_version == __PYX_GET_DICT_VERSION(__pyx_d))) ?\ (likely(__pyx_dict_cached_value) ? 
__Pyx_NewRef(__pyx_dict_cached_value) : __Pyx_GetBuiltinName(name)) :\ __Pyx__GetModuleGlobalName(name, &__pyx_dict_version, &__pyx_dict_cached_value);\ } #define __Pyx_GetModuleGlobalNameUncached(var, name) {\ PY_UINT64_T __pyx_dict_version;\ PyObject *__pyx_dict_cached_value;\ (var) = __Pyx__GetModuleGlobalName(name, &__pyx_dict_version, &__pyx_dict_cached_value);\ } static PyObject *__Pyx__GetModuleGlobalName(PyObject *name, PY_UINT64_T *dict_version, PyObject **dict_cached_value); #else #define __Pyx_GetModuleGlobalName(var, name) (var) = __Pyx__GetModuleGlobalName(name) #define __Pyx_GetModuleGlobalNameUncached(var, name) (var) = __Pyx__GetModuleGlobalName(name) static CYTHON_INLINE PyObject *__Pyx__GetModuleGlobalName(PyObject *name); #endif /* PyFunctionFastCall.proto */ #if CYTHON_FAST_PYCALL #define __Pyx_PyFunction_FastCall(func, args, nargs)\ __Pyx_PyFunction_FastCallDict((func), (args), (nargs), NULL) #if 1 || PY_VERSION_HEX < 0x030600B1 static PyObject *__Pyx_PyFunction_FastCallDict(PyObject *func, PyObject **args, Py_ssize_t nargs, PyObject *kwargs); #else #define __Pyx_PyFunction_FastCallDict(func, args, nargs, kwargs) _PyFunction_FastCallDict(func, args, nargs, kwargs) #endif #define __Pyx_BUILD_ASSERT_EXPR(cond)\ (sizeof(char [1 - 2*!(cond)]) - 1) #ifndef Py_MEMBER_SIZE #define Py_MEMBER_SIZE(type, member) sizeof(((type *)0)->member) #endif static size_t __pyx_pyframe_localsplus_offset = 0; #include "frameobject.h" #define __Pxy_PyFrame_Initialize_Offsets()\ ((void)__Pyx_BUILD_ASSERT_EXPR(sizeof(PyFrameObject) == offsetof(PyFrameObject, f_localsplus) + Py_MEMBER_SIZE(PyFrameObject, f_localsplus)),\ (void)(__pyx_pyframe_localsplus_offset = ((size_t)PyFrame_Type.tp_basicsize) - Py_MEMBER_SIZE(PyFrameObject, f_localsplus))) #define __Pyx_PyFrame_GetLocalsplus(frame)\ (assert(__pyx_pyframe_localsplus_offset), (PyObject **)(((char *)(frame)) + __pyx_pyframe_localsplus_offset)) #endif /* PyObjectCallMethO.proto */ #if CYTHON_COMPILING_IN_CPYTHON static CYTHON_INLINE PyObject* __Pyx_PyObject_CallMethO(PyObject *func, PyObject *arg); #endif /* PyObjectCallNoArg.proto */ #if CYTHON_COMPILING_IN_CPYTHON static CYTHON_INLINE PyObject* __Pyx_PyObject_CallNoArg(PyObject *func); #else #define __Pyx_PyObject_CallNoArg(func) __Pyx_PyObject_Call(func, __pyx_empty_tuple, NULL) #endif /* PyCFunctionFastCall.proto */ #if CYTHON_FAST_PYCCALL static CYTHON_INLINE PyObject *__Pyx_PyCFunction_FastCall(PyObject *func, PyObject **args, Py_ssize_t nargs); #else #define __Pyx_PyCFunction_FastCall(func, args, nargs) (assert(0), NULL) #endif /* PyObjectCallOneArg.proto */ static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg); /* PyObjectCall2Args.proto */ static CYTHON_UNUSED PyObject* __Pyx_PyObject_Call2Args(PyObject* function, PyObject* arg1, PyObject* arg2); /* PyObject_GenericGetAttrNoDict.proto */ #if CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP && PY_VERSION_HEX < 0x03070000 static CYTHON_INLINE PyObject* __Pyx_PyObject_GenericGetAttrNoDict(PyObject* obj, PyObject* attr_name); #else #define __Pyx_PyObject_GenericGetAttrNoDict PyObject_GenericGetAttr #endif /* PyObject_GenericGetAttr.proto */ #if CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP && PY_VERSION_HEX < 0x03070000 static PyObject* __Pyx_PyObject_GenericGetAttr(PyObject* obj, PyObject* attr_name); #else #define __Pyx_PyObject_GenericGetAttr PyObject_GenericGetAttr #endif /* PyErrExceptionMatches.proto */ #if CYTHON_FAST_THREAD_STATE #define __Pyx_PyErr_ExceptionMatches(err) 
__Pyx_PyErr_ExceptionMatchesInState(__pyx_tstate, err) static CYTHON_INLINE int __Pyx_PyErr_ExceptionMatchesInState(PyThreadState* tstate, PyObject* err); #else #define __Pyx_PyErr_ExceptionMatches(err) PyErr_ExceptionMatches(err) #endif /* PyObjectGetAttrStrNoError.proto */ static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStrNoError(PyObject* obj, PyObject* attr_name); /* SetupReduce.proto */ static int __Pyx_setup_reduce(PyObject* type_obj); /* TypeImport.proto */ #ifndef __PYX_HAVE_RT_ImportType_proto #define __PYX_HAVE_RT_ImportType_proto enum __Pyx_ImportType_CheckSize { __Pyx_ImportType_CheckSize_Error = 0, __Pyx_ImportType_CheckSize_Warn = 1, __Pyx_ImportType_CheckSize_Ignore = 2 }; static PyTypeObject *__Pyx_ImportType(PyObject* module, const char *module_name, const char *class_name, size_t size, enum __Pyx_ImportType_CheckSize check_size); #endif /* Import.proto */ static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, int level); /* ImportFrom.proto */ static PyObject* __Pyx_ImportFrom(PyObject* module, PyObject* name); /* CLineInTraceback.proto */ #ifdef CYTHON_CLINE_IN_TRACEBACK #define __Pyx_CLineForTraceback(tstate, c_line) (((CYTHON_CLINE_IN_TRACEBACK)) ? c_line : 0) #else static int __Pyx_CLineForTraceback(PyThreadState *tstate, int c_line); #endif /* CodeObjectCache.proto */ typedef struct { PyCodeObject* code_object; int code_line; } __Pyx_CodeObjectCacheEntry; struct __Pyx_CodeObjectCache { int count; int max_count; __Pyx_CodeObjectCacheEntry* entries; }; static struct __Pyx_CodeObjectCache __pyx_code_cache = {0,0,NULL}; static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line); static PyCodeObject *__pyx_find_code_object(int code_line); static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object); /* AddTraceback.proto */ static void __Pyx_AddTraceback(const char *funcname, int c_line, int py_line, const char *filename); /* CIntToPy.proto */ static CYTHON_INLINE PyObject* __Pyx_PyInt_From_int(int value); /* CIntToPy.proto */ static CYTHON_INLINE PyObject* __Pyx_PyInt_From_uint32_t(uint32_t value); /* CIntFromPy.proto */ static CYTHON_INLINE uint32_t __Pyx_PyInt_As_uint32_t(PyObject *); /* CIntFromPy.proto */ static CYTHON_INLINE unsigned PY_LONG_LONG __Pyx_PyInt_As_unsigned_PY_LONG_LONG(PyObject *); /* CIntToPy.proto */ static CYTHON_INLINE PyObject* __Pyx_PyInt_From_long(long value); /* CIntFromPy.proto */ static CYTHON_INLINE long __Pyx_PyInt_As_long(PyObject *); /* CIntFromPy.proto */ static CYTHON_INLINE int __Pyx_PyInt_As_int(PyObject *); /* FastTypeChecks.proto */ #if CYTHON_COMPILING_IN_CPYTHON #define __Pyx_TypeCheck(obj, type) __Pyx_IsSubtype(Py_TYPE(obj), (PyTypeObject *)type) static CYTHON_INLINE int __Pyx_IsSubtype(PyTypeObject *a, PyTypeObject *b); static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches(PyObject *err, PyObject *type); static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches2(PyObject *err, PyObject *type1, PyObject *type2); #else #define __Pyx_TypeCheck(obj, type) PyObject_TypeCheck(obj, (PyTypeObject *)type) #define __Pyx_PyErr_GivenExceptionMatches(err, type) PyErr_GivenExceptionMatches(err, type) #define __Pyx_PyErr_GivenExceptionMatches2(err, type1, type2) (PyErr_GivenExceptionMatches(err, type1) || PyErr_GivenExceptionMatches(err, type2)) #endif #define __Pyx_PyException_Check(obj) __Pyx_TypeCheck(obj, PyExc_Exception) /* CheckBinaryVersion.proto */ static int __Pyx_check_binary_version(void); /* InitStrings.proto */ static int 
__Pyx_InitStrings(__Pyx_StringTabEntry *t); /* Module declarations from 'libc.stdint' */ /* Module declarations from 'cpython.buffer' */ /* Module declarations from 'libc.string' */ /* Module declarations from 'libc.stdio' */ /* Module declarations from '__builtin__' */ /* Module declarations from 'cpython.type' */ static PyTypeObject *__pyx_ptype_7cpython_4type_type = 0; /* Module declarations from 'cpython' */ /* Module declarations from 'cpython.object' */ /* Module declarations from 'cpython.bytes' */ /* Module declarations from 'borg.algorithms.checksums' */ static PyTypeObject *__pyx_ptype_4borg_10algorithms_9checksums_StreamingXXH64 = 0; static Py_buffer __pyx_f_4borg_10algorithms_9checksums_ro_buffer(PyObject *); /*proto*/ #define __Pyx_MODULE_NAME "borg.algorithms.checksums" extern int __pyx_module_is_main_borg__algorithms__checksums; int __pyx_module_is_main_borg__algorithms__checksums = 0; /* Implementation of 'borg.algorithms.checksums' */ static PyObject *__pyx_builtin_TypeError; static const char __pyx_k_val[] = "val"; static const char __pyx_k_data[] = "data"; static const char __pyx_k_hash[] = "hash"; static const char __pyx_k_main[] = "__main__"; static const char __pyx_k_name[] = "__name__"; static const char __pyx_k_seed[] = "seed"; static const char __pyx_k_test[] = "__test__"; static const char __pyx_k_crc32[] = "crc32"; static const char __pyx_k_value[] = "value"; static const char __pyx_k_xxh64[] = "xxh64"; static const char __pyx_k_digest[] = "digest"; static const char __pyx_k_import[] = "__import__"; static const char __pyx_k_reduce[] = "__reduce__"; static const char __pyx_k_seed_2[] = "_seed"; static const char __pyx_k_helpers[] = "helpers"; static const char __pyx_k_data_buf[] = "data_buf"; static const char __pyx_k_getstate[] = "__getstate__"; static const char __pyx_k_setstate[] = "__setstate__"; static const char __pyx_k_TypeError[] = "TypeError"; static const char __pyx_k_reduce_ex[] = "__reduce_ex__"; static const char __pyx_k_bin_to_hex[] = "bin_to_hex"; static const char __pyx_k_have_clmul[] = "have_clmul"; static const char __pyx_k_crc32_clmul[] = "crc32_clmul"; static const char __pyx_k_reduce_cython[] = "__reduce_cython__"; static const char __pyx_k_StreamingXXH64[] = "StreamingXXH64"; static const char __pyx_k_setstate_cython[] = "__setstate_cython__"; static const char __pyx_k_crc32_slice_by_8[] = "crc32_slice_by_8"; static const char __pyx_k_XXH64_reset_failed[] = "XXH64_reset failed"; static const char __pyx_k_cline_in_traceback[] = "cline_in_traceback"; static const char __pyx_k_XXH64_update_failed[] = "XXH64_update failed"; static const char __pyx_k_borg_algorithms_checksums[] = "borg.algorithms.checksums"; static const char __pyx_k_no_default___reduce___due_to_non[] = "no default __reduce__ due to non-trivial __cinit__"; static const char __pyx_k_src_borg_algorithms_checksums_py[] = "src/borg/algorithms/checksums.pyx"; static PyObject *__pyx_n_s_StreamingXXH64; static PyObject *__pyx_n_s_TypeError; static PyObject *__pyx_kp_u_XXH64_reset_failed; static PyObject *__pyx_kp_u_XXH64_update_failed; static PyObject *__pyx_n_s_bin_to_hex; static PyObject *__pyx_n_s_borg_algorithms_checksums; static PyObject *__pyx_n_s_cline_in_traceback; static PyObject *__pyx_n_s_crc32; static PyObject *__pyx_n_s_crc32_clmul; static PyObject *__pyx_n_s_crc32_slice_by_8; static PyObject *__pyx_n_s_data; static PyObject *__pyx_n_s_data_buf; static PyObject *__pyx_n_s_digest; static PyObject *__pyx_n_s_getstate; static PyObject *__pyx_n_s_hash; static PyObject 
*__pyx_n_s_have_clmul; static PyObject *__pyx_n_s_helpers; static PyObject *__pyx_n_s_import; static PyObject *__pyx_n_s_main; static PyObject *__pyx_n_s_name; static PyObject *__pyx_kp_s_no_default___reduce___due_to_non; static PyObject *__pyx_n_s_reduce; static PyObject *__pyx_n_s_reduce_cython; static PyObject *__pyx_n_s_reduce_ex; static PyObject *__pyx_n_s_seed; static PyObject *__pyx_n_s_seed_2; static PyObject *__pyx_n_s_setstate; static PyObject *__pyx_n_s_setstate_cython; static PyObject *__pyx_kp_s_src_borg_algorithms_checksums_py; static PyObject *__pyx_n_s_test; static PyObject *__pyx_n_s_val; static PyObject *__pyx_n_s_value; static PyObject *__pyx_n_s_xxh64; static PyObject *__pyx_pf_4borg_10algorithms_9checksums_crc32_slice_by_8(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_data, PyObject *__pyx_v_value); /* proto */ static PyObject *__pyx_pf_4borg_10algorithms_9checksums_2crc32_clmul(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_data, PyObject *__pyx_v_value); /* proto */ static PyObject *__pyx_pf_4borg_10algorithms_9checksums_4xxh64(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_data, PyObject *__pyx_v_seed); /* proto */ static int __pyx_pf_4borg_10algorithms_9checksums_14StreamingXXH64___cinit__(struct __pyx_obj_4borg_10algorithms_9checksums_StreamingXXH64 *__pyx_v_self, PyObject *__pyx_v_seed); /* proto */ static PyObject *__pyx_pf_4borg_10algorithms_9checksums_14StreamingXXH64_2update(struct __pyx_obj_4borg_10algorithms_9checksums_StreamingXXH64 *__pyx_v_self, PyObject *__pyx_v_data); /* proto */ static PyObject *__pyx_pf_4borg_10algorithms_9checksums_14StreamingXXH64_4digest(struct __pyx_obj_4borg_10algorithms_9checksums_StreamingXXH64 *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_4borg_10algorithms_9checksums_14StreamingXXH64_6hexdigest(struct __pyx_obj_4borg_10algorithms_9checksums_StreamingXXH64 *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_4borg_10algorithms_9checksums_14StreamingXXH64_8__reduce_cython__(CYTHON_UNUSED struct __pyx_obj_4borg_10algorithms_9checksums_StreamingXXH64 *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_4borg_10algorithms_9checksums_14StreamingXXH64_10__setstate_cython__(CYTHON_UNUSED struct __pyx_obj_4borg_10algorithms_9checksums_StreamingXXH64 *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state); /* proto */ static PyObject *__pyx_tp_new_4borg_10algorithms_9checksums_StreamingXXH64(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/ static PyObject *__pyx_int_0; static PyObject *__pyx_tuple_; static PyObject *__pyx_tuple__2; static PyObject *__pyx_tuple__3; static PyObject *__pyx_tuple__4; static PyObject *__pyx_tuple__5; static PyObject *__pyx_tuple__7; static PyObject *__pyx_tuple__9; static PyObject *__pyx_codeobj__6; static PyObject *__pyx_codeobj__8; static PyObject *__pyx_codeobj__10; /* Late includes */ /* "borg/algorithms/checksums.pyx":40 * * * cdef Py_buffer ro_buffer(object data) except *: # <<<<<<<<<<<<<< * cdef Py_buffer view * PyObject_GetBuffer(data, &view, PyBUF_SIMPLE) */ static Py_buffer __pyx_f_4borg_10algorithms_9checksums_ro_buffer(PyObject *__pyx_v_data) { Py_buffer __pyx_v_view; Py_buffer __pyx_r; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("ro_buffer", 0); /* "borg/algorithms/checksums.pyx":42 * cdef Py_buffer ro_buffer(object data) except *: * cdef Py_buffer view * PyObject_GetBuffer(data, &view, PyBUF_SIMPLE) # <<<<<<<<<<<<<< * return view * */ __pyx_t_1 = 
PyObject_GetBuffer(__pyx_v_data, (&__pyx_v_view), PyBUF_SIMPLE); if (unlikely(__pyx_t_1 == ((int)-1))) __PYX_ERR(1, 42, __pyx_L1_error) /* "borg/algorithms/checksums.pyx":43 * cdef Py_buffer view * PyObject_GetBuffer(data, &view, PyBUF_SIMPLE) * return view # <<<<<<<<<<<<<< * * */ __pyx_r = __pyx_v_view; goto __pyx_L0; /* "borg/algorithms/checksums.pyx":40 * * * cdef Py_buffer ro_buffer(object data) except *: # <<<<<<<<<<<<<< * cdef Py_buffer view * PyObject_GetBuffer(data, &view, PyBUF_SIMPLE) */ /* function exit code */ __pyx_L1_error:; __Pyx_AddTraceback("borg.algorithms.checksums.ro_buffer", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_pretend_to_initialize(&__pyx_r); __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "borg/algorithms/checksums.pyx":46 * * * def crc32_slice_by_8(data, value=0): # <<<<<<<<<<<<<< * cdef Py_buffer data_buf = ro_buffer(data) * cdef uint32_t val = value */ /* Python wrapper */ static PyObject *__pyx_pw_4borg_10algorithms_9checksums_1crc32_slice_by_8(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static PyMethodDef __pyx_mdef_4borg_10algorithms_9checksums_1crc32_slice_by_8 = {"crc32_slice_by_8", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_4borg_10algorithms_9checksums_1crc32_slice_by_8, METH_VARARGS|METH_KEYWORDS, 0}; static PyObject *__pyx_pw_4borg_10algorithms_9checksums_1crc32_slice_by_8(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyObject *__pyx_v_data = 0; PyObject *__pyx_v_value = 0; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("crc32_slice_by_8 (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_data,&__pyx_n_s_value,0}; PyObject* values[2] = {0,0}; values[1] = ((PyObject *)__pyx_int_0); if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); CYTHON_FALLTHROUGH; case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_data)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: if (kw_args > 0) { PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_value); if (value) { values[1] = value; kw_args--; } } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "crc32_slice_by_8") < 0)) __PYX_ERR(1, 46, __pyx_L3_error) } } else { switch (PyTuple_GET_SIZE(__pyx_args)) { case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); CYTHON_FALLTHROUGH; case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); break; default: goto __pyx_L5_argtuple_error; } } __pyx_v_data = values[0]; __pyx_v_value = values[1]; } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("crc32_slice_by_8", 0, 1, 2, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(1, 46, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("borg.algorithms.checksums.crc32_slice_by_8", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_4borg_10algorithms_9checksums_crc32_slice_by_8(__pyx_self, __pyx_v_data, __pyx_v_value); /* function exit code */ 
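/* Descriptive note: this is the CPython-visible wrapper that Cython emits for the
   Python-level signature `crc32_slice_by_8(data, value=0)` quoted above from
   src/borg/algorithms/checksums.pyx:46. It unpacks positional and keyword arguments,
   substitutes __pyx_int_0 when `value` is omitted, and delegates both objects to the
   __pyx_pf_* implementation function. */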
__Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4borg_10algorithms_9checksums_crc32_slice_by_8(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_data, PyObject *__pyx_v_value) { Py_buffer __pyx_v_data_buf; uint32_t __pyx_v_val; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations Py_buffer __pyx_t_1; uint32_t __pyx_t_2; PyObject *__pyx_t_3 = NULL; int __pyx_t_4; int __pyx_t_5; char const *__pyx_t_6; PyObject *__pyx_t_7 = NULL; PyObject *__pyx_t_8 = NULL; PyObject *__pyx_t_9 = NULL; PyObject *__pyx_t_10 = NULL; PyObject *__pyx_t_11 = NULL; PyObject *__pyx_t_12 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("crc32_slice_by_8", 0); /* "borg/algorithms/checksums.pyx":47 * * def crc32_slice_by_8(data, value=0): * cdef Py_buffer data_buf = ro_buffer(data) # <<<<<<<<<<<<<< * cdef uint32_t val = value * try: */ __pyx_t_1 = __pyx_f_4borg_10algorithms_9checksums_ro_buffer(__pyx_v_data); if (unlikely(PyErr_Occurred())) __PYX_ERR(1, 47, __pyx_L1_error) __pyx_v_data_buf = __pyx_t_1; /* "borg/algorithms/checksums.pyx":48 * def crc32_slice_by_8(data, value=0): * cdef Py_buffer data_buf = ro_buffer(data) * cdef uint32_t val = value # <<<<<<<<<<<<<< * try: * return _crc32_slice_by_8(data_buf.buf, data_buf.len, val) */ __pyx_t_2 = __Pyx_PyInt_As_uint32_t(__pyx_v_value); if (unlikely((__pyx_t_2 == ((uint32_t)-1)) && PyErr_Occurred())) __PYX_ERR(1, 48, __pyx_L1_error) __pyx_v_val = __pyx_t_2; /* "borg/algorithms/checksums.pyx":49 * cdef Py_buffer data_buf = ro_buffer(data) * cdef uint32_t val = value * try: # <<<<<<<<<<<<<< * return _crc32_slice_by_8(data_buf.buf, data_buf.len, val) * finally: */ /*try:*/ { /* "borg/algorithms/checksums.pyx":50 * cdef uint32_t val = value * try: * return _crc32_slice_by_8(data_buf.buf, data_buf.len, val) # <<<<<<<<<<<<<< * finally: * PyBuffer_Release(&data_buf) */ __Pyx_XDECREF(__pyx_r); __pyx_t_3 = __Pyx_PyInt_From_uint32_t(crc32_slice_by_8(__pyx_v_data_buf.buf, __pyx_v_data_buf.len, __pyx_v_val)); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 50, __pyx_L4_error) __Pyx_GOTREF(__pyx_t_3); __pyx_r = __pyx_t_3; __pyx_t_3 = 0; goto __pyx_L3_return; } /* "borg/algorithms/checksums.pyx":52 * return _crc32_slice_by_8(data_buf.buf, data_buf.len, val) * finally: * PyBuffer_Release(&data_buf) # <<<<<<<<<<<<<< * * */ /*finally:*/ { __pyx_L4_error:; /*exception exit:*/{ __Pyx_PyThreadState_declare __Pyx_PyThreadState_assign __pyx_t_7 = 0; __pyx_t_8 = 0; __pyx_t_9 = 0; __pyx_t_10 = 0; __pyx_t_11 = 0; __pyx_t_12 = 0; __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; if (PY_MAJOR_VERSION >= 3) __Pyx_ExceptionSwap(&__pyx_t_10, &__pyx_t_11, &__pyx_t_12); if ((PY_MAJOR_VERSION < 3) || unlikely(__Pyx_GetException(&__pyx_t_7, &__pyx_t_8, &__pyx_t_9) < 0)) __Pyx_ErrFetch(&__pyx_t_7, &__pyx_t_8, &__pyx_t_9); __Pyx_XGOTREF(__pyx_t_7); __Pyx_XGOTREF(__pyx_t_8); __Pyx_XGOTREF(__pyx_t_9); __Pyx_XGOTREF(__pyx_t_10); __Pyx_XGOTREF(__pyx_t_11); __Pyx_XGOTREF(__pyx_t_12); __pyx_t_4 = __pyx_lineno; __pyx_t_5 = __pyx_clineno; __pyx_t_6 = __pyx_filename; { PyBuffer_Release((&__pyx_v_data_buf)); } if (PY_MAJOR_VERSION >= 3) { __Pyx_XGIVEREF(__pyx_t_10); __Pyx_XGIVEREF(__pyx_t_11); __Pyx_XGIVEREF(__pyx_t_12); __Pyx_ExceptionReset(__pyx_t_10, __pyx_t_11, __pyx_t_12); } __Pyx_XGIVEREF(__pyx_t_7); __Pyx_XGIVEREF(__pyx_t_8); __Pyx_XGIVEREF(__pyx_t_9); __Pyx_ErrRestore(__pyx_t_7, __pyx_t_8, __pyx_t_9); __pyx_t_7 = 0; __pyx_t_8 = 0; __pyx_t_9 = 0; __pyx_t_10 = 0; __pyx_t_11 = 0; __pyx_t_12 = 0; __pyx_lineno = __pyx_t_4; __pyx_clineno 
= __pyx_t_5; __pyx_filename = __pyx_t_6; goto __pyx_L1_error; } __pyx_L3_return: { __pyx_t_12 = __pyx_r; __pyx_r = 0; PyBuffer_Release((&__pyx_v_data_buf)); __pyx_r = __pyx_t_12; __pyx_t_12 = 0; goto __pyx_L0; } } /* "borg/algorithms/checksums.pyx":46 * * * def crc32_slice_by_8(data, value=0): # <<<<<<<<<<<<<< * cdef Py_buffer data_buf = ro_buffer(data) * cdef uint32_t val = value */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("borg.algorithms.checksums.crc32_slice_by_8", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "borg/algorithms/checksums.pyx":55 * * * def crc32_clmul(data, value=0): # <<<<<<<<<<<<<< * cdef Py_buffer data_buf = ro_buffer(data) * cdef uint32_t val = value */ /* Python wrapper */ static PyObject *__pyx_pw_4borg_10algorithms_9checksums_3crc32_clmul(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static PyMethodDef __pyx_mdef_4borg_10algorithms_9checksums_3crc32_clmul = {"crc32_clmul", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_4borg_10algorithms_9checksums_3crc32_clmul, METH_VARARGS|METH_KEYWORDS, 0}; static PyObject *__pyx_pw_4borg_10algorithms_9checksums_3crc32_clmul(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyObject *__pyx_v_data = 0; PyObject *__pyx_v_value = 0; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("crc32_clmul (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_data,&__pyx_n_s_value,0}; PyObject* values[2] = {0,0}; values[1] = ((PyObject *)__pyx_int_0); if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); CYTHON_FALLTHROUGH; case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_data)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: if (kw_args > 0) { PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_value); if (value) { values[1] = value; kw_args--; } } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "crc32_clmul") < 0)) __PYX_ERR(1, 55, __pyx_L3_error) } } else { switch (PyTuple_GET_SIZE(__pyx_args)) { case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); CYTHON_FALLTHROUGH; case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); break; default: goto __pyx_L5_argtuple_error; } } __pyx_v_data = values[0]; __pyx_v_value = values[1]; } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("crc32_clmul", 0, 1, 2, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(1, 55, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("borg.algorithms.checksums.crc32_clmul", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_4borg_10algorithms_9checksums_2crc32_clmul(__pyx_self, __pyx_v_data, __pyx_v_value); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4borg_10algorithms_9checksums_2crc32_clmul(CYTHON_UNUSED PyObject 
*__pyx_self, PyObject *__pyx_v_data, PyObject *__pyx_v_value) { Py_buffer __pyx_v_data_buf; uint32_t __pyx_v_val; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations Py_buffer __pyx_t_1; uint32_t __pyx_t_2; PyObject *__pyx_t_3 = NULL; int __pyx_t_4; int __pyx_t_5; char const *__pyx_t_6; PyObject *__pyx_t_7 = NULL; PyObject *__pyx_t_8 = NULL; PyObject *__pyx_t_9 = NULL; PyObject *__pyx_t_10 = NULL; PyObject *__pyx_t_11 = NULL; PyObject *__pyx_t_12 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("crc32_clmul", 0); /* "borg/algorithms/checksums.pyx":56 * * def crc32_clmul(data, value=0): * cdef Py_buffer data_buf = ro_buffer(data) # <<<<<<<<<<<<<< * cdef uint32_t val = value * try: */ __pyx_t_1 = __pyx_f_4borg_10algorithms_9checksums_ro_buffer(__pyx_v_data); if (unlikely(PyErr_Occurred())) __PYX_ERR(1, 56, __pyx_L1_error) __pyx_v_data_buf = __pyx_t_1; /* "borg/algorithms/checksums.pyx":57 * def crc32_clmul(data, value=0): * cdef Py_buffer data_buf = ro_buffer(data) * cdef uint32_t val = value # <<<<<<<<<<<<<< * try: * return _crc32_clmul(data_buf.buf, data_buf.len, val) */ __pyx_t_2 = __Pyx_PyInt_As_uint32_t(__pyx_v_value); if (unlikely((__pyx_t_2 == ((uint32_t)-1)) && PyErr_Occurred())) __PYX_ERR(1, 57, __pyx_L1_error) __pyx_v_val = __pyx_t_2; /* "borg/algorithms/checksums.pyx":58 * cdef Py_buffer data_buf = ro_buffer(data) * cdef uint32_t val = value * try: # <<<<<<<<<<<<<< * return _crc32_clmul(data_buf.buf, data_buf.len, val) * finally: */ /*try:*/ { /* "borg/algorithms/checksums.pyx":59 * cdef uint32_t val = value * try: * return _crc32_clmul(data_buf.buf, data_buf.len, val) # <<<<<<<<<<<<<< * finally: * PyBuffer_Release(&data_buf) */ __Pyx_XDECREF(__pyx_r); __pyx_t_3 = __Pyx_PyInt_From_uint32_t(crc32_clmul(__pyx_v_data_buf.buf, __pyx_v_data_buf.len, __pyx_v_val)); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 59, __pyx_L4_error) __Pyx_GOTREF(__pyx_t_3); __pyx_r = __pyx_t_3; __pyx_t_3 = 0; goto __pyx_L3_return; } /* "borg/algorithms/checksums.pyx":61 * return _crc32_clmul(data_buf.buf, data_buf.len, val) * finally: * PyBuffer_Release(&data_buf) # <<<<<<<<<<<<<< * * */ /*finally:*/ { __pyx_L4_error:; /*exception exit:*/{ __Pyx_PyThreadState_declare __Pyx_PyThreadState_assign __pyx_t_7 = 0; __pyx_t_8 = 0; __pyx_t_9 = 0; __pyx_t_10 = 0; __pyx_t_11 = 0; __pyx_t_12 = 0; __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; if (PY_MAJOR_VERSION >= 3) __Pyx_ExceptionSwap(&__pyx_t_10, &__pyx_t_11, &__pyx_t_12); if ((PY_MAJOR_VERSION < 3) || unlikely(__Pyx_GetException(&__pyx_t_7, &__pyx_t_8, &__pyx_t_9) < 0)) __Pyx_ErrFetch(&__pyx_t_7, &__pyx_t_8, &__pyx_t_9); __Pyx_XGOTREF(__pyx_t_7); __Pyx_XGOTREF(__pyx_t_8); __Pyx_XGOTREF(__pyx_t_9); __Pyx_XGOTREF(__pyx_t_10); __Pyx_XGOTREF(__pyx_t_11); __Pyx_XGOTREF(__pyx_t_12); __pyx_t_4 = __pyx_lineno; __pyx_t_5 = __pyx_clineno; __pyx_t_6 = __pyx_filename; { PyBuffer_Release((&__pyx_v_data_buf)); } if (PY_MAJOR_VERSION >= 3) { __Pyx_XGIVEREF(__pyx_t_10); __Pyx_XGIVEREF(__pyx_t_11); __Pyx_XGIVEREF(__pyx_t_12); __Pyx_ExceptionReset(__pyx_t_10, __pyx_t_11, __pyx_t_12); } __Pyx_XGIVEREF(__pyx_t_7); __Pyx_XGIVEREF(__pyx_t_8); __Pyx_XGIVEREF(__pyx_t_9); __Pyx_ErrRestore(__pyx_t_7, __pyx_t_8, __pyx_t_9); __pyx_t_7 = 0; __pyx_t_8 = 0; __pyx_t_9 = 0; __pyx_t_10 = 0; __pyx_t_11 = 0; __pyx_t_12 = 0; __pyx_lineno = __pyx_t_4; __pyx_clineno = __pyx_t_5; __pyx_filename = __pyx_t_6; goto __pyx_L1_error; } __pyx_L3_return: { __pyx_t_12 = __pyx_r; __pyx_r = 0; PyBuffer_Release((&__pyx_v_data_buf)); __pyx_r = __pyx_t_12; 
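/* Descriptive note: the __pyx_L4_error / __pyx_L3_return labels above are Cython's
   lowering of the `try: ... finally: PyBuffer_Release(&data_buf)` block shown in the
   quoted checksums.pyx source. On the error path the pending exception and source
   position are stashed in temporaries, the Py_buffer is released, and the exception is
   restored before jumping to __pyx_L1_error; on the normal path the return value is
   parked in a temporary while the buffer is released. */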
__pyx_t_12 = 0; goto __pyx_L0; } } /* "borg/algorithms/checksums.pyx":55 * * * def crc32_clmul(data, value=0): # <<<<<<<<<<<<<< * cdef Py_buffer data_buf = ro_buffer(data) * cdef uint32_t val = value */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("borg.algorithms.checksums.crc32_clmul", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "borg/algorithms/checksums.pyx":71 * * * def xxh64(data, seed=0): # <<<<<<<<<<<<<< * cdef unsigned long long _seed = seed * cdef XXH64_hash_t hash */ /* Python wrapper */ static PyObject *__pyx_pw_4borg_10algorithms_9checksums_5xxh64(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static PyMethodDef __pyx_mdef_4borg_10algorithms_9checksums_5xxh64 = {"xxh64", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_4borg_10algorithms_9checksums_5xxh64, METH_VARARGS|METH_KEYWORDS, 0}; static PyObject *__pyx_pw_4borg_10algorithms_9checksums_5xxh64(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyObject *__pyx_v_data = 0; PyObject *__pyx_v_seed = 0; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("xxh64 (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_data,&__pyx_n_s_seed,0}; PyObject* values[2] = {0,0}; values[1] = ((PyObject *)__pyx_int_0); if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); CYTHON_FALLTHROUGH; case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_data)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: if (kw_args > 0) { PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_seed); if (value) { values[1] = value; kw_args--; } } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "xxh64") < 0)) __PYX_ERR(1, 71, __pyx_L3_error) } } else { switch (PyTuple_GET_SIZE(__pyx_args)) { case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); CYTHON_FALLTHROUGH; case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); break; default: goto __pyx_L5_argtuple_error; } } __pyx_v_data = values[0]; __pyx_v_seed = values[1]; } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("xxh64", 0, 1, 2, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(1, 71, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("borg.algorithms.checksums.xxh64", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_4borg_10algorithms_9checksums_4xxh64(__pyx_self, __pyx_v_data, __pyx_v_seed); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4borg_10algorithms_9checksums_4xxh64(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_data, PyObject *__pyx_v_seed) { unsigned PY_LONG_LONG __pyx_v__seed; XXH64_hash_t __pyx_v_hash; XXH64_canonical_t __pyx_v_digest; Py_buffer __pyx_v_data_buf; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations unsigned PY_LONG_LONG __pyx_t_1; 
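/* Descriptive note: the implementation below mirrors checksums.pyx:71-81. The seed is
   converted to an unsigned 64-bit integer, the whole buffer is hashed with the one-shot
   XXH64() call (the buffer is released in the generated finally path), and the result is
   converted with XXH64_canonicalFromHash() so the Python caller receives the 8-byte
   canonical (big-endian) digest as a bytes object. */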
Py_buffer __pyx_t_2; PyObject *__pyx_t_3 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("xxh64", 0); /* "borg/algorithms/checksums.pyx":72 * * def xxh64(data, seed=0): * cdef unsigned long long _seed = seed # <<<<<<<<<<<<<< * cdef XXH64_hash_t hash * cdef XXH64_canonical_t digest */ __pyx_t_1 = __Pyx_PyInt_As_unsigned_PY_LONG_LONG(__pyx_v_seed); if (unlikely((__pyx_t_1 == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(1, 72, __pyx_L1_error) __pyx_v__seed = __pyx_t_1; /* "borg/algorithms/checksums.pyx":75 * cdef XXH64_hash_t hash * cdef XXH64_canonical_t digest * cdef Py_buffer data_buf = ro_buffer(data) # <<<<<<<<<<<<<< * try: * hash = XXH64(data_buf.buf, data_buf.len, _seed) */ __pyx_t_2 = __pyx_f_4borg_10algorithms_9checksums_ro_buffer(__pyx_v_data); if (unlikely(PyErr_Occurred())) __PYX_ERR(1, 75, __pyx_L1_error) __pyx_v_data_buf = __pyx_t_2; /* "borg/algorithms/checksums.pyx":76 * cdef XXH64_canonical_t digest * cdef Py_buffer data_buf = ro_buffer(data) * try: # <<<<<<<<<<<<<< * hash = XXH64(data_buf.buf, data_buf.len, _seed) * finally: */ /*try:*/ { /* "borg/algorithms/checksums.pyx":77 * cdef Py_buffer data_buf = ro_buffer(data) * try: * hash = XXH64(data_buf.buf, data_buf.len, _seed) # <<<<<<<<<<<<<< * finally: * PyBuffer_Release(&data_buf) */ __pyx_v_hash = XXH64(__pyx_v_data_buf.buf, __pyx_v_data_buf.len, __pyx_v__seed); } /* "borg/algorithms/checksums.pyx":79 * hash = XXH64(data_buf.buf, data_buf.len, _seed) * finally: * PyBuffer_Release(&data_buf) # <<<<<<<<<<<<<< * XXH64_canonicalFromHash(&digest, hash) * return PyBytes_FromStringAndSize( digest.digest, 8) */ /*finally:*/ { /*normal exit:*/{ PyBuffer_Release((&__pyx_v_data_buf)); goto __pyx_L5; } __pyx_L5:; } /* "borg/algorithms/checksums.pyx":80 * finally: * PyBuffer_Release(&data_buf) * XXH64_canonicalFromHash(&digest, hash) # <<<<<<<<<<<<<< * return PyBytes_FromStringAndSize( digest.digest, 8) * */ XXH64_canonicalFromHash((&__pyx_v_digest), __pyx_v_hash); /* "borg/algorithms/checksums.pyx":81 * PyBuffer_Release(&data_buf) * XXH64_canonicalFromHash(&digest, hash) * return PyBytes_FromStringAndSize( digest.digest, 8) # <<<<<<<<<<<<<< * * */ __Pyx_XDECREF(__pyx_r); __pyx_t_3 = PyBytes_FromStringAndSize(((char const *)__pyx_v_digest.digest), 8); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 81, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_r = __pyx_t_3; __pyx_t_3 = 0; goto __pyx_L0; /* "borg/algorithms/checksums.pyx":71 * * * def xxh64(data, seed=0): # <<<<<<<<<<<<<< * cdef unsigned long long _seed = seed * cdef XXH64_hash_t hash */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("borg.algorithms.checksums.xxh64", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "borg/algorithms/checksums.pyx":87 * cdef XXH64_state_t state * * def __cinit__(self, seed=0): # <<<<<<<<<<<<<< * cdef unsigned long long _seed = seed * if XXH64_reset(&self.state, _seed) != XXH_OK: */ /* Python wrapper */ static int __pyx_pw_4borg_10algorithms_9checksums_14StreamingXXH64_1__cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static int __pyx_pw_4borg_10algorithms_9checksums_14StreamingXXH64_1__cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyObject *__pyx_v_seed = 0; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; int __pyx_r; 
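/* Illustrative usage sketch (not part of the generated code). It assumes the compiled
   extension is importable as borg.algorithms.checksums and only uses the signatures
   quoted from checksums.pyx in the comments above; crc32_clmul() is the hardware-
   accelerated variant with the same signature as crc32_slice_by_8().

       from borg.algorithms.checksums import crc32_slice_by_8, xxh64, StreamingXXH64

       data = b"chunk payload"
       crc = crc32_slice_by_8(data)   # one-shot CRC32; pass value= to continue a running CRC
       d1 = xxh64(data)               # 8-byte canonical XXH64 digest, seed defaults to 0
       h = StreamingXXH64()           # the class whose __cinit__ wrapper starts here
       h.update(data)
       d2 = h.digest()                # incremental hashing should yield the same 8 bytes
       print(h.hexdigest())
*/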
__Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__cinit__ (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_seed,0}; PyObject* values[1] = {0}; values[0] = ((PyObject *)__pyx_int_0); if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (kw_args > 0) { PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_seed); if (value) { values[0] = value; kw_args--; } } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__cinit__") < 0)) __PYX_ERR(1, 87, __pyx_L3_error) } } else { switch (PyTuple_GET_SIZE(__pyx_args)) { case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } } __pyx_v_seed = values[0]; } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("__cinit__", 0, 0, 1, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(1, 87, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("borg.algorithms.checksums.StreamingXXH64.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return -1; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_4borg_10algorithms_9checksums_14StreamingXXH64___cinit__(((struct __pyx_obj_4borg_10algorithms_9checksums_StreamingXXH64 *)__pyx_v_self), __pyx_v_seed); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static int __pyx_pf_4borg_10algorithms_9checksums_14StreamingXXH64___cinit__(struct __pyx_obj_4borg_10algorithms_9checksums_StreamingXXH64 *__pyx_v_self, PyObject *__pyx_v_seed) { unsigned PY_LONG_LONG __pyx_v__seed; int __pyx_r; __Pyx_RefNannyDeclarations unsigned PY_LONG_LONG __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__cinit__", 0); /* "borg/algorithms/checksums.pyx":88 * * def __cinit__(self, seed=0): * cdef unsigned long long _seed = seed # <<<<<<<<<<<<<< * if XXH64_reset(&self.state, _seed) != XXH_OK: * raise Exception('XXH64_reset failed') */ __pyx_t_1 = __Pyx_PyInt_As_unsigned_PY_LONG_LONG(__pyx_v_seed); if (unlikely((__pyx_t_1 == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(1, 88, __pyx_L1_error) __pyx_v__seed = __pyx_t_1; /* "borg/algorithms/checksums.pyx":89 * def __cinit__(self, seed=0): * cdef unsigned long long _seed = seed * if XXH64_reset(&self.state, _seed) != XXH_OK: # <<<<<<<<<<<<<< * raise Exception('XXH64_reset failed') * */ __pyx_t_2 = ((XXH64_reset((&__pyx_v_self->state), __pyx_v__seed) != XXH_OK) != 0); if (unlikely(__pyx_t_2)) { /* "borg/algorithms/checksums.pyx":90 * cdef unsigned long long _seed = seed * if XXH64_reset(&self.state, _seed) != XXH_OK: * raise Exception('XXH64_reset failed') # <<<<<<<<<<<<<< * * def update(self, data): */ __pyx_t_3 = __Pyx_PyObject_Call(((PyObject *)(&((PyTypeObject*)PyExc_Exception)[0])), __pyx_tuple_, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 90, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(1, 90, __pyx_L1_error) /* "borg/algorithms/checksums.pyx":89 * def __cinit__(self, seed=0): * cdef unsigned long long _seed = seed * if XXH64_reset(&self.state, _seed) != XXH_OK: 
# <<<<<<<<<<<<<< * raise Exception('XXH64_reset failed') * */ } /* "borg/algorithms/checksums.pyx":87 * cdef XXH64_state_t state * * def __cinit__(self, seed=0): # <<<<<<<<<<<<<< * cdef unsigned long long _seed = seed * if XXH64_reset(&self.state, _seed) != XXH_OK: */ /* function exit code */ __pyx_r = 0; goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("borg.algorithms.checksums.StreamingXXH64.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "borg/algorithms/checksums.pyx":92 * raise Exception('XXH64_reset failed') * * def update(self, data): # <<<<<<<<<<<<<< * cdef Py_buffer data_buf = ro_buffer(data) * try: */ /* Python wrapper */ static PyObject *__pyx_pw_4borg_10algorithms_9checksums_14StreamingXXH64_3update(PyObject *__pyx_v_self, PyObject *__pyx_v_data); /*proto*/ static PyObject *__pyx_pw_4borg_10algorithms_9checksums_14StreamingXXH64_3update(PyObject *__pyx_v_self, PyObject *__pyx_v_data) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("update (wrapper)", 0); __pyx_r = __pyx_pf_4borg_10algorithms_9checksums_14StreamingXXH64_2update(((struct __pyx_obj_4borg_10algorithms_9checksums_StreamingXXH64 *)__pyx_v_self), ((PyObject *)__pyx_v_data)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4borg_10algorithms_9checksums_14StreamingXXH64_2update(struct __pyx_obj_4borg_10algorithms_9checksums_StreamingXXH64 *__pyx_v_self, PyObject *__pyx_v_data) { Py_buffer __pyx_v_data_buf; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations Py_buffer __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; int __pyx_t_4; int __pyx_t_5; char const *__pyx_t_6; PyObject *__pyx_t_7 = NULL; PyObject *__pyx_t_8 = NULL; PyObject *__pyx_t_9 = NULL; PyObject *__pyx_t_10 = NULL; PyObject *__pyx_t_11 = NULL; PyObject *__pyx_t_12 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("update", 0); /* "borg/algorithms/checksums.pyx":93 * * def update(self, data): * cdef Py_buffer data_buf = ro_buffer(data) # <<<<<<<<<<<<<< * try: * if XXH64_update(&self.state, data_buf.buf, data_buf.len) != XXH_OK: */ __pyx_t_1 = __pyx_f_4borg_10algorithms_9checksums_ro_buffer(__pyx_v_data); if (unlikely(PyErr_Occurred())) __PYX_ERR(1, 93, __pyx_L1_error) __pyx_v_data_buf = __pyx_t_1; /* "borg/algorithms/checksums.pyx":94 * def update(self, data): * cdef Py_buffer data_buf = ro_buffer(data) * try: # <<<<<<<<<<<<<< * if XXH64_update(&self.state, data_buf.buf, data_buf.len) != XXH_OK: * raise Exception('XXH64_update failed') */ /*try:*/ { /* "borg/algorithms/checksums.pyx":95 * cdef Py_buffer data_buf = ro_buffer(data) * try: * if XXH64_update(&self.state, data_buf.buf, data_buf.len) != XXH_OK: # <<<<<<<<<<<<<< * raise Exception('XXH64_update failed') * finally: */ __pyx_t_2 = ((XXH64_update((&__pyx_v_self->state), __pyx_v_data_buf.buf, __pyx_v_data_buf.len) != XXH_OK) != 0); if (unlikely(__pyx_t_2)) { /* "borg/algorithms/checksums.pyx":96 * try: * if XXH64_update(&self.state, data_buf.buf, data_buf.len) != XXH_OK: * raise Exception('XXH64_update failed') # <<<<<<<<<<<<<< * finally: * PyBuffer_Release(&data_buf) */ __pyx_t_3 = __Pyx_PyObject_Call(((PyObject *)(&((PyTypeObject*)PyExc_Exception)[0])), __pyx_tuple__2, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 96, __pyx_L4_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; 
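/* Editorial note (not Cython output): a failed XXH64_update() raises here and
 * jumps to __pyx_L4_error, so the ``finally`` clause below still calls
 * PyBuffer_Release() on the buffer before the exception propagates. */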
__PYX_ERR(1, 96, __pyx_L4_error) /* "borg/algorithms/checksums.pyx":95 * cdef Py_buffer data_buf = ro_buffer(data) * try: * if XXH64_update(&self.state, data_buf.buf, data_buf.len) != XXH_OK: # <<<<<<<<<<<<<< * raise Exception('XXH64_update failed') * finally: */ } } /* "borg/algorithms/checksums.pyx":98 * raise Exception('XXH64_update failed') * finally: * PyBuffer_Release(&data_buf) # <<<<<<<<<<<<<< * * def digest(self): */ /*finally:*/ { /*normal exit:*/{ PyBuffer_Release((&__pyx_v_data_buf)); goto __pyx_L5; } __pyx_L4_error:; /*exception exit:*/{ __Pyx_PyThreadState_declare __Pyx_PyThreadState_assign __pyx_t_7 = 0; __pyx_t_8 = 0; __pyx_t_9 = 0; __pyx_t_10 = 0; __pyx_t_11 = 0; __pyx_t_12 = 0; __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; if (PY_MAJOR_VERSION >= 3) __Pyx_ExceptionSwap(&__pyx_t_10, &__pyx_t_11, &__pyx_t_12); if ((PY_MAJOR_VERSION < 3) || unlikely(__Pyx_GetException(&__pyx_t_7, &__pyx_t_8, &__pyx_t_9) < 0)) __Pyx_ErrFetch(&__pyx_t_7, &__pyx_t_8, &__pyx_t_9); __Pyx_XGOTREF(__pyx_t_7); __Pyx_XGOTREF(__pyx_t_8); __Pyx_XGOTREF(__pyx_t_9); __Pyx_XGOTREF(__pyx_t_10); __Pyx_XGOTREF(__pyx_t_11); __Pyx_XGOTREF(__pyx_t_12); __pyx_t_4 = __pyx_lineno; __pyx_t_5 = __pyx_clineno; __pyx_t_6 = __pyx_filename; { PyBuffer_Release((&__pyx_v_data_buf)); } if (PY_MAJOR_VERSION >= 3) { __Pyx_XGIVEREF(__pyx_t_10); __Pyx_XGIVEREF(__pyx_t_11); __Pyx_XGIVEREF(__pyx_t_12); __Pyx_ExceptionReset(__pyx_t_10, __pyx_t_11, __pyx_t_12); } __Pyx_XGIVEREF(__pyx_t_7); __Pyx_XGIVEREF(__pyx_t_8); __Pyx_XGIVEREF(__pyx_t_9); __Pyx_ErrRestore(__pyx_t_7, __pyx_t_8, __pyx_t_9); __pyx_t_7 = 0; __pyx_t_8 = 0; __pyx_t_9 = 0; __pyx_t_10 = 0; __pyx_t_11 = 0; __pyx_t_12 = 0; __pyx_lineno = __pyx_t_4; __pyx_clineno = __pyx_t_5; __pyx_filename = __pyx_t_6; goto __pyx_L1_error; } __pyx_L5:; } /* "borg/algorithms/checksums.pyx":92 * raise Exception('XXH64_reset failed') * * def update(self, data): # <<<<<<<<<<<<<< * cdef Py_buffer data_buf = ro_buffer(data) * try: */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("borg.algorithms.checksums.StreamingXXH64.update", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "borg/algorithms/checksums.pyx":100 * PyBuffer_Release(&data_buf) * * def digest(self): # <<<<<<<<<<<<<< * cdef XXH64_hash_t hash * cdef XXH64_canonical_t digest */ /* Python wrapper */ static PyObject *__pyx_pw_4borg_10algorithms_9checksums_14StreamingXXH64_5digest(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ static PyObject *__pyx_pw_4borg_10algorithms_9checksums_14StreamingXXH64_5digest(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("digest (wrapper)", 0); __pyx_r = __pyx_pf_4borg_10algorithms_9checksums_14StreamingXXH64_4digest(((struct __pyx_obj_4borg_10algorithms_9checksums_StreamingXXH64 *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4borg_10algorithms_9checksums_14StreamingXXH64_4digest(struct __pyx_obj_4borg_10algorithms_9checksums_StreamingXXH64 *__pyx_v_self) { XXH64_hash_t __pyx_v_hash; XXH64_canonical_t __pyx_v_digest; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("digest", 0); /* 
"borg/algorithms/checksums.pyx":103 * cdef XXH64_hash_t hash * cdef XXH64_canonical_t digest * hash = XXH64_digest(&self.state) # <<<<<<<<<<<<<< * XXH64_canonicalFromHash(&digest, hash) * return PyBytes_FromStringAndSize( digest.digest, 8) */ __pyx_v_hash = XXH64_digest((&__pyx_v_self->state)); /* "borg/algorithms/checksums.pyx":104 * cdef XXH64_canonical_t digest * hash = XXH64_digest(&self.state) * XXH64_canonicalFromHash(&digest, hash) # <<<<<<<<<<<<<< * return PyBytes_FromStringAndSize( digest.digest, 8) * */ XXH64_canonicalFromHash((&__pyx_v_digest), __pyx_v_hash); /* "borg/algorithms/checksums.pyx":105 * hash = XXH64_digest(&self.state) * XXH64_canonicalFromHash(&digest, hash) * return PyBytes_FromStringAndSize( digest.digest, 8) # <<<<<<<<<<<<<< * * def hexdigest(self): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = PyBytes_FromStringAndSize(((char const *)__pyx_v_digest.digest), 8); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 105, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "borg/algorithms/checksums.pyx":100 * PyBuffer_Release(&data_buf) * * def digest(self): # <<<<<<<<<<<<<< * cdef XXH64_hash_t hash * cdef XXH64_canonical_t digest */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("borg.algorithms.checksums.StreamingXXH64.digest", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "borg/algorithms/checksums.pyx":107 * return PyBytes_FromStringAndSize( digest.digest, 8) * * def hexdigest(self): # <<<<<<<<<<<<<< * return bin_to_hex(self.digest()) */ /* Python wrapper */ static PyObject *__pyx_pw_4borg_10algorithms_9checksums_14StreamingXXH64_7hexdigest(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ static PyObject *__pyx_pw_4borg_10algorithms_9checksums_14StreamingXXH64_7hexdigest(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("hexdigest (wrapper)", 0); __pyx_r = __pyx_pf_4borg_10algorithms_9checksums_14StreamingXXH64_6hexdigest(((struct __pyx_obj_4borg_10algorithms_9checksums_StreamingXXH64 *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4borg_10algorithms_9checksums_14StreamingXXH64_6hexdigest(struct __pyx_obj_4borg_10algorithms_9checksums_StreamingXXH64 *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("hexdigest", 0); /* "borg/algorithms/checksums.pyx":108 * * def hexdigest(self): * return bin_to_hex(self.digest()) # <<<<<<<<<<<<<< */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_n_s_bin_to_hex); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 108, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_4 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_digest); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 108, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_5 = NULL; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_4))) { __pyx_t_5 = PyMethod_GET_SELF(__pyx_t_4); if (likely(__pyx_t_5)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_4); __Pyx_INCREF(__pyx_t_5); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_4, function); } } __pyx_t_3 
= (__pyx_t_5) ? __Pyx_PyObject_CallOneArg(__pyx_t_4, __pyx_t_5) : __Pyx_PyObject_CallNoArg(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 108, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = NULL; if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_2))) { __pyx_t_4 = PyMethod_GET_SELF(__pyx_t_2); if (likely(__pyx_t_4)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2); __Pyx_INCREF(__pyx_t_4); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_2, function); } } __pyx_t_1 = (__pyx_t_4) ? __Pyx_PyObject_Call2Args(__pyx_t_2, __pyx_t_4, __pyx_t_3) : __Pyx_PyObject_CallOneArg(__pyx_t_2, __pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 108, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "borg/algorithms/checksums.pyx":107 * return PyBytes_FromStringAndSize( digest.digest, 8) * * def hexdigest(self): # <<<<<<<<<<<<<< * return bin_to_hex(self.digest()) */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("borg.algorithms.checksums.StreamingXXH64.hexdigest", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "(tree fragment)":1 * def __reduce_cython__(self): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): */ /* Python wrapper */ static PyObject *__pyx_pw_4borg_10algorithms_9checksums_14StreamingXXH64_9__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ static PyObject *__pyx_pw_4borg_10algorithms_9checksums_14StreamingXXH64_9__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0); __pyx_r = __pyx_pf_4borg_10algorithms_9checksums_14StreamingXXH64_8__reduce_cython__(((struct __pyx_obj_4borg_10algorithms_9checksums_StreamingXXH64 *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4borg_10algorithms_9checksums_14StreamingXXH64_8__reduce_cython__(CYTHON_UNUSED struct __pyx_obj_4borg_10algorithms_9checksums_StreamingXXH64 *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__reduce_cython__", 0); /* "(tree fragment)":2 * def __reduce_cython__(self): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< * def __setstate_cython__(self, __pyx_state): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") */ __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__3, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_Raise(__pyx_t_1, 0, 0, 0); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __PYX_ERR(0, 2, __pyx_L1_error) /* "(tree fragment)":1 * def __reduce_cython__(self): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): */ /* function exit code */ 
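/* Editorial note (not Cython output): hexdigest() above returns
 * bin_to_hex(self.digest()), where digest() yields the 8-byte canonical
 * XXH64 digest.  A hedged usage sketch of the streaming interface, assuming
 * the compiled module is importable as borg.algorithms.checksums:
 *
 *     from borg.algorithms.checksums import StreamingXXH64
 *     h = StreamingXXH64(seed=0)
 *     h.update(b"first chunk")
 *     h.update(b"second chunk")
 *     print(h.hexdigest())   # 16 hex characters
 *
 * Illustration only; not part of the generated code. */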
__pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("borg.algorithms.checksums.StreamingXXH64.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "(tree fragment)":3 * def __reduce_cython__(self): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ due to non-trivial __cinit__") */ /* Python wrapper */ static PyObject *__pyx_pw_4borg_10algorithms_9checksums_14StreamingXXH64_11__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state); /*proto*/ static PyObject *__pyx_pw_4borg_10algorithms_9checksums_14StreamingXXH64_11__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0); __pyx_r = __pyx_pf_4borg_10algorithms_9checksums_14StreamingXXH64_10__setstate_cython__(((struct __pyx_obj_4borg_10algorithms_9checksums_StreamingXXH64 *)__pyx_v_self), ((PyObject *)__pyx_v___pyx_state)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4borg_10algorithms_9checksums_14StreamingXXH64_10__setstate_cython__(CYTHON_UNUSED struct __pyx_obj_4borg_10algorithms_9checksums_StreamingXXH64 *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__setstate_cython__", 0); /* "(tree fragment)":4 * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< */ __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__4, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_Raise(__pyx_t_1, 0, 0, 0); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __PYX_ERR(0, 4, __pyx_L1_error) /* "(tree fragment)":3 * def __reduce_cython__(self): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ due to non-trivial __cinit__") */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("borg.algorithms.checksums.StreamingXXH64.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_tp_new_4borg_10algorithms_9checksums_StreamingXXH64(PyTypeObject *t, PyObject *a, PyObject *k) { PyObject *o; if (likely((t->tp_flags & Py_TPFLAGS_IS_ABSTRACT) == 0)) { o = (*t->tp_alloc)(t, 0); } else { o = (PyObject *) PyBaseObject_Type.tp_new(t, __pyx_empty_tuple, 0); } if (unlikely(!o)) return 0; if (unlikely(__pyx_pw_4borg_10algorithms_9checksums_14StreamingXXH64_1__cinit__(o, a, k) < 0)) goto bad; return o; bad: Py_DECREF(o); o = 0; return NULL; } static void __pyx_tp_dealloc_4borg_10algorithms_9checksums_StreamingXXH64(PyObject *o) { #if CYTHON_USE_TP_FINALIZE if (unlikely(PyType_HasFeature(Py_TYPE(o), Py_TPFLAGS_HAVE_FINALIZE) && Py_TYPE(o)->tp_finalize) && (!PyType_IS_GC(Py_TYPE(o)) || !_PyGC_FINALIZED(o))) { if (PyObject_CallFinalizerFromDealloc(o)) return; } 
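/* Editorial note (not Cython output): when the interpreter supports
 * tp_finalize (PEP 442), the finalizer is invoked from dealloc above; if it
 * resurrects the object, deallocation stops before tp_free() is reached. */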
#endif (*Py_TYPE(o)->tp_free)(o); } static PyMethodDef __pyx_methods_4borg_10algorithms_9checksums_StreamingXXH64[] = { {"update", (PyCFunction)__pyx_pw_4borg_10algorithms_9checksums_14StreamingXXH64_3update, METH_O, 0}, {"digest", (PyCFunction)__pyx_pw_4borg_10algorithms_9checksums_14StreamingXXH64_5digest, METH_NOARGS, 0}, {"hexdigest", (PyCFunction)__pyx_pw_4borg_10algorithms_9checksums_14StreamingXXH64_7hexdigest, METH_NOARGS, 0}, {"__reduce_cython__", (PyCFunction)__pyx_pw_4borg_10algorithms_9checksums_14StreamingXXH64_9__reduce_cython__, METH_NOARGS, 0}, {"__setstate_cython__", (PyCFunction)__pyx_pw_4borg_10algorithms_9checksums_14StreamingXXH64_11__setstate_cython__, METH_O, 0}, {0, 0, 0, 0} }; static PyTypeObject __pyx_type_4borg_10algorithms_9checksums_StreamingXXH64 = { PyVarObject_HEAD_INIT(0, 0) "borg.algorithms.checksums.StreamingXXH64", /*tp_name*/ sizeof(struct __pyx_obj_4borg_10algorithms_9checksums_StreamingXXH64), /*tp_basicsize*/ 0, /*tp_itemsize*/ __pyx_tp_dealloc_4borg_10algorithms_9checksums_StreamingXXH64, /*tp_dealloc*/ #if PY_VERSION_HEX < 0x030800b4 0, /*tp_print*/ #endif #if PY_VERSION_HEX >= 0x030800b4 0, /*tp_vectorcall_offset*/ #endif 0, /*tp_getattr*/ 0, /*tp_setattr*/ #if PY_MAJOR_VERSION < 3 0, /*tp_compare*/ #endif #if PY_MAJOR_VERSION >= 3 0, /*tp_as_async*/ #endif 0, /*tp_repr*/ 0, /*tp_as_number*/ 0, /*tp_as_sequence*/ 0, /*tp_as_mapping*/ 0, /*tp_hash*/ 0, /*tp_call*/ 0, /*tp_str*/ 0, /*tp_getattro*/ 0, /*tp_setattro*/ 0, /*tp_as_buffer*/ Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE, /*tp_flags*/ 0, /*tp_doc*/ 0, /*tp_traverse*/ 0, /*tp_clear*/ 0, /*tp_richcompare*/ 0, /*tp_weaklistoffset*/ 0, /*tp_iter*/ 0, /*tp_iternext*/ __pyx_methods_4borg_10algorithms_9checksums_StreamingXXH64, /*tp_methods*/ 0, /*tp_members*/ 0, /*tp_getset*/ 0, /*tp_base*/ 0, /*tp_dict*/ 0, /*tp_descr_get*/ 0, /*tp_descr_set*/ 0, /*tp_dictoffset*/ 0, /*tp_init*/ 0, /*tp_alloc*/ __pyx_tp_new_4borg_10algorithms_9checksums_StreamingXXH64, /*tp_new*/ 0, /*tp_free*/ 0, /*tp_is_gc*/ 0, /*tp_bases*/ 0, /*tp_mro*/ 0, /*tp_cache*/ 0, /*tp_subclasses*/ 0, /*tp_weaklist*/ 0, /*tp_del*/ 0, /*tp_version_tag*/ #if PY_VERSION_HEX >= 0x030400a1 0, /*tp_finalize*/ #endif #if PY_VERSION_HEX >= 0x030800b1 0, /*tp_vectorcall*/ #endif #if PY_VERSION_HEX >= 0x030800b4 && PY_VERSION_HEX < 0x03090000 0, /*tp_print*/ #endif }; static PyMethodDef __pyx_methods[] = { {0, 0, 0, 0} }; #if PY_MAJOR_VERSION >= 3 #if CYTHON_PEP489_MULTI_PHASE_INIT static PyObject* __pyx_pymod_create(PyObject *spec, PyModuleDef *def); /*proto*/ static int __pyx_pymod_exec_checksums(PyObject* module); /*proto*/ static PyModuleDef_Slot __pyx_moduledef_slots[] = { {Py_mod_create, (void*)__pyx_pymod_create}, {Py_mod_exec, (void*)__pyx_pymod_exec_checksums}, {0, NULL} }; #endif static struct PyModuleDef __pyx_moduledef = { PyModuleDef_HEAD_INIT, "checksums", 0, /* m_doc */ #if CYTHON_PEP489_MULTI_PHASE_INIT 0, /* m_size */ #else -1, /* m_size */ #endif __pyx_methods /* m_methods */, #if CYTHON_PEP489_MULTI_PHASE_INIT __pyx_moduledef_slots, /* m_slots */ #else NULL, /* m_reload */ #endif NULL, /* m_traverse */ NULL, /* m_clear */ NULL /* m_free */ }; #endif #ifndef CYTHON_SMALL_CODE #if defined(__clang__) #define CYTHON_SMALL_CODE #elif defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 3)) #define CYTHON_SMALL_CODE __attribute__((cold)) #else #define CYTHON_SMALL_CODE #endif #endif static __Pyx_StringTabEntry __pyx_string_tab[] = { 
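/* Editorial note (not Cython output): this table enumerates the interned
 * string constants (attribute names, error messages, source path) that
 * __Pyx_InitStrings() creates once during module initialisation. */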
{&__pyx_n_s_StreamingXXH64, __pyx_k_StreamingXXH64, sizeof(__pyx_k_StreamingXXH64), 0, 0, 1, 1}, {&__pyx_n_s_TypeError, __pyx_k_TypeError, sizeof(__pyx_k_TypeError), 0, 0, 1, 1}, {&__pyx_kp_u_XXH64_reset_failed, __pyx_k_XXH64_reset_failed, sizeof(__pyx_k_XXH64_reset_failed), 0, 1, 0, 0}, {&__pyx_kp_u_XXH64_update_failed, __pyx_k_XXH64_update_failed, sizeof(__pyx_k_XXH64_update_failed), 0, 1, 0, 0}, {&__pyx_n_s_bin_to_hex, __pyx_k_bin_to_hex, sizeof(__pyx_k_bin_to_hex), 0, 0, 1, 1}, {&__pyx_n_s_borg_algorithms_checksums, __pyx_k_borg_algorithms_checksums, sizeof(__pyx_k_borg_algorithms_checksums), 0, 0, 1, 1}, {&__pyx_n_s_cline_in_traceback, __pyx_k_cline_in_traceback, sizeof(__pyx_k_cline_in_traceback), 0, 0, 1, 1}, {&__pyx_n_s_crc32, __pyx_k_crc32, sizeof(__pyx_k_crc32), 0, 0, 1, 1}, {&__pyx_n_s_crc32_clmul, __pyx_k_crc32_clmul, sizeof(__pyx_k_crc32_clmul), 0, 0, 1, 1}, {&__pyx_n_s_crc32_slice_by_8, __pyx_k_crc32_slice_by_8, sizeof(__pyx_k_crc32_slice_by_8), 0, 0, 1, 1}, {&__pyx_n_s_data, __pyx_k_data, sizeof(__pyx_k_data), 0, 0, 1, 1}, {&__pyx_n_s_data_buf, __pyx_k_data_buf, sizeof(__pyx_k_data_buf), 0, 0, 1, 1}, {&__pyx_n_s_digest, __pyx_k_digest, sizeof(__pyx_k_digest), 0, 0, 1, 1}, {&__pyx_n_s_getstate, __pyx_k_getstate, sizeof(__pyx_k_getstate), 0, 0, 1, 1}, {&__pyx_n_s_hash, __pyx_k_hash, sizeof(__pyx_k_hash), 0, 0, 1, 1}, {&__pyx_n_s_have_clmul, __pyx_k_have_clmul, sizeof(__pyx_k_have_clmul), 0, 0, 1, 1}, {&__pyx_n_s_helpers, __pyx_k_helpers, sizeof(__pyx_k_helpers), 0, 0, 1, 1}, {&__pyx_n_s_import, __pyx_k_import, sizeof(__pyx_k_import), 0, 0, 1, 1}, {&__pyx_n_s_main, __pyx_k_main, sizeof(__pyx_k_main), 0, 0, 1, 1}, {&__pyx_n_s_name, __pyx_k_name, sizeof(__pyx_k_name), 0, 0, 1, 1}, {&__pyx_kp_s_no_default___reduce___due_to_non, __pyx_k_no_default___reduce___due_to_non, sizeof(__pyx_k_no_default___reduce___due_to_non), 0, 0, 1, 0}, {&__pyx_n_s_reduce, __pyx_k_reduce, sizeof(__pyx_k_reduce), 0, 0, 1, 1}, {&__pyx_n_s_reduce_cython, __pyx_k_reduce_cython, sizeof(__pyx_k_reduce_cython), 0, 0, 1, 1}, {&__pyx_n_s_reduce_ex, __pyx_k_reduce_ex, sizeof(__pyx_k_reduce_ex), 0, 0, 1, 1}, {&__pyx_n_s_seed, __pyx_k_seed, sizeof(__pyx_k_seed), 0, 0, 1, 1}, {&__pyx_n_s_seed_2, __pyx_k_seed_2, sizeof(__pyx_k_seed_2), 0, 0, 1, 1}, {&__pyx_n_s_setstate, __pyx_k_setstate, sizeof(__pyx_k_setstate), 0, 0, 1, 1}, {&__pyx_n_s_setstate_cython, __pyx_k_setstate_cython, sizeof(__pyx_k_setstate_cython), 0, 0, 1, 1}, {&__pyx_kp_s_src_borg_algorithms_checksums_py, __pyx_k_src_borg_algorithms_checksums_py, sizeof(__pyx_k_src_borg_algorithms_checksums_py), 0, 0, 1, 0}, {&__pyx_n_s_test, __pyx_k_test, sizeof(__pyx_k_test), 0, 0, 1, 1}, {&__pyx_n_s_val, __pyx_k_val, sizeof(__pyx_k_val), 0, 0, 1, 1}, {&__pyx_n_s_value, __pyx_k_value, sizeof(__pyx_k_value), 0, 0, 1, 1}, {&__pyx_n_s_xxh64, __pyx_k_xxh64, sizeof(__pyx_k_xxh64), 0, 0, 1, 1}, {0, 0, 0, 0, 0, 0, 0} }; static CYTHON_SMALL_CODE int __Pyx_InitCachedBuiltins(void) { __pyx_builtin_TypeError = __Pyx_GetBuiltinName(__pyx_n_s_TypeError); if (!__pyx_builtin_TypeError) __PYX_ERR(0, 2, __pyx_L1_error) return 0; __pyx_L1_error:; return -1; } static CYTHON_SMALL_CODE int __Pyx_InitCachedConstants(void) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__Pyx_InitCachedConstants", 0); /* "borg/algorithms/checksums.pyx":90 * cdef unsigned long long _seed = seed * if XXH64_reset(&self.state, _seed) != XXH_OK: * raise Exception('XXH64_reset failed') # <<<<<<<<<<<<<< * * def update(self, data): */ __pyx_tuple_ = PyTuple_Pack(1, __pyx_kp_u_XXH64_reset_failed); if 
(unlikely(!__pyx_tuple_)) __PYX_ERR(1, 90, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple_); __Pyx_GIVEREF(__pyx_tuple_); /* "borg/algorithms/checksums.pyx":96 * try: * if XXH64_update(&self.state, data_buf.buf, data_buf.len) != XXH_OK: * raise Exception('XXH64_update failed') # <<<<<<<<<<<<<< * finally: * PyBuffer_Release(&data_buf) */ __pyx_tuple__2 = PyTuple_Pack(1, __pyx_kp_u_XXH64_update_failed); if (unlikely(!__pyx_tuple__2)) __PYX_ERR(1, 96, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__2); __Pyx_GIVEREF(__pyx_tuple__2); /* "(tree fragment)":2 * def __reduce_cython__(self): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< * def __setstate_cython__(self, __pyx_state): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") */ __pyx_tuple__3 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__3)) __PYX_ERR(0, 2, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__3); __Pyx_GIVEREF(__pyx_tuple__3); /* "(tree fragment)":4 * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< */ __pyx_tuple__4 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__4)) __PYX_ERR(0, 4, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__4); __Pyx_GIVEREF(__pyx_tuple__4); /* "borg/algorithms/checksums.pyx":46 * * * def crc32_slice_by_8(data, value=0): # <<<<<<<<<<<<<< * cdef Py_buffer data_buf = ro_buffer(data) * cdef uint32_t val = value */ __pyx_tuple__5 = PyTuple_Pack(4, __pyx_n_s_data, __pyx_n_s_value, __pyx_n_s_data_buf, __pyx_n_s_val); if (unlikely(!__pyx_tuple__5)) __PYX_ERR(1, 46, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__5); __Pyx_GIVEREF(__pyx_tuple__5); __pyx_codeobj__6 = (PyObject*)__Pyx_PyCode_New(2, 0, 4, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__5, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_src_borg_algorithms_checksums_py, __pyx_n_s_crc32_slice_by_8, 46, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__6)) __PYX_ERR(1, 46, __pyx_L1_error) /* "borg/algorithms/checksums.pyx":55 * * * def crc32_clmul(data, value=0): # <<<<<<<<<<<<<< * cdef Py_buffer data_buf = ro_buffer(data) * cdef uint32_t val = value */ __pyx_tuple__7 = PyTuple_Pack(4, __pyx_n_s_data, __pyx_n_s_value, __pyx_n_s_data_buf, __pyx_n_s_val); if (unlikely(!__pyx_tuple__7)) __PYX_ERR(1, 55, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__7); __Pyx_GIVEREF(__pyx_tuple__7); __pyx_codeobj__8 = (PyObject*)__Pyx_PyCode_New(2, 0, 4, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__7, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_src_borg_algorithms_checksums_py, __pyx_n_s_crc32_clmul, 55, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__8)) __PYX_ERR(1, 55, __pyx_L1_error) /* "borg/algorithms/checksums.pyx":71 * * * def xxh64(data, seed=0): # <<<<<<<<<<<<<< * cdef unsigned long long _seed = seed * cdef XXH64_hash_t hash */ __pyx_tuple__9 = PyTuple_Pack(6, __pyx_n_s_data, __pyx_n_s_seed, __pyx_n_s_seed_2, __pyx_n_s_hash, __pyx_n_s_digest, __pyx_n_s_data_buf); if (unlikely(!__pyx_tuple__9)) __PYX_ERR(1, 71, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__9); __Pyx_GIVEREF(__pyx_tuple__9); __pyx_codeobj__10 = (PyObject*)__Pyx_PyCode_New(2, 0, 6, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__9, __pyx_empty_tuple, __pyx_empty_tuple, 
__pyx_kp_s_src_borg_algorithms_checksums_py, __pyx_n_s_xxh64, 71, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__10)) __PYX_ERR(1, 71, __pyx_L1_error) __Pyx_RefNannyFinishContext(); return 0; __pyx_L1_error:; __Pyx_RefNannyFinishContext(); return -1; } static CYTHON_SMALL_CODE int __Pyx_InitGlobals(void) { if (__Pyx_InitStrings(__pyx_string_tab) < 0) __PYX_ERR(1, 1, __pyx_L1_error); __pyx_int_0 = PyInt_FromLong(0); if (unlikely(!__pyx_int_0)) __PYX_ERR(1, 1, __pyx_L1_error) return 0; __pyx_L1_error:; return -1; } static CYTHON_SMALL_CODE int __Pyx_modinit_global_init_code(void); /*proto*/ static CYTHON_SMALL_CODE int __Pyx_modinit_variable_export_code(void); /*proto*/ static CYTHON_SMALL_CODE int __Pyx_modinit_function_export_code(void); /*proto*/ static CYTHON_SMALL_CODE int __Pyx_modinit_type_init_code(void); /*proto*/ static CYTHON_SMALL_CODE int __Pyx_modinit_type_import_code(void); /*proto*/ static CYTHON_SMALL_CODE int __Pyx_modinit_variable_import_code(void); /*proto*/ static CYTHON_SMALL_CODE int __Pyx_modinit_function_import_code(void); /*proto*/ static int __Pyx_modinit_global_init_code(void) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__Pyx_modinit_global_init_code", 0); /*--- Global init code ---*/ __Pyx_RefNannyFinishContext(); return 0; } static int __Pyx_modinit_variable_export_code(void) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__Pyx_modinit_variable_export_code", 0); /*--- Variable export code ---*/ __Pyx_RefNannyFinishContext(); return 0; } static int __Pyx_modinit_function_export_code(void) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__Pyx_modinit_function_export_code", 0); /*--- Function export code ---*/ __Pyx_RefNannyFinishContext(); return 0; } static int __Pyx_modinit_type_init_code(void) { __Pyx_RefNannyDeclarations int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__Pyx_modinit_type_init_code", 0); /*--- Type init code ---*/ if (PyType_Ready(&__pyx_type_4borg_10algorithms_9checksums_StreamingXXH64) < 0) __PYX_ERR(1, 84, __pyx_L1_error) #if PY_VERSION_HEX < 0x030800B1 __pyx_type_4borg_10algorithms_9checksums_StreamingXXH64.tp_print = 0; #endif if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_type_4borg_10algorithms_9checksums_StreamingXXH64.tp_dictoffset && __pyx_type_4borg_10algorithms_9checksums_StreamingXXH64.tp_getattro == PyObject_GenericGetAttr)) { __pyx_type_4borg_10algorithms_9checksums_StreamingXXH64.tp_getattro = __Pyx_PyObject_GenericGetAttr; } if (PyObject_SetAttr(__pyx_m, __pyx_n_s_StreamingXXH64, (PyObject *)&__pyx_type_4borg_10algorithms_9checksums_StreamingXXH64) < 0) __PYX_ERR(1, 84, __pyx_L1_error) if (__Pyx_setup_reduce((PyObject*)&__pyx_type_4borg_10algorithms_9checksums_StreamingXXH64) < 0) __PYX_ERR(1, 84, __pyx_L1_error) __pyx_ptype_4borg_10algorithms_9checksums_StreamingXXH64 = &__pyx_type_4borg_10algorithms_9checksums_StreamingXXH64; __Pyx_RefNannyFinishContext(); return 0; __pyx_L1_error:; __Pyx_RefNannyFinishContext(); return -1; } static int __Pyx_modinit_type_import_code(void) { __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__Pyx_modinit_type_import_code", 0); /*--- Type import code ---*/ __pyx_t_1 = PyImport_ImportModule(__Pyx_BUILTIN_MODULE_NAME); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 9, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_ptype_7cpython_4type_type = __Pyx_ImportType(__pyx_t_1, 
__Pyx_BUILTIN_MODULE_NAME, "type", #if defined(PYPY_VERSION_NUM) && PYPY_VERSION_NUM < 0x050B0000 sizeof(PyTypeObject), #else sizeof(PyHeapTypeObject), #endif __Pyx_ImportType_CheckSize_Warn); if (!__pyx_ptype_7cpython_4type_type) __PYX_ERR(2, 9, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_RefNannyFinishContext(); return 0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_RefNannyFinishContext(); return -1; } static int __Pyx_modinit_variable_import_code(void) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__Pyx_modinit_variable_import_code", 0); /*--- Variable import code ---*/ __Pyx_RefNannyFinishContext(); return 0; } static int __Pyx_modinit_function_import_code(void) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__Pyx_modinit_function_import_code", 0); /*--- Function import code ---*/ __Pyx_RefNannyFinishContext(); return 0; } #ifndef CYTHON_NO_PYINIT_EXPORT #define __Pyx_PyMODINIT_FUNC PyMODINIT_FUNC #elif PY_MAJOR_VERSION < 3 #ifdef __cplusplus #define __Pyx_PyMODINIT_FUNC extern "C" void #else #define __Pyx_PyMODINIT_FUNC void #endif #else #ifdef __cplusplus #define __Pyx_PyMODINIT_FUNC extern "C" PyObject * #else #define __Pyx_PyMODINIT_FUNC PyObject * #endif #endif #if PY_MAJOR_VERSION < 3 __Pyx_PyMODINIT_FUNC initchecksums(void) CYTHON_SMALL_CODE; /*proto*/ __Pyx_PyMODINIT_FUNC initchecksums(void) #else __Pyx_PyMODINIT_FUNC PyInit_checksums(void) CYTHON_SMALL_CODE; /*proto*/ __Pyx_PyMODINIT_FUNC PyInit_checksums(void) #if CYTHON_PEP489_MULTI_PHASE_INIT { return PyModuleDef_Init(&__pyx_moduledef); } static CYTHON_SMALL_CODE int __Pyx_check_single_interpreter(void) { #if PY_VERSION_HEX >= 0x030700A1 static PY_INT64_T main_interpreter_id = -1; PY_INT64_T current_id = PyInterpreterState_GetID(PyThreadState_Get()->interp); if (main_interpreter_id == -1) { main_interpreter_id = current_id; return (unlikely(current_id == -1)) ? 
-1 : 0; } else if (unlikely(main_interpreter_id != current_id)) #else static PyInterpreterState *main_interpreter = NULL; PyInterpreterState *current_interpreter = PyThreadState_Get()->interp; if (!main_interpreter) { main_interpreter = current_interpreter; } else if (unlikely(main_interpreter != current_interpreter)) #endif { PyErr_SetString( PyExc_ImportError, "Interpreter change detected - this module can only be loaded into one interpreter per process."); return -1; } return 0; } static CYTHON_SMALL_CODE int __Pyx_copy_spec_to_module(PyObject *spec, PyObject *moddict, const char* from_name, const char* to_name, int allow_none) { PyObject *value = PyObject_GetAttrString(spec, from_name); int result = 0; if (likely(value)) { if (allow_none || value != Py_None) { result = PyDict_SetItemString(moddict, to_name, value); } Py_DECREF(value); } else if (PyErr_ExceptionMatches(PyExc_AttributeError)) { PyErr_Clear(); } else { result = -1; } return result; } static CYTHON_SMALL_CODE PyObject* __pyx_pymod_create(PyObject *spec, CYTHON_UNUSED PyModuleDef *def) { PyObject *module = NULL, *moddict, *modname; if (__Pyx_check_single_interpreter()) return NULL; if (__pyx_m) return __Pyx_NewRef(__pyx_m); modname = PyObject_GetAttrString(spec, "name"); if (unlikely(!modname)) goto bad; module = PyModule_NewObject(modname); Py_DECREF(modname); if (unlikely(!module)) goto bad; moddict = PyModule_GetDict(module); if (unlikely(!moddict)) goto bad; if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "loader", "__loader__", 1) < 0)) goto bad; if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "origin", "__file__", 1) < 0)) goto bad; if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "parent", "__package__", 1) < 0)) goto bad; if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "submodule_search_locations", "__path__", 0) < 0)) goto bad; return module; bad: Py_XDECREF(module); return NULL; } static CYTHON_SMALL_CODE int __pyx_pymod_exec_checksums(PyObject *__pyx_pyinit_module) #endif #endif { PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; int __pyx_t_3; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannyDeclarations #if CYTHON_PEP489_MULTI_PHASE_INIT if (__pyx_m) { if (__pyx_m == __pyx_pyinit_module) return 0; PyErr_SetString(PyExc_RuntimeError, "Module 'checksums' has already been imported. 
Re-initialisation is not supported."); return -1; } #elif PY_MAJOR_VERSION >= 3 if (__pyx_m) return __Pyx_NewRef(__pyx_m); #endif #if CYTHON_REFNANNY __Pyx_RefNanny = __Pyx_RefNannyImportAPI("refnanny"); if (!__Pyx_RefNanny) { PyErr_Clear(); __Pyx_RefNanny = __Pyx_RefNannyImportAPI("Cython.Runtime.refnanny"); if (!__Pyx_RefNanny) Py_FatalError("failed to import 'refnanny' module"); } #endif __Pyx_RefNannySetupContext("__Pyx_PyMODINIT_FUNC PyInit_checksums(void)", 0); if (__Pyx_check_binary_version() < 0) __PYX_ERR(1, 1, __pyx_L1_error) #ifdef __Pxy_PyFrame_Initialize_Offsets __Pxy_PyFrame_Initialize_Offsets(); #endif __pyx_empty_tuple = PyTuple_New(0); if (unlikely(!__pyx_empty_tuple)) __PYX_ERR(1, 1, __pyx_L1_error) __pyx_empty_bytes = PyBytes_FromStringAndSize("", 0); if (unlikely(!__pyx_empty_bytes)) __PYX_ERR(1, 1, __pyx_L1_error) __pyx_empty_unicode = PyUnicode_FromStringAndSize("", 0); if (unlikely(!__pyx_empty_unicode)) __PYX_ERR(1, 1, __pyx_L1_error) #ifdef __Pyx_CyFunction_USED if (__pyx_CyFunction_init() < 0) __PYX_ERR(1, 1, __pyx_L1_error) #endif #ifdef __Pyx_FusedFunction_USED if (__pyx_FusedFunction_init() < 0) __PYX_ERR(1, 1, __pyx_L1_error) #endif #ifdef __Pyx_Coroutine_USED if (__pyx_Coroutine_init() < 0) __PYX_ERR(1, 1, __pyx_L1_error) #endif #ifdef __Pyx_Generator_USED if (__pyx_Generator_init() < 0) __PYX_ERR(1, 1, __pyx_L1_error) #endif #ifdef __Pyx_AsyncGen_USED if (__pyx_AsyncGen_init() < 0) __PYX_ERR(1, 1, __pyx_L1_error) #endif #ifdef __Pyx_StopAsyncIteration_USED if (__pyx_StopAsyncIteration_init() < 0) __PYX_ERR(1, 1, __pyx_L1_error) #endif /*--- Library function declarations ---*/ /*--- Threads initialization code ---*/ #if defined(__PYX_FORCE_INIT_THREADS) && __PYX_FORCE_INIT_THREADS #ifdef WITH_THREAD /* Python build with threading support? */ PyEval_InitThreads(); #endif #endif /*--- Module creation code ---*/ #if CYTHON_PEP489_MULTI_PHASE_INIT __pyx_m = __pyx_pyinit_module; Py_INCREF(__pyx_m); #else #if PY_MAJOR_VERSION < 3 __pyx_m = Py_InitModule4("checksums", __pyx_methods, 0, 0, PYTHON_API_VERSION); Py_XINCREF(__pyx_m); #else __pyx_m = PyModule_Create(&__pyx_moduledef); #endif if (unlikely(!__pyx_m)) __PYX_ERR(1, 1, __pyx_L1_error) #endif __pyx_d = PyModule_GetDict(__pyx_m); if (unlikely(!__pyx_d)) __PYX_ERR(1, 1, __pyx_L1_error) Py_INCREF(__pyx_d); __pyx_b = PyImport_AddModule(__Pyx_BUILTIN_MODULE_NAME); if (unlikely(!__pyx_b)) __PYX_ERR(1, 1, __pyx_L1_error) Py_INCREF(__pyx_b); __pyx_cython_runtime = PyImport_AddModule((char *) "cython_runtime"); if (unlikely(!__pyx_cython_runtime)) __PYX_ERR(1, 1, __pyx_L1_error) Py_INCREF(__pyx_cython_runtime); if (PyObject_SetAttrString(__pyx_m, "__builtins__", __pyx_b) < 0) __PYX_ERR(1, 1, __pyx_L1_error); /*--- Initialize various global constants etc. 
---*/ if (__Pyx_InitGlobals() < 0) __PYX_ERR(1, 1, __pyx_L1_error) #if PY_MAJOR_VERSION < 3 && (__PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT) if (__Pyx_init_sys_getdefaultencoding_params() < 0) __PYX_ERR(1, 1, __pyx_L1_error) #endif if (__pyx_module_is_main_borg__algorithms__checksums) { if (PyObject_SetAttr(__pyx_m, __pyx_n_s_name, __pyx_n_s_main) < 0) __PYX_ERR(1, 1, __pyx_L1_error) } #if PY_MAJOR_VERSION >= 3 { PyObject *modules = PyImport_GetModuleDict(); if (unlikely(!modules)) __PYX_ERR(1, 1, __pyx_L1_error) if (!PyDict_GetItemString(modules, "borg.algorithms.checksums")) { if (unlikely(PyDict_SetItemString(modules, "borg.algorithms.checksums", __pyx_m) < 0)) __PYX_ERR(1, 1, __pyx_L1_error) } } #endif /*--- Builtin init code ---*/ if (__Pyx_InitCachedBuiltins() < 0) __PYX_ERR(1, 1, __pyx_L1_error) /*--- Constants init code ---*/ if (__Pyx_InitCachedConstants() < 0) __PYX_ERR(1, 1, __pyx_L1_error) /*--- Global type/function init code ---*/ (void)__Pyx_modinit_global_init_code(); (void)__Pyx_modinit_variable_export_code(); (void)__Pyx_modinit_function_export_code(); if (unlikely(__Pyx_modinit_type_init_code() < 0)) __PYX_ERR(1, 1, __pyx_L1_error) if (unlikely(__Pyx_modinit_type_import_code() < 0)) __PYX_ERR(1, 1, __pyx_L1_error) (void)__Pyx_modinit_variable_import_code(); (void)__Pyx_modinit_function_import_code(); /*--- Execution code ---*/ #if defined(__Pyx_Generator_USED) || defined(__Pyx_Coroutine_USED) if (__Pyx_patch_abc() < 0) __PYX_ERR(1, 1, __pyx_L1_error) #endif /* "borg/algorithms/checksums.pyx":3 * # cython: language_level=3 * * from ..helpers import bin_to_hex # <<<<<<<<<<<<<< * * from libc.stdint cimport uint32_t */ __pyx_t_1 = PyList_New(1); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 3, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_INCREF(__pyx_n_s_bin_to_hex); __Pyx_GIVEREF(__pyx_n_s_bin_to_hex); PyList_SET_ITEM(__pyx_t_1, 0, __pyx_n_s_bin_to_hex); __pyx_t_2 = __Pyx_Import(__pyx_n_s_helpers, __pyx_t_1, 2); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 3, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = __Pyx_ImportFrom(__pyx_t_2, __pyx_n_s_bin_to_hex); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 3, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); if (PyDict_SetItem(__pyx_d, __pyx_n_s_bin_to_hex, __pyx_t_1) < 0) __PYX_ERR(1, 3, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "borg/algorithms/checksums.pyx":46 * * * def crc32_slice_by_8(data, value=0): # <<<<<<<<<<<<<< * cdef Py_buffer data_buf = ro_buffer(data) * cdef uint32_t val = value */ __pyx_t_2 = PyCFunction_NewEx(&__pyx_mdef_4borg_10algorithms_9checksums_1crc32_slice_by_8, NULL, __pyx_n_s_borg_algorithms_checksums); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 46, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); if (PyDict_SetItem(__pyx_d, __pyx_n_s_crc32_slice_by_8, __pyx_t_2) < 0) __PYX_ERR(1, 46, __pyx_L1_error) __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "borg/algorithms/checksums.pyx":55 * * * def crc32_clmul(data, value=0): # <<<<<<<<<<<<<< * cdef Py_buffer data_buf = ro_buffer(data) * cdef uint32_t val = value */ __pyx_t_2 = PyCFunction_NewEx(&__pyx_mdef_4borg_10algorithms_9checksums_3crc32_clmul, NULL, __pyx_n_s_borg_algorithms_checksums); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 55, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); if (PyDict_SetItem(__pyx_d, __pyx_n_s_crc32_clmul, __pyx_t_2) < 0) __PYX_ERR(1, 55, __pyx_L1_error) __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "borg/algorithms/checksums.pyx":64 * * * 
have_clmul = _have_clmul() # <<<<<<<<<<<<<< * if have_clmul: * crc32 = crc32_clmul */ __pyx_t_2 = __Pyx_PyInt_From_int(have_clmul()); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 64, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); if (PyDict_SetItem(__pyx_d, __pyx_n_s_have_clmul, __pyx_t_2) < 0) __PYX_ERR(1, 64, __pyx_L1_error) __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "borg/algorithms/checksums.pyx":65 * * have_clmul = _have_clmul() * if have_clmul: # <<<<<<<<<<<<<< * crc32 = crc32_clmul * else: */ __Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_n_s_have_clmul); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 65, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = __Pyx_PyObject_IsTrue(__pyx_t_2); if (unlikely(__pyx_t_3 < 0)) __PYX_ERR(1, 65, __pyx_L1_error) __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; if (__pyx_t_3) { /* "borg/algorithms/checksums.pyx":66 * have_clmul = _have_clmul() * if have_clmul: * crc32 = crc32_clmul # <<<<<<<<<<<<<< * else: * crc32 = crc32_slice_by_8 */ __Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_n_s_crc32_clmul); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 66, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); if (PyDict_SetItem(__pyx_d, __pyx_n_s_crc32, __pyx_t_2) < 0) __PYX_ERR(1, 66, __pyx_L1_error) __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "borg/algorithms/checksums.pyx":65 * * have_clmul = _have_clmul() * if have_clmul: # <<<<<<<<<<<<<< * crc32 = crc32_clmul * else: */ goto __pyx_L2; } /* "borg/algorithms/checksums.pyx":68 * crc32 = crc32_clmul * else: * crc32 = crc32_slice_by_8 # <<<<<<<<<<<<<< * * */ /*else*/ { __Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_n_s_crc32_slice_by_8); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 68, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); if (PyDict_SetItem(__pyx_d, __pyx_n_s_crc32, __pyx_t_2) < 0) __PYX_ERR(1, 68, __pyx_L1_error) __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; } __pyx_L2:; /* "borg/algorithms/checksums.pyx":71 * * * def xxh64(data, seed=0): # <<<<<<<<<<<<<< * cdef unsigned long long _seed = seed * cdef XXH64_hash_t hash */ __pyx_t_2 = PyCFunction_NewEx(&__pyx_mdef_4borg_10algorithms_9checksums_5xxh64, NULL, __pyx_n_s_borg_algorithms_checksums); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 71, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); if (PyDict_SetItem(__pyx_d, __pyx_n_s_xxh64, __pyx_t_2) < 0) __PYX_ERR(1, 71, __pyx_L1_error) __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "borg/algorithms/checksums.pyx":1 * # cython: language_level=3 # <<<<<<<<<<<<<< * * from ..helpers import bin_to_hex */ __pyx_t_2 = __Pyx_PyDict_NewPresized(0); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); if (PyDict_SetItem(__pyx_d, __pyx_n_s_test, __pyx_t_2) < 0) __PYX_ERR(1, 1, __pyx_L1_error) __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /*--- Wrapped vars code ---*/ goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); if (__pyx_m) { if (__pyx_d) { __Pyx_AddTraceback("init borg.algorithms.checksums", __pyx_clineno, __pyx_lineno, __pyx_filename); } Py_CLEAR(__pyx_m); } else if (!PyErr_Occurred()) { PyErr_SetString(PyExc_ImportError, "init borg.algorithms.checksums"); } __pyx_L0:; __Pyx_RefNannyFinishContext(); #if CYTHON_PEP489_MULTI_PHASE_INIT return (__pyx_m != NULL) ? 
0 : -1; #elif PY_MAJOR_VERSION >= 3 return __pyx_m; #else return; #endif } /* --- Runtime support code --- */ /* Refnanny */ #if CYTHON_REFNANNY static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname) { PyObject *m = NULL, *p = NULL; void *r = NULL; m = PyImport_ImportModule(modname); if (!m) goto end; p = PyObject_GetAttrString(m, "RefNannyAPI"); if (!p) goto end; r = PyLong_AsVoidPtr(p); end: Py_XDECREF(p); Py_XDECREF(m); return (__Pyx_RefNannyAPIStruct *)r; } #endif /* PyObjectGetAttrStr */ #if CYTHON_USE_TYPE_SLOTS static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStr(PyObject* obj, PyObject* attr_name) { PyTypeObject* tp = Py_TYPE(obj); if (likely(tp->tp_getattro)) return tp->tp_getattro(obj, attr_name); #if PY_MAJOR_VERSION < 3 if (likely(tp->tp_getattr)) return tp->tp_getattr(obj, PyString_AS_STRING(attr_name)); #endif return PyObject_GetAttr(obj, attr_name); } #endif /* GetBuiltinName */ static PyObject *__Pyx_GetBuiltinName(PyObject *name) { PyObject* result = __Pyx_PyObject_GetAttrStr(__pyx_b, name); if (unlikely(!result)) { PyErr_Format(PyExc_NameError, #if PY_MAJOR_VERSION >= 3 "name '%U' is not defined", name); #else "name '%.200s' is not defined", PyString_AS_STRING(name)); #endif } return result; } /* RaiseDoubleKeywords */ static void __Pyx_RaiseDoubleKeywordsError( const char* func_name, PyObject* kw_name) { PyErr_Format(PyExc_TypeError, #if PY_MAJOR_VERSION >= 3 "%s() got multiple values for keyword argument '%U'", func_name, kw_name); #else "%s() got multiple values for keyword argument '%s'", func_name, PyString_AsString(kw_name)); #endif } /* ParseKeywords */ static int __Pyx_ParseOptionalKeywords( PyObject *kwds, PyObject **argnames[], PyObject *kwds2, PyObject *values[], Py_ssize_t num_pos_args, const char* function_name) { PyObject *key = 0, *value = 0; Py_ssize_t pos = 0; PyObject*** name; PyObject*** first_kw_arg = argnames + num_pos_args; while (PyDict_Next(kwds, &pos, &key, &value)) { name = first_kw_arg; while (*name && (**name != key)) name++; if (*name) { values[name-argnames] = value; continue; } name = first_kw_arg; #if PY_MAJOR_VERSION < 3 if (likely(PyString_Check(key))) { while (*name) { if ((CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**name) == PyString_GET_SIZE(key)) && _PyString_Eq(**name, key)) { values[name-argnames] = value; break; } name++; } if (*name) continue; else { PyObject*** argname = argnames; while (argname != first_kw_arg) { if ((**argname == key) || ( (CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**argname) == PyString_GET_SIZE(key)) && _PyString_Eq(**argname, key))) { goto arg_passed_twice; } argname++; } } } else #endif if (likely(PyUnicode_Check(key))) { while (*name) { int cmp = (**name == key) ? 0 : #if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3 (__Pyx_PyUnicode_GET_LENGTH(**name) != __Pyx_PyUnicode_GET_LENGTH(key)) ? 1 : #endif PyUnicode_Compare(**name, key); if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad; if (cmp == 0) { values[name-argnames] = value; break; } name++; } if (*name) continue; else { PyObject*** argname = argnames; while (argname != first_kw_arg) { int cmp = (**argname == key) ? 0 : #if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3 (__Pyx_PyUnicode_GET_LENGTH(**argname) != __Pyx_PyUnicode_GET_LENGTH(key)) ? 
1 : #endif PyUnicode_Compare(**argname, key); if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad; if (cmp == 0) goto arg_passed_twice; argname++; } } } else goto invalid_keyword_type; if (kwds2) { if (unlikely(PyDict_SetItem(kwds2, key, value))) goto bad; } else { goto invalid_keyword; } } return 0; arg_passed_twice: __Pyx_RaiseDoubleKeywordsError(function_name, key); goto bad; invalid_keyword_type: PyErr_Format(PyExc_TypeError, "%.200s() keywords must be strings", function_name); goto bad; invalid_keyword: PyErr_Format(PyExc_TypeError, #if PY_MAJOR_VERSION < 3 "%.200s() got an unexpected keyword argument '%.200s'", function_name, PyString_AsString(key)); #else "%s() got an unexpected keyword argument '%U'", function_name, key); #endif bad: return -1; } /* RaiseArgTupleInvalid */ static void __Pyx_RaiseArgtupleInvalid( const char* func_name, int exact, Py_ssize_t num_min, Py_ssize_t num_max, Py_ssize_t num_found) { Py_ssize_t num_expected; const char *more_or_less; if (num_found < num_min) { num_expected = num_min; more_or_less = "at least"; } else { num_expected = num_max; more_or_less = "at most"; } if (exact) { more_or_less = "exactly"; } PyErr_Format(PyExc_TypeError, "%.200s() takes %.8s %" CYTHON_FORMAT_SSIZE_T "d positional argument%.1s (%" CYTHON_FORMAT_SSIZE_T "d given)", func_name, more_or_less, num_expected, (num_expected == 1) ? "" : "s", num_found); } /* PyErrFetchRestore */ #if CYTHON_FAST_THREAD_STATE static CYTHON_INLINE void __Pyx_ErrRestoreInState(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb) { PyObject *tmp_type, *tmp_value, *tmp_tb; tmp_type = tstate->curexc_type; tmp_value = tstate->curexc_value; tmp_tb = tstate->curexc_traceback; tstate->curexc_type = type; tstate->curexc_value = value; tstate->curexc_traceback = tb; Py_XDECREF(tmp_type); Py_XDECREF(tmp_value); Py_XDECREF(tmp_tb); } static CYTHON_INLINE void __Pyx_ErrFetchInState(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) { *type = tstate->curexc_type; *value = tstate->curexc_value; *tb = tstate->curexc_traceback; tstate->curexc_type = 0; tstate->curexc_value = 0; tstate->curexc_traceback = 0; } #endif /* GetException */ #if CYTHON_FAST_THREAD_STATE static int __Pyx__GetException(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) #else static int __Pyx_GetException(PyObject **type, PyObject **value, PyObject **tb) #endif { PyObject *local_type, *local_value, *local_tb; #if CYTHON_FAST_THREAD_STATE PyObject *tmp_type, *tmp_value, *tmp_tb; local_type = tstate->curexc_type; local_value = tstate->curexc_value; local_tb = tstate->curexc_traceback; tstate->curexc_type = 0; tstate->curexc_value = 0; tstate->curexc_traceback = 0; #else PyErr_Fetch(&local_type, &local_value, &local_tb); #endif PyErr_NormalizeException(&local_type, &local_value, &local_tb); #if CYTHON_FAST_THREAD_STATE if (unlikely(tstate->curexc_type)) #else if (unlikely(PyErr_Occurred())) #endif goto bad; #if PY_MAJOR_VERSION >= 3 if (local_tb) { if (unlikely(PyException_SetTraceback(local_value, local_tb) < 0)) goto bad; } #endif Py_XINCREF(local_tb); Py_XINCREF(local_type); Py_XINCREF(local_value); *type = local_type; *value = local_value; *tb = local_tb; #if CYTHON_FAST_THREAD_STATE #if CYTHON_USE_EXC_INFO_STACK { _PyErr_StackItem *exc_info = tstate->exc_info; tmp_type = exc_info->exc_type; tmp_value = exc_info->exc_value; tmp_tb = exc_info->exc_traceback; exc_info->exc_type = local_type; exc_info->exc_value = local_value; exc_info->exc_traceback = local_tb; } #else tmp_type = 
tstate->exc_type; tmp_value = tstate->exc_value; tmp_tb = tstate->exc_traceback; tstate->exc_type = local_type; tstate->exc_value = local_value; tstate->exc_traceback = local_tb; #endif Py_XDECREF(tmp_type); Py_XDECREF(tmp_value); Py_XDECREF(tmp_tb); #else PyErr_SetExcInfo(local_type, local_value, local_tb); #endif return 0; bad: *type = 0; *value = 0; *tb = 0; Py_XDECREF(local_type); Py_XDECREF(local_value); Py_XDECREF(local_tb); return -1; } /* SwapException */ #if CYTHON_FAST_THREAD_STATE static CYTHON_INLINE void __Pyx__ExceptionSwap(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) { PyObject *tmp_type, *tmp_value, *tmp_tb; #if CYTHON_USE_EXC_INFO_STACK _PyErr_StackItem *exc_info = tstate->exc_info; tmp_type = exc_info->exc_type; tmp_value = exc_info->exc_value; tmp_tb = exc_info->exc_traceback; exc_info->exc_type = *type; exc_info->exc_value = *value; exc_info->exc_traceback = *tb; #else tmp_type = tstate->exc_type; tmp_value = tstate->exc_value; tmp_tb = tstate->exc_traceback; tstate->exc_type = *type; tstate->exc_value = *value; tstate->exc_traceback = *tb; #endif *type = tmp_type; *value = tmp_value; *tb = tmp_tb; } #else static CYTHON_INLINE void __Pyx_ExceptionSwap(PyObject **type, PyObject **value, PyObject **tb) { PyObject *tmp_type, *tmp_value, *tmp_tb; PyErr_GetExcInfo(&tmp_type, &tmp_value, &tmp_tb); PyErr_SetExcInfo(*type, *value, *tb); *type = tmp_type; *value = tmp_value; *tb = tmp_tb; } #endif /* GetTopmostException */ #if CYTHON_USE_EXC_INFO_STACK static _PyErr_StackItem * __Pyx_PyErr_GetTopmostException(PyThreadState *tstate) { _PyErr_StackItem *exc_info = tstate->exc_info; while ((exc_info->exc_type == NULL || exc_info->exc_type == Py_None) && exc_info->previous_item != NULL) { exc_info = exc_info->previous_item; } return exc_info; } #endif /* SaveResetException */ #if CYTHON_FAST_THREAD_STATE static CYTHON_INLINE void __Pyx__ExceptionSave(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) { #if CYTHON_USE_EXC_INFO_STACK _PyErr_StackItem *exc_info = __Pyx_PyErr_GetTopmostException(tstate); *type = exc_info->exc_type; *value = exc_info->exc_value; *tb = exc_info->exc_traceback; #else *type = tstate->exc_type; *value = tstate->exc_value; *tb = tstate->exc_traceback; #endif Py_XINCREF(*type); Py_XINCREF(*value); Py_XINCREF(*tb); } static CYTHON_INLINE void __Pyx__ExceptionReset(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb) { PyObject *tmp_type, *tmp_value, *tmp_tb; #if CYTHON_USE_EXC_INFO_STACK _PyErr_StackItem *exc_info = tstate->exc_info; tmp_type = exc_info->exc_type; tmp_value = exc_info->exc_value; tmp_tb = exc_info->exc_traceback; exc_info->exc_type = type; exc_info->exc_value = value; exc_info->exc_traceback = tb; #else tmp_type = tstate->exc_type; tmp_value = tstate->exc_value; tmp_tb = tstate->exc_traceback; tstate->exc_type = type; tstate->exc_value = value; tstate->exc_traceback = tb; #endif Py_XDECREF(tmp_type); Py_XDECREF(tmp_value); Py_XDECREF(tmp_tb); } #endif /* PyObjectCall */ #if CYTHON_COMPILING_IN_CPYTHON static CYTHON_INLINE PyObject* __Pyx_PyObject_Call(PyObject *func, PyObject *arg, PyObject *kw) { PyObject *result; ternaryfunc call = func->ob_type->tp_call; if (unlikely(!call)) return PyObject_Call(func, arg, kw); if (unlikely(Py_EnterRecursiveCall((char*)" while calling a Python object"))) return NULL; result = (*call)(func, arg, kw); Py_LeaveRecursiveCall(); if (unlikely(!result) && unlikely(!PyErr_Occurred())) { PyErr_SetString( PyExc_SystemError, "NULL result without error 
in PyObject_Call"); } return result; } #endif /* RaiseException */ #if PY_MAJOR_VERSION < 3 static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, CYTHON_UNUSED PyObject *cause) { __Pyx_PyThreadState_declare Py_XINCREF(type); if (!value || value == Py_None) value = NULL; else Py_INCREF(value); if (!tb || tb == Py_None) tb = NULL; else { Py_INCREF(tb); if (!PyTraceBack_Check(tb)) { PyErr_SetString(PyExc_TypeError, "raise: arg 3 must be a traceback or None"); goto raise_error; } } if (PyType_Check(type)) { #if CYTHON_COMPILING_IN_PYPY if (!value) { Py_INCREF(Py_None); value = Py_None; } #endif PyErr_NormalizeException(&type, &value, &tb); } else { if (value) { PyErr_SetString(PyExc_TypeError, "instance exception may not have a separate value"); goto raise_error; } value = type; type = (PyObject*) Py_TYPE(type); Py_INCREF(type); if (!PyType_IsSubtype((PyTypeObject *)type, (PyTypeObject *)PyExc_BaseException)) { PyErr_SetString(PyExc_TypeError, "raise: exception class must be a subclass of BaseException"); goto raise_error; } } __Pyx_PyThreadState_assign __Pyx_ErrRestore(type, value, tb); return; raise_error: Py_XDECREF(value); Py_XDECREF(type); Py_XDECREF(tb); return; } #else static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause) { PyObject* owned_instance = NULL; if (tb == Py_None) { tb = 0; } else if (tb && !PyTraceBack_Check(tb)) { PyErr_SetString(PyExc_TypeError, "raise: arg 3 must be a traceback or None"); goto bad; } if (value == Py_None) value = 0; if (PyExceptionInstance_Check(type)) { if (value) { PyErr_SetString(PyExc_TypeError, "instance exception may not have a separate value"); goto bad; } value = type; type = (PyObject*) Py_TYPE(value); } else if (PyExceptionClass_Check(type)) { PyObject *instance_class = NULL; if (value && PyExceptionInstance_Check(value)) { instance_class = (PyObject*) Py_TYPE(value); if (instance_class != type) { int is_subclass = PyObject_IsSubclass(instance_class, type); if (!is_subclass) { instance_class = NULL; } else if (unlikely(is_subclass == -1)) { goto bad; } else { type = instance_class; } } } if (!instance_class) { PyObject *args; if (!value) args = PyTuple_New(0); else if (PyTuple_Check(value)) { Py_INCREF(value); args = value; } else args = PyTuple_Pack(1, value); if (!args) goto bad; owned_instance = PyObject_Call(type, args, NULL); Py_DECREF(args); if (!owned_instance) goto bad; value = owned_instance; if (!PyExceptionInstance_Check(value)) { PyErr_Format(PyExc_TypeError, "calling %R should have returned an instance of " "BaseException, not %R", type, Py_TYPE(value)); goto bad; } } } else { PyErr_SetString(PyExc_TypeError, "raise: exception class must be a subclass of BaseException"); goto bad; } if (cause) { PyObject *fixed_cause; if (cause == Py_None) { fixed_cause = NULL; } else if (PyExceptionClass_Check(cause)) { fixed_cause = PyObject_CallObject(cause, NULL); if (fixed_cause == NULL) goto bad; } else if (PyExceptionInstance_Check(cause)) { fixed_cause = cause; Py_INCREF(fixed_cause); } else { PyErr_SetString(PyExc_TypeError, "exception causes must derive from " "BaseException"); goto bad; } PyException_SetCause(value, fixed_cause); } PyErr_SetObject(type, value); if (tb) { #if CYTHON_COMPILING_IN_PYPY PyObject *tmp_type, *tmp_value, *tmp_tb; PyErr_Fetch(&tmp_type, &tmp_value, &tmp_tb); Py_INCREF(tb); PyErr_Restore(tmp_type, tmp_value, tb); Py_XDECREF(tmp_tb); #else PyThreadState *tstate = __Pyx_PyThreadState_Current; PyObject* tmp_tb = tstate->curexc_traceback; if (tb != tmp_tb) { 
Py_INCREF(tb); tstate->curexc_traceback = tb; Py_XDECREF(tmp_tb); } #endif } bad: Py_XDECREF(owned_instance); return; } #endif /* PyDictVersioning */ #if CYTHON_USE_DICT_VERSIONS && CYTHON_USE_TYPE_SLOTS static CYTHON_INLINE PY_UINT64_T __Pyx_get_tp_dict_version(PyObject *obj) { PyObject *dict = Py_TYPE(obj)->tp_dict; return likely(dict) ? __PYX_GET_DICT_VERSION(dict) : 0; } static CYTHON_INLINE PY_UINT64_T __Pyx_get_object_dict_version(PyObject *obj) { PyObject **dictptr = NULL; Py_ssize_t offset = Py_TYPE(obj)->tp_dictoffset; if (offset) { #if CYTHON_COMPILING_IN_CPYTHON dictptr = (likely(offset > 0)) ? (PyObject **) ((char *)obj + offset) : _PyObject_GetDictPtr(obj); #else dictptr = _PyObject_GetDictPtr(obj); #endif } return (dictptr && *dictptr) ? __PYX_GET_DICT_VERSION(*dictptr) : 0; } static CYTHON_INLINE int __Pyx_object_dict_version_matches(PyObject* obj, PY_UINT64_T tp_dict_version, PY_UINT64_T obj_dict_version) { PyObject *dict = Py_TYPE(obj)->tp_dict; if (unlikely(!dict) || unlikely(tp_dict_version != __PYX_GET_DICT_VERSION(dict))) return 0; return obj_dict_version == __Pyx_get_object_dict_version(obj); } #endif /* GetModuleGlobalName */ #if CYTHON_USE_DICT_VERSIONS static PyObject *__Pyx__GetModuleGlobalName(PyObject *name, PY_UINT64_T *dict_version, PyObject **dict_cached_value) #else static CYTHON_INLINE PyObject *__Pyx__GetModuleGlobalName(PyObject *name) #endif { PyObject *result; #if !CYTHON_AVOID_BORROWED_REFS #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030500A1 result = _PyDict_GetItem_KnownHash(__pyx_d, name, ((PyASCIIObject *) name)->hash); __PYX_UPDATE_DICT_CACHE(__pyx_d, result, *dict_cached_value, *dict_version) if (likely(result)) { return __Pyx_NewRef(result); } else if (unlikely(PyErr_Occurred())) { return NULL; } #else result = PyDict_GetItem(__pyx_d, name); __PYX_UPDATE_DICT_CACHE(__pyx_d, result, *dict_cached_value, *dict_version) if (likely(result)) { return __Pyx_NewRef(result); } #endif #else result = PyObject_GetItem(__pyx_d, name); __PYX_UPDATE_DICT_CACHE(__pyx_d, result, *dict_cached_value, *dict_version) if (likely(result)) { return __Pyx_NewRef(result); } PyErr_Clear(); #endif return __Pyx_GetBuiltinName(name); } /* PyFunctionFastCall */ #if CYTHON_FAST_PYCALL static PyObject* __Pyx_PyFunction_FastCallNoKw(PyCodeObject *co, PyObject **args, Py_ssize_t na, PyObject *globals) { PyFrameObject *f; PyThreadState *tstate = __Pyx_PyThreadState_Current; PyObject **fastlocals; Py_ssize_t i; PyObject *result; assert(globals != NULL); /* XXX Perhaps we should create a specialized PyFrame_New() that doesn't take locals, but does take builtins without sanity checking them. */ assert(tstate != NULL); f = PyFrame_New(tstate, co, globals, NULL); if (f == NULL) { return NULL; } fastlocals = __Pyx_PyFrame_GetLocalsplus(f); for (i = 0; i < na; i++) { Py_INCREF(*args); fastlocals[i] = *args++; } result = PyEval_EvalFrameEx(f,0); ++tstate->recursion_depth; Py_DECREF(f); --tstate->recursion_depth; return result; } #if 1 || PY_VERSION_HEX < 0x030600B1 static PyObject *__Pyx_PyFunction_FastCallDict(PyObject *func, PyObject **args, Py_ssize_t nargs, PyObject *kwargs) { PyCodeObject *co = (PyCodeObject *)PyFunction_GET_CODE(func); PyObject *globals = PyFunction_GET_GLOBALS(func); PyObject *argdefs = PyFunction_GET_DEFAULTS(func); PyObject *closure; #if PY_MAJOR_VERSION >= 3 PyObject *kwdefs; #endif PyObject *kwtuple, **k; PyObject **d; Py_ssize_t nd; Py_ssize_t nk; PyObject *result; assert(kwargs == NULL || PyDict_Check(kwargs)); nk = kwargs ? 
PyDict_Size(kwargs) : 0; if (Py_EnterRecursiveCall((char*)" while calling a Python object")) { return NULL; } if ( #if PY_MAJOR_VERSION >= 3 co->co_kwonlyargcount == 0 && #endif likely(kwargs == NULL || nk == 0) && co->co_flags == (CO_OPTIMIZED | CO_NEWLOCALS | CO_NOFREE)) { if (argdefs == NULL && co->co_argcount == nargs) { result = __Pyx_PyFunction_FastCallNoKw(co, args, nargs, globals); goto done; } else if (nargs == 0 && argdefs != NULL && co->co_argcount == Py_SIZE(argdefs)) { /* function called with no arguments, but all parameters have a default value: use default values as arguments .*/ args = &PyTuple_GET_ITEM(argdefs, 0); result =__Pyx_PyFunction_FastCallNoKw(co, args, Py_SIZE(argdefs), globals); goto done; } } if (kwargs != NULL) { Py_ssize_t pos, i; kwtuple = PyTuple_New(2 * nk); if (kwtuple == NULL) { result = NULL; goto done; } k = &PyTuple_GET_ITEM(kwtuple, 0); pos = i = 0; while (PyDict_Next(kwargs, &pos, &k[i], &k[i+1])) { Py_INCREF(k[i]); Py_INCREF(k[i+1]); i += 2; } nk = i / 2; } else { kwtuple = NULL; k = NULL; } closure = PyFunction_GET_CLOSURE(func); #if PY_MAJOR_VERSION >= 3 kwdefs = PyFunction_GET_KW_DEFAULTS(func); #endif if (argdefs != NULL) { d = &PyTuple_GET_ITEM(argdefs, 0); nd = Py_SIZE(argdefs); } else { d = NULL; nd = 0; } #if PY_MAJOR_VERSION >= 3 result = PyEval_EvalCodeEx((PyObject*)co, globals, (PyObject *)NULL, args, (int)nargs, k, (int)nk, d, (int)nd, kwdefs, closure); #else result = PyEval_EvalCodeEx(co, globals, (PyObject *)NULL, args, (int)nargs, k, (int)nk, d, (int)nd, closure); #endif Py_XDECREF(kwtuple); done: Py_LeaveRecursiveCall(); return result; } #endif #endif /* PyObjectCallMethO */ #if CYTHON_COMPILING_IN_CPYTHON static CYTHON_INLINE PyObject* __Pyx_PyObject_CallMethO(PyObject *func, PyObject *arg) { PyObject *self, *result; PyCFunction cfunc; cfunc = PyCFunction_GET_FUNCTION(func); self = PyCFunction_GET_SELF(func); if (unlikely(Py_EnterRecursiveCall((char*)" while calling a Python object"))) return NULL; result = cfunc(self, arg); Py_LeaveRecursiveCall(); if (unlikely(!result) && unlikely(!PyErr_Occurred())) { PyErr_SetString( PyExc_SystemError, "NULL result without error in PyObject_Call"); } return result; } #endif /* PyObjectCallNoArg */ #if CYTHON_COMPILING_IN_CPYTHON static CYTHON_INLINE PyObject* __Pyx_PyObject_CallNoArg(PyObject *func) { #if CYTHON_FAST_PYCALL if (PyFunction_Check(func)) { return __Pyx_PyFunction_FastCall(func, NULL, 0); } #endif #ifdef __Pyx_CyFunction_USED if (likely(PyCFunction_Check(func) || __Pyx_CyFunction_Check(func))) #else if (likely(PyCFunction_Check(func))) #endif { if (likely(PyCFunction_GET_FLAGS(func) & METH_NOARGS)) { return __Pyx_PyObject_CallMethO(func, NULL); } } return __Pyx_PyObject_Call(func, __pyx_empty_tuple, NULL); } #endif /* PyCFunctionFastCall */ #if CYTHON_FAST_PYCCALL static CYTHON_INLINE PyObject * __Pyx_PyCFunction_FastCall(PyObject *func_obj, PyObject **args, Py_ssize_t nargs) { PyCFunctionObject *func = (PyCFunctionObject*)func_obj; PyCFunction meth = PyCFunction_GET_FUNCTION(func); PyObject *self = PyCFunction_GET_SELF(func); int flags = PyCFunction_GET_FLAGS(func); assert(PyCFunction_Check(func)); assert(METH_FASTCALL == (flags & ~(METH_CLASS | METH_STATIC | METH_COEXIST | METH_KEYWORDS | METH_STACKLESS))); assert(nargs >= 0); assert(nargs == 0 || args != NULL); /* _PyCFunction_FastCallDict() must not be called with an exception set, because it may clear it (directly or indirectly) and so the caller loses its exception */ assert(!PyErr_Occurred()); if ((PY_VERSION_HEX < 
0x030700A0) || unlikely(flags & METH_KEYWORDS)) { return (*((__Pyx_PyCFunctionFastWithKeywords)(void*)meth)) (self, args, nargs, NULL); } else { return (*((__Pyx_PyCFunctionFast)(void*)meth)) (self, args, nargs); } } #endif /* PyObjectCallOneArg */ #if CYTHON_COMPILING_IN_CPYTHON static PyObject* __Pyx__PyObject_CallOneArg(PyObject *func, PyObject *arg) { PyObject *result; PyObject *args = PyTuple_New(1); if (unlikely(!args)) return NULL; Py_INCREF(arg); PyTuple_SET_ITEM(args, 0, arg); result = __Pyx_PyObject_Call(func, args, NULL); Py_DECREF(args); return result; } static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg) { #if CYTHON_FAST_PYCALL if (PyFunction_Check(func)) { return __Pyx_PyFunction_FastCall(func, &arg, 1); } #endif if (likely(PyCFunction_Check(func))) { if (likely(PyCFunction_GET_FLAGS(func) & METH_O)) { return __Pyx_PyObject_CallMethO(func, arg); #if CYTHON_FAST_PYCCALL } else if (PyCFunction_GET_FLAGS(func) & METH_FASTCALL) { return __Pyx_PyCFunction_FastCall(func, &arg, 1); #endif } } return __Pyx__PyObject_CallOneArg(func, arg); } #else static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg) { PyObject *result; PyObject *args = PyTuple_Pack(1, arg); if (unlikely(!args)) return NULL; result = __Pyx_PyObject_Call(func, args, NULL); Py_DECREF(args); return result; } #endif /* PyObjectCall2Args */ static CYTHON_UNUSED PyObject* __Pyx_PyObject_Call2Args(PyObject* function, PyObject* arg1, PyObject* arg2) { PyObject *args, *result = NULL; #if CYTHON_FAST_PYCALL if (PyFunction_Check(function)) { PyObject *args[2] = {arg1, arg2}; return __Pyx_PyFunction_FastCall(function, args, 2); } #endif #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(function)) { PyObject *args[2] = {arg1, arg2}; return __Pyx_PyCFunction_FastCall(function, args, 2); } #endif args = PyTuple_New(2); if (unlikely(!args)) goto done; Py_INCREF(arg1); PyTuple_SET_ITEM(args, 0, arg1); Py_INCREF(arg2); PyTuple_SET_ITEM(args, 1, arg2); Py_INCREF(function); result = __Pyx_PyObject_Call(function, args, NULL); Py_DECREF(args); Py_DECREF(function); done: return result; } /* PyObject_GenericGetAttrNoDict */ #if CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP && PY_VERSION_HEX < 0x03070000 static PyObject *__Pyx_RaiseGenericGetAttributeError(PyTypeObject *tp, PyObject *attr_name) { PyErr_Format(PyExc_AttributeError, #if PY_MAJOR_VERSION >= 3 "'%.50s' object has no attribute '%U'", tp->tp_name, attr_name); #else "'%.50s' object has no attribute '%.400s'", tp->tp_name, PyString_AS_STRING(attr_name)); #endif return NULL; } static CYTHON_INLINE PyObject* __Pyx_PyObject_GenericGetAttrNoDict(PyObject* obj, PyObject* attr_name) { PyObject *descr; PyTypeObject *tp = Py_TYPE(obj); if (unlikely(!PyString_Check(attr_name))) { return PyObject_GenericGetAttr(obj, attr_name); } assert(!tp->tp_dictoffset); descr = _PyType_Lookup(tp, attr_name); if (unlikely(!descr)) { return __Pyx_RaiseGenericGetAttributeError(tp, attr_name); } Py_INCREF(descr); #if PY_MAJOR_VERSION < 3 if (likely(PyType_HasFeature(Py_TYPE(descr), Py_TPFLAGS_HAVE_CLASS))) #endif { descrgetfunc f = Py_TYPE(descr)->tp_descr_get; if (unlikely(f)) { PyObject *res = f(descr, obj, (PyObject *)tp); Py_DECREF(descr); return res; } } return descr; } #endif /* PyObject_GenericGetAttr */ #if CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP && PY_VERSION_HEX < 0x03070000 static PyObject* __Pyx_PyObject_GenericGetAttr(PyObject* obj, PyObject* attr_name) { if (unlikely(Py_TYPE(obj)->tp_dictoffset)) { return 
PyObject_GenericGetAttr(obj, attr_name); } return __Pyx_PyObject_GenericGetAttrNoDict(obj, attr_name); } #endif /* PyErrExceptionMatches */ #if CYTHON_FAST_THREAD_STATE static int __Pyx_PyErr_ExceptionMatchesTuple(PyObject *exc_type, PyObject *tuple) { Py_ssize_t i, n; n = PyTuple_GET_SIZE(tuple); #if PY_MAJOR_VERSION >= 3 for (i=0; icurexc_type; if (exc_type == err) return 1; if (unlikely(!exc_type)) return 0; if (unlikely(PyTuple_Check(err))) return __Pyx_PyErr_ExceptionMatchesTuple(exc_type, err); return __Pyx_PyErr_GivenExceptionMatches(exc_type, err); } #endif /* PyObjectGetAttrStrNoError */ static void __Pyx_PyObject_GetAttrStr_ClearAttributeError(void) { __Pyx_PyThreadState_declare __Pyx_PyThreadState_assign if (likely(__Pyx_PyErr_ExceptionMatches(PyExc_AttributeError))) __Pyx_PyErr_Clear(); } static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStrNoError(PyObject* obj, PyObject* attr_name) { PyObject *result; #if CYTHON_COMPILING_IN_CPYTHON && CYTHON_USE_TYPE_SLOTS && PY_VERSION_HEX >= 0x030700B1 PyTypeObject* tp = Py_TYPE(obj); if (likely(tp->tp_getattro == PyObject_GenericGetAttr)) { return _PyObject_GenericGetAttrWithDict(obj, attr_name, NULL, 1); } #endif result = __Pyx_PyObject_GetAttrStr(obj, attr_name); if (unlikely(!result)) { __Pyx_PyObject_GetAttrStr_ClearAttributeError(); } return result; } /* SetupReduce */ static int __Pyx_setup_reduce_is_named(PyObject* meth, PyObject* name) { int ret; PyObject *name_attr; name_attr = __Pyx_PyObject_GetAttrStr(meth, __pyx_n_s_name); if (likely(name_attr)) { ret = PyObject_RichCompareBool(name_attr, name, Py_EQ); } else { ret = -1; } if (unlikely(ret < 0)) { PyErr_Clear(); ret = 0; } Py_XDECREF(name_attr); return ret; } static int __Pyx_setup_reduce(PyObject* type_obj) { int ret = 0; PyObject *object_reduce = NULL; PyObject *object_reduce_ex = NULL; PyObject *reduce = NULL; PyObject *reduce_ex = NULL; PyObject *reduce_cython = NULL; PyObject *setstate = NULL; PyObject *setstate_cython = NULL; #if CYTHON_USE_PYTYPE_LOOKUP if (_PyType_Lookup((PyTypeObject*)type_obj, __pyx_n_s_getstate)) goto __PYX_GOOD; #else if (PyObject_HasAttr(type_obj, __pyx_n_s_getstate)) goto __PYX_GOOD; #endif #if CYTHON_USE_PYTYPE_LOOKUP object_reduce_ex = _PyType_Lookup(&PyBaseObject_Type, __pyx_n_s_reduce_ex); if (!object_reduce_ex) goto __PYX_BAD; #else object_reduce_ex = __Pyx_PyObject_GetAttrStr((PyObject*)&PyBaseObject_Type, __pyx_n_s_reduce_ex); if (!object_reduce_ex) goto __PYX_BAD; #endif reduce_ex = __Pyx_PyObject_GetAttrStr(type_obj, __pyx_n_s_reduce_ex); if (unlikely(!reduce_ex)) goto __PYX_BAD; if (reduce_ex == object_reduce_ex) { #if CYTHON_USE_PYTYPE_LOOKUP object_reduce = _PyType_Lookup(&PyBaseObject_Type, __pyx_n_s_reduce); if (!object_reduce) goto __PYX_BAD; #else object_reduce = __Pyx_PyObject_GetAttrStr((PyObject*)&PyBaseObject_Type, __pyx_n_s_reduce); if (!object_reduce) goto __PYX_BAD; #endif reduce = __Pyx_PyObject_GetAttrStr(type_obj, __pyx_n_s_reduce); if (unlikely(!reduce)) goto __PYX_BAD; if (reduce == object_reduce || __Pyx_setup_reduce_is_named(reduce, __pyx_n_s_reduce_cython)) { reduce_cython = __Pyx_PyObject_GetAttrStrNoError(type_obj, __pyx_n_s_reduce_cython); if (likely(reduce_cython)) { ret = PyDict_SetItem(((PyTypeObject*)type_obj)->tp_dict, __pyx_n_s_reduce, reduce_cython); if (unlikely(ret < 0)) goto __PYX_BAD; ret = PyDict_DelItem(((PyTypeObject*)type_obj)->tp_dict, __pyx_n_s_reduce_cython); if (unlikely(ret < 0)) goto __PYX_BAD; } else if (reduce == object_reduce || PyErr_Occurred()) { goto __PYX_BAD; } setstate = 
__Pyx_PyObject_GetAttrStr(type_obj, __pyx_n_s_setstate); if (!setstate) PyErr_Clear(); if (!setstate || __Pyx_setup_reduce_is_named(setstate, __pyx_n_s_setstate_cython)) { setstate_cython = __Pyx_PyObject_GetAttrStrNoError(type_obj, __pyx_n_s_setstate_cython); if (likely(setstate_cython)) { ret = PyDict_SetItem(((PyTypeObject*)type_obj)->tp_dict, __pyx_n_s_setstate, setstate_cython); if (unlikely(ret < 0)) goto __PYX_BAD; ret = PyDict_DelItem(((PyTypeObject*)type_obj)->tp_dict, __pyx_n_s_setstate_cython); if (unlikely(ret < 0)) goto __PYX_BAD; } else if (!setstate || PyErr_Occurred()) { goto __PYX_BAD; } } PyType_Modified((PyTypeObject*)type_obj); } } goto __PYX_GOOD; __PYX_BAD: if (!PyErr_Occurred()) PyErr_Format(PyExc_RuntimeError, "Unable to initialize pickling for %s", ((PyTypeObject*)type_obj)->tp_name); ret = -1; __PYX_GOOD: #if !CYTHON_USE_PYTYPE_LOOKUP Py_XDECREF(object_reduce); Py_XDECREF(object_reduce_ex); #endif Py_XDECREF(reduce); Py_XDECREF(reduce_ex); Py_XDECREF(reduce_cython); Py_XDECREF(setstate); Py_XDECREF(setstate_cython); return ret; } /* TypeImport */ #ifndef __PYX_HAVE_RT_ImportType #define __PYX_HAVE_RT_ImportType static PyTypeObject *__Pyx_ImportType(PyObject *module, const char *module_name, const char *class_name, size_t size, enum __Pyx_ImportType_CheckSize check_size) { PyObject *result = 0; char warning[200]; Py_ssize_t basicsize; #ifdef Py_LIMITED_API PyObject *py_basicsize; #endif result = PyObject_GetAttrString(module, class_name); if (!result) goto bad; if (!PyType_Check(result)) { PyErr_Format(PyExc_TypeError, "%.200s.%.200s is not a type object", module_name, class_name); goto bad; } #ifndef Py_LIMITED_API basicsize = ((PyTypeObject *)result)->tp_basicsize; #else py_basicsize = PyObject_GetAttrString(result, "__basicsize__"); if (!py_basicsize) goto bad; basicsize = PyLong_AsSsize_t(py_basicsize); Py_DECREF(py_basicsize); py_basicsize = 0; if (basicsize == (Py_ssize_t)-1 && PyErr_Occurred()) goto bad; #endif if ((size_t)basicsize < size) { PyErr_Format(PyExc_ValueError, "%.200s.%.200s size changed, may indicate binary incompatibility. " "Expected %zd from C header, got %zd from PyObject", module_name, class_name, size, basicsize); goto bad; } if (check_size == __Pyx_ImportType_CheckSize_Error && (size_t)basicsize != size) { PyErr_Format(PyExc_ValueError, "%.200s.%.200s size changed, may indicate binary incompatibility. " "Expected %zd from C header, got %zd from PyObject", module_name, class_name, size, basicsize); goto bad; } else if (check_size == __Pyx_ImportType_CheckSize_Warn && (size_t)basicsize > size) { PyOS_snprintf(warning, sizeof(warning), "%s.%s size changed, may indicate binary incompatibility. 
" "Expected %zd from C header, got %zd from PyObject", module_name, class_name, size, basicsize); if (PyErr_WarnEx(NULL, warning, 0) < 0) goto bad; } return (PyTypeObject *)result; bad: Py_XDECREF(result); return NULL; } #endif /* Import */ static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, int level) { PyObject *empty_list = 0; PyObject *module = 0; PyObject *global_dict = 0; PyObject *empty_dict = 0; PyObject *list; #if PY_MAJOR_VERSION < 3 PyObject *py_import; py_import = __Pyx_PyObject_GetAttrStr(__pyx_b, __pyx_n_s_import); if (!py_import) goto bad; #endif if (from_list) list = from_list; else { empty_list = PyList_New(0); if (!empty_list) goto bad; list = empty_list; } global_dict = PyModule_GetDict(__pyx_m); if (!global_dict) goto bad; empty_dict = PyDict_New(); if (!empty_dict) goto bad; { #if PY_MAJOR_VERSION >= 3 if (level == -1) { if ((1) && (strchr(__Pyx_MODULE_NAME, '.'))) { module = PyImport_ImportModuleLevelObject( name, global_dict, empty_dict, list, 1); if (!module) { if (!PyErr_ExceptionMatches(PyExc_ImportError)) goto bad; PyErr_Clear(); } } level = 0; } #endif if (!module) { #if PY_MAJOR_VERSION < 3 PyObject *py_level = PyInt_FromLong(level); if (!py_level) goto bad; module = PyObject_CallFunctionObjArgs(py_import, name, global_dict, empty_dict, list, py_level, (PyObject *)NULL); Py_DECREF(py_level); #else module = PyImport_ImportModuleLevelObject( name, global_dict, empty_dict, list, level); #endif } } bad: #if PY_MAJOR_VERSION < 3 Py_XDECREF(py_import); #endif Py_XDECREF(empty_list); Py_XDECREF(empty_dict); return module; } /* ImportFrom */ static PyObject* __Pyx_ImportFrom(PyObject* module, PyObject* name) { PyObject* value = __Pyx_PyObject_GetAttrStr(module, name); if (unlikely(!value) && PyErr_ExceptionMatches(PyExc_AttributeError)) { PyErr_Format(PyExc_ImportError, #if PY_MAJOR_VERSION < 3 "cannot import name %.230s", PyString_AS_STRING(name)); #else "cannot import name %S", name); #endif } return value; } /* CLineInTraceback */ #ifndef CYTHON_CLINE_IN_TRACEBACK static int __Pyx_CLineForTraceback(CYTHON_NCP_UNUSED PyThreadState *tstate, int c_line) { PyObject *use_cline; PyObject *ptype, *pvalue, *ptraceback; #if CYTHON_COMPILING_IN_CPYTHON PyObject **cython_runtime_dict; #endif if (unlikely(!__pyx_cython_runtime)) { return c_line; } __Pyx_ErrFetchInState(tstate, &ptype, &pvalue, &ptraceback); #if CYTHON_COMPILING_IN_CPYTHON cython_runtime_dict = _PyObject_GetDictPtr(__pyx_cython_runtime); if (likely(cython_runtime_dict)) { __PYX_PY_DICT_LOOKUP_IF_MODIFIED( use_cline, *cython_runtime_dict, __Pyx_PyDict_GetItemStr(*cython_runtime_dict, __pyx_n_s_cline_in_traceback)) } else #endif { PyObject *use_cline_obj = __Pyx_PyObject_GetAttrStr(__pyx_cython_runtime, __pyx_n_s_cline_in_traceback); if (use_cline_obj) { use_cline = PyObject_Not(use_cline_obj) ? 
Py_False : Py_True; Py_DECREF(use_cline_obj); } else { PyErr_Clear(); use_cline = NULL; } } if (!use_cline) { c_line = 0; PyObject_SetAttr(__pyx_cython_runtime, __pyx_n_s_cline_in_traceback, Py_False); } else if (use_cline == Py_False || (use_cline != Py_True && PyObject_Not(use_cline) != 0)) { c_line = 0; } __Pyx_ErrRestoreInState(tstate, ptype, pvalue, ptraceback); return c_line; } #endif /* CodeObjectCache */ static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line) { int start = 0, mid = 0, end = count - 1; if (end >= 0 && code_line > entries[end].code_line) { return count; } while (start < end) { mid = start + (end - start) / 2; if (code_line < entries[mid].code_line) { end = mid; } else if (code_line > entries[mid].code_line) { start = mid + 1; } else { return mid; } } if (code_line <= entries[mid].code_line) { return mid; } else { return mid + 1; } } static PyCodeObject *__pyx_find_code_object(int code_line) { PyCodeObject* code_object; int pos; if (unlikely(!code_line) || unlikely(!__pyx_code_cache.entries)) { return NULL; } pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line); if (unlikely(pos >= __pyx_code_cache.count) || unlikely(__pyx_code_cache.entries[pos].code_line != code_line)) { return NULL; } code_object = __pyx_code_cache.entries[pos].code_object; Py_INCREF(code_object); return code_object; } static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object) { int pos, i; __Pyx_CodeObjectCacheEntry* entries = __pyx_code_cache.entries; if (unlikely(!code_line)) { return; } if (unlikely(!entries)) { entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Malloc(64*sizeof(__Pyx_CodeObjectCacheEntry)); if (likely(entries)) { __pyx_code_cache.entries = entries; __pyx_code_cache.max_count = 64; __pyx_code_cache.count = 1; entries[0].code_line = code_line; entries[0].code_object = code_object; Py_INCREF(code_object); } return; } pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line); if ((pos < __pyx_code_cache.count) && unlikely(__pyx_code_cache.entries[pos].code_line == code_line)) { PyCodeObject* tmp = entries[pos].code_object; entries[pos].code_object = code_object; Py_DECREF(tmp); return; } if (__pyx_code_cache.count == __pyx_code_cache.max_count) { int new_max = __pyx_code_cache.max_count + 64; entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Realloc( __pyx_code_cache.entries, ((size_t)new_max) * sizeof(__Pyx_CodeObjectCacheEntry)); if (unlikely(!entries)) { return; } __pyx_code_cache.entries = entries; __pyx_code_cache.max_count = new_max; } for (i=__pyx_code_cache.count; i>pos; i--) { entries[i] = entries[i-1]; } entries[pos].code_line = code_line; entries[pos].code_object = code_object; __pyx_code_cache.count++; Py_INCREF(code_object); } /* AddTraceback */ #include "compile.h" #include "frameobject.h" #include "traceback.h" static PyCodeObject* __Pyx_CreateCodeObjectForTraceback( const char *funcname, int c_line, int py_line, const char *filename) { PyCodeObject *py_code = 0; PyObject *py_srcfile = 0; PyObject *py_funcname = 0; #if PY_MAJOR_VERSION < 3 py_srcfile = PyString_FromString(filename); #else py_srcfile = PyUnicode_FromString(filename); #endif if (!py_srcfile) goto bad; if (c_line) { #if PY_MAJOR_VERSION < 3 py_funcname = PyString_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line); #else py_funcname = PyUnicode_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line); #endif } else { #if PY_MAJOR_VERSION < 3 py_funcname = 
PyString_FromString(funcname); #else py_funcname = PyUnicode_FromString(funcname); #endif } if (!py_funcname) goto bad; py_code = __Pyx_PyCode_New( 0, 0, 0, 0, 0, __pyx_empty_bytes, /*PyObject *code,*/ __pyx_empty_tuple, /*PyObject *consts,*/ __pyx_empty_tuple, /*PyObject *names,*/ __pyx_empty_tuple, /*PyObject *varnames,*/ __pyx_empty_tuple, /*PyObject *freevars,*/ __pyx_empty_tuple, /*PyObject *cellvars,*/ py_srcfile, /*PyObject *filename,*/ py_funcname, /*PyObject *name,*/ py_line, __pyx_empty_bytes /*PyObject *lnotab*/ ); Py_DECREF(py_srcfile); Py_DECREF(py_funcname); return py_code; bad: Py_XDECREF(py_srcfile); Py_XDECREF(py_funcname); return NULL; } static void __Pyx_AddTraceback(const char *funcname, int c_line, int py_line, const char *filename) { PyCodeObject *py_code = 0; PyFrameObject *py_frame = 0; PyThreadState *tstate = __Pyx_PyThreadState_Current; if (c_line) { c_line = __Pyx_CLineForTraceback(tstate, c_line); } py_code = __pyx_find_code_object(c_line ? -c_line : py_line); if (!py_code) { py_code = __Pyx_CreateCodeObjectForTraceback( funcname, c_line, py_line, filename); if (!py_code) goto bad; __pyx_insert_code_object(c_line ? -c_line : py_line, py_code); } py_frame = PyFrame_New( tstate, /*PyThreadState *tstate,*/ py_code, /*PyCodeObject *code,*/ __pyx_d, /*PyObject *globals,*/ 0 /*PyObject *locals*/ ); if (!py_frame) goto bad; __Pyx_PyFrame_SetLineNumber(py_frame, py_line); PyTraceBack_Here(py_frame); bad: Py_XDECREF(py_code); Py_XDECREF(py_frame); } /* CIntToPy */ static CYTHON_INLINE PyObject* __Pyx_PyInt_From_int(int value) { const int neg_one = (int) ((int) 0 - (int) 1), const_zero = (int) 0; const int is_unsigned = neg_one > const_zero; if (is_unsigned) { if (sizeof(int) < sizeof(long)) { return PyInt_FromLong((long) value); } else if (sizeof(int) <= sizeof(unsigned long)) { return PyLong_FromUnsignedLong((unsigned long) value); #ifdef HAVE_LONG_LONG } else if (sizeof(int) <= sizeof(unsigned PY_LONG_LONG)) { return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value); #endif } } else { if (sizeof(int) <= sizeof(long)) { return PyInt_FromLong((long) value); #ifdef HAVE_LONG_LONG } else if (sizeof(int) <= sizeof(PY_LONG_LONG)) { return PyLong_FromLongLong((PY_LONG_LONG) value); #endif } } { int one = 1; int little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&value; return _PyLong_FromByteArray(bytes, sizeof(int), little, !is_unsigned); } } /* CIntFromPyVerify */ #define __PYX_VERIFY_RETURN_INT(target_type, func_type, func_value)\ __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, 0) #define __PYX_VERIFY_RETURN_INT_EXC(target_type, func_type, func_value)\ __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, 1) #define __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, exc)\ {\ func_type value = func_value;\ if (sizeof(target_type) < sizeof(func_type)) {\ if (unlikely(value != (func_type) (target_type) value)) {\ func_type zero = 0;\ if (exc && unlikely(value == (func_type)-1 && PyErr_Occurred()))\ return (target_type) -1;\ if (is_unsigned && unlikely(value < zero))\ goto raise_neg_overflow;\ else\ goto raise_overflow;\ }\ }\ return (target_type) value;\ } /* CIntToPy */ static CYTHON_INLINE PyObject* __Pyx_PyInt_From_uint32_t(uint32_t value) { const uint32_t neg_one = (uint32_t) ((uint32_t) 0 - (uint32_t) 1), const_zero = (uint32_t) 0; const int is_unsigned = neg_one > const_zero; if (is_unsigned) { if (sizeof(uint32_t) < sizeof(long)) { return PyInt_FromLong((long) value); } else if (sizeof(uint32_t) <= 
sizeof(unsigned long)) { return PyLong_FromUnsignedLong((unsigned long) value); #ifdef HAVE_LONG_LONG } else if (sizeof(uint32_t) <= sizeof(unsigned PY_LONG_LONG)) { return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value); #endif } } else { if (sizeof(uint32_t) <= sizeof(long)) { return PyInt_FromLong((long) value); #ifdef HAVE_LONG_LONG } else if (sizeof(uint32_t) <= sizeof(PY_LONG_LONG)) { return PyLong_FromLongLong((PY_LONG_LONG) value); #endif } } { int one = 1; int little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&value; return _PyLong_FromByteArray(bytes, sizeof(uint32_t), little, !is_unsigned); } } /* CIntFromPy */ static CYTHON_INLINE uint32_t __Pyx_PyInt_As_uint32_t(PyObject *x) { const uint32_t neg_one = (uint32_t) ((uint32_t) 0 - (uint32_t) 1), const_zero = (uint32_t) 0; const int is_unsigned = neg_one > const_zero; #if PY_MAJOR_VERSION < 3 if (likely(PyInt_Check(x))) { if (sizeof(uint32_t) < sizeof(long)) { __PYX_VERIFY_RETURN_INT(uint32_t, long, PyInt_AS_LONG(x)) } else { long val = PyInt_AS_LONG(x); if (is_unsigned && unlikely(val < 0)) { goto raise_neg_overflow; } return (uint32_t) val; } } else #endif if (likely(PyLong_Check(x))) { if (is_unsigned) { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) { case 0: return (uint32_t) 0; case 1: __PYX_VERIFY_RETURN_INT(uint32_t, digit, digits[0]) case 2: if (8 * sizeof(uint32_t) > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(uint32_t, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(uint32_t) >= 2 * PyLong_SHIFT) { return (uint32_t) (((((uint32_t)digits[1]) << PyLong_SHIFT) | (uint32_t)digits[0])); } } break; case 3: if (8 * sizeof(uint32_t) > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(uint32_t, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(uint32_t) >= 3 * PyLong_SHIFT) { return (uint32_t) (((((((uint32_t)digits[2]) << PyLong_SHIFT) | (uint32_t)digits[1]) << PyLong_SHIFT) | (uint32_t)digits[0])); } } break; case 4: if (8 * sizeof(uint32_t) > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(uint32_t, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(uint32_t) >= 4 * PyLong_SHIFT) { return (uint32_t) (((((((((uint32_t)digits[3]) << PyLong_SHIFT) | (uint32_t)digits[2]) << PyLong_SHIFT) | (uint32_t)digits[1]) << PyLong_SHIFT) | (uint32_t)digits[0])); } } break; } #endif #if CYTHON_COMPILING_IN_CPYTHON if (unlikely(Py_SIZE(x) < 0)) { goto raise_neg_overflow; } #else { int result = PyObject_RichCompareBool(x, Py_False, Py_LT); if (unlikely(result < 0)) return (uint32_t) -1; if (unlikely(result == 1)) goto raise_neg_overflow; } #endif if (sizeof(uint32_t) <= sizeof(unsigned long)) { __PYX_VERIFY_RETURN_INT_EXC(uint32_t, unsigned long, PyLong_AsUnsignedLong(x)) #ifdef HAVE_LONG_LONG } else if (sizeof(uint32_t) <= sizeof(unsigned PY_LONG_LONG)) { __PYX_VERIFY_RETURN_INT_EXC(uint32_t, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x)) #endif } } else { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) { case 0: 
return (uint32_t) 0; case -1: __PYX_VERIFY_RETURN_INT(uint32_t, sdigit, (sdigit) (-(sdigit)digits[0])) case 1: __PYX_VERIFY_RETURN_INT(uint32_t, digit, +digits[0]) case -2: if (8 * sizeof(uint32_t) - 1 > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(uint32_t, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(uint32_t) - 1 > 2 * PyLong_SHIFT) { return (uint32_t) (((uint32_t)-1)*(((((uint32_t)digits[1]) << PyLong_SHIFT) | (uint32_t)digits[0]))); } } break; case 2: if (8 * sizeof(uint32_t) > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(uint32_t, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(uint32_t) - 1 > 2 * PyLong_SHIFT) { return (uint32_t) ((((((uint32_t)digits[1]) << PyLong_SHIFT) | (uint32_t)digits[0]))); } } break; case -3: if (8 * sizeof(uint32_t) - 1 > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(uint32_t, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(uint32_t) - 1 > 3 * PyLong_SHIFT) { return (uint32_t) (((uint32_t)-1)*(((((((uint32_t)digits[2]) << PyLong_SHIFT) | (uint32_t)digits[1]) << PyLong_SHIFT) | (uint32_t)digits[0]))); } } break; case 3: if (8 * sizeof(uint32_t) > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(uint32_t, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(uint32_t) - 1 > 3 * PyLong_SHIFT) { return (uint32_t) ((((((((uint32_t)digits[2]) << PyLong_SHIFT) | (uint32_t)digits[1]) << PyLong_SHIFT) | (uint32_t)digits[0]))); } } break; case -4: if (8 * sizeof(uint32_t) - 1 > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(uint32_t, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(uint32_t) - 1 > 4 * PyLong_SHIFT) { return (uint32_t) (((uint32_t)-1)*(((((((((uint32_t)digits[3]) << PyLong_SHIFT) | (uint32_t)digits[2]) << PyLong_SHIFT) | (uint32_t)digits[1]) << PyLong_SHIFT) | (uint32_t)digits[0]))); } } break; case 4: if (8 * sizeof(uint32_t) > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(uint32_t, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(uint32_t) - 1 > 4 * PyLong_SHIFT) { return (uint32_t) ((((((((((uint32_t)digits[3]) << PyLong_SHIFT) | (uint32_t)digits[2]) << PyLong_SHIFT) | (uint32_t)digits[1]) << PyLong_SHIFT) | (uint32_t)digits[0]))); } } break; } #endif if (sizeof(uint32_t) <= sizeof(long)) { __PYX_VERIFY_RETURN_INT_EXC(uint32_t, long, PyLong_AsLong(x)) #ifdef HAVE_LONG_LONG } else if (sizeof(uint32_t) <= sizeof(PY_LONG_LONG)) { __PYX_VERIFY_RETURN_INT_EXC(uint32_t, PY_LONG_LONG, PyLong_AsLongLong(x)) #endif } } { #if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray) PyErr_SetString(PyExc_RuntimeError, "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers"); #else uint32_t 
val; PyObject *v = __Pyx_PyNumber_IntOrLong(x); #if PY_MAJOR_VERSION < 3 if (likely(v) && !PyLong_Check(v)) { PyObject *tmp = v; v = PyNumber_Long(tmp); Py_DECREF(tmp); } #endif if (likely(v)) { int one = 1; int is_little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&val; int ret = _PyLong_AsByteArray((PyLongObject *)v, bytes, sizeof(val), is_little, !is_unsigned); Py_DECREF(v); if (likely(!ret)) return val; } #endif return (uint32_t) -1; } } else { uint32_t val; PyObject *tmp = __Pyx_PyNumber_IntOrLong(x); if (!tmp) return (uint32_t) -1; val = __Pyx_PyInt_As_uint32_t(tmp); Py_DECREF(tmp); return val; } raise_overflow: PyErr_SetString(PyExc_OverflowError, "value too large to convert to uint32_t"); return (uint32_t) -1; raise_neg_overflow: PyErr_SetString(PyExc_OverflowError, "can't convert negative value to uint32_t"); return (uint32_t) -1; } /* CIntFromPy */ static CYTHON_INLINE unsigned PY_LONG_LONG __Pyx_PyInt_As_unsigned_PY_LONG_LONG(PyObject *x) { const unsigned PY_LONG_LONG neg_one = (unsigned PY_LONG_LONG) ((unsigned PY_LONG_LONG) 0 - (unsigned PY_LONG_LONG) 1), const_zero = (unsigned PY_LONG_LONG) 0; const int is_unsigned = neg_one > const_zero; #if PY_MAJOR_VERSION < 3 if (likely(PyInt_Check(x))) { if (sizeof(unsigned PY_LONG_LONG) < sizeof(long)) { __PYX_VERIFY_RETURN_INT(unsigned PY_LONG_LONG, long, PyInt_AS_LONG(x)) } else { long val = PyInt_AS_LONG(x); if (is_unsigned && unlikely(val < 0)) { goto raise_neg_overflow; } return (unsigned PY_LONG_LONG) val; } } else #endif if (likely(PyLong_Check(x))) { if (is_unsigned) { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) { case 0: return (unsigned PY_LONG_LONG) 0; case 1: __PYX_VERIFY_RETURN_INT(unsigned PY_LONG_LONG, digit, digits[0]) case 2: if (8 * sizeof(unsigned PY_LONG_LONG) > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(unsigned PY_LONG_LONG, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(unsigned PY_LONG_LONG) >= 2 * PyLong_SHIFT) { return (unsigned PY_LONG_LONG) (((((unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); } } break; case 3: if (8 * sizeof(unsigned PY_LONG_LONG) > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(unsigned PY_LONG_LONG, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(unsigned PY_LONG_LONG) >= 3 * PyLong_SHIFT) { return (unsigned PY_LONG_LONG) (((((((unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); } } break; case 4: if (8 * sizeof(unsigned PY_LONG_LONG) > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(unsigned PY_LONG_LONG, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(unsigned PY_LONG_LONG) >= 4 * PyLong_SHIFT) { return (unsigned PY_LONG_LONG) (((((((((unsigned PY_LONG_LONG)digits[3]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); } } break; } #endif #if CYTHON_COMPILING_IN_CPYTHON if 
(unlikely(Py_SIZE(x) < 0)) { goto raise_neg_overflow; } #else { int result = PyObject_RichCompareBool(x, Py_False, Py_LT); if (unlikely(result < 0)) return (unsigned PY_LONG_LONG) -1; if (unlikely(result == 1)) goto raise_neg_overflow; } #endif if (sizeof(unsigned PY_LONG_LONG) <= sizeof(unsigned long)) { __PYX_VERIFY_RETURN_INT_EXC(unsigned PY_LONG_LONG, unsigned long, PyLong_AsUnsignedLong(x)) #ifdef HAVE_LONG_LONG } else if (sizeof(unsigned PY_LONG_LONG) <= sizeof(unsigned PY_LONG_LONG)) { __PYX_VERIFY_RETURN_INT_EXC(unsigned PY_LONG_LONG, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x)) #endif } } else { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) { case 0: return (unsigned PY_LONG_LONG) 0; case -1: __PYX_VERIFY_RETURN_INT(unsigned PY_LONG_LONG, sdigit, (sdigit) (-(sdigit)digits[0])) case 1: __PYX_VERIFY_RETURN_INT(unsigned PY_LONG_LONG, digit, +digits[0]) case -2: if (8 * sizeof(unsigned PY_LONG_LONG) - 1 > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(unsigned PY_LONG_LONG, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(unsigned PY_LONG_LONG) - 1 > 2 * PyLong_SHIFT) { return (unsigned PY_LONG_LONG) (((unsigned PY_LONG_LONG)-1)*(((((unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0]))); } } break; case 2: if (8 * sizeof(unsigned PY_LONG_LONG) > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(unsigned PY_LONG_LONG, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(unsigned PY_LONG_LONG) - 1 > 2 * PyLong_SHIFT) { return (unsigned PY_LONG_LONG) ((((((unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0]))); } } break; case -3: if (8 * sizeof(unsigned PY_LONG_LONG) - 1 > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(unsigned PY_LONG_LONG, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(unsigned PY_LONG_LONG) - 1 > 3 * PyLong_SHIFT) { return (unsigned PY_LONG_LONG) (((unsigned PY_LONG_LONG)-1)*(((((((unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0]))); } } break; case 3: if (8 * sizeof(unsigned PY_LONG_LONG) > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(unsigned PY_LONG_LONG, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(unsigned PY_LONG_LONG) - 1 > 3 * PyLong_SHIFT) { return (unsigned PY_LONG_LONG) ((((((((unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0]))); } } break; case -4: if (8 * sizeof(unsigned PY_LONG_LONG) - 1 > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(unsigned PY_LONG_LONG, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(unsigned PY_LONG_LONG) - 1 > 4 * PyLong_SHIFT) { return (unsigned PY_LONG_LONG) (((unsigned 
PY_LONG_LONG)-1)*(((((((((unsigned PY_LONG_LONG)digits[3]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0]))); } } break; case 4: if (8 * sizeof(unsigned PY_LONG_LONG) > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(unsigned PY_LONG_LONG, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(unsigned PY_LONG_LONG) - 1 > 4 * PyLong_SHIFT) { return (unsigned PY_LONG_LONG) ((((((((((unsigned PY_LONG_LONG)digits[3]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0]))); } } break; } #endif if (sizeof(unsigned PY_LONG_LONG) <= sizeof(long)) { __PYX_VERIFY_RETURN_INT_EXC(unsigned PY_LONG_LONG, long, PyLong_AsLong(x)) #ifdef HAVE_LONG_LONG } else if (sizeof(unsigned PY_LONG_LONG) <= sizeof(PY_LONG_LONG)) { __PYX_VERIFY_RETURN_INT_EXC(unsigned PY_LONG_LONG, PY_LONG_LONG, PyLong_AsLongLong(x)) #endif } } { #if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray) PyErr_SetString(PyExc_RuntimeError, "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers"); #else unsigned PY_LONG_LONG val; PyObject *v = __Pyx_PyNumber_IntOrLong(x); #if PY_MAJOR_VERSION < 3 if (likely(v) && !PyLong_Check(v)) { PyObject *tmp = v; v = PyNumber_Long(tmp); Py_DECREF(tmp); } #endif if (likely(v)) { int one = 1; int is_little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&val; int ret = _PyLong_AsByteArray((PyLongObject *)v, bytes, sizeof(val), is_little, !is_unsigned); Py_DECREF(v); if (likely(!ret)) return val; } #endif return (unsigned PY_LONG_LONG) -1; } } else { unsigned PY_LONG_LONG val; PyObject *tmp = __Pyx_PyNumber_IntOrLong(x); if (!tmp) return (unsigned PY_LONG_LONG) -1; val = __Pyx_PyInt_As_unsigned_PY_LONG_LONG(tmp); Py_DECREF(tmp); return val; } raise_overflow: PyErr_SetString(PyExc_OverflowError, "value too large to convert to unsigned PY_LONG_LONG"); return (unsigned PY_LONG_LONG) -1; raise_neg_overflow: PyErr_SetString(PyExc_OverflowError, "can't convert negative value to unsigned PY_LONG_LONG"); return (unsigned PY_LONG_LONG) -1; } /* CIntToPy */ static CYTHON_INLINE PyObject* __Pyx_PyInt_From_long(long value) { const long neg_one = (long) ((long) 0 - (long) 1), const_zero = (long) 0; const int is_unsigned = neg_one > const_zero; if (is_unsigned) { if (sizeof(long) < sizeof(long)) { return PyInt_FromLong((long) value); } else if (sizeof(long) <= sizeof(unsigned long)) { return PyLong_FromUnsignedLong((unsigned long) value); #ifdef HAVE_LONG_LONG } else if (sizeof(long) <= sizeof(unsigned PY_LONG_LONG)) { return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value); #endif } } else { if (sizeof(long) <= sizeof(long)) { return PyInt_FromLong((long) value); #ifdef HAVE_LONG_LONG } else if (sizeof(long) <= sizeof(PY_LONG_LONG)) { return PyLong_FromLongLong((PY_LONG_LONG) value); #endif } } { int one = 1; int little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&value; return _PyLong_FromByteArray(bytes, sizeof(long), little, !is_unsigned); } } /* CIntFromPy */ static CYTHON_INLINE long __Pyx_PyInt_As_long(PyObject *x) { const long neg_one = (long) ((long) 0 - (long) 1), const_zero = (long) 0; const int is_unsigned = 
neg_one > const_zero; #if PY_MAJOR_VERSION < 3 if (likely(PyInt_Check(x))) { if (sizeof(long) < sizeof(long)) { __PYX_VERIFY_RETURN_INT(long, long, PyInt_AS_LONG(x)) } else { long val = PyInt_AS_LONG(x); if (is_unsigned && unlikely(val < 0)) { goto raise_neg_overflow; } return (long) val; } } else #endif if (likely(PyLong_Check(x))) { if (is_unsigned) { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) { case 0: return (long) 0; case 1: __PYX_VERIFY_RETURN_INT(long, digit, digits[0]) case 2: if (8 * sizeof(long) > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) >= 2 * PyLong_SHIFT) { return (long) (((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0])); } } break; case 3: if (8 * sizeof(long) > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) >= 3 * PyLong_SHIFT) { return (long) (((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])); } } break; case 4: if (8 * sizeof(long) > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) >= 4 * PyLong_SHIFT) { return (long) (((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])); } } break; } #endif #if CYTHON_COMPILING_IN_CPYTHON if (unlikely(Py_SIZE(x) < 0)) { goto raise_neg_overflow; } #else { int result = PyObject_RichCompareBool(x, Py_False, Py_LT); if (unlikely(result < 0)) return (long) -1; if (unlikely(result == 1)) goto raise_neg_overflow; } #endif if (sizeof(long) <= sizeof(unsigned long)) { __PYX_VERIFY_RETURN_INT_EXC(long, unsigned long, PyLong_AsUnsignedLong(x)) #ifdef HAVE_LONG_LONG } else if (sizeof(long) <= sizeof(unsigned PY_LONG_LONG)) { __PYX_VERIFY_RETURN_INT_EXC(long, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x)) #endif } } else { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) { case 0: return (long) 0; case -1: __PYX_VERIFY_RETURN_INT(long, sdigit, (sdigit) (-(sdigit)digits[0])) case 1: __PYX_VERIFY_RETURN_INT(long, digit, +digits[0]) case -2: if (8 * sizeof(long) - 1 > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { return (long) (((long)-1)*(((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); } } break; case 2: if (8 * sizeof(long) > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { return (long) ((((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); } } break; case -3: if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) 
> 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { return (long) (((long)-1)*(((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); } } break; case 3: if (8 * sizeof(long) > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { return (long) ((((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); } } break; case -4: if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) { return (long) (((long)-1)*(((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); } } break; case 4: if (8 * sizeof(long) > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) { return (long) ((((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); } } break; } #endif if (sizeof(long) <= sizeof(long)) { __PYX_VERIFY_RETURN_INT_EXC(long, long, PyLong_AsLong(x)) #ifdef HAVE_LONG_LONG } else if (sizeof(long) <= sizeof(PY_LONG_LONG)) { __PYX_VERIFY_RETURN_INT_EXC(long, PY_LONG_LONG, PyLong_AsLongLong(x)) #endif } } { #if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray) PyErr_SetString(PyExc_RuntimeError, "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers"); #else long val; PyObject *v = __Pyx_PyNumber_IntOrLong(x); #if PY_MAJOR_VERSION < 3 if (likely(v) && !PyLong_Check(v)) { PyObject *tmp = v; v = PyNumber_Long(tmp); Py_DECREF(tmp); } #endif if (likely(v)) { int one = 1; int is_little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&val; int ret = _PyLong_AsByteArray((PyLongObject *)v, bytes, sizeof(val), is_little, !is_unsigned); Py_DECREF(v); if (likely(!ret)) return val; } #endif return (long) -1; } } else { long val; PyObject *tmp = __Pyx_PyNumber_IntOrLong(x); if (!tmp) return (long) -1; val = __Pyx_PyInt_As_long(tmp); Py_DECREF(tmp); return val; } raise_overflow: PyErr_SetString(PyExc_OverflowError, "value too large to convert to long"); return (long) -1; raise_neg_overflow: PyErr_SetString(PyExc_OverflowError, "can't convert negative value to long"); return (long) -1; } /* CIntFromPy */ static CYTHON_INLINE int __Pyx_PyInt_As_int(PyObject *x) { const int neg_one = (int) ((int) 0 - (int) 1), const_zero = (int) 0; const int is_unsigned = neg_one > const_zero; #if PY_MAJOR_VERSION < 3 if (likely(PyInt_Check(x))) { if (sizeof(int) < sizeof(long)) { __PYX_VERIFY_RETURN_INT(int, long, PyInt_AS_LONG(x)) } else { 
long val = PyInt_AS_LONG(x); if (is_unsigned && unlikely(val < 0)) { goto raise_neg_overflow; } return (int) val; } } else #endif if (likely(PyLong_Check(x))) { if (is_unsigned) { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) { case 0: return (int) 0; case 1: __PYX_VERIFY_RETURN_INT(int, digit, digits[0]) case 2: if (8 * sizeof(int) > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) >= 2 * PyLong_SHIFT) { return (int) (((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0])); } } break; case 3: if (8 * sizeof(int) > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) >= 3 * PyLong_SHIFT) { return (int) (((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])); } } break; case 4: if (8 * sizeof(int) > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) >= 4 * PyLong_SHIFT) { return (int) (((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])); } } break; } #endif #if CYTHON_COMPILING_IN_CPYTHON if (unlikely(Py_SIZE(x) < 0)) { goto raise_neg_overflow; } #else { int result = PyObject_RichCompareBool(x, Py_False, Py_LT); if (unlikely(result < 0)) return (int) -1; if (unlikely(result == 1)) goto raise_neg_overflow; } #endif if (sizeof(int) <= sizeof(unsigned long)) { __PYX_VERIFY_RETURN_INT_EXC(int, unsigned long, PyLong_AsUnsignedLong(x)) #ifdef HAVE_LONG_LONG } else if (sizeof(int) <= sizeof(unsigned PY_LONG_LONG)) { __PYX_VERIFY_RETURN_INT_EXC(int, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x)) #endif } } else { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) { case 0: return (int) 0; case -1: __PYX_VERIFY_RETURN_INT(int, sdigit, (sdigit) (-(sdigit)digits[0])) case 1: __PYX_VERIFY_RETURN_INT(int, digit, +digits[0]) case -2: if (8 * sizeof(int) - 1 > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) { return (int) (((int)-1)*(((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); } } break; case 2: if (8 * sizeof(int) > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) { return (int) ((((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); } } break; case -3: if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) - 1 > 
3 * PyLong_SHIFT) { return (int) (((int)-1)*(((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); } } break; case 3: if (8 * sizeof(int) > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) { return (int) ((((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); } } break; case -4: if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) - 1 > 4 * PyLong_SHIFT) { return (int) (((int)-1)*(((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); } } break; case 4: if (8 * sizeof(int) > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) - 1 > 4 * PyLong_SHIFT) { return (int) ((((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); } } break; } #endif if (sizeof(int) <= sizeof(long)) { __PYX_VERIFY_RETURN_INT_EXC(int, long, PyLong_AsLong(x)) #ifdef HAVE_LONG_LONG } else if (sizeof(int) <= sizeof(PY_LONG_LONG)) { __PYX_VERIFY_RETURN_INT_EXC(int, PY_LONG_LONG, PyLong_AsLongLong(x)) #endif } } { #if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray) PyErr_SetString(PyExc_RuntimeError, "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers"); #else int val; PyObject *v = __Pyx_PyNumber_IntOrLong(x); #if PY_MAJOR_VERSION < 3 if (likely(v) && !PyLong_Check(v)) { PyObject *tmp = v; v = PyNumber_Long(tmp); Py_DECREF(tmp); } #endif if (likely(v)) { int one = 1; int is_little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&val; int ret = _PyLong_AsByteArray((PyLongObject *)v, bytes, sizeof(val), is_little, !is_unsigned); Py_DECREF(v); if (likely(!ret)) return val; } #endif return (int) -1; } } else { int val; PyObject *tmp = __Pyx_PyNumber_IntOrLong(x); if (!tmp) return (int) -1; val = __Pyx_PyInt_As_int(tmp); Py_DECREF(tmp); return val; } raise_overflow: PyErr_SetString(PyExc_OverflowError, "value too large to convert to int"); return (int) -1; raise_neg_overflow: PyErr_SetString(PyExc_OverflowError, "can't convert negative value to int"); return (int) -1; } /* FastTypeChecks */ #if CYTHON_COMPILING_IN_CPYTHON static int __Pyx_InBases(PyTypeObject *a, PyTypeObject *b) { while (a) { a = a->tp_base; if (a == b) return 1; } return b == &PyBaseObject_Type; } static CYTHON_INLINE int __Pyx_IsSubtype(PyTypeObject *a, PyTypeObject *b) { PyObject *mro; if (a == b) return 1; mro = a->tp_mro; if (likely(mro)) { Py_ssize_t i, n; n = PyTuple_GET_SIZE(mro); for (i = 0; i < n; i++) { if (PyTuple_GET_ITEM(mro, i) == (PyObject *)b) return 1; } return 0; } return __Pyx_InBases(a, b); } #if PY_MAJOR_VERSION == 2 static int __Pyx_inner_PyErr_GivenExceptionMatches2(PyObject 
*err, PyObject* exc_type1, PyObject* exc_type2) { PyObject *exception, *value, *tb; int res; __Pyx_PyThreadState_declare __Pyx_PyThreadState_assign __Pyx_ErrFetch(&exception, &value, &tb); res = exc_type1 ? PyObject_IsSubclass(err, exc_type1) : 0; if (unlikely(res == -1)) { PyErr_WriteUnraisable(err); res = 0; } if (!res) { res = PyObject_IsSubclass(err, exc_type2); if (unlikely(res == -1)) { PyErr_WriteUnraisable(err); res = 0; } } __Pyx_ErrRestore(exception, value, tb); return res; } #else static CYTHON_INLINE int __Pyx_inner_PyErr_GivenExceptionMatches2(PyObject *err, PyObject* exc_type1, PyObject *exc_type2) { int res = exc_type1 ? __Pyx_IsSubtype((PyTypeObject*)err, (PyTypeObject*)exc_type1) : 0; if (!res) { res = __Pyx_IsSubtype((PyTypeObject*)err, (PyTypeObject*)exc_type2); } return res; } #endif static int __Pyx_PyErr_GivenExceptionMatchesTuple(PyObject *exc_type, PyObject *tuple) { Py_ssize_t i, n; assert(PyExceptionClass_Check(exc_type)); n = PyTuple_GET_SIZE(tuple); #if PY_MAJOR_VERSION >= 3 for (i=0; ip) { #if PY_MAJOR_VERSION < 3 if (t->is_unicode) { *t->p = PyUnicode_DecodeUTF8(t->s, t->n - 1, NULL); } else if (t->intern) { *t->p = PyString_InternFromString(t->s); } else { *t->p = PyString_FromStringAndSize(t->s, t->n - 1); } #else if (t->is_unicode | t->is_str) { if (t->intern) { *t->p = PyUnicode_InternFromString(t->s); } else if (t->encoding) { *t->p = PyUnicode_Decode(t->s, t->n - 1, t->encoding, NULL); } else { *t->p = PyUnicode_FromStringAndSize(t->s, t->n - 1); } } else { *t->p = PyBytes_FromStringAndSize(t->s, t->n - 1); } #endif if (!*t->p) return -1; if (PyObject_Hash(*t->p) == -1) return -1; ++t; } return 0; } static CYTHON_INLINE PyObject* __Pyx_PyUnicode_FromString(const char* c_str) { return __Pyx_PyUnicode_FromStringAndSize(c_str, (Py_ssize_t)strlen(c_str)); } static CYTHON_INLINE const char* __Pyx_PyObject_AsString(PyObject* o) { Py_ssize_t ignore; return __Pyx_PyObject_AsStringAndSize(o, &ignore); } #if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT #if !CYTHON_PEP393_ENABLED static const char* __Pyx_PyUnicode_AsStringAndSize(PyObject* o, Py_ssize_t *length) { char* defenc_c; PyObject* defenc = _PyUnicode_AsDefaultEncodedString(o, NULL); if (!defenc) return NULL; defenc_c = PyBytes_AS_STRING(defenc); #if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII { char* end = defenc_c + PyBytes_GET_SIZE(defenc); char* c; for (c = defenc_c; c < end; c++) { if ((unsigned char) (*c) >= 128) { PyUnicode_AsASCIIString(o); return NULL; } } } #endif *length = PyBytes_GET_SIZE(defenc); return defenc_c; } #else static CYTHON_INLINE const char* __Pyx_PyUnicode_AsStringAndSize(PyObject* o, Py_ssize_t *length) { if (unlikely(__Pyx_PyUnicode_READY(o) == -1)) return NULL; #if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII if (likely(PyUnicode_IS_ASCII(o))) { *length = PyUnicode_GET_LENGTH(o); return PyUnicode_AsUTF8(o); } else { PyUnicode_AsASCIIString(o); return NULL; } #else return PyUnicode_AsUTF8AndSize(o, length); #endif } #endif #endif static CYTHON_INLINE const char* __Pyx_PyObject_AsStringAndSize(PyObject* o, Py_ssize_t *length) { #if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT if ( #if PY_MAJOR_VERSION < 3 && __PYX_DEFAULT_STRING_ENCODING_IS_ASCII __Pyx_sys_getdefaultencoding_not_ascii && #endif PyUnicode_Check(o)) { return __Pyx_PyUnicode_AsStringAndSize(o, length); } else #endif #if (!CYTHON_COMPILING_IN_PYPY) || (defined(PyByteArray_AS_STRING) && defined(PyByteArray_GET_SIZE)) if (PyByteArray_Check(o)) { *length = 
PyByteArray_GET_SIZE(o); return PyByteArray_AS_STRING(o); } else #endif { char* result; int r = PyBytes_AsStringAndSize(o, &result, length); if (unlikely(r < 0)) { return NULL; } else { return result; } } } static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject* x) { int is_true = x == Py_True; if (is_true | (x == Py_False) | (x == Py_None)) return is_true; else return PyObject_IsTrue(x); } static CYTHON_INLINE int __Pyx_PyObject_IsTrueAndDecref(PyObject* x) { int retval; if (unlikely(!x)) return -1; retval = __Pyx_PyObject_IsTrue(x); Py_DECREF(x); return retval; } static PyObject* __Pyx_PyNumber_IntOrLongWrongResultType(PyObject* result, const char* type_name) { #if PY_MAJOR_VERSION >= 3 if (PyLong_Check(result)) { if (PyErr_WarnFormat(PyExc_DeprecationWarning, 1, "__int__ returned non-int (type %.200s). " "The ability to return an instance of a strict subclass of int " "is deprecated, and may be removed in a future version of Python.", Py_TYPE(result)->tp_name)) { Py_DECREF(result); return NULL; } return result; } #endif PyErr_Format(PyExc_TypeError, "__%.4s__ returned non-%.4s (type %.200s)", type_name, type_name, Py_TYPE(result)->tp_name); Py_DECREF(result); return NULL; } static CYTHON_INLINE PyObject* __Pyx_PyNumber_IntOrLong(PyObject* x) { #if CYTHON_USE_TYPE_SLOTS PyNumberMethods *m; #endif const char *name = NULL; PyObject *res = NULL; #if PY_MAJOR_VERSION < 3 if (likely(PyInt_Check(x) || PyLong_Check(x))) #else if (likely(PyLong_Check(x))) #endif return __Pyx_NewRef(x); #if CYTHON_USE_TYPE_SLOTS m = Py_TYPE(x)->tp_as_number; #if PY_MAJOR_VERSION < 3 if (m && m->nb_int) { name = "int"; res = m->nb_int(x); } else if (m && m->nb_long) { name = "long"; res = m->nb_long(x); } #else if (likely(m && m->nb_int)) { name = "int"; res = m->nb_int(x); } #endif #else if (!PyBytes_CheckExact(x) && !PyUnicode_CheckExact(x)) { res = PyNumber_Int(x); } #endif if (likely(res)) { #if PY_MAJOR_VERSION < 3 if (unlikely(!PyInt_Check(res) && !PyLong_Check(res))) { #else if (unlikely(!PyLong_CheckExact(res))) { #endif return __Pyx_PyNumber_IntOrLongWrongResultType(res, name); } } else if (!PyErr_Occurred()) { PyErr_SetString(PyExc_TypeError, "an integer is required"); } return res; } static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject* b) { Py_ssize_t ival; PyObject *x; #if PY_MAJOR_VERSION < 3 if (likely(PyInt_CheckExact(b))) { if (sizeof(Py_ssize_t) >= sizeof(long)) return PyInt_AS_LONG(b); else return PyInt_AsSsize_t(b); } #endif if (likely(PyLong_CheckExact(b))) { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)b)->ob_digit; const Py_ssize_t size = Py_SIZE(b); if (likely(__Pyx_sst_abs(size) <= 1)) { ival = likely(size) ? 
digits[0] : 0; if (size == -1) ival = -ival; return ival; } else { switch (size) { case 2: if (8 * sizeof(Py_ssize_t) > 2 * PyLong_SHIFT) { return (Py_ssize_t) (((((size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); } break; case -2: if (8 * sizeof(Py_ssize_t) > 2 * PyLong_SHIFT) { return -(Py_ssize_t) (((((size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); } break; case 3: if (8 * sizeof(Py_ssize_t) > 3 * PyLong_SHIFT) { return (Py_ssize_t) (((((((size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); } break; case -3: if (8 * sizeof(Py_ssize_t) > 3 * PyLong_SHIFT) { return -(Py_ssize_t) (((((((size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); } break; case 4: if (8 * sizeof(Py_ssize_t) > 4 * PyLong_SHIFT) { return (Py_ssize_t) (((((((((size_t)digits[3]) << PyLong_SHIFT) | (size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); } break; case -4: if (8 * sizeof(Py_ssize_t) > 4 * PyLong_SHIFT) { return -(Py_ssize_t) (((((((((size_t)digits[3]) << PyLong_SHIFT) | (size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); } break; } } #endif return PyLong_AsSsize_t(b); } x = PyNumber_Index(b); if (!x) return -1; ival = PyInt_AsSsize_t(x); Py_DECREF(x); return ival; } static CYTHON_INLINE PyObject * __Pyx_PyBool_FromLong(long b) { return b ? __Pyx_NewRef(Py_True) : __Pyx_NewRef(Py_False); } static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t ival) { return PyInt_FromSize_t(ival); } #endif /* Py_PYTHON_H */ borgbackup-1.1.15/src/borg/algorithms/blake2/0000755000175000017500000000000013771325773020744 5ustar useruser00000000000000borgbackup-1.1.15/src/borg/algorithms/blake2/ref/0000755000175000017500000000000013771325773021520 5ustar useruser00000000000000borgbackup-1.1.15/src/borg/algorithms/blake2/ref/blake2-impl.h0000644000175000017500000001011713771325506023762 0ustar useruser00000000000000/* BLAKE2 reference source code package - reference C implementations Copyright 2012, Samuel Neves . You may use this under the terms of the CC0, the OpenSSL Licence, or the Apache Public License 2.0, at your option. The terms of these licenses can be found at: - CC0 1.0 Universal : http://creativecommons.org/publicdomain/zero/1.0 - OpenSSL license : https://www.openssl.org/source/license.html - Apache 2.0 : http://www.apache.org/licenses/LICENSE-2.0 More information about the BLAKE2 hash function can be found at https://blake2.net. 
*/ #ifndef BLAKE2_IMPL_H #define BLAKE2_IMPL_H #include #include #if !defined(__cplusplus) && (!defined(__STDC_VERSION__) || __STDC_VERSION__ < 199901L) #if defined(_MSC_VER) #define BLAKE2_INLINE __inline #elif defined(__GNUC__) #define BLAKE2_INLINE __inline__ #else #define BLAKE2_INLINE #endif #else #define BLAKE2_INLINE inline #endif static BLAKE2_INLINE uint32_t load32( const void *src ) { #if defined(NATIVE_LITTLE_ENDIAN) uint32_t w; memcpy(&w, src, sizeof w); return w; #else const uint8_t *p = ( const uint8_t * )src; return (( uint32_t )( p[0] ) << 0) | (( uint32_t )( p[1] ) << 8) | (( uint32_t )( p[2] ) << 16) | (( uint32_t )( p[3] ) << 24) ; #endif } static BLAKE2_INLINE uint64_t load64( const void *src ) { #if defined(NATIVE_LITTLE_ENDIAN) uint64_t w; memcpy(&w, src, sizeof w); return w; #else const uint8_t *p = ( const uint8_t * )src; return (( uint64_t )( p[0] ) << 0) | (( uint64_t )( p[1] ) << 8) | (( uint64_t )( p[2] ) << 16) | (( uint64_t )( p[3] ) << 24) | (( uint64_t )( p[4] ) << 32) | (( uint64_t )( p[5] ) << 40) | (( uint64_t )( p[6] ) << 48) | (( uint64_t )( p[7] ) << 56) ; #endif } static BLAKE2_INLINE uint16_t load16( const void *src ) { #if defined(NATIVE_LITTLE_ENDIAN) uint16_t w; memcpy(&w, src, sizeof w); return w; #else const uint8_t *p = ( const uint8_t * )src; return (( uint16_t )( p[0] ) << 0) | (( uint16_t )( p[1] ) << 8) ; #endif } static BLAKE2_INLINE void store16( void *dst, uint16_t w ) { #if defined(NATIVE_LITTLE_ENDIAN) memcpy(dst, &w, sizeof w); #else uint8_t *p = ( uint8_t * )dst; *p++ = ( uint8_t )w; w >>= 8; *p++ = ( uint8_t )w; #endif } static BLAKE2_INLINE void store32( void *dst, uint32_t w ) { #if defined(NATIVE_LITTLE_ENDIAN) memcpy(dst, &w, sizeof w); #else uint8_t *p = ( uint8_t * )dst; p[0] = (uint8_t)(w >> 0); p[1] = (uint8_t)(w >> 8); p[2] = (uint8_t)(w >> 16); p[3] = (uint8_t)(w >> 24); #endif } static BLAKE2_INLINE void store64( void *dst, uint64_t w ) { #if defined(NATIVE_LITTLE_ENDIAN) memcpy(dst, &w, sizeof w); #else uint8_t *p = ( uint8_t * )dst; p[0] = (uint8_t)(w >> 0); p[1] = (uint8_t)(w >> 8); p[2] = (uint8_t)(w >> 16); p[3] = (uint8_t)(w >> 24); p[4] = (uint8_t)(w >> 32); p[5] = (uint8_t)(w >> 40); p[6] = (uint8_t)(w >> 48); p[7] = (uint8_t)(w >> 56); #endif } static BLAKE2_INLINE uint64_t load48( const void *src ) { const uint8_t *p = ( const uint8_t * )src; return (( uint64_t )( p[0] ) << 0) | (( uint64_t )( p[1] ) << 8) | (( uint64_t )( p[2] ) << 16) | (( uint64_t )( p[3] ) << 24) | (( uint64_t )( p[4] ) << 32) | (( uint64_t )( p[5] ) << 40) ; } static BLAKE2_INLINE void store48( void *dst, uint64_t w ) { uint8_t *p = ( uint8_t * )dst; p[0] = (uint8_t)(w >> 0); p[1] = (uint8_t)(w >> 8); p[2] = (uint8_t)(w >> 16); p[3] = (uint8_t)(w >> 24); p[4] = (uint8_t)(w >> 32); p[5] = (uint8_t)(w >> 40); } static BLAKE2_INLINE uint32_t rotr32( const uint32_t w, const unsigned c ) { return ( w >> c ) | ( w << ( 32 - c ) ); } static BLAKE2_INLINE uint64_t rotr64( const uint64_t w, const unsigned c ) { return ( w >> c ) | ( w << ( 64 - c ) ); } /* prevents compiler optimizing out memset() */ static BLAKE2_INLINE void secure_zero_memory(void *v, size_t n) { static void *(*const volatile memset_v)(void *, int, size_t) = &memset; memset_v(v, 0, n); } #endif borgbackup-1.1.15/src/borg/algorithms/blake2/ref/blake2b-ref.c0000644000175000017500000002351113771325506023734 0ustar useruser00000000000000/* BLAKE2 reference source code package - reference C implementations Copyright 2012, Samuel Neves . 
You may use this under the terms of the CC0, the OpenSSL Licence, or the Apache Public License 2.0, at your option. The terms of these licenses can be found at: - CC0 1.0 Universal : http://creativecommons.org/publicdomain/zero/1.0 - OpenSSL license : https://www.openssl.org/source/license.html - Apache 2.0 : http://www.apache.org/licenses/LICENSE-2.0 More information about the BLAKE2 hash function can be found at https://blake2.net. */ #include #include #include #include "blake2.h" #include "blake2-impl.h" static const uint64_t blake2b_IV[8] = { 0x6a09e667f3bcc908ULL, 0xbb67ae8584caa73bULL, 0x3c6ef372fe94f82bULL, 0xa54ff53a5f1d36f1ULL, 0x510e527fade682d1ULL, 0x9b05688c2b3e6c1fULL, 0x1f83d9abfb41bd6bULL, 0x5be0cd19137e2179ULL }; static const uint8_t blake2b_sigma[12][16] = { { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 } , { 14, 10, 4, 8, 9, 15, 13, 6, 1, 12, 0, 2, 11, 7, 5, 3 } , { 11, 8, 12, 0, 5, 2, 15, 13, 10, 14, 3, 6, 7, 1, 9, 4 } , { 7, 9, 3, 1, 13, 12, 11, 14, 2, 6, 5, 10, 4, 0, 15, 8 } , { 9, 0, 5, 7, 2, 4, 10, 15, 14, 1, 11, 12, 6, 8, 3, 13 } , { 2, 12, 6, 10, 0, 11, 8, 3, 4, 13, 7, 5, 15, 14, 1, 9 } , { 12, 5, 1, 15, 14, 13, 4, 10, 0, 7, 6, 3, 9, 2, 8, 11 } , { 13, 11, 7, 14, 12, 1, 3, 9, 5, 0, 15, 4, 8, 6, 2, 10 } , { 6, 15, 14, 9, 11, 3, 0, 8, 12, 2, 13, 7, 1, 4, 10, 5 } , { 10, 2, 8, 4, 7, 6, 1, 5, 15, 11, 9, 14, 3, 12, 13 , 0 } , { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 } , { 14, 10, 4, 8, 9, 15, 13, 6, 1, 12, 0, 2, 11, 7, 5, 3 } }; static void blake2b_set_lastnode( blake2b_state *S ) { S->f[1] = (uint64_t)-1; } /* Some helper functions, not necessarily useful */ static int blake2b_is_lastblock( const blake2b_state *S ) { return S->f[0] != 0; } static void blake2b_set_lastblock( blake2b_state *S ) { if( S->last_node ) blake2b_set_lastnode( S ); S->f[0] = (uint64_t)-1; } static void blake2b_increment_counter( blake2b_state *S, const uint64_t inc ) { S->t[0] += inc; S->t[1] += ( S->t[0] < inc ); } static void blake2b_init0( blake2b_state *S ) { size_t i; memset( S, 0, sizeof( blake2b_state ) ); for( i = 0; i < 8; ++i ) S->h[i] = blake2b_IV[i]; } /* init xors IV with input parameter block */ int blake2b_init_param( blake2b_state *S, const blake2b_param *P ) { const uint8_t *p = ( const uint8_t * )( P ); size_t i; blake2b_init0( S ); /* IV XOR ParamBlock */ for( i = 0; i < 8; ++i ) S->h[i] ^= load64( p + sizeof( S->h[i] ) * i ); S->outlen = P->digest_length; return 0; } int blake2b_init( blake2b_state *S, size_t outlen ) { blake2b_param P[1]; if ( ( !outlen ) || ( outlen > BLAKE2B_OUTBYTES ) ) return -1; P->digest_length = (uint8_t)outlen; P->key_length = 0; P->fanout = 1; P->depth = 1; store32( &P->leaf_length, 0 ); store32( &P->node_offset, 0 ); store32( &P->xof_length, 0 ); P->node_depth = 0; P->inner_length = 0; memset( P->reserved, 0, sizeof( P->reserved ) ); memset( P->salt, 0, sizeof( P->salt ) ); memset( P->personal, 0, sizeof( P->personal ) ); return blake2b_init_param( S, P ); } int blake2b_init_key( blake2b_state *S, size_t outlen, const void *key, size_t keylen ) { blake2b_param P[1]; if ( ( !outlen ) || ( outlen > BLAKE2B_OUTBYTES ) ) return -1; if ( !key || !keylen || keylen > BLAKE2B_KEYBYTES ) return -1; P->digest_length = (uint8_t)outlen; P->key_length = (uint8_t)keylen; P->fanout = 1; P->depth = 1; store32( &P->leaf_length, 0 ); store32( &P->node_offset, 0 ); store32( &P->xof_length, 0 ); P->node_depth = 0; P->inner_length = 0; memset( P->reserved, 0, sizeof( P->reserved ) ); memset( P->salt, 0, sizeof( P->salt ) ); memset( P->personal, 
0, sizeof( P->personal ) ); if( blake2b_init_param( S, P ) < 0 ) return -1; { uint8_t block[BLAKE2B_BLOCKBYTES]; memset( block, 0, BLAKE2B_BLOCKBYTES ); memcpy( block, key, keylen ); blake2b_update( S, block, BLAKE2B_BLOCKBYTES ); secure_zero_memory( block, BLAKE2B_BLOCKBYTES ); /* Burn the key from stack */ } return 0; } #define G(r,i,a,b,c,d) \ do { \ a = a + b + m[blake2b_sigma[r][2*i+0]]; \ d = rotr64(d ^ a, 32); \ c = c + d; \ b = rotr64(b ^ c, 24); \ a = a + b + m[blake2b_sigma[r][2*i+1]]; \ d = rotr64(d ^ a, 16); \ c = c + d; \ b = rotr64(b ^ c, 63); \ } while(0) #define ROUND(r) \ do { \ G(r,0,v[ 0],v[ 4],v[ 8],v[12]); \ G(r,1,v[ 1],v[ 5],v[ 9],v[13]); \ G(r,2,v[ 2],v[ 6],v[10],v[14]); \ G(r,3,v[ 3],v[ 7],v[11],v[15]); \ G(r,4,v[ 0],v[ 5],v[10],v[15]); \ G(r,5,v[ 1],v[ 6],v[11],v[12]); \ G(r,6,v[ 2],v[ 7],v[ 8],v[13]); \ G(r,7,v[ 3],v[ 4],v[ 9],v[14]); \ } while(0) static void blake2b_compress( blake2b_state *S, const uint8_t block[BLAKE2B_BLOCKBYTES] ) { uint64_t m[16]; uint64_t v[16]; size_t i; for( i = 0; i < 16; ++i ) { m[i] = load64( block + i * sizeof( m[i] ) ); } for( i = 0; i < 8; ++i ) { v[i] = S->h[i]; } v[ 8] = blake2b_IV[0]; v[ 9] = blake2b_IV[1]; v[10] = blake2b_IV[2]; v[11] = blake2b_IV[3]; v[12] = blake2b_IV[4] ^ S->t[0]; v[13] = blake2b_IV[5] ^ S->t[1]; v[14] = blake2b_IV[6] ^ S->f[0]; v[15] = blake2b_IV[7] ^ S->f[1]; ROUND( 0 ); ROUND( 1 ); ROUND( 2 ); ROUND( 3 ); ROUND( 4 ); ROUND( 5 ); ROUND( 6 ); ROUND( 7 ); ROUND( 8 ); ROUND( 9 ); ROUND( 10 ); ROUND( 11 ); for( i = 0; i < 8; ++i ) { S->h[i] = S->h[i] ^ v[i] ^ v[i + 8]; } } #undef G #undef ROUND int blake2b_update( blake2b_state *S, const void *pin, size_t inlen ) { const unsigned char * in = (const unsigned char *)pin; if( inlen > 0 ) { size_t left = S->buflen; size_t fill = BLAKE2B_BLOCKBYTES - left; if( inlen > fill ) { S->buflen = 0; memcpy( S->buf + left, in, fill ); /* Fill buffer */ blake2b_increment_counter( S, BLAKE2B_BLOCKBYTES ); blake2b_compress( S, S->buf ); /* Compress */ in += fill; inlen -= fill; while(inlen > BLAKE2B_BLOCKBYTES) { blake2b_increment_counter(S, BLAKE2B_BLOCKBYTES); blake2b_compress( S, in ); in += BLAKE2B_BLOCKBYTES; inlen -= BLAKE2B_BLOCKBYTES; } } memcpy( S->buf + S->buflen, in, inlen ); S->buflen += inlen; } return 0; } int blake2b_final( blake2b_state *S, void *out, size_t outlen ) { uint8_t buffer[BLAKE2B_OUTBYTES] = {0}; size_t i; if( out == NULL || outlen < S->outlen ) return -1; if( blake2b_is_lastblock( S ) ) return -1; blake2b_increment_counter( S, S->buflen ); blake2b_set_lastblock( S ); memset( S->buf + S->buflen, 0, BLAKE2B_BLOCKBYTES - S->buflen ); /* Padding */ blake2b_compress( S, S->buf ); for( i = 0; i < 8; ++i ) /* Output full hash to temp buffer */ store64( buffer + sizeof( S->h[i] ) * i, S->h[i] ); memcpy( out, buffer, S->outlen ); secure_zero_memory(buffer, sizeof(buffer)); return 0; } /* inlen, at least, should be uint64_t. Others can be size_t. 
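   Usage sketch for the one-shot API defined below (illustrative only; msg is a
   placeholder buffer supplied by the caller):

       uint8_t hash[BLAKE2B_OUTBYTES];
       const char msg[] = "hello, borg";
       int rc = blake2b(hash, sizeof hash, msg, sizeof msg - 1, NULL, 0);

   rc is 0 on success and -1 if a parameter is rejected (outlen of 0 or larger
   than BLAKE2B_OUTBYTES, keylen larger than BLAKE2B_KEYBYTES, a NULL out
   pointer, or a NULL in pointer with non-zero inlen). Passing key/keylen
   computes a keyed MAC instead of a plain hash.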
*/ int blake2b( void *out, size_t outlen, const void *in, size_t inlen, const void *key, size_t keylen ) { blake2b_state S[1]; /* Verify parameters */ if ( NULL == in && inlen > 0 ) return -1; if ( NULL == out ) return -1; if( NULL == key && keylen > 0 ) return -1; if( !outlen || outlen > BLAKE2B_OUTBYTES ) return -1; if( keylen > BLAKE2B_KEYBYTES ) return -1; if( keylen > 0 ) { if( blake2b_init_key( S, outlen, key, keylen ) < 0 ) return -1; } else { if( blake2b_init( S, outlen ) < 0 ) return -1; } blake2b_update( S, ( const uint8_t * )in, inlen ); blake2b_final( S, out, outlen ); return 0; } int blake2( void *out, size_t outlen, const void *in, size_t inlen, const void *key, size_t keylen ) { return blake2b(out, outlen, in, inlen, key, keylen); } #if defined(SUPERCOP) int crypto_hash( unsigned char *out, unsigned char *in, unsigned long long inlen ) { return blake2b( out, BLAKE2B_OUTBYTES, in, inlen, NULL, 0 ); } #endif #if defined(BLAKE2B_SELFTEST) #include #include "blake2-kat.h" int main( void ) { uint8_t key[BLAKE2B_KEYBYTES]; uint8_t buf[BLAKE2_KAT_LENGTH]; size_t i, step; for( i = 0; i < BLAKE2B_KEYBYTES; ++i ) key[i] = ( uint8_t )i; for( i = 0; i < BLAKE2_KAT_LENGTH; ++i ) buf[i] = ( uint8_t )i; /* Test simple API */ for( i = 0; i < BLAKE2_KAT_LENGTH; ++i ) { uint8_t hash[BLAKE2B_OUTBYTES]; blake2b( hash, BLAKE2B_OUTBYTES, buf, i, key, BLAKE2B_KEYBYTES ); if( 0 != memcmp( hash, blake2b_keyed_kat[i], BLAKE2B_OUTBYTES ) ) { goto fail; } } /* Test streaming API */ for(step = 1; step < BLAKE2B_BLOCKBYTES; ++step) { for (i = 0; i < BLAKE2_KAT_LENGTH; ++i) { uint8_t hash[BLAKE2B_OUTBYTES]; blake2b_state S; uint8_t * p = buf; size_t mlen = i; int err = 0; if( (err = blake2b_init_key(&S, BLAKE2B_OUTBYTES, key, BLAKE2B_KEYBYTES)) < 0 ) { goto fail; } while (mlen >= step) { if ( (err = blake2b_update(&S, p, step)) < 0 ) { goto fail; } mlen -= step; p += step; } if ( (err = blake2b_update(&S, p, mlen)) < 0) { goto fail; } if ( (err = blake2b_final(&S, hash, BLAKE2B_OUTBYTES)) < 0) { goto fail; } if (0 != memcmp(hash, blake2b_keyed_kat[i], BLAKE2B_OUTBYTES)) { goto fail; } } } puts( "ok" ); return 0; fail: puts("error"); return -1; } #endif borgbackup-1.1.15/src/borg/algorithms/blake2/ref/blake2.h0000644000175000017500000001446713771325506023037 0ustar useruser00000000000000/* BLAKE2 reference source code package - reference C implementations Copyright 2012, Samuel Neves . You may use this under the terms of the CC0, the OpenSSL Licence, or the Apache Public License 2.0, at your option. The terms of these licenses can be found at: - CC0 1.0 Universal : http://creativecommons.org/publicdomain/zero/1.0 - OpenSSL license : https://www.openssl.org/source/license.html - Apache 2.0 : http://www.apache.org/licenses/LICENSE-2.0 More information about the BLAKE2 hash function can be found at https://blake2.net. 
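   Streaming use of the blake2b functions declared below follows the usual
   init, update, final pattern (illustrative sketch; part1/part2 and their
   lengths are caller-supplied placeholders):

       blake2b_state S;
       uint8_t hash[BLAKE2B_OUTBYTES];
       if (blake2b_init(&S, sizeof hash) < 0) return -1;
       blake2b_update(&S, part1, part1_len);
       blake2b_update(&S, part2, part2_len);
       if (blake2b_final(&S, hash, sizeof hash) < 0) return -1;

   blake2b_init_key seeds the state with a key for MAC use; the blake2sp,
   blake2bp and blake2x variants declared here follow the same calling pattern.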
*/ #ifndef BLAKE2_H #define BLAKE2_H #include #include #if defined(_MSC_VER) #define BLAKE2_PACKED(x) __pragma(pack(push, 1)) x __pragma(pack(pop)) #else #define BLAKE2_PACKED(x) x __attribute__((packed)) #endif #if defined(__cplusplus) extern "C" { #endif enum blake2s_constant { BLAKE2S_BLOCKBYTES = 64, BLAKE2S_OUTBYTES = 32, BLAKE2S_KEYBYTES = 32, BLAKE2S_SALTBYTES = 8, BLAKE2S_PERSONALBYTES = 8 }; enum blake2b_constant { BLAKE2B_BLOCKBYTES = 128, BLAKE2B_OUTBYTES = 64, BLAKE2B_KEYBYTES = 64, BLAKE2B_SALTBYTES = 16, BLAKE2B_PERSONALBYTES = 16 }; typedef struct blake2s_state__ { uint32_t h[8]; uint32_t t[2]; uint32_t f[2]; uint8_t buf[BLAKE2S_BLOCKBYTES]; size_t buflen; size_t outlen; uint8_t last_node; } blake2s_state; typedef struct blake2b_state__ { uint64_t h[8]; uint64_t t[2]; uint64_t f[2]; uint8_t buf[BLAKE2B_BLOCKBYTES]; size_t buflen; size_t outlen; uint8_t last_node; } blake2b_state; typedef struct blake2sp_state__ { blake2s_state S[8][1]; blake2s_state R[1]; uint8_t buf[8 * BLAKE2S_BLOCKBYTES]; size_t buflen; size_t outlen; } blake2sp_state; typedef struct blake2bp_state__ { blake2b_state S[4][1]; blake2b_state R[1]; uint8_t buf[4 * BLAKE2B_BLOCKBYTES]; size_t buflen; size_t outlen; } blake2bp_state; BLAKE2_PACKED(struct blake2s_param__ { uint8_t digest_length; /* 1 */ uint8_t key_length; /* 2 */ uint8_t fanout; /* 3 */ uint8_t depth; /* 4 */ uint32_t leaf_length; /* 8 */ uint32_t node_offset; /* 12 */ uint16_t xof_length; /* 14 */ uint8_t node_depth; /* 15 */ uint8_t inner_length; /* 16 */ /* uint8_t reserved[0]; */ uint8_t salt[BLAKE2S_SALTBYTES]; /* 24 */ uint8_t personal[BLAKE2S_PERSONALBYTES]; /* 32 */ }); typedef struct blake2s_param__ blake2s_param; BLAKE2_PACKED(struct blake2b_param__ { uint8_t digest_length; /* 1 */ uint8_t key_length; /* 2 */ uint8_t fanout; /* 3 */ uint8_t depth; /* 4 */ uint32_t leaf_length; /* 8 */ uint32_t node_offset; /* 12 */ uint32_t xof_length; /* 16 */ uint8_t node_depth; /* 17 */ uint8_t inner_length; /* 18 */ uint8_t reserved[14]; /* 32 */ uint8_t salt[BLAKE2B_SALTBYTES]; /* 48 */ uint8_t personal[BLAKE2B_PERSONALBYTES]; /* 64 */ }); typedef struct blake2b_param__ blake2b_param; typedef struct blake2xs_state__ { blake2s_state S[1]; blake2s_param P[1]; } blake2xs_state; typedef struct blake2xb_state__ { blake2b_state S[1]; blake2b_param P[1]; } blake2xb_state; /* Padded structs result in a compile-time error */ enum { BLAKE2_DUMMY_1 = 1/(sizeof(blake2s_param) == BLAKE2S_OUTBYTES), BLAKE2_DUMMY_2 = 1/(sizeof(blake2b_param) == BLAKE2B_OUTBYTES) }; /* Streaming API */ int blake2s_init( blake2s_state *S, size_t outlen ); int blake2s_init_key( blake2s_state *S, size_t outlen, const void *key, size_t keylen ); int blake2s_init_param( blake2s_state *S, const blake2s_param *P ); int blake2s_update( blake2s_state *S, const void *in, size_t inlen ); int blake2s_final( blake2s_state *S, void *out, size_t outlen ); int blake2b_init( blake2b_state *S, size_t outlen ); int blake2b_init_key( blake2b_state *S, size_t outlen, const void *key, size_t keylen ); int blake2b_init_param( blake2b_state *S, const blake2b_param *P ); int blake2b_update( blake2b_state *S, const void *in, size_t inlen ); int blake2b_final( blake2b_state *S, void *out, size_t outlen ); int blake2sp_init( blake2sp_state *S, size_t outlen ); int blake2sp_init_key( blake2sp_state *S, size_t outlen, const void *key, size_t keylen ); int blake2sp_update( blake2sp_state *S, const void *in, size_t inlen ); int blake2sp_final( blake2sp_state *S, void *out, size_t outlen ); int blake2bp_init( 
blake2bp_state *S, size_t outlen ); int blake2bp_init_key( blake2bp_state *S, size_t outlen, const void *key, size_t keylen ); int blake2bp_update( blake2bp_state *S, const void *in, size_t inlen ); int blake2bp_final( blake2bp_state *S, void *out, size_t outlen ); /* Variable output length API */ int blake2xs_init( blake2xs_state *S, const size_t outlen ); int blake2xs_init_key( blake2xs_state *S, const size_t outlen, const void *key, size_t keylen ); int blake2xs_update( blake2xs_state *S, const void *in, size_t inlen ); int blake2xs_final(blake2xs_state *S, void *out, size_t outlen); int blake2xb_init( blake2xb_state *S, const size_t outlen ); int blake2xb_init_key( blake2xb_state *S, const size_t outlen, const void *key, size_t keylen ); int blake2xb_update( blake2xb_state *S, const void *in, size_t inlen ); int blake2xb_final(blake2xb_state *S, void *out, size_t outlen); /* Simple API */ int blake2s( void *out, size_t outlen, const void *in, size_t inlen, const void *key, size_t keylen ); int blake2b( void *out, size_t outlen, const void *in, size_t inlen, const void *key, size_t keylen ); int blake2sp( void *out, size_t outlen, const void *in, size_t inlen, const void *key, size_t keylen ); int blake2bp( void *out, size_t outlen, const void *in, size_t inlen, const void *key, size_t keylen ); int blake2xs( void *out, size_t outlen, const void *in, size_t inlen, const void *key, size_t keylen ); int blake2xb( void *out, size_t outlen, const void *in, size_t inlen, const void *key, size_t keylen ); /* This is simply an alias for blake2b */ int blake2( void *out, size_t outlen, const void *in, size_t inlen, const void *key, size_t keylen ); #if defined(__cplusplus) } #endif #endif borgbackup-1.1.15/src/borg/algorithms/zstd/0000755000175000017500000000000013771325773020570 5ustar useruser00000000000000borgbackup-1.1.15/src/borg/algorithms/zstd/lib/0000755000175000017500000000000013771325773021336 5ustar useruser00000000000000borgbackup-1.1.15/src/borg/algorithms/zstd/lib/compress/0000755000175000017500000000000013771325773023171 5ustar useruser00000000000000borgbackup-1.1.15/src/borg/algorithms/zstd/lib/compress/zstd_fast.h0000644000175000017500000000227213771325506025340 0ustar useruser00000000000000/* * Copyright (c) 2016-2020, Yann Collet, Facebook, Inc. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the * LICENSE file in the root directory of this source tree) and the GPLv2 (found * in the COPYING file in the root directory of this source tree). * You may select, at your option, one of the above-listed licenses. 
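 *
 * Note on the declarations below (descriptive only, mirroring the dictionary
 * modes used in zstd_lazy.c): ZSTD_compressBlock_fast,
 * ZSTD_compressBlock_fast_dictMatchState and ZSTD_compressBlock_fast_extDict
 * are the no-dictionary, attached-dictionary and external-dictionary variants
 * of the "fast" (hash-table) match finder, and ZSTD_fillHashTable preloads
 * that hash table from already-loaded dictionary content before block
 * compression starts.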
*/ #ifndef ZSTD_FAST_H #define ZSTD_FAST_H #if defined (__cplusplus) extern "C" { #endif #include "../common/mem.h" /* U32 */ #include "zstd_compress_internal.h" void ZSTD_fillHashTable(ZSTD_matchState_t* ms, void const* end, ZSTD_dictTableLoadMethod_e dtlm); size_t ZSTD_compressBlock_fast( ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize); size_t ZSTD_compressBlock_fast_dictMatchState( ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize); size_t ZSTD_compressBlock_fast_extDict( ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize); #if defined (__cplusplus) } #endif #endif /* ZSTD_FAST_H */ borgbackup-1.1.15/src/borg/algorithms/zstd/lib/compress/zstd_lazy.c0000644000175000017500000014724413771325506025366 0ustar useruser00000000000000/* * Copyright (c) 2016-2020, Yann Collet, Facebook, Inc. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the * LICENSE file in the root directory of this source tree) and the GPLv2 (found * in the COPYING file in the root directory of this source tree). * You may select, at your option, one of the above-listed licenses. */ #include "zstd_compress_internal.h" #include "zstd_lazy.h" /*-************************************* * Binary Tree search ***************************************/ static void ZSTD_updateDUBT(ZSTD_matchState_t* ms, const BYTE* ip, const BYTE* iend, U32 mls) { const ZSTD_compressionParameters* const cParams = &ms->cParams; U32* const hashTable = ms->hashTable; U32 const hashLog = cParams->hashLog; U32* const bt = ms->chainTable; U32 const btLog = cParams->chainLog - 1; U32 const btMask = (1 << btLog) - 1; const BYTE* const base = ms->window.base; U32 const target = (U32)(ip - base); U32 idx = ms->nextToUpdate; if (idx != target) DEBUGLOG(7, "ZSTD_updateDUBT, from %u to %u (dictLimit:%u)", idx, target, ms->window.dictLimit); assert(ip + 8 <= iend); /* condition for ZSTD_hashPtr */ (void)iend; assert(idx >= ms->window.dictLimit); /* condition for valid base+idx */ for ( ; idx < target ; idx++) { size_t const h = ZSTD_hashPtr(base + idx, hashLog, mls); /* assumption : ip + 8 <= iend */ U32 const matchIndex = hashTable[h]; U32* const nextCandidatePtr = bt + 2*(idx&btMask); U32* const sortMarkPtr = nextCandidatePtr + 1; DEBUGLOG(8, "ZSTD_updateDUBT: insert %u", idx); hashTable[h] = idx; /* Update Hash Table */ *nextCandidatePtr = matchIndex; /* update BT like a chain */ *sortMarkPtr = ZSTD_DUBT_UNSORTED_MARK; } ms->nextToUpdate = target; } /** ZSTD_insertDUBT1() : * sort one already inserted but unsorted position * assumption : current >= btlow == (current - btmask) * doesn't fail */ static void ZSTD_insertDUBT1(ZSTD_matchState_t* ms, U32 current, const BYTE* inputEnd, U32 nbCompares, U32 btLow, const ZSTD_dictMode_e dictMode) { const ZSTD_compressionParameters* const cParams = &ms->cParams; U32* const bt = ms->chainTable; U32 const btLog = cParams->chainLog - 1; U32 const btMask = (1 << btLog) - 1; size_t commonLengthSmaller=0, commonLengthLarger=0; const BYTE* const base = ms->window.base; const BYTE* const dictBase = ms->window.dictBase; const U32 dictLimit = ms->window.dictLimit; const BYTE* const ip = (current>=dictLimit) ? base + current : dictBase + current; const BYTE* const iend = (current>=dictLimit) ? 
inputEnd : dictBase + dictLimit; const BYTE* const dictEnd = dictBase + dictLimit; const BYTE* const prefixStart = base + dictLimit; const BYTE* match; U32* smallerPtr = bt + 2*(current&btMask); U32* largerPtr = smallerPtr + 1; U32 matchIndex = *smallerPtr; /* this candidate is unsorted : next sorted candidate is reached through *smallerPtr, while *largerPtr contains previous unsorted candidate (which is already saved and can be overwritten) */ U32 dummy32; /* to be nullified at the end */ U32 const windowValid = ms->window.lowLimit; U32 const maxDistance = 1U << cParams->windowLog; U32 const windowLow = (current - windowValid > maxDistance) ? current - maxDistance : windowValid; DEBUGLOG(8, "ZSTD_insertDUBT1(%u) (dictLimit=%u, lowLimit=%u)", current, dictLimit, windowLow); assert(current >= btLow); assert(ip < iend); /* condition for ZSTD_count */ while (nbCompares-- && (matchIndex > windowLow)) { U32* const nextPtr = bt + 2*(matchIndex & btMask); size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger); /* guaranteed minimum nb of common bytes */ assert(matchIndex < current); /* note : all candidates are now supposed sorted, * but it's still possible to have nextPtr[1] == ZSTD_DUBT_UNSORTED_MARK * when a real index has the same value as ZSTD_DUBT_UNSORTED_MARK */ if ( (dictMode != ZSTD_extDict) || (matchIndex+matchLength >= dictLimit) /* both in current segment*/ || (current < dictLimit) /* both in extDict */) { const BYTE* const mBase = ( (dictMode != ZSTD_extDict) || (matchIndex+matchLength >= dictLimit)) ? base : dictBase; assert( (matchIndex+matchLength >= dictLimit) /* might be wrong if extDict is incorrectly set to 0 */ || (current < dictLimit) ); match = mBase + matchIndex; matchLength += ZSTD_count(ip+matchLength, match+matchLength, iend); } else { match = dictBase + matchIndex; matchLength += ZSTD_count_2segments(ip+matchLength, match+matchLength, iend, dictEnd, prefixStart); if (matchIndex+matchLength >= dictLimit) match = base + matchIndex; /* preparation for next read of match[matchLength] */ } DEBUGLOG(8, "ZSTD_insertDUBT1: comparing %u with %u : found %u common bytes ", current, matchIndex, (U32)matchLength); if (ip+matchLength == iend) { /* equal : no way to know if inf or sup */ break; /* drop , to guarantee consistency ; miss a bit of compression, but other solutions can corrupt tree */ } if (match[matchLength] < ip[matchLength]) { /* necessarily within buffer */ /* match is smaller than current */ *smallerPtr = matchIndex; /* update smaller idx */ commonLengthSmaller = matchLength; /* all smaller will now have at least this guaranteed common length */ if (matchIndex <= btLow) { smallerPtr=&dummy32; break; } /* beyond tree size, stop searching */ DEBUGLOG(8, "ZSTD_insertDUBT1: %u (>btLow=%u) is smaller : next => %u", matchIndex, btLow, nextPtr[1]); smallerPtr = nextPtr+1; /* new "candidate" => larger than match, which was smaller than target */ matchIndex = nextPtr[1]; /* new matchIndex, larger than previous and closer to current */ } else { /* match is larger than current */ *largerPtr = matchIndex; commonLengthLarger = matchLength; if (matchIndex <= btLow) { largerPtr=&dummy32; break; } /* beyond tree size, stop searching */ DEBUGLOG(8, "ZSTD_insertDUBT1: %u (>btLow=%u) is larger => %u", matchIndex, btLow, nextPtr[0]); largerPtr = nextPtr; matchIndex = nextPtr[0]; } } *smallerPtr = *largerPtr = 0; } static size_t ZSTD_DUBT_findBetterDictMatch ( ZSTD_matchState_t* ms, const BYTE* const ip, const BYTE* const iend, size_t* offsetPtr, size_t bestLength, U32 
nbCompares, U32 const mls, const ZSTD_dictMode_e dictMode) { const ZSTD_matchState_t * const dms = ms->dictMatchState; const ZSTD_compressionParameters* const dmsCParams = &dms->cParams; const U32 * const dictHashTable = dms->hashTable; U32 const hashLog = dmsCParams->hashLog; size_t const h = ZSTD_hashPtr(ip, hashLog, mls); U32 dictMatchIndex = dictHashTable[h]; const BYTE* const base = ms->window.base; const BYTE* const prefixStart = base + ms->window.dictLimit; U32 const current = (U32)(ip-base); const BYTE* const dictBase = dms->window.base; const BYTE* const dictEnd = dms->window.nextSrc; U32 const dictHighLimit = (U32)(dms->window.nextSrc - dms->window.base); U32 const dictLowLimit = dms->window.lowLimit; U32 const dictIndexDelta = ms->window.lowLimit - dictHighLimit; U32* const dictBt = dms->chainTable; U32 const btLog = dmsCParams->chainLog - 1; U32 const btMask = (1 << btLog) - 1; U32 const btLow = (btMask >= dictHighLimit - dictLowLimit) ? dictLowLimit : dictHighLimit - btMask; size_t commonLengthSmaller=0, commonLengthLarger=0; (void)dictMode; assert(dictMode == ZSTD_dictMatchState); while (nbCompares-- && (dictMatchIndex > dictLowLimit)) { U32* const nextPtr = dictBt + 2*(dictMatchIndex & btMask); size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger); /* guaranteed minimum nb of common bytes */ const BYTE* match = dictBase + dictMatchIndex; matchLength += ZSTD_count_2segments(ip+matchLength, match+matchLength, iend, dictEnd, prefixStart); if (dictMatchIndex+matchLength >= dictHighLimit) match = base + dictMatchIndex + dictIndexDelta; /* to prepare for next usage of match[matchLength] */ if (matchLength > bestLength) { U32 matchIndex = dictMatchIndex + dictIndexDelta; if ( (4*(int)(matchLength-bestLength)) > (int)(ZSTD_highbit32(current-matchIndex+1) - ZSTD_highbit32((U32)offsetPtr[0]+1)) ) { DEBUGLOG(9, "ZSTD_DUBT_findBetterDictMatch(%u) : found better match length %u -> %u and offsetCode %u -> %u (dictMatchIndex %u, matchIndex %u)", current, (U32)bestLength, (U32)matchLength, (U32)*offsetPtr, ZSTD_REP_MOVE + current - matchIndex, dictMatchIndex, matchIndex); bestLength = matchLength, *offsetPtr = ZSTD_REP_MOVE + current - matchIndex; } if (ip+matchLength == iend) { /* reached end of input : ip[matchLength] is not valid, no way to know if it's larger or smaller than match */ break; /* drop, to guarantee consistency (miss a little bit of compression) */ } } if (match[matchLength] < ip[matchLength]) { if (dictMatchIndex <= btLow) { break; } /* beyond tree size, stop the search */ commonLengthSmaller = matchLength; /* all smaller will now have at least this guaranteed common length */ dictMatchIndex = nextPtr[1]; /* new matchIndex larger than previous (closer to current) */ } else { /* match is larger than current */ if (dictMatchIndex <= btLow) { break; } /* beyond tree size, stop the search */ commonLengthLarger = matchLength; dictMatchIndex = nextPtr[0]; } } if (bestLength >= MINMATCH) { U32 const mIndex = current - ((U32)*offsetPtr - ZSTD_REP_MOVE); (void)mIndex; DEBUGLOG(8, "ZSTD_DUBT_findBetterDictMatch(%u) : found match of length %u and offsetCode %u (pos %u)", current, (U32)bestLength, (U32)*offsetPtr, mIndex); } return bestLength; } static size_t ZSTD_DUBT_findBestMatch(ZSTD_matchState_t* ms, const BYTE* const ip, const BYTE* const iend, size_t* offsetPtr, U32 const mls, const ZSTD_dictMode_e dictMode) { const ZSTD_compressionParameters* const cParams = &ms->cParams; U32* const hashTable = ms->hashTable; U32 const hashLog = cParams->hashLog; size_t const h = 
ZSTD_hashPtr(ip, hashLog, mls); U32 matchIndex = hashTable[h]; const BYTE* const base = ms->window.base; U32 const current = (U32)(ip-base); U32 const windowLow = ZSTD_getLowestMatchIndex(ms, current, cParams->windowLog); U32* const bt = ms->chainTable; U32 const btLog = cParams->chainLog - 1; U32 const btMask = (1 << btLog) - 1; U32 const btLow = (btMask >= current) ? 0 : current - btMask; U32 const unsortLimit = MAX(btLow, windowLow); U32* nextCandidate = bt + 2*(matchIndex&btMask); U32* unsortedMark = bt + 2*(matchIndex&btMask) + 1; U32 nbCompares = 1U << cParams->searchLog; U32 nbCandidates = nbCompares; U32 previousCandidate = 0; DEBUGLOG(7, "ZSTD_DUBT_findBestMatch (%u) ", current); assert(ip <= iend-8); /* required for h calculation */ /* reach end of unsorted candidates list */ while ( (matchIndex > unsortLimit) && (*unsortedMark == ZSTD_DUBT_UNSORTED_MARK) && (nbCandidates > 1) ) { DEBUGLOG(8, "ZSTD_DUBT_findBestMatch: candidate %u is unsorted", matchIndex); *unsortedMark = previousCandidate; /* the unsortedMark becomes a reversed chain, to move up back to original position */ previousCandidate = matchIndex; matchIndex = *nextCandidate; nextCandidate = bt + 2*(matchIndex&btMask); unsortedMark = bt + 2*(matchIndex&btMask) + 1; nbCandidates --; } /* nullify last candidate if it's still unsorted * simplification, detrimental to compression ratio, beneficial for speed */ if ( (matchIndex > unsortLimit) && (*unsortedMark==ZSTD_DUBT_UNSORTED_MARK) ) { DEBUGLOG(7, "ZSTD_DUBT_findBestMatch: nullify last unsorted candidate %u", matchIndex); *nextCandidate = *unsortedMark = 0; } /* batch sort stacked candidates */ matchIndex = previousCandidate; while (matchIndex) { /* will end on matchIndex == 0 */ U32* const nextCandidateIdxPtr = bt + 2*(matchIndex&btMask) + 1; U32 const nextCandidateIdx = *nextCandidateIdxPtr; ZSTD_insertDUBT1(ms, matchIndex, iend, nbCandidates, unsortLimit, dictMode); matchIndex = nextCandidateIdx; nbCandidates++; } /* find longest match */ { size_t commonLengthSmaller = 0, commonLengthLarger = 0; const BYTE* const dictBase = ms->window.dictBase; const U32 dictLimit = ms->window.dictLimit; const BYTE* const dictEnd = dictBase + dictLimit; const BYTE* const prefixStart = base + dictLimit; U32* smallerPtr = bt + 2*(current&btMask); U32* largerPtr = bt + 2*(current&btMask) + 1; U32 matchEndIdx = current + 8 + 1; U32 dummy32; /* to be nullified at the end */ size_t bestLength = 0; matchIndex = hashTable[h]; hashTable[h] = current; /* Update Hash Table */ while (nbCompares-- && (matchIndex > windowLow)) { U32* const nextPtr = bt + 2*(matchIndex & btMask); size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger); /* guaranteed minimum nb of common bytes */ const BYTE* match; if ((dictMode != ZSTD_extDict) || (matchIndex+matchLength >= dictLimit)) { match = base + matchIndex; matchLength += ZSTD_count(ip+matchLength, match+matchLength, iend); } else { match = dictBase + matchIndex; matchLength += ZSTD_count_2segments(ip+matchLength, match+matchLength, iend, dictEnd, prefixStart); if (matchIndex+matchLength >= dictLimit) match = base + matchIndex; /* to prepare for next usage of match[matchLength] */ } if (matchLength > bestLength) { if (matchLength > matchEndIdx - matchIndex) matchEndIdx = matchIndex + (U32)matchLength; if ( (4*(int)(matchLength-bestLength)) > (int)(ZSTD_highbit32(current-matchIndex+1) - ZSTD_highbit32((U32)offsetPtr[0]+1)) ) bestLength = matchLength, *offsetPtr = ZSTD_REP_MOVE + current - matchIndex; if (ip+matchLength == iend) { /* equal : no way to 
know if inf or sup */ if (dictMode == ZSTD_dictMatchState) { nbCompares = 0; /* in addition to avoiding checking any * further in this loop, make sure we * skip checking in the dictionary. */ } break; /* drop, to guarantee consistency (miss a little bit of compression) */ } } if (match[matchLength] < ip[matchLength]) { /* match is smaller than current */ *smallerPtr = matchIndex; /* update smaller idx */ commonLengthSmaller = matchLength; /* all smaller will now have at least this guaranteed common length */ if (matchIndex <= btLow) { smallerPtr=&dummy32; break; } /* beyond tree size, stop the search */ smallerPtr = nextPtr+1; /* new "smaller" => larger of match */ matchIndex = nextPtr[1]; /* new matchIndex larger than previous (closer to current) */ } else { /* match is larger than current */ *largerPtr = matchIndex; commonLengthLarger = matchLength; if (matchIndex <= btLow) { largerPtr=&dummy32; break; } /* beyond tree size, stop the search */ largerPtr = nextPtr; matchIndex = nextPtr[0]; } } *smallerPtr = *largerPtr = 0; if (dictMode == ZSTD_dictMatchState && nbCompares) { bestLength = ZSTD_DUBT_findBetterDictMatch( ms, ip, iend, offsetPtr, bestLength, nbCompares, mls, dictMode); } assert(matchEndIdx > current+8); /* ensure nextToUpdate is increased */ ms->nextToUpdate = matchEndIdx - 8; /* skip repetitive patterns */ if (bestLength >= MINMATCH) { U32 const mIndex = current - ((U32)*offsetPtr - ZSTD_REP_MOVE); (void)mIndex; DEBUGLOG(8, "ZSTD_DUBT_findBestMatch(%u) : found match of length %u and offsetCode %u (pos %u)", current, (U32)bestLength, (U32)*offsetPtr, mIndex); } return bestLength; } } /** ZSTD_BtFindBestMatch() : Tree updater, providing best match */ FORCE_INLINE_TEMPLATE size_t ZSTD_BtFindBestMatch( ZSTD_matchState_t* ms, const BYTE* const ip, const BYTE* const iLimit, size_t* offsetPtr, const U32 mls /* template */, const ZSTD_dictMode_e dictMode) { DEBUGLOG(7, "ZSTD_BtFindBestMatch"); if (ip < ms->window.base + ms->nextToUpdate) return 0; /* skipped area */ ZSTD_updateDUBT(ms, ip, iLimit, mls); return ZSTD_DUBT_findBestMatch(ms, ip, iLimit, offsetPtr, mls, dictMode); } static size_t ZSTD_BtFindBestMatch_selectMLS ( ZSTD_matchState_t* ms, const BYTE* ip, const BYTE* const iLimit, size_t* offsetPtr) { switch(ms->cParams.minMatch) { default : /* includes case 3 */ case 4 : return ZSTD_BtFindBestMatch(ms, ip, iLimit, offsetPtr, 4, ZSTD_noDict); case 5 : return ZSTD_BtFindBestMatch(ms, ip, iLimit, offsetPtr, 5, ZSTD_noDict); case 7 : case 6 : return ZSTD_BtFindBestMatch(ms, ip, iLimit, offsetPtr, 6, ZSTD_noDict); } } static size_t ZSTD_BtFindBestMatch_dictMatchState_selectMLS ( ZSTD_matchState_t* ms, const BYTE* ip, const BYTE* const iLimit, size_t* offsetPtr) { switch(ms->cParams.minMatch) { default : /* includes case 3 */ case 4 : return ZSTD_BtFindBestMatch(ms, ip, iLimit, offsetPtr, 4, ZSTD_dictMatchState); case 5 : return ZSTD_BtFindBestMatch(ms, ip, iLimit, offsetPtr, 5, ZSTD_dictMatchState); case 7 : case 6 : return ZSTD_BtFindBestMatch(ms, ip, iLimit, offsetPtr, 6, ZSTD_dictMatchState); } } static size_t ZSTD_BtFindBestMatch_extDict_selectMLS ( ZSTD_matchState_t* ms, const BYTE* ip, const BYTE* const iLimit, size_t* offsetPtr) { switch(ms->cParams.minMatch) { default : /* includes case 3 */ case 4 : return ZSTD_BtFindBestMatch(ms, ip, iLimit, offsetPtr, 4, ZSTD_extDict); case 5 : return ZSTD_BtFindBestMatch(ms, ip, iLimit, offsetPtr, 5, ZSTD_extDict); case 7 : case 6 : return ZSTD_BtFindBestMatch(ms, ip, iLimit, offsetPtr, 6, ZSTD_extDict); } } /* 
********************************* * Hash Chain ***********************************/ #define NEXT_IN_CHAIN(d, mask) chainTable[(d) & (mask)] /* Update chains up to ip (excluded) Assumption : always within prefix (i.e. not within extDict) */ static U32 ZSTD_insertAndFindFirstIndex_internal( ZSTD_matchState_t* ms, const ZSTD_compressionParameters* const cParams, const BYTE* ip, U32 const mls) { U32* const hashTable = ms->hashTable; const U32 hashLog = cParams->hashLog; U32* const chainTable = ms->chainTable; const U32 chainMask = (1 << cParams->chainLog) - 1; const BYTE* const base = ms->window.base; const U32 target = (U32)(ip - base); U32 idx = ms->nextToUpdate; while(idx < target) { /* catch up */ size_t const h = ZSTD_hashPtr(base+idx, hashLog, mls); NEXT_IN_CHAIN(idx, chainMask) = hashTable[h]; hashTable[h] = idx; idx++; } ms->nextToUpdate = target; return hashTable[ZSTD_hashPtr(ip, hashLog, mls)]; } U32 ZSTD_insertAndFindFirstIndex(ZSTD_matchState_t* ms, const BYTE* ip) { const ZSTD_compressionParameters* const cParams = &ms->cParams; return ZSTD_insertAndFindFirstIndex_internal(ms, cParams, ip, ms->cParams.minMatch); } /* inlining is important to hardwire a hot branch (template emulation) */ FORCE_INLINE_TEMPLATE size_t ZSTD_HcFindBestMatch_generic ( ZSTD_matchState_t* ms, const BYTE* const ip, const BYTE* const iLimit, size_t* offsetPtr, const U32 mls, const ZSTD_dictMode_e dictMode) { const ZSTD_compressionParameters* const cParams = &ms->cParams; U32* const chainTable = ms->chainTable; const U32 chainSize = (1 << cParams->chainLog); const U32 chainMask = chainSize-1; const BYTE* const base = ms->window.base; const BYTE* const dictBase = ms->window.dictBase; const U32 dictLimit = ms->window.dictLimit; const BYTE* const prefixStart = base + dictLimit; const BYTE* const dictEnd = dictBase + dictLimit; const U32 current = (U32)(ip-base); const U32 maxDistance = 1U << cParams->windowLog; const U32 lowestValid = ms->window.lowLimit; const U32 withinMaxDistance = (current - lowestValid > maxDistance) ? current - maxDistance : lowestValid; const U32 isDictionary = (ms->loadedDictEnd != 0); const U32 lowLimit = isDictionary ? lowestValid : withinMaxDistance; const U32 minChain = current > chainSize ? 
current - chainSize : 0; U32 nbAttempts = 1U << cParams->searchLog; size_t ml=4-1; /* HC4 match finder */ U32 matchIndex = ZSTD_insertAndFindFirstIndex_internal(ms, cParams, ip, mls); for ( ; (matchIndex>lowLimit) & (nbAttempts>0) ; nbAttempts--) { size_t currentMl=0; if ((dictMode != ZSTD_extDict) || matchIndex >= dictLimit) { const BYTE* const match = base + matchIndex; assert(matchIndex >= dictLimit); /* ensures this is true if dictMode != ZSTD_extDict */ if (match[ml] == ip[ml]) /* potentially better */ currentMl = ZSTD_count(ip, match, iLimit); } else { const BYTE* const match = dictBase + matchIndex; assert(match+4 <= dictEnd); if (MEM_read32(match) == MEM_read32(ip)) /* assumption : matchIndex <= dictLimit-4 (by table construction) */ currentMl = ZSTD_count_2segments(ip+4, match+4, iLimit, dictEnd, prefixStart) + 4; } /* save best solution */ if (currentMl > ml) { ml = currentMl; *offsetPtr = current - matchIndex + ZSTD_REP_MOVE; if (ip+currentMl == iLimit) break; /* best possible, avoids read overflow on next attempt */ } if (matchIndex <= minChain) break; matchIndex = NEXT_IN_CHAIN(matchIndex, chainMask); } if (dictMode == ZSTD_dictMatchState) { const ZSTD_matchState_t* const dms = ms->dictMatchState; const U32* const dmsChainTable = dms->chainTable; const U32 dmsChainSize = (1 << dms->cParams.chainLog); const U32 dmsChainMask = dmsChainSize - 1; const U32 dmsLowestIndex = dms->window.dictLimit; const BYTE* const dmsBase = dms->window.base; const BYTE* const dmsEnd = dms->window.nextSrc; const U32 dmsSize = (U32)(dmsEnd - dmsBase); const U32 dmsIndexDelta = dictLimit - dmsSize; const U32 dmsMinChain = dmsSize > dmsChainSize ? dmsSize - dmsChainSize : 0; matchIndex = dms->hashTable[ZSTD_hashPtr(ip, dms->cParams.hashLog, mls)]; for ( ; (matchIndex>dmsLowestIndex) & (nbAttempts>0) ; nbAttempts--) { size_t currentMl=0; const BYTE* const match = dmsBase + matchIndex; assert(match+4 <= dmsEnd); if (MEM_read32(match) == MEM_read32(ip)) /* assumption : matchIndex <= dictLimit-4 (by table construction) */ currentMl = ZSTD_count_2segments(ip+4, match+4, iLimit, dmsEnd, prefixStart) + 4; /* save best solution */ if (currentMl > ml) { ml = currentMl; *offsetPtr = current - (matchIndex + dmsIndexDelta) + ZSTD_REP_MOVE; if (ip+currentMl == iLimit) break; /* best possible, avoids read overflow on next attempt */ } if (matchIndex <= dmsMinChain) break; matchIndex = dmsChainTable[matchIndex & dmsChainMask]; } } return ml; } FORCE_INLINE_TEMPLATE size_t ZSTD_HcFindBestMatch_selectMLS ( ZSTD_matchState_t* ms, const BYTE* ip, const BYTE* const iLimit, size_t* offsetPtr) { switch(ms->cParams.minMatch) { default : /* includes case 3 */ case 4 : return ZSTD_HcFindBestMatch_generic(ms, ip, iLimit, offsetPtr, 4, ZSTD_noDict); case 5 : return ZSTD_HcFindBestMatch_generic(ms, ip, iLimit, offsetPtr, 5, ZSTD_noDict); case 7 : case 6 : return ZSTD_HcFindBestMatch_generic(ms, ip, iLimit, offsetPtr, 6, ZSTD_noDict); } } static size_t ZSTD_HcFindBestMatch_dictMatchState_selectMLS ( ZSTD_matchState_t* ms, const BYTE* ip, const BYTE* const iLimit, size_t* offsetPtr) { switch(ms->cParams.minMatch) { default : /* includes case 3 */ case 4 : return ZSTD_HcFindBestMatch_generic(ms, ip, iLimit, offsetPtr, 4, ZSTD_dictMatchState); case 5 : return ZSTD_HcFindBestMatch_generic(ms, ip, iLimit, offsetPtr, 5, ZSTD_dictMatchState); case 7 : case 6 : return ZSTD_HcFindBestMatch_generic(ms, ip, iLimit, offsetPtr, 6, ZSTD_dictMatchState); } } FORCE_INLINE_TEMPLATE size_t ZSTD_HcFindBestMatch_extDict_selectMLS ( ZSTD_matchState_t* 
ms, const BYTE* ip, const BYTE* const iLimit, size_t* offsetPtr) { switch(ms->cParams.minMatch) { default : /* includes case 3 */ case 4 : return ZSTD_HcFindBestMatch_generic(ms, ip, iLimit, offsetPtr, 4, ZSTD_extDict); case 5 : return ZSTD_HcFindBestMatch_generic(ms, ip, iLimit, offsetPtr, 5, ZSTD_extDict); case 7 : case 6 : return ZSTD_HcFindBestMatch_generic(ms, ip, iLimit, offsetPtr, 6, ZSTD_extDict); } } /* ******************************* * Common parser - lazy strategy *********************************/ typedef enum { search_hashChain, search_binaryTree } searchMethod_e; FORCE_INLINE_TEMPLATE size_t ZSTD_compressBlock_lazy_generic( ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], const void* src, size_t srcSize, const searchMethod_e searchMethod, const U32 depth, ZSTD_dictMode_e const dictMode) { const BYTE* const istart = (const BYTE*)src; const BYTE* ip = istart; const BYTE* anchor = istart; const BYTE* const iend = istart + srcSize; const BYTE* const ilimit = iend - 8; const BYTE* const base = ms->window.base; const U32 prefixLowestIndex = ms->window.dictLimit; const BYTE* const prefixLowest = base + prefixLowestIndex; typedef size_t (*searchMax_f)( ZSTD_matchState_t* ms, const BYTE* ip, const BYTE* iLimit, size_t* offsetPtr); searchMax_f const searchMax = dictMode == ZSTD_dictMatchState ? (searchMethod==search_binaryTree ? ZSTD_BtFindBestMatch_dictMatchState_selectMLS : ZSTD_HcFindBestMatch_dictMatchState_selectMLS) : (searchMethod==search_binaryTree ? ZSTD_BtFindBestMatch_selectMLS : ZSTD_HcFindBestMatch_selectMLS); U32 offset_1 = rep[0], offset_2 = rep[1], savedOffset=0; const ZSTD_matchState_t* const dms = ms->dictMatchState; const U32 dictLowestIndex = dictMode == ZSTD_dictMatchState ? dms->window.dictLimit : 0; const BYTE* const dictBase = dictMode == ZSTD_dictMatchState ? dms->window.base : NULL; const BYTE* const dictLowest = dictMode == ZSTD_dictMatchState ? dictBase + dictLowestIndex : NULL; const BYTE* const dictEnd = dictMode == ZSTD_dictMatchState ? dms->window.nextSrc : NULL; const U32 dictIndexDelta = dictMode == ZSTD_dictMatchState ? prefixLowestIndex - (U32)(dictEnd - dictBase) : 0; const U32 dictAndPrefixLength = (U32)((ip - prefixLowest) + (dictEnd - dictLowest)); DEBUGLOG(5, "ZSTD_compressBlock_lazy_generic (dictMode=%u)", (U32)dictMode); /* init */ ip += (dictAndPrefixLength == 0); if (dictMode == ZSTD_noDict) { U32 const current = (U32)(ip - base); U32 const windowLow = ZSTD_getLowestPrefixIndex(ms, current, ms->cParams.windowLog); U32 const maxRep = current - windowLow; if (offset_2 > maxRep) savedOffset = offset_2, offset_2 = 0; if (offset_1 > maxRep) savedOffset = offset_1, offset_1 = 0; } if (dictMode == ZSTD_dictMatchState) { /* dictMatchState repCode checks don't currently handle repCode == 0 * disabling. */ assert(offset_1 <= dictAndPrefixLength); assert(offset_2 <= dictAndPrefixLength); } /* Match Loop */ #if defined(__GNUC__) && defined(__x86_64__) /* I've measured random a 5% speed loss on levels 5 & 6 (greedy) when the * code alignment is perturbed. To fix the instability align the loop on 32-bytes. */ __asm__(".p2align 5"); #endif while (ip < ilimit) { size_t matchLength=0; size_t offset=0; const BYTE* start=ip+1; /* check repCode */ if (dictMode == ZSTD_dictMatchState) { const U32 repIndex = (U32)(ip - base) + 1 - offset_1; const BYTE* repMatch = (dictMode == ZSTD_dictMatchState && repIndex < prefixLowestIndex) ? 
dictBase + (repIndex - dictIndexDelta) : base + repIndex; if (((U32)((prefixLowestIndex-1) - repIndex) >= 3 /* intentional underflow */) && (MEM_read32(repMatch) == MEM_read32(ip+1)) ) { const BYTE* repMatchEnd = repIndex < prefixLowestIndex ? dictEnd : iend; matchLength = ZSTD_count_2segments(ip+1+4, repMatch+4, iend, repMatchEnd, prefixLowest) + 4; if (depth==0) goto _storeSequence; } } if ( dictMode == ZSTD_noDict && ((offset_1 > 0) & (MEM_read32(ip+1-offset_1) == MEM_read32(ip+1)))) { matchLength = ZSTD_count(ip+1+4, ip+1+4-offset_1, iend) + 4; if (depth==0) goto _storeSequence; } /* first search (depth 0) */ { size_t offsetFound = 999999999; size_t const ml2 = searchMax(ms, ip, iend, &offsetFound); if (ml2 > matchLength) matchLength = ml2, start = ip, offset=offsetFound; } if (matchLength < 4) { ip += ((ip-anchor) >> kSearchStrength) + 1; /* jump faster over incompressible sections */ continue; } /* let's try to find a better solution */ if (depth>=1) while (ip<ilimit) { ip ++; if ( (dictMode == ZSTD_noDict) && (offset) && ((offset_1>0) & (MEM_read32(ip) == MEM_read32(ip - offset_1)))) { size_t const mlRep = ZSTD_count(ip+4, ip+4-offset_1, iend) + 4; int const gain2 = (int)(mlRep * 3); int const gain1 = (int)(matchLength*3 - ZSTD_highbit32((U32)offset+1) + 1); if ((mlRep >= 4) && (gain2 > gain1)) matchLength = mlRep, offset = 0, start = ip; } if (dictMode == ZSTD_dictMatchState) { const U32 repIndex = (U32)(ip - base) - offset_1; const BYTE* repMatch = repIndex < prefixLowestIndex ? dictBase + (repIndex - dictIndexDelta) : base + repIndex; if (((U32)((prefixLowestIndex-1) - repIndex) >= 3 /* intentional underflow */) && (MEM_read32(repMatch) == MEM_read32(ip)) ) { const BYTE* repMatchEnd = repIndex < prefixLowestIndex ? dictEnd : iend; size_t const mlRep = ZSTD_count_2segments(ip+4, repMatch+4, iend, repMatchEnd, prefixLowest) + 4; int const gain2 = (int)(mlRep * 3); int const gain1 = (int)(matchLength*3 - ZSTD_highbit32((U32)offset+1) + 1); if ((mlRep >= 4) && (gain2 > gain1)) matchLength = mlRep, offset = 0, start = ip; } } { size_t offset2=999999999; size_t const ml2 = searchMax(ms, ip, iend, &offset2); int const gain2 = (int)(ml2*4 - ZSTD_highbit32((U32)offset2+1)); /* raw approx */ int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offset+1) + 4); if ((ml2 >= 4) && (gain2 > gain1)) { matchLength = ml2, offset = offset2, start = ip; continue; /* search a better one */ } } /* let's find an even better one */ if ((depth==2) && (ip<ilimit)) { ip ++; if ( (dictMode == ZSTD_noDict) && (offset) && ((offset_1>0) & (MEM_read32(ip) == MEM_read32(ip - offset_1)))) { size_t const mlRep = ZSTD_count(ip+4, ip+4-offset_1, iend) + 4; int const gain2 = (int)(mlRep * 4); int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offset+1) + 1); if ((mlRep >= 4) && (gain2 > gain1)) matchLength = mlRep, offset = 0, start = ip; } if (dictMode == ZSTD_dictMatchState) { const U32 repIndex = (U32)(ip - base) - offset_1; const BYTE* repMatch = repIndex < prefixLowestIndex ? dictBase + (repIndex - dictIndexDelta) : base + repIndex; if (((U32)((prefixLowestIndex-1) - repIndex) >= 3 /* intentional underflow */) && (MEM_read32(repMatch) == MEM_read32(ip)) ) { const BYTE* repMatchEnd = repIndex < prefixLowestIndex ?
dictEnd : iend; size_t const mlRep = ZSTD_count_2segments(ip+4, repMatch+4, iend, repMatchEnd, prefixLowest) + 4; int const gain2 = (int)(mlRep * 4); int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offset+1) + 1); if ((mlRep >= 4) && (gain2 > gain1)) matchLength = mlRep, offset = 0, start = ip; } } { size_t offset2=999999999; size_t const ml2 = searchMax(ms, ip, iend, &offset2); int const gain2 = (int)(ml2*4 - ZSTD_highbit32((U32)offset2+1)); /* raw approx */ int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offset+1) + 7); if ((ml2 >= 4) && (gain2 > gain1)) { matchLength = ml2, offset = offset2, start = ip; continue; } } } break; /* nothing found : store previous solution */ } /* NOTE: * start[-offset+ZSTD_REP_MOVE-1] is undefined behavior. * (-offset+ZSTD_REP_MOVE-1) is unsigned, and is added to start, which * overflows the pointer, which is undefined behavior. */ /* catch up */ if (offset) { if (dictMode == ZSTD_noDict) { while ( ((start > anchor) & (start - (offset-ZSTD_REP_MOVE) > prefixLowest)) && (start[-1] == (start-(offset-ZSTD_REP_MOVE))[-1]) ) /* only search for offset within prefix */ { start--; matchLength++; } } if (dictMode == ZSTD_dictMatchState) { U32 const matchIndex = (U32)((start-base) - (offset - ZSTD_REP_MOVE)); const BYTE* match = (matchIndex < prefixLowestIndex) ? dictBase + matchIndex - dictIndexDelta : base + matchIndex; const BYTE* const mStart = (matchIndex < prefixLowestIndex) ? dictLowest : prefixLowest; while ((start>anchor) && (match>mStart) && (start[-1] == match[-1])) { start--; match--; matchLength++; } /* catch up */ } offset_2 = offset_1; offset_1 = (U32)(offset - ZSTD_REP_MOVE); } /* store sequence */ _storeSequence: { size_t const litLength = start - anchor; ZSTD_storeSeq(seqStore, litLength, anchor, iend, (U32)offset, matchLength-MINMATCH); anchor = ip = start + matchLength; } /* check immediate repcode */ if (dictMode == ZSTD_dictMatchState) { while (ip <= ilimit) { U32 const current2 = (U32)(ip-base); U32 const repIndex = current2 - offset_2; const BYTE* repMatch = dictMode == ZSTD_dictMatchState && repIndex < prefixLowestIndex ? dictBase - dictIndexDelta + repIndex : base + repIndex; if ( ((U32)((prefixLowestIndex-1) - (U32)repIndex) >= 3 /* intentional overflow */) && (MEM_read32(repMatch) == MEM_read32(ip)) ) { const BYTE* const repEnd2 = repIndex < prefixLowestIndex ? dictEnd : iend; matchLength = ZSTD_count_2segments(ip+4, repMatch+4, iend, repEnd2, prefixLowest) + 4; offset = offset_2; offset_2 = offset_1; offset_1 = (U32)offset; /* swap offset_2 <=> offset_1 */ ZSTD_storeSeq(seqStore, 0, anchor, iend, 0, matchLength-MINMATCH); ip += matchLength; anchor = ip; continue; } break; } } if (dictMode == ZSTD_noDict) { while ( ((ip <= ilimit) & (offset_2>0)) && (MEM_read32(ip) == MEM_read32(ip - offset_2)) ) { /* store sequence */ matchLength = ZSTD_count(ip+4, ip+4-offset_2, iend) + 4; offset = offset_2; offset_2 = offset_1; offset_1 = (U32)offset; /* swap repcodes */ ZSTD_storeSeq(seqStore, 0, anchor, iend, 0, matchLength-MINMATCH); ip += matchLength; anchor = ip; continue; /* faster when present ... (?) */ } } } /* Save reps for next block */ rep[0] = offset_1 ? offset_1 : savedOffset; rep[1] = offset_2 ? 
offset_2 : savedOffset; /* Return the last literals size */ return (size_t)(iend - anchor); } size_t ZSTD_compressBlock_btlazy2( ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize) { return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_binaryTree, 2, ZSTD_noDict); } size_t ZSTD_compressBlock_lazy2( ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize) { return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 2, ZSTD_noDict); } size_t ZSTD_compressBlock_lazy( ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize) { return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 1, ZSTD_noDict); } size_t ZSTD_compressBlock_greedy( ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize) { return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 0, ZSTD_noDict); } size_t ZSTD_compressBlock_btlazy2_dictMatchState( ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize) { return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_binaryTree, 2, ZSTD_dictMatchState); } size_t ZSTD_compressBlock_lazy2_dictMatchState( ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize) { return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 2, ZSTD_dictMatchState); } size_t ZSTD_compressBlock_lazy_dictMatchState( ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize) { return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 1, ZSTD_dictMatchState); } size_t ZSTD_compressBlock_greedy_dictMatchState( ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize) { return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 0, ZSTD_dictMatchState); } FORCE_INLINE_TEMPLATE size_t ZSTD_compressBlock_lazy_extDict_generic( ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], const void* src, size_t srcSize, const searchMethod_e searchMethod, const U32 depth) { const BYTE* const istart = (const BYTE*)src; const BYTE* ip = istart; const BYTE* anchor = istart; const BYTE* const iend = istart + srcSize; const BYTE* const ilimit = iend - 8; const BYTE* const base = ms->window.base; const U32 dictLimit = ms->window.dictLimit; const BYTE* const prefixStart = base + dictLimit; const BYTE* const dictBase = ms->window.dictBase; const BYTE* const dictEnd = dictBase + dictLimit; const BYTE* const dictStart = dictBase + ms->window.lowLimit; const U32 windowLog = ms->cParams.windowLog; typedef size_t (*searchMax_f)( ZSTD_matchState_t* ms, const BYTE* ip, const BYTE* iLimit, size_t* offsetPtr); searchMax_f searchMax = searchMethod==search_binaryTree ? ZSTD_BtFindBestMatch_extDict_selectMLS : ZSTD_HcFindBestMatch_extDict_selectMLS; U32 offset_1 = rep[0], offset_2 = rep[1]; DEBUGLOG(5, "ZSTD_compressBlock_lazy_extDict_generic"); /* init */ ip += (ip == prefixStart); /* Match Loop */ #if defined(__GNUC__) && defined(__x86_64__) /* I've measured random a 5% speed loss on levels 5 & 6 (greedy) when the * code alignment is perturbed. To fix the instability align the loop on 32-bytes. 
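 * The same alignment workaround is applied to the match loop of ZSTD_compressBlock_lazy_generic above; it only takes effect when __GNUC__ and __x86_64__ are defined.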
*/ __asm__(".p2align 5"); #endif while (ip < ilimit) { size_t matchLength=0; size_t offset=0; const BYTE* start=ip+1; U32 current = (U32)(ip-base); /* check repCode */ { const U32 windowLow = ZSTD_getLowestMatchIndex(ms, current+1, windowLog); const U32 repIndex = (U32)(current+1 - offset_1); const BYTE* const repBase = repIndex < dictLimit ? dictBase : base; const BYTE* const repMatch = repBase + repIndex; if (((U32)((dictLimit-1) - repIndex) >= 3) & (repIndex > windowLow)) /* intentional overflow */ if (MEM_read32(ip+1) == MEM_read32(repMatch)) { /* repcode detected we should take it */ const BYTE* const repEnd = repIndex < dictLimit ? dictEnd : iend; matchLength = ZSTD_count_2segments(ip+1+4, repMatch+4, iend, repEnd, prefixStart) + 4; if (depth==0) goto _storeSequence; } } /* first search (depth 0) */ { size_t offsetFound = 999999999; size_t const ml2 = searchMax(ms, ip, iend, &offsetFound); if (ml2 > matchLength) matchLength = ml2, start = ip, offset=offsetFound; } if (matchLength < 4) { ip += ((ip-anchor) >> kSearchStrength) + 1; /* jump faster over incompressible sections */ continue; } /* let's try to find a better solution */ if (depth>=1) while (ip<ilimit) { ip ++; current++; /* check repCode */ if (offset) { const U32 windowLow = ZSTD_getLowestMatchIndex(ms, current, windowLog); const U32 repIndex = (U32)(current - offset_1); const BYTE* const repBase = repIndex < dictLimit ? dictBase : base; const BYTE* const repMatch = repBase + repIndex; if (((U32)((dictLimit-1) - repIndex) >= 3) & (repIndex > windowLow)) /* intentional overflow */ if (MEM_read32(ip) == MEM_read32(repMatch)) { /* repcode detected */ const BYTE* const repEnd = repIndex < dictLimit ? dictEnd : iend; size_t const repLength = ZSTD_count_2segments(ip+4, repMatch+4, iend, repEnd, prefixStart) + 4; int const gain2 = (int)(repLength * 3); int const gain1 = (int)(matchLength*3 - ZSTD_highbit32((U32)offset+1) + 1); if ((repLength >= 4) && (gain2 > gain1)) matchLength = repLength, offset = 0, start = ip; } } /* search match, depth 1 */ { size_t offset2=999999999; size_t const ml2 = searchMax(ms, ip, iend, &offset2); int const gain2 = (int)(ml2*4 - ZSTD_highbit32((U32)offset2+1)); /* raw approx */ int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offset+1) + 4); if ((ml2 >= 4) && (gain2 > gain1)) { matchLength = ml2, offset = offset2, start = ip; continue; /* search a better one */ } } /* let's find an even better one */ if ((depth==2) && (ip<ilimit)) { ip ++; current++; /* check repCode */ if (offset) { const U32 windowLow = ZSTD_getLowestMatchIndex(ms, current, windowLog); const U32 repIndex = (U32)(current - offset_1); const BYTE* const repBase = repIndex < dictLimit ? dictBase : base; const BYTE* const repMatch = repBase + repIndex; if (((U32)((dictLimit-1) - repIndex) >= 3) & (repIndex > windowLow)) /* intentional overflow */ if (MEM_read32(ip) == MEM_read32(repMatch)) { /* repcode detected */ const BYTE* const repEnd = repIndex < dictLimit ? dictEnd : iend; size_t const repLength = ZSTD_count_2segments(ip+4, repMatch+4, iend, repEnd, prefixStart) + 4; int const gain2 = (int)(repLength * 4); int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offset+1) + 1); if ((repLength >= 4) && (gain2 > gain1)) matchLength = repLength, offset = 0, start = ip; } } /* search match, depth 2 */ { size_t offset2=999999999; size_t const ml2 = searchMax(ms, ip, iend, &offset2); int const gain2 = (int)(ml2*4 - ZSTD_highbit32((U32)offset2+1)); /* raw approx */ int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offset+1) + 7); if ((ml2 >= 4) && (gain2 > gain1)) { matchLength = ml2, offset = offset2, start = ip; continue; } } } break; /* nothing found : store previous solution */ } /* catch up */ if (offset) { U32 const matchIndex = (U32)((start-base) - (offset - ZSTD_REP_MOVE)); const BYTE* match = (matchIndex < dictLimit) ? dictBase + matchIndex : base + matchIndex; const BYTE* const mStart = (matchIndex < dictLimit) ?
dictStart : prefixStart; while ((start>anchor) && (match>mStart) && (start[-1] == match[-1])) { start--; match--; matchLength++; } /* catch up */ offset_2 = offset_1; offset_1 = (U32)(offset - ZSTD_REP_MOVE); } /* store sequence */ _storeSequence: { size_t const litLength = start - anchor; ZSTD_storeSeq(seqStore, litLength, anchor, iend, (U32)offset, matchLength-MINMATCH); anchor = ip = start + matchLength; } /* check immediate repcode */ while (ip <= ilimit) { const U32 repCurrent = (U32)(ip-base); const U32 windowLow = ZSTD_getLowestMatchIndex(ms, repCurrent, windowLog); const U32 repIndex = repCurrent - offset_2; const BYTE* const repBase = repIndex < dictLimit ? dictBase : base; const BYTE* const repMatch = repBase + repIndex; if (((U32)((dictLimit-1) - repIndex) >= 3) & (repIndex > windowLow)) /* intentional overflow */ if (MEM_read32(ip) == MEM_read32(repMatch)) { /* repcode detected we should take it */ const BYTE* const repEnd = repIndex < dictLimit ? dictEnd : iend; matchLength = ZSTD_count_2segments(ip+4, repMatch+4, iend, repEnd, prefixStart) + 4; offset = offset_2; offset_2 = offset_1; offset_1 = (U32)offset; /* swap offset history */ ZSTD_storeSeq(seqStore, 0, anchor, iend, 0, matchLength-MINMATCH); ip += matchLength; anchor = ip; continue; /* faster when present ... (?) */ } break; } } /* Save reps for next block */ rep[0] = offset_1; rep[1] = offset_2; /* Return the last literals size */ return (size_t)(iend - anchor); } size_t ZSTD_compressBlock_greedy_extDict( ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize) { return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 0); } size_t ZSTD_compressBlock_lazy_extDict( ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize) { return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 1); } size_t ZSTD_compressBlock_lazy2_extDict( ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize) { return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 2); } size_t ZSTD_compressBlock_btlazy2_extDict( ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize) { return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, src, srcSize, search_binaryTree, 2); } borgbackup-1.1.15/src/borg/algorithms/zstd/lib/compress/zstd_double_fast.c0000644000175000017500000006143513771325506026673 0ustar useruser00000000000000/* * Copyright (c) 2016-2020, Yann Collet, Facebook, Inc. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the * LICENSE file in the root directory of this source tree) and the GPLv2 (found * in the COPYING file in the root directory of this source tree). * You may select, at your option, one of the above-listed licenses. 
*/ #include "zstd_compress_internal.h" #include "zstd_double_fast.h" void ZSTD_fillDoubleHashTable(ZSTD_matchState_t* ms, void const* end, ZSTD_dictTableLoadMethod_e dtlm) { const ZSTD_compressionParameters* const cParams = &ms->cParams; U32* const hashLarge = ms->hashTable; U32 const hBitsL = cParams->hashLog; U32 const mls = cParams->minMatch; U32* const hashSmall = ms->chainTable; U32 const hBitsS = cParams->chainLog; const BYTE* const base = ms->window.base; const BYTE* ip = base + ms->nextToUpdate; const BYTE* const iend = ((const BYTE*)end) - HASH_READ_SIZE; const U32 fastHashFillStep = 3; /* Always insert every fastHashFillStep position into the hash tables. * Insert the other positions into the large hash table if their entry * is empty. */ for (; ip + fastHashFillStep - 1 <= iend; ip += fastHashFillStep) { U32 const current = (U32)(ip - base); U32 i; for (i = 0; i < fastHashFillStep; ++i) { size_t const smHash = ZSTD_hashPtr(ip + i, hBitsS, mls); size_t const lgHash = ZSTD_hashPtr(ip + i, hBitsL, 8); if (i == 0) hashSmall[smHash] = current + i; if (i == 0 || hashLarge[lgHash] == 0) hashLarge[lgHash] = current + i; /* Only load extra positions for ZSTD_dtlm_full */ if (dtlm == ZSTD_dtlm_fast) break; } } } FORCE_INLINE_TEMPLATE size_t ZSTD_compressBlock_doubleFast_generic( ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize, U32 const mls /* template */, ZSTD_dictMode_e const dictMode) { ZSTD_compressionParameters const* cParams = &ms->cParams; U32* const hashLong = ms->hashTable; const U32 hBitsL = cParams->hashLog; U32* const hashSmall = ms->chainTable; const U32 hBitsS = cParams->chainLog; const BYTE* const base = ms->window.base; const BYTE* const istart = (const BYTE*)src; const BYTE* ip = istart; const BYTE* anchor = istart; const U32 endIndex = (U32)((size_t)(istart - base) + srcSize); /* presumes that, if there is a dictionary, it must be using Attach mode */ const U32 prefixLowestIndex = ZSTD_getLowestPrefixIndex(ms, endIndex, cParams->windowLog); const BYTE* const prefixLowest = base + prefixLowestIndex; const BYTE* const iend = istart + srcSize; const BYTE* const ilimit = iend - HASH_READ_SIZE; U32 offset_1=rep[0], offset_2=rep[1]; U32 offsetSaved = 0; const ZSTD_matchState_t* const dms = ms->dictMatchState; const ZSTD_compressionParameters* const dictCParams = dictMode == ZSTD_dictMatchState ? &dms->cParams : NULL; const U32* const dictHashLong = dictMode == ZSTD_dictMatchState ? dms->hashTable : NULL; const U32* const dictHashSmall = dictMode == ZSTD_dictMatchState ? dms->chainTable : NULL; const U32 dictStartIndex = dictMode == ZSTD_dictMatchState ? dms->window.dictLimit : 0; const BYTE* const dictBase = dictMode == ZSTD_dictMatchState ? dms->window.base : NULL; const BYTE* const dictStart = dictMode == ZSTD_dictMatchState ? dictBase + dictStartIndex : NULL; const BYTE* const dictEnd = dictMode == ZSTD_dictMatchState ? dms->window.nextSrc : NULL; const U32 dictIndexDelta = dictMode == ZSTD_dictMatchState ? prefixLowestIndex - (U32)(dictEnd - dictBase) : 0; const U32 dictHBitsL = dictMode == ZSTD_dictMatchState ? dictCParams->hashLog : hBitsL; const U32 dictHBitsS = dictMode == ZSTD_dictMatchState ? 
dictCParams->chainLog : hBitsS; const U32 dictAndPrefixLength = (U32)((ip - prefixLowest) + (dictEnd - dictStart)); DEBUGLOG(5, "ZSTD_compressBlock_doubleFast_generic"); assert(dictMode == ZSTD_noDict || dictMode == ZSTD_dictMatchState); /* if a dictionary is attached, it must be within window range */ if (dictMode == ZSTD_dictMatchState) { assert(ms->window.dictLimit + (1U << cParams->windowLog) >= endIndex); } /* init */ ip += (dictAndPrefixLength == 0); if (dictMode == ZSTD_noDict) { U32 const current = (U32)(ip - base); U32 const windowLow = ZSTD_getLowestPrefixIndex(ms, current, cParams->windowLog); U32 const maxRep = current - windowLow; if (offset_2 > maxRep) offsetSaved = offset_2, offset_2 = 0; if (offset_1 > maxRep) offsetSaved = offset_1, offset_1 = 0; } if (dictMode == ZSTD_dictMatchState) { /* dictMatchState repCode checks don't currently handle repCode == 0 * disabling. */ assert(offset_1 <= dictAndPrefixLength); assert(offset_2 <= dictAndPrefixLength); } /* Main Search Loop */ while (ip < ilimit) { /* < instead of <=, because repcode check at (ip+1) */ size_t mLength; U32 offset; size_t const h2 = ZSTD_hashPtr(ip, hBitsL, 8); size_t const h = ZSTD_hashPtr(ip, hBitsS, mls); size_t const dictHL = ZSTD_hashPtr(ip, dictHBitsL, 8); size_t const dictHS = ZSTD_hashPtr(ip, dictHBitsS, mls); U32 const current = (U32)(ip-base); U32 const matchIndexL = hashLong[h2]; U32 matchIndexS = hashSmall[h]; const BYTE* matchLong = base + matchIndexL; const BYTE* match = base + matchIndexS; const U32 repIndex = current + 1 - offset_1; const BYTE* repMatch = (dictMode == ZSTD_dictMatchState && repIndex < prefixLowestIndex) ? dictBase + (repIndex - dictIndexDelta) : base + repIndex; hashLong[h2] = hashSmall[h] = current; /* update hash tables */ /* check dictMatchState repcode */ if (dictMode == ZSTD_dictMatchState && ((U32)((prefixLowestIndex-1) - repIndex) >= 3 /* intentional underflow */) && (MEM_read32(repMatch) == MEM_read32(ip+1)) ) { const BYTE* repMatchEnd = repIndex < prefixLowestIndex ? 
dictEnd : iend; mLength = ZSTD_count_2segments(ip+1+4, repMatch+4, iend, repMatchEnd, prefixLowest) + 4; ip++; ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, 0, mLength-MINMATCH); goto _match_stored; } /* check noDict repcode */ if ( dictMode == ZSTD_noDict && ((offset_1 > 0) & (MEM_read32(ip+1-offset_1) == MEM_read32(ip+1)))) { mLength = ZSTD_count(ip+1+4, ip+1+4-offset_1, iend) + 4; ip++; ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, 0, mLength-MINMATCH); goto _match_stored; } if (matchIndexL > prefixLowestIndex) { /* check prefix long match */ if (MEM_read64(matchLong) == MEM_read64(ip)) { mLength = ZSTD_count(ip+8, matchLong+8, iend) + 8; offset = (U32)(ip-matchLong); while (((ip>anchor) & (matchLong>prefixLowest)) && (ip[-1] == matchLong[-1])) { ip--; matchLong--; mLength++; } /* catch up */ goto _match_found; } } else if (dictMode == ZSTD_dictMatchState) { /* check dictMatchState long match */ U32 const dictMatchIndexL = dictHashLong[dictHL]; const BYTE* dictMatchL = dictBase + dictMatchIndexL; assert(dictMatchL < dictEnd); if (dictMatchL > dictStart && MEM_read64(dictMatchL) == MEM_read64(ip)) { mLength = ZSTD_count_2segments(ip+8, dictMatchL+8, iend, dictEnd, prefixLowest) + 8; offset = (U32)(current - dictMatchIndexL - dictIndexDelta); while (((ip>anchor) & (dictMatchL>dictStart)) && (ip[-1] == dictMatchL[-1])) { ip--; dictMatchL--; mLength++; } /* catch up */ goto _match_found; } } if (matchIndexS > prefixLowestIndex) { /* check prefix short match */ if (MEM_read32(match) == MEM_read32(ip)) { goto _search_next_long; } } else if (dictMode == ZSTD_dictMatchState) { /* check dictMatchState short match */ U32 const dictMatchIndexS = dictHashSmall[dictHS]; match = dictBase + dictMatchIndexS; matchIndexS = dictMatchIndexS + dictIndexDelta; if (match > dictStart && MEM_read32(match) == MEM_read32(ip)) { goto _search_next_long; } } ip += ((ip-anchor) >> kSearchStrength) + 1; #if defined(__aarch64__) PREFETCH_L1(ip+256); #endif continue; _search_next_long: { size_t const hl3 = ZSTD_hashPtr(ip+1, hBitsL, 8); size_t const dictHLNext = ZSTD_hashPtr(ip+1, dictHBitsL, 8); U32 const matchIndexL3 = hashLong[hl3]; const BYTE* matchL3 = base + matchIndexL3; hashLong[hl3] = current + 1; /* check prefix long +1 match */ if (matchIndexL3 > prefixLowestIndex) { if (MEM_read64(matchL3) == MEM_read64(ip+1)) { mLength = ZSTD_count(ip+9, matchL3+8, iend) + 8; ip++; offset = (U32)(ip-matchL3); while (((ip>anchor) & (matchL3>prefixLowest)) && (ip[-1] == matchL3[-1])) { ip--; matchL3--; mLength++; } /* catch up */ goto _match_found; } } else if (dictMode == ZSTD_dictMatchState) { /* check dict long +1 match */ U32 const dictMatchIndexL3 = dictHashLong[dictHLNext]; const BYTE* dictMatchL3 = dictBase + dictMatchIndexL3; assert(dictMatchL3 < dictEnd); if (dictMatchL3 > dictStart && MEM_read64(dictMatchL3) == MEM_read64(ip+1)) { mLength = ZSTD_count_2segments(ip+1+8, dictMatchL3+8, iend, dictEnd, prefixLowest) + 8; ip++; offset = (U32)(current + 1 - dictMatchIndexL3 - dictIndexDelta); while (((ip>anchor) & (dictMatchL3>dictStart)) && (ip[-1] == dictMatchL3[-1])) { ip--; dictMatchL3--; mLength++; } /* catch up */ goto _match_found; } } } /* if no long +1 match, explore the short match we found */ if (dictMode == ZSTD_dictMatchState && matchIndexS < prefixLowestIndex) { mLength = ZSTD_count_2segments(ip+4, match+4, iend, dictEnd, prefixLowest) + 4; offset = (U32)(current - matchIndexS); while (((ip>anchor) & (match>dictStart)) && (ip[-1] == match[-1])) { ip--; match--; mLength++; } /* 
catch up */ } else { mLength = ZSTD_count(ip+4, match+4, iend) + 4; offset = (U32)(ip - match); while (((ip>anchor) & (match>prefixLowest)) && (ip[-1] == match[-1])) { ip--; match--; mLength++; } /* catch up */ } /* fall-through */ _match_found: offset_2 = offset_1; offset_1 = offset; ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, offset + ZSTD_REP_MOVE, mLength-MINMATCH); _match_stored: /* match found */ ip += mLength; anchor = ip; if (ip <= ilimit) { /* Complementary insertion */ /* done after iLimit test, as candidates could be > iend-8 */ { U32 const indexToInsert = current+2; hashLong[ZSTD_hashPtr(base+indexToInsert, hBitsL, 8)] = indexToInsert; hashLong[ZSTD_hashPtr(ip-2, hBitsL, 8)] = (U32)(ip-2-base); hashSmall[ZSTD_hashPtr(base+indexToInsert, hBitsS, mls)] = indexToInsert; hashSmall[ZSTD_hashPtr(ip-1, hBitsS, mls)] = (U32)(ip-1-base); } /* check immediate repcode */ if (dictMode == ZSTD_dictMatchState) { while (ip <= ilimit) { U32 const current2 = (U32)(ip-base); U32 const repIndex2 = current2 - offset_2; const BYTE* repMatch2 = dictMode == ZSTD_dictMatchState && repIndex2 < prefixLowestIndex ? dictBase + repIndex2 - dictIndexDelta : base + repIndex2; if ( ((U32)((prefixLowestIndex-1) - (U32)repIndex2) >= 3 /* intentional overflow */) && (MEM_read32(repMatch2) == MEM_read32(ip)) ) { const BYTE* const repEnd2 = repIndex2 < prefixLowestIndex ? dictEnd : iend; size_t const repLength2 = ZSTD_count_2segments(ip+4, repMatch2+4, iend, repEnd2, prefixLowest) + 4; U32 tmpOffset = offset_2; offset_2 = offset_1; offset_1 = tmpOffset; /* swap offset_2 <=> offset_1 */ ZSTD_storeSeq(seqStore, 0, anchor, iend, 0, repLength2-MINMATCH); hashSmall[ZSTD_hashPtr(ip, hBitsS, mls)] = current2; hashLong[ZSTD_hashPtr(ip, hBitsL, 8)] = current2; ip += repLength2; anchor = ip; continue; } break; } } if (dictMode == ZSTD_noDict) { while ( (ip <= ilimit) && ( (offset_2>0) & (MEM_read32(ip) == MEM_read32(ip - offset_2)) )) { /* store sequence */ size_t const rLength = ZSTD_count(ip+4, ip+4-offset_2, iend) + 4; U32 const tmpOff = offset_2; offset_2 = offset_1; offset_1 = tmpOff; /* swap offset_2 <=> offset_1 */ hashSmall[ZSTD_hashPtr(ip, hBitsS, mls)] = (U32)(ip-base); hashLong[ZSTD_hashPtr(ip, hBitsL, 8)] = (U32)(ip-base); ZSTD_storeSeq(seqStore, 0, anchor, iend, 0, rLength-MINMATCH); ip += rLength; anchor = ip; continue; /* faster when present ... (?) */ } } } } /* while (ip < ilimit) */ /* save reps for next block */ rep[0] = offset_1 ? offset_1 : offsetSaved; rep[1] = offset_2 ? 
offset_2 : offsetSaved; /* Return the last literals size */ return (size_t)(iend - anchor); } size_t ZSTD_compressBlock_doubleFast( ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize) { const U32 mls = ms->cParams.minMatch; switch(mls) { default: /* includes case 3 */ case 4 : return ZSTD_compressBlock_doubleFast_generic(ms, seqStore, rep, src, srcSize, 4, ZSTD_noDict); case 5 : return ZSTD_compressBlock_doubleFast_generic(ms, seqStore, rep, src, srcSize, 5, ZSTD_noDict); case 6 : return ZSTD_compressBlock_doubleFast_generic(ms, seqStore, rep, src, srcSize, 6, ZSTD_noDict); case 7 : return ZSTD_compressBlock_doubleFast_generic(ms, seqStore, rep, src, srcSize, 7, ZSTD_noDict); } } size_t ZSTD_compressBlock_doubleFast_dictMatchState( ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize) { const U32 mls = ms->cParams.minMatch; switch(mls) { default: /* includes case 3 */ case 4 : return ZSTD_compressBlock_doubleFast_generic(ms, seqStore, rep, src, srcSize, 4, ZSTD_dictMatchState); case 5 : return ZSTD_compressBlock_doubleFast_generic(ms, seqStore, rep, src, srcSize, 5, ZSTD_dictMatchState); case 6 : return ZSTD_compressBlock_doubleFast_generic(ms, seqStore, rep, src, srcSize, 6, ZSTD_dictMatchState); case 7 : return ZSTD_compressBlock_doubleFast_generic(ms, seqStore, rep, src, srcSize, 7, ZSTD_dictMatchState); } } static size_t ZSTD_compressBlock_doubleFast_extDict_generic( ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize, U32 const mls /* template */) { ZSTD_compressionParameters const* cParams = &ms->cParams; U32* const hashLong = ms->hashTable; U32 const hBitsL = cParams->hashLog; U32* const hashSmall = ms->chainTable; U32 const hBitsS = cParams->chainLog; const BYTE* const istart = (const BYTE*)src; const BYTE* ip = istart; const BYTE* anchor = istart; const BYTE* const iend = istart + srcSize; const BYTE* const ilimit = iend - 8; const BYTE* const base = ms->window.base; const U32 endIndex = (U32)((size_t)(istart - base) + srcSize); const U32 lowLimit = ZSTD_getLowestMatchIndex(ms, endIndex, cParams->windowLog); const U32 dictStartIndex = lowLimit; const U32 dictLimit = ms->window.dictLimit; const U32 prefixStartIndex = (dictLimit > lowLimit) ? dictLimit : lowLimit; const BYTE* const prefixStart = base + prefixStartIndex; const BYTE* const dictBase = ms->window.dictBase; const BYTE* const dictStart = dictBase + dictStartIndex; const BYTE* const dictEnd = dictBase + prefixStartIndex; U32 offset_1=rep[0], offset_2=rep[1]; DEBUGLOG(5, "ZSTD_compressBlock_doubleFast_extDict_generic (srcSize=%zu)", srcSize); /* if extDict is invalidated due to maxDistance, switch to "regular" variant */ if (prefixStartIndex == dictStartIndex) return ZSTD_compressBlock_doubleFast_generic(ms, seqStore, rep, src, srcSize, mls, ZSTD_noDict); /* Search Loop */ while (ip < ilimit) { /* < instead of <=, because (ip+1) */ const size_t hSmall = ZSTD_hashPtr(ip, hBitsS, mls); const U32 matchIndex = hashSmall[hSmall]; const BYTE* const matchBase = matchIndex < prefixStartIndex ? dictBase : base; const BYTE* match = matchBase + matchIndex; const size_t hLong = ZSTD_hashPtr(ip, hBitsL, 8); const U32 matchLongIndex = hashLong[hLong]; const BYTE* const matchLongBase = matchLongIndex < prefixStartIndex ? 
dictBase : base; const BYTE* matchLong = matchLongBase + matchLongIndex; const U32 current = (U32)(ip-base); const U32 repIndex = current + 1 - offset_1; /* offset_1 expected <= current +1 */ const BYTE* const repBase = repIndex < prefixStartIndex ? dictBase : base; const BYTE* const repMatch = repBase + repIndex; size_t mLength; hashSmall[hSmall] = hashLong[hLong] = current; /* update hash table */ if ((((U32)((prefixStartIndex-1) - repIndex) >= 3) /* intentional underflow : ensure repIndex doesn't overlap dict + prefix */ & (repIndex > dictStartIndex)) && (MEM_read32(repMatch) == MEM_read32(ip+1)) ) { const BYTE* repMatchEnd = repIndex < prefixStartIndex ? dictEnd : iend; mLength = ZSTD_count_2segments(ip+1+4, repMatch+4, iend, repMatchEnd, prefixStart) + 4; ip++; ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, 0, mLength-MINMATCH); } else { if ((matchLongIndex > dictStartIndex) && (MEM_read64(matchLong) == MEM_read64(ip))) { const BYTE* const matchEnd = matchLongIndex < prefixStartIndex ? dictEnd : iend; const BYTE* const lowMatchPtr = matchLongIndex < prefixStartIndex ? dictStart : prefixStart; U32 offset; mLength = ZSTD_count_2segments(ip+8, matchLong+8, iend, matchEnd, prefixStart) + 8; offset = current - matchLongIndex; while (((ip>anchor) & (matchLong>lowMatchPtr)) && (ip[-1] == matchLong[-1])) { ip--; matchLong--; mLength++; } /* catch up */ offset_2 = offset_1; offset_1 = offset; ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, offset + ZSTD_REP_MOVE, mLength-MINMATCH); } else if ((matchIndex > dictStartIndex) && (MEM_read32(match) == MEM_read32(ip))) { size_t const h3 = ZSTD_hashPtr(ip+1, hBitsL, 8); U32 const matchIndex3 = hashLong[h3]; const BYTE* const match3Base = matchIndex3 < prefixStartIndex ? dictBase : base; const BYTE* match3 = match3Base + matchIndex3; U32 offset; hashLong[h3] = current + 1; if ( (matchIndex3 > dictStartIndex) && (MEM_read64(match3) == MEM_read64(ip+1)) ) { const BYTE* const matchEnd = matchIndex3 < prefixStartIndex ? dictEnd : iend; const BYTE* const lowMatchPtr = matchIndex3 < prefixStartIndex ? dictStart : prefixStart; mLength = ZSTD_count_2segments(ip+9, match3+8, iend, matchEnd, prefixStart) + 8; ip++; offset = current+1 - matchIndex3; while (((ip>anchor) & (match3>lowMatchPtr)) && (ip[-1] == match3[-1])) { ip--; match3--; mLength++; } /* catch up */ } else { const BYTE* const matchEnd = matchIndex < prefixStartIndex ? dictEnd : iend; const BYTE* const lowMatchPtr = matchIndex < prefixStartIndex ? 
dictStart : prefixStart; mLength = ZSTD_count_2segments(ip+4, match+4, iend, matchEnd, prefixStart) + 4; offset = current - matchIndex; while (((ip>anchor) & (match>lowMatchPtr)) && (ip[-1] == match[-1])) { ip--; match--; mLength++; } /* catch up */ } offset_2 = offset_1; offset_1 = offset; ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, offset + ZSTD_REP_MOVE, mLength-MINMATCH); } else { ip += ((ip-anchor) >> kSearchStrength) + 1; continue; } } /* move to next sequence start */ ip += mLength; anchor = ip; if (ip <= ilimit) { /* Complementary insertion */ /* done after iLimit test, as candidates could be > iend-8 */ { U32 const indexToInsert = current+2; hashLong[ZSTD_hashPtr(base+indexToInsert, hBitsL, 8)] = indexToInsert; hashLong[ZSTD_hashPtr(ip-2, hBitsL, 8)] = (U32)(ip-2-base); hashSmall[ZSTD_hashPtr(base+indexToInsert, hBitsS, mls)] = indexToInsert; hashSmall[ZSTD_hashPtr(ip-1, hBitsS, mls)] = (U32)(ip-1-base); } /* check immediate repcode */ while (ip <= ilimit) { U32 const current2 = (U32)(ip-base); U32 const repIndex2 = current2 - offset_2; const BYTE* repMatch2 = repIndex2 < prefixStartIndex ? dictBase + repIndex2 : base + repIndex2; if ( (((U32)((prefixStartIndex-1) - repIndex2) >= 3) /* intentional overflow : ensure repIndex2 doesn't overlap dict + prefix */ & (repIndex2 > dictStartIndex)) && (MEM_read32(repMatch2) == MEM_read32(ip)) ) { const BYTE* const repEnd2 = repIndex2 < prefixStartIndex ? dictEnd : iend; size_t const repLength2 = ZSTD_count_2segments(ip+4, repMatch2+4, iend, repEnd2, prefixStart) + 4; U32 const tmpOffset = offset_2; offset_2 = offset_1; offset_1 = tmpOffset; /* swap offset_2 <=> offset_1 */ ZSTD_storeSeq(seqStore, 0, anchor, iend, 0, repLength2-MINMATCH); hashSmall[ZSTD_hashPtr(ip, hBitsS, mls)] = current2; hashLong[ZSTD_hashPtr(ip, hBitsL, 8)] = current2; ip += repLength2; anchor = ip; continue; } break; } } } /* save reps for next block */ rep[0] = offset_1; rep[1] = offset_2; /* Return the last literals size */ return (size_t)(iend - anchor); } size_t ZSTD_compressBlock_doubleFast_extDict( ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize) { U32 const mls = ms->cParams.minMatch; switch(mls) { default: /* includes case 3 */ case 4 : return ZSTD_compressBlock_doubleFast_extDict_generic(ms, seqStore, rep, src, srcSize, 4); case 5 : return ZSTD_compressBlock_doubleFast_extDict_generic(ms, seqStore, rep, src, srcSize, 5); case 6 : return ZSTD_compressBlock_doubleFast_extDict_generic(ms, seqStore, rep, src, srcSize, 6); case 7 : return ZSTD_compressBlock_doubleFast_extDict_generic(ms, seqStore, rep, src, srcSize, 7); } } borgbackup-1.1.15/src/borg/algorithms/zstd/lib/compress/zstd_compress_superblock.c0000644000175000017500000012423713771325506030470 0ustar useruser00000000000000/* * Copyright (c) 2016-2020, Yann Collet, Facebook, Inc. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the * LICENSE file in the root directory of this source tree) and the GPLv2 (found * in the COPYING file in the root directory of this source tree). * You may select, at your option, one of the above-listed licenses. 
*/ /*-************************************* * Dependencies ***************************************/ #include "zstd_compress_superblock.h" #include "../common/zstd_internal.h" /* ZSTD_getSequenceLength */ #include "hist.h" /* HIST_countFast_wksp */ #include "zstd_compress_internal.h" #include "zstd_compress_sequences.h" #include "zstd_compress_literals.h" /*-************************************* * Superblock entropy buffer structs ***************************************/ /** ZSTD_hufCTablesMetadata_t : * Stores Literals Block Type for a super-block in hType, and * huffman tree description in hufDesBuffer. * hufDesSize refers to the size of huffman tree description in bytes. * This metadata is populated in ZSTD_buildSuperBlockEntropy_literal() */ typedef struct { symbolEncodingType_e hType; BYTE hufDesBuffer[500]; /* TODO give name to this value */ size_t hufDesSize; } ZSTD_hufCTablesMetadata_t; /** ZSTD_fseCTablesMetadata_t : * Stores symbol compression modes for a super-block in {ll, ol, ml}Type, and * fse tables in fseTablesBuffer. * fseTablesSize refers to the size of fse tables in bytes. * This metadata is populated in ZSTD_buildSuperBlockEntropy_sequences() */ typedef struct { symbolEncodingType_e llType; symbolEncodingType_e ofType; symbolEncodingType_e mlType; BYTE fseTablesBuffer[500]; /* TODO give name to this value */ size_t fseTablesSize; size_t lastCountSize; /* This is to account for bug in 1.3.4. More detail in ZSTD_compressSubBlock_sequences() */ } ZSTD_fseCTablesMetadata_t; typedef struct { ZSTD_hufCTablesMetadata_t hufMetadata; ZSTD_fseCTablesMetadata_t fseMetadata; } ZSTD_entropyCTablesMetadata_t; /** ZSTD_buildSuperBlockEntropy_literal() : * Builds entropy for the super-block literals. * Stores literals block type (raw, rle, compressed, repeat) and * huffman description table to hufMetadata. * @return : size of huffman description table or error code */ static size_t ZSTD_buildSuperBlockEntropy_literal(void* const src, size_t srcSize, const ZSTD_hufCTables_t* prevHuf, ZSTD_hufCTables_t* nextHuf, ZSTD_hufCTablesMetadata_t* hufMetadata, const int disableLiteralsCompression, void* workspace, size_t wkspSize) { BYTE* const wkspStart = (BYTE*)workspace; BYTE* const wkspEnd = wkspStart + wkspSize; BYTE* const countWkspStart = wkspStart; unsigned* const countWksp = (unsigned*)workspace; const size_t countWkspSize = (HUF_SYMBOLVALUE_MAX + 1) * sizeof(unsigned); BYTE* const nodeWksp = countWkspStart + countWkspSize; const size_t nodeWkspSize = wkspEnd-nodeWksp; unsigned maxSymbolValue = 255; unsigned huffLog = HUF_TABLELOG_DEFAULT; HUF_repeat repeat = prevHuf->repeatMode; DEBUGLOG(5, "ZSTD_buildSuperBlockEntropy_literal (srcSize=%zu)", srcSize); /* Prepare nextEntropy assuming reusing the existing table */ memcpy(nextHuf, prevHuf, sizeof(*prevHuf)); if (disableLiteralsCompression) { DEBUGLOG(5, "set_basic - disabled"); hufMetadata->hType = set_basic; return 0; } /* small ? don't even attempt compression (speed opt) */ # define COMPRESS_LITERALS_SIZE_MIN 63 { size_t const minLitSize = (prevHuf->repeatMode == HUF_repeat_valid) ? 
6 : COMPRESS_LITERALS_SIZE_MIN; if (srcSize <= minLitSize) { DEBUGLOG(5, "set_basic - too small"); hufMetadata->hType = set_basic; return 0; } } /* Scan input and build symbol stats */ { size_t const largest = HIST_count_wksp (countWksp, &maxSymbolValue, (const BYTE*)src, srcSize, workspace, wkspSize); FORWARD_IF_ERROR(largest, "HIST_count_wksp failed"); if (largest == srcSize) { DEBUGLOG(5, "set_rle"); hufMetadata->hType = set_rle; return 0; } if (largest <= (srcSize >> 7)+4) { DEBUGLOG(5, "set_basic - no gain"); hufMetadata->hType = set_basic; return 0; } } /* Validate the previous Huffman table */ if (repeat == HUF_repeat_check && !HUF_validateCTable((HUF_CElt const*)prevHuf->CTable, countWksp, maxSymbolValue)) { repeat = HUF_repeat_none; } /* Build Huffman Tree */ memset(nextHuf->CTable, 0, sizeof(nextHuf->CTable)); huffLog = HUF_optimalTableLog(huffLog, srcSize, maxSymbolValue); { size_t const maxBits = HUF_buildCTable_wksp((HUF_CElt*)nextHuf->CTable, countWksp, maxSymbolValue, huffLog, nodeWksp, nodeWkspSize); FORWARD_IF_ERROR(maxBits, "HUF_buildCTable_wksp"); huffLog = (U32)maxBits; { /* Build and write the CTable */ size_t const newCSize = HUF_estimateCompressedSize( (HUF_CElt*)nextHuf->CTable, countWksp, maxSymbolValue); size_t const hSize = HUF_writeCTable( hufMetadata->hufDesBuffer, sizeof(hufMetadata->hufDesBuffer), (HUF_CElt*)nextHuf->CTable, maxSymbolValue, huffLog); /* Check against repeating the previous CTable */ if (repeat != HUF_repeat_none) { size_t const oldCSize = HUF_estimateCompressedSize( (HUF_CElt const*)prevHuf->CTable, countWksp, maxSymbolValue); if (oldCSize < srcSize && (oldCSize <= hSize + newCSize || hSize + 12 >= srcSize)) { DEBUGLOG(5, "set_repeat - smaller"); memcpy(nextHuf, prevHuf, sizeof(*prevHuf)); hufMetadata->hType = set_repeat; return 0; } } if (newCSize + hSize >= srcSize) { DEBUGLOG(5, "set_basic - no gains"); memcpy(nextHuf, prevHuf, sizeof(*prevHuf)); hufMetadata->hType = set_basic; return 0; } DEBUGLOG(5, "set_compressed (hSize=%u)", (U32)hSize); hufMetadata->hType = set_compressed; nextHuf->repeatMode = HUF_repeat_check; return hSize; } } } /** ZSTD_buildSuperBlockEntropy_sequences() : * Builds entropy for the super-block sequences. * Stores symbol compression modes and fse table to fseMetadata. 
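 * It first maps lengths and offsets to codes with ZSTD_seqToCodes(), then, for each of the literal-length, offset and match-length code streams, picks an encoding type via ZSTD_selectEncodingType() and serializes the corresponding FSE table into fseMetadata->fseTablesBuffer with ZSTD_buildCTable().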
* @return : size of fse tables or error code */ static size_t ZSTD_buildSuperBlockEntropy_sequences(seqStore_t* seqStorePtr, const ZSTD_fseCTables_t* prevEntropy, ZSTD_fseCTables_t* nextEntropy, const ZSTD_CCtx_params* cctxParams, ZSTD_fseCTablesMetadata_t* fseMetadata, void* workspace, size_t wkspSize) { BYTE* const wkspStart = (BYTE*)workspace; BYTE* const wkspEnd = wkspStart + wkspSize; BYTE* const countWkspStart = wkspStart; unsigned* const countWksp = (unsigned*)workspace; const size_t countWkspSize = (MaxSeq + 1) * sizeof(unsigned); BYTE* const cTableWksp = countWkspStart + countWkspSize; const size_t cTableWkspSize = wkspEnd-cTableWksp; ZSTD_strategy const strategy = cctxParams->cParams.strategy; FSE_CTable* CTable_LitLength = nextEntropy->litlengthCTable; FSE_CTable* CTable_OffsetBits = nextEntropy->offcodeCTable; FSE_CTable* CTable_MatchLength = nextEntropy->matchlengthCTable; const BYTE* const ofCodeTable = seqStorePtr->ofCode; const BYTE* const llCodeTable = seqStorePtr->llCode; const BYTE* const mlCodeTable = seqStorePtr->mlCode; size_t const nbSeq = seqStorePtr->sequences - seqStorePtr->sequencesStart; BYTE* const ostart = fseMetadata->fseTablesBuffer; BYTE* const oend = ostart + sizeof(fseMetadata->fseTablesBuffer); BYTE* op = ostart; assert(cTableWkspSize >= (1 << MaxFSELog) * sizeof(FSE_FUNCTION_TYPE)); DEBUGLOG(5, "ZSTD_buildSuperBlockEntropy_sequences (nbSeq=%zu)", nbSeq); memset(workspace, 0, wkspSize); fseMetadata->lastCountSize = 0; /* convert length/distances into codes */ ZSTD_seqToCodes(seqStorePtr); /* build CTable for Literal Lengths */ { U32 LLtype; unsigned max = MaxLL; size_t const mostFrequent = HIST_countFast_wksp(countWksp, &max, llCodeTable, nbSeq, workspace, wkspSize); /* can't fail */ DEBUGLOG(5, "Building LL table"); nextEntropy->litlength_repeatMode = prevEntropy->litlength_repeatMode; LLtype = ZSTD_selectEncodingType(&nextEntropy->litlength_repeatMode, countWksp, max, mostFrequent, nbSeq, LLFSELog, prevEntropy->litlengthCTable, LL_defaultNorm, LL_defaultNormLog, ZSTD_defaultAllowed, strategy); assert(set_basic < set_compressed && set_rle < set_compressed); assert(!(LLtype < set_compressed && nextEntropy->litlength_repeatMode != FSE_repeat_none)); /* We don't copy tables */ { size_t const countSize = ZSTD_buildCTable(op, oend - op, CTable_LitLength, LLFSELog, (symbolEncodingType_e)LLtype, countWksp, max, llCodeTable, nbSeq, LL_defaultNorm, LL_defaultNormLog, MaxLL, prevEntropy->litlengthCTable, sizeof(prevEntropy->litlengthCTable), cTableWksp, cTableWkspSize); FORWARD_IF_ERROR(countSize, "ZSTD_buildCTable for LitLens failed"); if (LLtype == set_compressed) fseMetadata->lastCountSize = countSize; op += countSize; fseMetadata->llType = (symbolEncodingType_e) LLtype; } } /* build CTable for Offsets */ { U32 Offtype; unsigned max = MaxOff; size_t const mostFrequent = HIST_countFast_wksp(countWksp, &max, ofCodeTable, nbSeq, workspace, wkspSize); /* can't fail */ /* We can only use the basic table if max <= DefaultMaxOff, otherwise the offsets are too large */ ZSTD_defaultPolicy_e const defaultPolicy = (max <= DefaultMaxOff) ? 
ZSTD_defaultAllowed : ZSTD_defaultDisallowed; DEBUGLOG(5, "Building OF table"); nextEntropy->offcode_repeatMode = prevEntropy->offcode_repeatMode; Offtype = ZSTD_selectEncodingType(&nextEntropy->offcode_repeatMode, countWksp, max, mostFrequent, nbSeq, OffFSELog, prevEntropy->offcodeCTable, OF_defaultNorm, OF_defaultNormLog, defaultPolicy, strategy); assert(!(Offtype < set_compressed && nextEntropy->offcode_repeatMode != FSE_repeat_none)); /* We don't copy tables */ { size_t const countSize = ZSTD_buildCTable(op, oend - op, CTable_OffsetBits, OffFSELog, (symbolEncodingType_e)Offtype, countWksp, max, ofCodeTable, nbSeq, OF_defaultNorm, OF_defaultNormLog, DefaultMaxOff, prevEntropy->offcodeCTable, sizeof(prevEntropy->offcodeCTable), cTableWksp, cTableWkspSize); FORWARD_IF_ERROR(countSize, "ZSTD_buildCTable for Offsets failed"); if (Offtype == set_compressed) fseMetadata->lastCountSize = countSize; op += countSize; fseMetadata->ofType = (symbolEncodingType_e) Offtype; } } /* build CTable for MatchLengths */ { U32 MLtype; unsigned max = MaxML; size_t const mostFrequent = HIST_countFast_wksp(countWksp, &max, mlCodeTable, nbSeq, workspace, wkspSize); /* can't fail */ DEBUGLOG(5, "Building ML table (remaining space : %i)", (int)(oend-op)); nextEntropy->matchlength_repeatMode = prevEntropy->matchlength_repeatMode; MLtype = ZSTD_selectEncodingType(&nextEntropy->matchlength_repeatMode, countWksp, max, mostFrequent, nbSeq, MLFSELog, prevEntropy->matchlengthCTable, ML_defaultNorm, ML_defaultNormLog, ZSTD_defaultAllowed, strategy); assert(!(MLtype < set_compressed && nextEntropy->matchlength_repeatMode != FSE_repeat_none)); /* We don't copy tables */ { size_t const countSize = ZSTD_buildCTable(op, oend - op, CTable_MatchLength, MLFSELog, (symbolEncodingType_e)MLtype, countWksp, max, mlCodeTable, nbSeq, ML_defaultNorm, ML_defaultNormLog, MaxML, prevEntropy->matchlengthCTable, sizeof(prevEntropy->matchlengthCTable), cTableWksp, cTableWkspSize); FORWARD_IF_ERROR(countSize, "ZSTD_buildCTable for MatchLengths failed"); if (MLtype == set_compressed) fseMetadata->lastCountSize = countSize; op += countSize; fseMetadata->mlType = (symbolEncodingType_e) MLtype; } } assert((size_t) (op-ostart) <= sizeof(fseMetadata->fseTablesBuffer)); return op-ostart; } /** ZSTD_buildSuperBlockEntropy() : * Builds entropy for the super-block. * @return : 0 on success or error code */ static size_t ZSTD_buildSuperBlockEntropy(seqStore_t* seqStorePtr, const ZSTD_entropyCTables_t* prevEntropy, ZSTD_entropyCTables_t* nextEntropy, const ZSTD_CCtx_params* cctxParams, ZSTD_entropyCTablesMetadata_t* entropyMetadata, void* workspace, size_t wkspSize) { size_t const litSize = seqStorePtr->lit - seqStorePtr->litStart; DEBUGLOG(5, "ZSTD_buildSuperBlockEntropy"); entropyMetadata->hufMetadata.hufDesSize = ZSTD_buildSuperBlockEntropy_literal(seqStorePtr->litStart, litSize, &prevEntropy->huf, &nextEntropy->huf, &entropyMetadata->hufMetadata, ZSTD_disableLiteralsCompression(cctxParams), workspace, wkspSize); FORWARD_IF_ERROR(entropyMetadata->hufMetadata.hufDesSize, "ZSTD_buildSuperBlockEntropy_literal failed"); entropyMetadata->fseMetadata.fseTablesSize = ZSTD_buildSuperBlockEntropy_sequences(seqStorePtr, &prevEntropy->fse, &nextEntropy->fse, cctxParams, &entropyMetadata->fseMetadata, workspace, wkspSize); FORWARD_IF_ERROR(entropyMetadata->fseMetadata.fseTablesSize, "ZSTD_buildSuperBlockEntropy_sequences failed"); return 0; } /** ZSTD_compressSubBlock_literal() : * Compresses literals section for a sub-block. 
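 * For the compressed path, the literals section header emitted at the end of this function is 3, 4 or 5 bytes (lhSize), packing the block type, the regenerated size (litSize) and the compressed size (cLitSize).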
* When we have to write the Huffman table we will sometimes choose a header * size larger than necessary. This is because we have to pick the header size * before we know the table size + compressed size, so we have a bound on the * table size. If we guessed incorrectly, we fall back to uncompressed literals. * * We write the header when writeEntropy=1 and set entropyWrriten=1 when we succeeded * in writing the header, otherwise it is set to 0. * * hufMetadata->hType has literals block type info. * If it is set_basic, all sub-blocks literals section will be Raw_Literals_Block. * If it is set_rle, all sub-blocks literals section will be RLE_Literals_Block. * If it is set_compressed, first sub-block's literals section will be Compressed_Literals_Block * If it is set_compressed, first sub-block's literals section will be Treeless_Literals_Block * and the following sub-blocks' literals sections will be Treeless_Literals_Block. * @return : compressed size of literals section of a sub-block * Or 0 if it unable to compress. * Or error code */ static size_t ZSTD_compressSubBlock_literal(const HUF_CElt* hufTable, const ZSTD_hufCTablesMetadata_t* hufMetadata, const BYTE* literals, size_t litSize, void* dst, size_t dstSize, const int bmi2, int writeEntropy, int* entropyWritten) { size_t const header = writeEntropy ? 200 : 0; size_t const lhSize = 3 + (litSize >= (1 KB - header)) + (litSize >= (16 KB - header)); BYTE* const ostart = (BYTE*)dst; BYTE* const oend = ostart + dstSize; BYTE* op = ostart + lhSize; U32 const singleStream = lhSize == 3; symbolEncodingType_e hType = writeEntropy ? hufMetadata->hType : set_repeat; size_t cLitSize = 0; (void)bmi2; /* TODO bmi2... */ DEBUGLOG(5, "ZSTD_compressSubBlock_literal (litSize=%zu, lhSize=%zu, writeEntropy=%d)", litSize, lhSize, writeEntropy); *entropyWritten = 0; if (litSize == 0 || hufMetadata->hType == set_basic) { DEBUGLOG(5, "ZSTD_compressSubBlock_literal using raw literal"); return ZSTD_noCompressLiterals(dst, dstSize, literals, litSize); } else if (hufMetadata->hType == set_rle) { DEBUGLOG(5, "ZSTD_compressSubBlock_literal using rle literal"); return ZSTD_compressRleLiteralsBlock(dst, dstSize, literals, litSize); } assert(litSize > 0); assert(hufMetadata->hType == set_compressed || hufMetadata->hType == set_repeat); if (writeEntropy && hufMetadata->hType == set_compressed) { memcpy(op, hufMetadata->hufDesBuffer, hufMetadata->hufDesSize); op += hufMetadata->hufDesSize; cLitSize += hufMetadata->hufDesSize; DEBUGLOG(5, "ZSTD_compressSubBlock_literal (hSize=%zu)", hufMetadata->hufDesSize); } /* TODO bmi2 */ { const size_t cSize = singleStream ? HUF_compress1X_usingCTable(op, oend-op, literals, litSize, hufTable) : HUF_compress4X_usingCTable(op, oend-op, literals, litSize, hufTable); op += cSize; cLitSize += cSize; if (cSize == 0 || ERR_isError(cSize)) { DEBUGLOG(5, "Failed to write entropy tables %s", ZSTD_getErrorName(cSize)); return 0; } /* If we expand and we aren't writing a header then emit uncompressed */ if (!writeEntropy && cLitSize >= litSize) { DEBUGLOG(5, "ZSTD_compressSubBlock_literal using raw literal because uncompressible"); return ZSTD_noCompressLiterals(dst, dstSize, literals, litSize); } /* If we are writing headers then allow expansion that doesn't change our header size. 
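 * If cLitSize ended up requiring a larger header-size class than the lhSize reserved above, the guess was wrong, so the function falls back to raw (uncompressed) literals below.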
*/ if (lhSize < (size_t)(3 + (cLitSize >= 1 KB) + (cLitSize >= 16 KB))) { assert(cLitSize > litSize); DEBUGLOG(5, "Literals expanded beyond allowed header size"); return ZSTD_noCompressLiterals(dst, dstSize, literals, litSize); } DEBUGLOG(5, "ZSTD_compressSubBlock_literal (cSize=%zu)", cSize); } /* Build header */ switch(lhSize) { case 3: /* 2 - 2 - 10 - 10 */ { U32 const lhc = hType + ((!singleStream) << 2) + ((U32)litSize<<4) + ((U32)cLitSize<<14); MEM_writeLE24(ostart, lhc); break; } case 4: /* 2 - 2 - 14 - 14 */ { U32 const lhc = hType + (2 << 2) + ((U32)litSize<<4) + ((U32)cLitSize<<18); MEM_writeLE32(ostart, lhc); break; } case 5: /* 2 - 2 - 18 - 18 */ { U32 const lhc = hType + (3 << 2) + ((U32)litSize<<4) + ((U32)cLitSize<<22); MEM_writeLE32(ostart, lhc); ostart[4] = (BYTE)(cLitSize >> 10); break; } default: /* not possible : lhSize is {3,4,5} */ assert(0); } *entropyWritten = 1; DEBUGLOG(5, "Compressed literals: %u -> %u", (U32)litSize, (U32)(op-ostart)); return op-ostart; } static size_t ZSTD_seqDecompressedSize(seqStore_t const* seqStore, const seqDef* sequences, size_t nbSeq, size_t litSize, int lastSequence) { const seqDef* const sstart = sequences; const seqDef* const send = sequences + nbSeq; const seqDef* sp = sstart; size_t matchLengthSum = 0; size_t litLengthSum = 0; while (send-sp > 0) { ZSTD_sequenceLength const seqLen = ZSTD_getSequenceLength(seqStore, sp); litLengthSum += seqLen.litLength; matchLengthSum += seqLen.matchLength; sp++; } assert(litLengthSum <= litSize); if (!lastSequence) { assert(litLengthSum == litSize); } return matchLengthSum + litSize; } /** ZSTD_compressSubBlock_sequences() : * Compresses sequences section for a sub-block. * fseMetadata->llType, fseMetadata->ofType, and fseMetadata->mlType have * symbol compression modes for the super-block. * The first successfully compressed block will have these in its header. * We set entropyWritten=1 when we succeed in compressing the sequences. * The following sub-blocks will always have repeat mode. * @return : compressed size of sequences section of a sub-block * Or 0 if it is unable to compress * Or error code. 
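 * Layout written here: the sequence count, a 1-byte seqHead packing the three symbol compression modes, optionally the FSE table descriptions (when writeEntropy), then the encoded bitstream.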
*/ static size_t ZSTD_compressSubBlock_sequences(const ZSTD_fseCTables_t* fseTables, const ZSTD_fseCTablesMetadata_t* fseMetadata, const seqDef* sequences, size_t nbSeq, const BYTE* llCode, const BYTE* mlCode, const BYTE* ofCode, const ZSTD_CCtx_params* cctxParams, void* dst, size_t dstCapacity, const int bmi2, int writeEntropy, int* entropyWritten) { const int longOffsets = cctxParams->cParams.windowLog > STREAM_ACCUMULATOR_MIN; BYTE* const ostart = (BYTE*)dst; BYTE* const oend = ostart + dstCapacity; BYTE* op = ostart; BYTE* seqHead; DEBUGLOG(5, "ZSTD_compressSubBlock_sequences (nbSeq=%zu, writeEntropy=%d, longOffsets=%d)", nbSeq, writeEntropy, longOffsets); *entropyWritten = 0; /* Sequences Header */ RETURN_ERROR_IF((oend-op) < 3 /*max nbSeq Size*/ + 1 /*seqHead*/, dstSize_tooSmall, ""); if (nbSeq < 0x7F) *op++ = (BYTE)nbSeq; else if (nbSeq < LONGNBSEQ) op[0] = (BYTE)((nbSeq>>8) + 0x80), op[1] = (BYTE)nbSeq, op+=2; else op[0]=0xFF, MEM_writeLE16(op+1, (U16)(nbSeq - LONGNBSEQ)), op+=3; if (nbSeq==0) { return op - ostart; } /* seqHead : flags for FSE encoding type */ seqHead = op++; DEBUGLOG(5, "ZSTD_compressSubBlock_sequences (seqHeadSize=%u)", (unsigned)(op-ostart)); if (writeEntropy) { const U32 LLtype = fseMetadata->llType; const U32 Offtype = fseMetadata->ofType; const U32 MLtype = fseMetadata->mlType; DEBUGLOG(5, "ZSTD_compressSubBlock_sequences (fseTablesSize=%zu)", fseMetadata->fseTablesSize); *seqHead = (BYTE)((LLtype<<6) + (Offtype<<4) + (MLtype<<2)); memcpy(op, fseMetadata->fseTablesBuffer, fseMetadata->fseTablesSize); op += fseMetadata->fseTablesSize; } else { const U32 repeat = set_repeat; *seqHead = (BYTE)((repeat<<6) + (repeat<<4) + (repeat<<2)); } { size_t const bitstreamSize = ZSTD_encodeSequences( op, oend - op, fseTables->matchlengthCTable, mlCode, fseTables->offcodeCTable, ofCode, fseTables->litlengthCTable, llCode, sequences, nbSeq, longOffsets, bmi2); FORWARD_IF_ERROR(bitstreamSize, "ZSTD_encodeSequences failed"); op += bitstreamSize; /* zstd versions <= 1.3.4 mistakenly report corruption when * FSE_readNCount() receives a buffer < 4 bytes. * Fixed by https://github.com/facebook/zstd/pull/1146. * This can happen when the last set_compressed table present is 2 * bytes and the bitstream is only one byte. * In this exceedingly rare case, we will simply emit an uncompressed * block, since it isn't worth optimizing. */ #ifndef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION if (writeEntropy && fseMetadata->lastCountSize && fseMetadata->lastCountSize + bitstreamSize < 4) { /* NCountSize >= 2 && bitstreamSize > 0 ==> lastCountSize == 3 */ assert(fseMetadata->lastCountSize + bitstreamSize == 3); DEBUGLOG(5, "Avoiding bug in zstd decoder in versions <= 1.3.4 by " "emitting an uncompressed block."); return 0; } #endif DEBUGLOG(5, "ZSTD_compressSubBlock_sequences (bitstreamSize=%zu)", bitstreamSize); } /* zstd versions <= 1.4.0 mistakenly report error when * sequences section body size is less than 3 bytes. * Fixed by https://github.com/facebook/zstd/pull/1664. * This can happen when the previous sequences section block is compressed * with rle mode and the current block's sequences section is compressed * with repeat mode where sequences section body size can be 1 byte. 
*/ #ifndef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION if (op-seqHead < 4) { DEBUGLOG(5, "Avoiding bug in zstd decoder in versions <= 1.4.0 by emitting " "an uncompressed block when sequences are < 4 bytes"); return 0; } #endif *entropyWritten = 1; return op - ostart; } /** ZSTD_compressSubBlock() : * Compresses a single sub-block. * @return : compressed size of the sub-block * Or 0 if it failed to compress. */ static size_t ZSTD_compressSubBlock(const ZSTD_entropyCTables_t* entropy, const ZSTD_entropyCTablesMetadata_t* entropyMetadata, const seqDef* sequences, size_t nbSeq, const BYTE* literals, size_t litSize, const BYTE* llCode, const BYTE* mlCode, const BYTE* ofCode, const ZSTD_CCtx_params* cctxParams, void* dst, size_t dstCapacity, const int bmi2, int writeLitEntropy, int writeSeqEntropy, int* litEntropyWritten, int* seqEntropyWritten, U32 lastBlock) { BYTE* const ostart = (BYTE*)dst; BYTE* const oend = ostart + dstCapacity; BYTE* op = ostart + ZSTD_blockHeaderSize; DEBUGLOG(5, "ZSTD_compressSubBlock (litSize=%zu, nbSeq=%zu, writeLitEntropy=%d, writeSeqEntropy=%d, lastBlock=%d)", litSize, nbSeq, writeLitEntropy, writeSeqEntropy, lastBlock); { size_t cLitSize = ZSTD_compressSubBlock_literal((const HUF_CElt*)entropy->huf.CTable, &entropyMetadata->hufMetadata, literals, litSize, op, oend-op, bmi2, writeLitEntropy, litEntropyWritten); FORWARD_IF_ERROR(cLitSize, "ZSTD_compressSubBlock_literal failed"); if (cLitSize == 0) return 0; op += cLitSize; } { size_t cSeqSize = ZSTD_compressSubBlock_sequences(&entropy->fse, &entropyMetadata->fseMetadata, sequences, nbSeq, llCode, mlCode, ofCode, cctxParams, op, oend-op, bmi2, writeSeqEntropy, seqEntropyWritten); FORWARD_IF_ERROR(cSeqSize, "ZSTD_compressSubBlock_sequences failed"); if (cSeqSize == 0) return 0; op += cSeqSize; } /* Write block header */ { size_t cSize = (op-ostart)-ZSTD_blockHeaderSize; U32 const cBlockHeader24 = lastBlock + (((U32)bt_compressed)<<1) + (U32)(cSize << 3); MEM_writeLE24(ostart, cBlockHeader24); } return op-ostart; } static size_t ZSTD_estimateSubBlockSize_literal(const BYTE* literals, size_t litSize, const ZSTD_hufCTables_t* huf, const ZSTD_hufCTablesMetadata_t* hufMetadata, void* workspace, size_t wkspSize, int writeEntropy) { unsigned* const countWksp = (unsigned*)workspace; unsigned maxSymbolValue = 255; size_t literalSectionHeaderSize = 3; /* Use hard coded size of 3 bytes */ if (hufMetadata->hType == set_basic) return litSize; else if (hufMetadata->hType == set_rle) return 1; else if (hufMetadata->hType == set_compressed || hufMetadata->hType == set_repeat) { size_t const largest = HIST_count_wksp (countWksp, &maxSymbolValue, (const BYTE*)literals, litSize, workspace, wkspSize); if (ZSTD_isError(largest)) return litSize; { size_t cLitSizeEstimate = HUF_estimateCompressedSize((const HUF_CElt*)huf->CTable, countWksp, maxSymbolValue); if (writeEntropy) cLitSizeEstimate += hufMetadata->hufDesSize; return cLitSizeEstimate + literalSectionHeaderSize; } } assert(0); /* impossible */ return 0; } static size_t ZSTD_estimateSubBlockSize_symbolType(symbolEncodingType_e type, const BYTE* codeTable, unsigned maxCode, size_t nbSeq, const FSE_CTable* fseCTable, const U32* additionalBits, short const* defaultNorm, U32 defaultNormLog, void* workspace, size_t wkspSize) { unsigned* const countWksp = (unsigned*)workspace; const BYTE* ctp = codeTable; const BYTE* const ctStart = ctp; const BYTE* const ctEnd = ctStart + nbSeq; size_t cSymbolTypeSizeEstimateInBits = 0; unsigned max = maxCode; HIST_countFast_wksp(countWksp, &max, codeTable, 
nbSeq, workspace, wkspSize); /* can't fail */ if (type == set_basic) { cSymbolTypeSizeEstimateInBits = ZSTD_crossEntropyCost(defaultNorm, defaultNormLog, countWksp, max); } else if (type == set_rle) { cSymbolTypeSizeEstimateInBits = 0; } else if (type == set_compressed || type == set_repeat) { cSymbolTypeSizeEstimateInBits = ZSTD_fseBitCost(fseCTable, countWksp, max); } if (ZSTD_isError(cSymbolTypeSizeEstimateInBits)) return nbSeq * 10; while (ctp < ctEnd) { if (additionalBits) cSymbolTypeSizeEstimateInBits += additionalBits[*ctp]; else cSymbolTypeSizeEstimateInBits += *ctp; /* for offset, offset code is also the number of additional bits */ ctp++; } return cSymbolTypeSizeEstimateInBits / 8; } static size_t ZSTD_estimateSubBlockSize_sequences(const BYTE* ofCodeTable, const BYTE* llCodeTable, const BYTE* mlCodeTable, size_t nbSeq, const ZSTD_fseCTables_t* fseTables, const ZSTD_fseCTablesMetadata_t* fseMetadata, void* workspace, size_t wkspSize, int writeEntropy) { size_t sequencesSectionHeaderSize = 3; /* Use hard coded size of 3 bytes */ size_t cSeqSizeEstimate = 0; cSeqSizeEstimate += ZSTD_estimateSubBlockSize_symbolType(fseMetadata->ofType, ofCodeTable, MaxOff, nbSeq, fseTables->offcodeCTable, NULL, OF_defaultNorm, OF_defaultNormLog, workspace, wkspSize); cSeqSizeEstimate += ZSTD_estimateSubBlockSize_symbolType(fseMetadata->llType, llCodeTable, MaxLL, nbSeq, fseTables->litlengthCTable, LL_bits, LL_defaultNorm, LL_defaultNormLog, workspace, wkspSize); cSeqSizeEstimate += ZSTD_estimateSubBlockSize_symbolType(fseMetadata->mlType, mlCodeTable, MaxML, nbSeq, fseTables->matchlengthCTable, ML_bits, ML_defaultNorm, ML_defaultNormLog, workspace, wkspSize); if (writeEntropy) cSeqSizeEstimate += fseMetadata->fseTablesSize; return cSeqSizeEstimate + sequencesSectionHeaderSize; } static size_t ZSTD_estimateSubBlockSize(const BYTE* literals, size_t litSize, const BYTE* ofCodeTable, const BYTE* llCodeTable, const BYTE* mlCodeTable, size_t nbSeq, const ZSTD_entropyCTables_t* entropy, const ZSTD_entropyCTablesMetadata_t* entropyMetadata, void* workspace, size_t wkspSize, int writeLitEntropy, int writeSeqEntropy) { size_t cSizeEstimate = 0; cSizeEstimate += ZSTD_estimateSubBlockSize_literal(literals, litSize, &entropy->huf, &entropyMetadata->hufMetadata, workspace, wkspSize, writeLitEntropy); cSizeEstimate += ZSTD_estimateSubBlockSize_sequences(ofCodeTable, llCodeTable, mlCodeTable, nbSeq, &entropy->fse, &entropyMetadata->fseMetadata, workspace, wkspSize, writeSeqEntropy); return cSizeEstimate + ZSTD_blockHeaderSize; } static int ZSTD_needSequenceEntropyTables(ZSTD_fseCTablesMetadata_t const* fseMetadata) { if (fseMetadata->llType == set_compressed || fseMetadata->llType == set_rle) return 1; if (fseMetadata->mlType == set_compressed || fseMetadata->mlType == set_rle) return 1; if (fseMetadata->ofType == set_compressed || fseMetadata->ofType == set_rle) return 1; return 0; } /** ZSTD_compressSubBlock_multi() : * Breaks super-block into multiple sub-blocks and compresses them. * Entropy will be written to the first block. * The following blocks will use repeat mode to compress. * All sub-blocks are compressed blocks (no raw or rle blocks). * @return : compressed size of the super block (which is multiple ZSTD blocks) * Or 0 if it failed to compress. 
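 *
 *  A rough, illustrative sketch of the splitting loop below (pseudo-code only;
 *  variable names match the implementation, error handling omitted):
 *
 *    while (!lastSequence) {
 *        accumulate next sequence: litSize += its litLength; seqCount++;
 *        est = ZSTD_estimateSubBlockSize(lp, litSize, ..., seqCount, ...);
 *        if (est > targetCBlockSize || lastSequence) {
 *            cSize = ZSTD_compressSubBlock(..., sp, seqCount, lp, litSize, ...);
 *            if (cSize > 0 && cSize < decompressedSize)
 *                commit the sub-block: advance ip/sp/lp/op, reset litSize/seqCount,
 *                and stop re-emitting entropy tables once they have been written;
 *            otherwise keep accumulating; anything left over is emitted as a
 *            single uncompressed block after the loop.
 *        }
 *    }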
*/ static size_t ZSTD_compressSubBlock_multi(const seqStore_t* seqStorePtr, const ZSTD_compressedBlockState_t* prevCBlock, ZSTD_compressedBlockState_t* nextCBlock, const ZSTD_entropyCTablesMetadata_t* entropyMetadata, const ZSTD_CCtx_params* cctxParams, void* dst, size_t dstCapacity, const void* src, size_t srcSize, const int bmi2, U32 lastBlock, void* workspace, size_t wkspSize) { const seqDef* const sstart = seqStorePtr->sequencesStart; const seqDef* const send = seqStorePtr->sequences; const seqDef* sp = sstart; const BYTE* const lstart = seqStorePtr->litStart; const BYTE* const lend = seqStorePtr->lit; const BYTE* lp = lstart; BYTE const* ip = (BYTE const*)src; BYTE const* const iend = ip + srcSize; BYTE* const ostart = (BYTE*)dst; BYTE* const oend = ostart + dstCapacity; BYTE* op = ostart; const BYTE* llCodePtr = seqStorePtr->llCode; const BYTE* mlCodePtr = seqStorePtr->mlCode; const BYTE* ofCodePtr = seqStorePtr->ofCode; size_t targetCBlockSize = cctxParams->targetCBlockSize; size_t litSize, seqCount; int writeLitEntropy = entropyMetadata->hufMetadata.hType == set_compressed; int writeSeqEntropy = 1; int lastSequence = 0; DEBUGLOG(5, "ZSTD_compressSubBlock_multi (litSize=%u, nbSeq=%u)", (unsigned)(lend-lp), (unsigned)(send-sstart)); litSize = 0; seqCount = 0; do { size_t cBlockSizeEstimate = 0; if (sstart == send) { lastSequence = 1; } else { const seqDef* const sequence = sp + seqCount; lastSequence = sequence == send - 1; litSize += ZSTD_getSequenceLength(seqStorePtr, sequence).litLength; seqCount++; } if (lastSequence) { assert(lp <= lend); assert(litSize <= (size_t)(lend - lp)); litSize = (size_t)(lend - lp); } /* I think there is an optimization opportunity here. * Calling ZSTD_estimateSubBlockSize for every sequence can be wasteful * since it recalculates estimate from scratch. * For example, it would recount literal distribution and symbol codes everytime. */ cBlockSizeEstimate = ZSTD_estimateSubBlockSize(lp, litSize, ofCodePtr, llCodePtr, mlCodePtr, seqCount, &nextCBlock->entropy, entropyMetadata, workspace, wkspSize, writeLitEntropy, writeSeqEntropy); if (cBlockSizeEstimate > targetCBlockSize || lastSequence) { int litEntropyWritten = 0; int seqEntropyWritten = 0; const size_t decompressedSize = ZSTD_seqDecompressedSize(seqStorePtr, sp, seqCount, litSize, lastSequence); const size_t cSize = ZSTD_compressSubBlock(&nextCBlock->entropy, entropyMetadata, sp, seqCount, lp, litSize, llCodePtr, mlCodePtr, ofCodePtr, cctxParams, op, oend-op, bmi2, writeLitEntropy, writeSeqEntropy, &litEntropyWritten, &seqEntropyWritten, lastBlock && lastSequence); FORWARD_IF_ERROR(cSize, "ZSTD_compressSubBlock failed"); if (cSize > 0 && cSize < decompressedSize) { DEBUGLOG(5, "Committed the sub-block"); assert(ip + decompressedSize <= iend); ip += decompressedSize; sp += seqCount; lp += litSize; op += cSize; llCodePtr += seqCount; mlCodePtr += seqCount; ofCodePtr += seqCount; litSize = 0; seqCount = 0; /* Entropy only needs to be written once */ if (litEntropyWritten) { writeLitEntropy = 0; } if (seqEntropyWritten) { writeSeqEntropy = 0; } } } } while (!lastSequence); if (writeLitEntropy) { DEBUGLOG(5, "ZSTD_compressSubBlock_multi has literal entropy tables unwritten"); memcpy(&nextCBlock->entropy.huf, &prevCBlock->entropy.huf, sizeof(prevCBlock->entropy.huf)); } if (writeSeqEntropy && ZSTD_needSequenceEntropyTables(&entropyMetadata->fseMetadata)) { /* If we haven't written our entropy tables, then we've violated our contract and * must emit an uncompressed block. 
*/ DEBUGLOG(5, "ZSTD_compressSubBlock_multi has sequence entropy tables unwritten"); return 0; } if (ip < iend) { size_t const cSize = ZSTD_noCompressBlock(op, oend - op, ip, iend - ip, lastBlock); DEBUGLOG(5, "ZSTD_compressSubBlock_multi last sub-block uncompressed, %zu bytes", (size_t)(iend - ip)); FORWARD_IF_ERROR(cSize, "ZSTD_noCompressBlock failed"); assert(cSize != 0); op += cSize; /* We have to regenerate the repcodes because we've skipped some sequences */ if (sp < send) { seqDef const* seq; repcodes_t rep; memcpy(&rep, prevCBlock->rep, sizeof(rep)); for (seq = sstart; seq < sp; ++seq) { rep = ZSTD_updateRep(rep.rep, seq->offset - 1, ZSTD_getSequenceLength(seqStorePtr, seq).litLength == 0); } memcpy(nextCBlock->rep, &rep, sizeof(rep)); } } DEBUGLOG(5, "ZSTD_compressSubBlock_multi compressed"); return op-ostart; } size_t ZSTD_compressSuperBlock(ZSTD_CCtx* zc, void* dst, size_t dstCapacity, void const* src, size_t srcSize, unsigned lastBlock) { ZSTD_entropyCTablesMetadata_t entropyMetadata; FORWARD_IF_ERROR(ZSTD_buildSuperBlockEntropy(&zc->seqStore, &zc->blockState.prevCBlock->entropy, &zc->blockState.nextCBlock->entropy, &zc->appliedParams, &entropyMetadata, zc->entropyWorkspace, HUF_WORKSPACE_SIZE /* statically allocated in resetCCtx */), ""); return ZSTD_compressSubBlock_multi(&zc->seqStore, zc->blockState.prevCBlock, zc->blockState.nextCBlock, &entropyMetadata, &zc->appliedParams, dst, dstCapacity, src, srcSize, zc->bmi2, lastBlock, zc->entropyWorkspace, HUF_WORKSPACE_SIZE /* statically allocated in resetCCtx */); } borgbackup-1.1.15/src/borg/algorithms/zstd/lib/compress/zstdmt_compress.c0000644000175000017500000026732213771325506026603 0ustar useruser00000000000000/* * Copyright (c) 2016-2020, Yann Collet, Facebook, Inc. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the * LICENSE file in the root directory of this source tree) and the GPLv2 (found * in the COPYING file in the root directory of this source tree). * You may select, at your option, one of the above-listed licenses. */ /* ====== Compiler specifics ====== */ #if defined(_MSC_VER) # pragma warning(disable : 4204) /* disable: C4204: non-constant aggregate initializer */ #endif /* ====== Constants ====== */ #define ZSTDMT_OVERLAPLOG_DEFAULT 0 /* ====== Dependencies ====== */ #include /* memcpy, memset */ #include /* INT_MAX, UINT_MAX */ #include "../common/mem.h" /* MEM_STATIC */ #include "../common/pool.h" /* threadpool */ #include "../common/threading.h" /* mutex */ #include "zstd_compress_internal.h" /* MIN, ERROR, ZSTD_*, ZSTD_highbit32 */ #include "zstd_ldm.h" #include "zstdmt_compress.h" /* Guards code to support resizing the SeqPool. * We will want to resize the SeqPool to save memory in the future. * Until then, comment the code out since it is unused. 
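 *
 *  The guarded code consists of ZSTDMT_resizeBuffer() and ZSTDMT_resizeSeq()
 *  further down in this file. A hypothetical usage sketch, assuming the guard
 *  were flipped to 1 (not how the current code is built):
 *
 *    rawSeqStore_t seq = ZSTDMT_getSeq(seqPool);
 *    seq = ZSTDMT_resizeSeq(seqPool, seq);   reallocates (copying content) so that
 *                                            seq can hold the pool's current bufferSize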
*/ #define ZSTD_RESIZE_SEQPOOL 0 /* ====== Debug ====== */ #if defined(DEBUGLEVEL) && (DEBUGLEVEL>=2) \ && !defined(_MSC_VER) \ && !defined(__MINGW32__) # include # include # include # define DEBUG_PRINTHEX(l,p,n) { \ unsigned debug_u; \ for (debug_u=0; debug_u<(n); debug_u++) \ RAWLOG(l, "%02X ", ((const unsigned char*)(p))[debug_u]); \ RAWLOG(l, " \n"); \ } static unsigned long long GetCurrentClockTimeMicroseconds(void) { static clock_t _ticksPerSecond = 0; if (_ticksPerSecond <= 0) _ticksPerSecond = sysconf(_SC_CLK_TCK); { struct tms junk; clock_t newTicks = (clock_t) times(&junk); return ((((unsigned long long)newTicks)*(1000000))/_ticksPerSecond); } } #define MUTEX_WAIT_TIME_DLEVEL 6 #define ZSTD_PTHREAD_MUTEX_LOCK(mutex) { \ if (DEBUGLEVEL >= MUTEX_WAIT_TIME_DLEVEL) { \ unsigned long long const beforeTime = GetCurrentClockTimeMicroseconds(); \ ZSTD_pthread_mutex_lock(mutex); \ { unsigned long long const afterTime = GetCurrentClockTimeMicroseconds(); \ unsigned long long const elapsedTime = (afterTime-beforeTime); \ if (elapsedTime > 1000) { /* or whatever threshold you like; I'm using 1 millisecond here */ \ DEBUGLOG(MUTEX_WAIT_TIME_DLEVEL, "Thread took %llu microseconds to acquire mutex %s \n", \ elapsedTime, #mutex); \ } } \ } else { \ ZSTD_pthread_mutex_lock(mutex); \ } \ } #else # define ZSTD_PTHREAD_MUTEX_LOCK(m) ZSTD_pthread_mutex_lock(m) # define DEBUG_PRINTHEX(l,p,n) {} #endif /* ===== Buffer Pool ===== */ /* a single Buffer Pool can be invoked from multiple threads in parallel */ typedef struct buffer_s { void* start; size_t capacity; } buffer_t; static const buffer_t g_nullBuffer = { NULL, 0 }; typedef struct ZSTDMT_bufferPool_s { ZSTD_pthread_mutex_t poolMutex; size_t bufferSize; unsigned totalBuffers; unsigned nbBuffers; ZSTD_customMem cMem; buffer_t bTable[1]; /* variable size */ } ZSTDMT_bufferPool; static ZSTDMT_bufferPool* ZSTDMT_createBufferPool(unsigned nbWorkers, ZSTD_customMem cMem) { unsigned const maxNbBuffers = 2*nbWorkers + 3; ZSTDMT_bufferPool* const bufPool = (ZSTDMT_bufferPool*)ZSTD_calloc( sizeof(ZSTDMT_bufferPool) + (maxNbBuffers-1) * sizeof(buffer_t), cMem); if (bufPool==NULL) return NULL; if (ZSTD_pthread_mutex_init(&bufPool->poolMutex, NULL)) { ZSTD_free(bufPool, cMem); return NULL; } bufPool->bufferSize = 64 KB; bufPool->totalBuffers = maxNbBuffers; bufPool->nbBuffers = 0; bufPool->cMem = cMem; return bufPool; } static void ZSTDMT_freeBufferPool(ZSTDMT_bufferPool* bufPool) { unsigned u; DEBUGLOG(3, "ZSTDMT_freeBufferPool (address:%08X)", (U32)(size_t)bufPool); if (!bufPool) return; /* compatibility with free on NULL */ for (u=0; utotalBuffers; u++) { DEBUGLOG(4, "free buffer %2u (address:%08X)", u, (U32)(size_t)bufPool->bTable[u].start); ZSTD_free(bufPool->bTable[u].start, bufPool->cMem); } ZSTD_pthread_mutex_destroy(&bufPool->poolMutex); ZSTD_free(bufPool, bufPool->cMem); } /* only works at initialization, not during compression */ static size_t ZSTDMT_sizeof_bufferPool(ZSTDMT_bufferPool* bufPool) { size_t const poolSize = sizeof(*bufPool) + (bufPool->totalBuffers - 1) * sizeof(buffer_t); unsigned u; size_t totalBufferSize = 0; ZSTD_pthread_mutex_lock(&bufPool->poolMutex); for (u=0; utotalBuffers; u++) totalBufferSize += bufPool->bTable[u].capacity; ZSTD_pthread_mutex_unlock(&bufPool->poolMutex); return poolSize + totalBufferSize; } /* ZSTDMT_setBufferSize() : * all future buffers provided by this buffer pool will have _at least_ this size * note : it's better for all buffers to have same size, * as they become freely interchangeable, reducing 
malloc/free usages and memory fragmentation */ static void ZSTDMT_setBufferSize(ZSTDMT_bufferPool* const bufPool, size_t const bSize) { ZSTD_pthread_mutex_lock(&bufPool->poolMutex); DEBUGLOG(4, "ZSTDMT_setBufferSize: bSize = %u", (U32)bSize); bufPool->bufferSize = bSize; ZSTD_pthread_mutex_unlock(&bufPool->poolMutex); } static ZSTDMT_bufferPool* ZSTDMT_expandBufferPool(ZSTDMT_bufferPool* srcBufPool, U32 nbWorkers) { unsigned const maxNbBuffers = 2*nbWorkers + 3; if (srcBufPool==NULL) return NULL; if (srcBufPool->totalBuffers >= maxNbBuffers) /* good enough */ return srcBufPool; /* need a larger buffer pool */ { ZSTD_customMem const cMem = srcBufPool->cMem; size_t const bSize = srcBufPool->bufferSize; /* forward parameters */ ZSTDMT_bufferPool* newBufPool; ZSTDMT_freeBufferPool(srcBufPool); newBufPool = ZSTDMT_createBufferPool(nbWorkers, cMem); if (newBufPool==NULL) return newBufPool; ZSTDMT_setBufferSize(newBufPool, bSize); return newBufPool; } } /** ZSTDMT_getBuffer() : * assumption : bufPool must be valid * @return : a buffer, with start pointer and size * note: allocation may fail, in this case, start==NULL and size==0 */ static buffer_t ZSTDMT_getBuffer(ZSTDMT_bufferPool* bufPool) { size_t const bSize = bufPool->bufferSize; DEBUGLOG(5, "ZSTDMT_getBuffer: bSize = %u", (U32)bufPool->bufferSize); ZSTD_pthread_mutex_lock(&bufPool->poolMutex); if (bufPool->nbBuffers) { /* try to use an existing buffer */ buffer_t const buf = bufPool->bTable[--(bufPool->nbBuffers)]; size_t const availBufferSize = buf.capacity; bufPool->bTable[bufPool->nbBuffers] = g_nullBuffer; if ((availBufferSize >= bSize) & ((availBufferSize>>3) <= bSize)) { /* large enough, but not too much */ DEBUGLOG(5, "ZSTDMT_getBuffer: provide buffer %u of size %u", bufPool->nbBuffers, (U32)buf.capacity); ZSTD_pthread_mutex_unlock(&bufPool->poolMutex); return buf; } /* size conditions not respected : scratch this buffer, create new one */ DEBUGLOG(5, "ZSTDMT_getBuffer: existing buffer does not meet size conditions => freeing"); ZSTD_free(buf.start, bufPool->cMem); } ZSTD_pthread_mutex_unlock(&bufPool->poolMutex); /* create new buffer */ DEBUGLOG(5, "ZSTDMT_getBuffer: create a new buffer"); { buffer_t buffer; void* const start = ZSTD_malloc(bSize, bufPool->cMem); buffer.start = start; /* note : start can be NULL if malloc fails ! */ buffer.capacity = (start==NULL) ? 0 : bSize; if (start==NULL) { DEBUGLOG(5, "ZSTDMT_getBuffer: buffer allocation failure !!"); } else { DEBUGLOG(5, "ZSTDMT_getBuffer: created buffer of size %u", (U32)bSize); } return buffer; } } #if ZSTD_RESIZE_SEQPOOL /** ZSTDMT_resizeBuffer() : * assumption : bufPool must be valid * @return : a buffer that is at least the buffer pool buffer size. * If a reallocation happens, the data in the input buffer is copied. */ static buffer_t ZSTDMT_resizeBuffer(ZSTDMT_bufferPool* bufPool, buffer_t buffer) { size_t const bSize = bufPool->bufferSize; if (buffer.capacity < bSize) { void* const start = ZSTD_malloc(bSize, bufPool->cMem); buffer_t newBuffer; newBuffer.start = start; newBuffer.capacity = start == NULL ? 
0 : bSize; if (start != NULL) { assert(newBuffer.capacity >= buffer.capacity); memcpy(newBuffer.start, buffer.start, buffer.capacity); DEBUGLOG(5, "ZSTDMT_resizeBuffer: created buffer of size %u", (U32)bSize); return newBuffer; } DEBUGLOG(5, "ZSTDMT_resizeBuffer: buffer allocation failure !!"); } return buffer; } #endif /* store buffer for later re-use, up to pool capacity */ static void ZSTDMT_releaseBuffer(ZSTDMT_bufferPool* bufPool, buffer_t buf) { DEBUGLOG(5, "ZSTDMT_releaseBuffer"); if (buf.start == NULL) return; /* compatible with release on NULL */ ZSTD_pthread_mutex_lock(&bufPool->poolMutex); if (bufPool->nbBuffers < bufPool->totalBuffers) { bufPool->bTable[bufPool->nbBuffers++] = buf; /* stored for later use */ DEBUGLOG(5, "ZSTDMT_releaseBuffer: stored buffer of size %u in slot %u", (U32)buf.capacity, (U32)(bufPool->nbBuffers-1)); ZSTD_pthread_mutex_unlock(&bufPool->poolMutex); return; } ZSTD_pthread_mutex_unlock(&bufPool->poolMutex); /* Reached bufferPool capacity (should not happen) */ DEBUGLOG(5, "ZSTDMT_releaseBuffer: pool capacity reached => freeing "); ZSTD_free(buf.start, bufPool->cMem); } /* ===== Seq Pool Wrapper ====== */ static rawSeqStore_t kNullRawSeqStore = {NULL, 0, 0, 0}; typedef ZSTDMT_bufferPool ZSTDMT_seqPool; static size_t ZSTDMT_sizeof_seqPool(ZSTDMT_seqPool* seqPool) { return ZSTDMT_sizeof_bufferPool(seqPool); } static rawSeqStore_t bufferToSeq(buffer_t buffer) { rawSeqStore_t seq = {NULL, 0, 0, 0}; seq.seq = (rawSeq*)buffer.start; seq.capacity = buffer.capacity / sizeof(rawSeq); return seq; } static buffer_t seqToBuffer(rawSeqStore_t seq) { buffer_t buffer; buffer.start = seq.seq; buffer.capacity = seq.capacity * sizeof(rawSeq); return buffer; } static rawSeqStore_t ZSTDMT_getSeq(ZSTDMT_seqPool* seqPool) { if (seqPool->bufferSize == 0) { return kNullRawSeqStore; } return bufferToSeq(ZSTDMT_getBuffer(seqPool)); } #if ZSTD_RESIZE_SEQPOOL static rawSeqStore_t ZSTDMT_resizeSeq(ZSTDMT_seqPool* seqPool, rawSeqStore_t seq) { return bufferToSeq(ZSTDMT_resizeBuffer(seqPool, seqToBuffer(seq))); } #endif static void ZSTDMT_releaseSeq(ZSTDMT_seqPool* seqPool, rawSeqStore_t seq) { ZSTDMT_releaseBuffer(seqPool, seqToBuffer(seq)); } static void ZSTDMT_setNbSeq(ZSTDMT_seqPool* const seqPool, size_t const nbSeq) { ZSTDMT_setBufferSize(seqPool, nbSeq * sizeof(rawSeq)); } static ZSTDMT_seqPool* ZSTDMT_createSeqPool(unsigned nbWorkers, ZSTD_customMem cMem) { ZSTDMT_seqPool* const seqPool = ZSTDMT_createBufferPool(nbWorkers, cMem); if (seqPool == NULL) return NULL; ZSTDMT_setNbSeq(seqPool, 0); return seqPool; } static void ZSTDMT_freeSeqPool(ZSTDMT_seqPool* seqPool) { ZSTDMT_freeBufferPool(seqPool); } static ZSTDMT_seqPool* ZSTDMT_expandSeqPool(ZSTDMT_seqPool* pool, U32 nbWorkers) { return ZSTDMT_expandBufferPool(pool, nbWorkers); } /* ===== CCtx Pool ===== */ /* a single CCtx Pool can be invoked from multiple threads in parallel */ typedef struct { ZSTD_pthread_mutex_t poolMutex; int totalCCtx; int availCCtx; ZSTD_customMem cMem; ZSTD_CCtx* cctx[1]; /* variable size */ } ZSTDMT_CCtxPool; /* note : all CCtx borrowed from the pool should be released back to the pool _before_ freeing the pool */ static void ZSTDMT_freeCCtxPool(ZSTDMT_CCtxPool* pool) { int cid; for (cid=0; cidtotalCCtx; cid++) ZSTD_freeCCtx(pool->cctx[cid]); /* note : compatible with free on NULL */ ZSTD_pthread_mutex_destroy(&pool->poolMutex); ZSTD_free(pool, pool->cMem); } /* ZSTDMT_createCCtxPool() : * implies nbWorkers >= 1 , checked by caller ZSTDMT_createCCtx() */ static ZSTDMT_CCtxPool* 
ZSTDMT_createCCtxPool(int nbWorkers, ZSTD_customMem cMem) { ZSTDMT_CCtxPool* const cctxPool = (ZSTDMT_CCtxPool*) ZSTD_calloc( sizeof(ZSTDMT_CCtxPool) + (nbWorkers-1)*sizeof(ZSTD_CCtx*), cMem); assert(nbWorkers > 0); if (!cctxPool) return NULL; if (ZSTD_pthread_mutex_init(&cctxPool->poolMutex, NULL)) { ZSTD_free(cctxPool, cMem); return NULL; } cctxPool->cMem = cMem; cctxPool->totalCCtx = nbWorkers; cctxPool->availCCtx = 1; /* at least one cctx for single-thread mode */ cctxPool->cctx[0] = ZSTD_createCCtx_advanced(cMem); if (!cctxPool->cctx[0]) { ZSTDMT_freeCCtxPool(cctxPool); return NULL; } DEBUGLOG(3, "cctxPool created, with %u workers", nbWorkers); return cctxPool; } static ZSTDMT_CCtxPool* ZSTDMT_expandCCtxPool(ZSTDMT_CCtxPool* srcPool, int nbWorkers) { if (srcPool==NULL) return NULL; if (nbWorkers <= srcPool->totalCCtx) return srcPool; /* good enough */ /* need a larger cctx pool */ { ZSTD_customMem const cMem = srcPool->cMem; ZSTDMT_freeCCtxPool(srcPool); return ZSTDMT_createCCtxPool(nbWorkers, cMem); } } /* only works during initialization phase, not during compression */ static size_t ZSTDMT_sizeof_CCtxPool(ZSTDMT_CCtxPool* cctxPool) { ZSTD_pthread_mutex_lock(&cctxPool->poolMutex); { unsigned const nbWorkers = cctxPool->totalCCtx; size_t const poolSize = sizeof(*cctxPool) + (nbWorkers-1) * sizeof(ZSTD_CCtx*); unsigned u; size_t totalCCtxSize = 0; for (u=0; ucctx[u]); } ZSTD_pthread_mutex_unlock(&cctxPool->poolMutex); assert(nbWorkers > 0); return poolSize + totalCCtxSize; } } static ZSTD_CCtx* ZSTDMT_getCCtx(ZSTDMT_CCtxPool* cctxPool) { DEBUGLOG(5, "ZSTDMT_getCCtx"); ZSTD_pthread_mutex_lock(&cctxPool->poolMutex); if (cctxPool->availCCtx) { cctxPool->availCCtx--; { ZSTD_CCtx* const cctx = cctxPool->cctx[cctxPool->availCCtx]; ZSTD_pthread_mutex_unlock(&cctxPool->poolMutex); return cctx; } } ZSTD_pthread_mutex_unlock(&cctxPool->poolMutex); DEBUGLOG(5, "create one more CCtx"); return ZSTD_createCCtx_advanced(cctxPool->cMem); /* note : can be NULL, when creation fails ! */ } static void ZSTDMT_releaseCCtx(ZSTDMT_CCtxPool* pool, ZSTD_CCtx* cctx) { if (cctx==NULL) return; /* compatibility with release on NULL */ ZSTD_pthread_mutex_lock(&pool->poolMutex); if (pool->availCCtx < pool->totalCCtx) pool->cctx[pool->availCCtx++] = cctx; else { /* pool overflow : should not happen, since totalCCtx==nbWorkers */ DEBUGLOG(4, "CCtx pool overflow : free cctx"); ZSTD_freeCCtx(cctx); } ZSTD_pthread_mutex_unlock(&pool->poolMutex); } /* ==== Serial State ==== */ typedef struct { void const* start; size_t size; } range_t; typedef struct { /* All variables in the struct are protected by mutex. */ ZSTD_pthread_mutex_t mutex; ZSTD_pthread_cond_t cond; ZSTD_CCtx_params params; ldmState_t ldmState; XXH64_state_t xxhState; unsigned nextJobID; /* Protects ldmWindow. * Must be acquired after the main mutex when acquiring both. 
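 *
 *  A minimal illustrative lock-ordering sketch, assuming both locks are needed
 *  at once (this mirrors ZSTDMT_serialState_ensureFinished() further down):
 *
 *    ZSTD_PTHREAD_MUTEX_LOCK(&serialState->mutex);
 *    ZSTD_PTHREAD_MUTEX_LOCK(&serialState->ldmWindowMutex);
 *    ... update or clear serialState->ldmWindow ...
 *    ZSTD_pthread_cond_signal(&serialState->ldmWindowCond);
 *    ZSTD_pthread_mutex_unlock(&serialState->ldmWindowMutex);
 *    ZSTD_pthread_mutex_unlock(&serialState->mutex);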
*/ ZSTD_pthread_mutex_t ldmWindowMutex; ZSTD_pthread_cond_t ldmWindowCond; /* Signaled when ldmWindow is updated */ ZSTD_window_t ldmWindow; /* A thread-safe copy of ldmState.window */ } serialState_t; static int ZSTDMT_serialState_reset(serialState_t* serialState, ZSTDMT_seqPool* seqPool, ZSTD_CCtx_params params, size_t jobSize, const void* dict, size_t const dictSize, ZSTD_dictContentType_e dictContentType) { /* Adjust parameters */ if (params.ldmParams.enableLdm) { DEBUGLOG(4, "LDM window size = %u KB", (1U << params.cParams.windowLog) >> 10); ZSTD_ldm_adjustParameters(¶ms.ldmParams, ¶ms.cParams); assert(params.ldmParams.hashLog >= params.ldmParams.bucketSizeLog); assert(params.ldmParams.hashRateLog < 32); serialState->ldmState.hashPower = ZSTD_rollingHash_primePower(params.ldmParams.minMatchLength); } else { memset(¶ms.ldmParams, 0, sizeof(params.ldmParams)); } serialState->nextJobID = 0; if (params.fParams.checksumFlag) XXH64_reset(&serialState->xxhState, 0); if (params.ldmParams.enableLdm) { ZSTD_customMem cMem = params.customMem; unsigned const hashLog = params.ldmParams.hashLog; size_t const hashSize = ((size_t)1 << hashLog) * sizeof(ldmEntry_t); unsigned const bucketLog = params.ldmParams.hashLog - params.ldmParams.bucketSizeLog; size_t const bucketSize = (size_t)1 << bucketLog; unsigned const prevBucketLog = serialState->params.ldmParams.hashLog - serialState->params.ldmParams.bucketSizeLog; /* Size the seq pool tables */ ZSTDMT_setNbSeq(seqPool, ZSTD_ldm_getMaxNbSeq(params.ldmParams, jobSize)); /* Reset the window */ ZSTD_window_init(&serialState->ldmState.window); /* Resize tables and output space if necessary. */ if (serialState->ldmState.hashTable == NULL || serialState->params.ldmParams.hashLog < hashLog) { ZSTD_free(serialState->ldmState.hashTable, cMem); serialState->ldmState.hashTable = (ldmEntry_t*)ZSTD_malloc(hashSize, cMem); } if (serialState->ldmState.bucketOffsets == NULL || prevBucketLog < bucketLog) { ZSTD_free(serialState->ldmState.bucketOffsets, cMem); serialState->ldmState.bucketOffsets = (BYTE*)ZSTD_malloc(bucketSize, cMem); } if (!serialState->ldmState.hashTable || !serialState->ldmState.bucketOffsets) return 1; /* Zero the tables */ memset(serialState->ldmState.hashTable, 0, hashSize); memset(serialState->ldmState.bucketOffsets, 0, bucketSize); /* Update window state and fill hash table with dict */ serialState->ldmState.loadedDictEnd = 0; if (dictSize > 0) { if (dictContentType == ZSTD_dct_rawContent) { BYTE const* const dictEnd = (const BYTE*)dict + dictSize; ZSTD_window_update(&serialState->ldmState.window, dict, dictSize); ZSTD_ldm_fillHashTable(&serialState->ldmState, (const BYTE*)dict, dictEnd, ¶ms.ldmParams); serialState->ldmState.loadedDictEnd = params.forceWindow ? 0 : (U32)(dictEnd - serialState->ldmState.window.base); } else { /* don't even load anything */ } } /* Initialize serialState's copy of ldmWindow. 
*/ serialState->ldmWindow = serialState->ldmState.window; } serialState->params = params; serialState->params.jobSize = (U32)jobSize; return 0; } static int ZSTDMT_serialState_init(serialState_t* serialState) { int initError = 0; memset(serialState, 0, sizeof(*serialState)); initError |= ZSTD_pthread_mutex_init(&serialState->mutex, NULL); initError |= ZSTD_pthread_cond_init(&serialState->cond, NULL); initError |= ZSTD_pthread_mutex_init(&serialState->ldmWindowMutex, NULL); initError |= ZSTD_pthread_cond_init(&serialState->ldmWindowCond, NULL); return initError; } static void ZSTDMT_serialState_free(serialState_t* serialState) { ZSTD_customMem cMem = serialState->params.customMem; ZSTD_pthread_mutex_destroy(&serialState->mutex); ZSTD_pthread_cond_destroy(&serialState->cond); ZSTD_pthread_mutex_destroy(&serialState->ldmWindowMutex); ZSTD_pthread_cond_destroy(&serialState->ldmWindowCond); ZSTD_free(serialState->ldmState.hashTable, cMem); ZSTD_free(serialState->ldmState.bucketOffsets, cMem); } static void ZSTDMT_serialState_update(serialState_t* serialState, ZSTD_CCtx* jobCCtx, rawSeqStore_t seqStore, range_t src, unsigned jobID) { /* Wait for our turn */ ZSTD_PTHREAD_MUTEX_LOCK(&serialState->mutex); while (serialState->nextJobID < jobID) { DEBUGLOG(5, "wait for serialState->cond"); ZSTD_pthread_cond_wait(&serialState->cond, &serialState->mutex); } /* A future job may error and skip our job */ if (serialState->nextJobID == jobID) { /* It is now our turn, do any processing necessary */ if (serialState->params.ldmParams.enableLdm) { size_t error; assert(seqStore.seq != NULL && seqStore.pos == 0 && seqStore.size == 0 && seqStore.capacity > 0); assert(src.size <= serialState->params.jobSize); ZSTD_window_update(&serialState->ldmState.window, src.start, src.size); error = ZSTD_ldm_generateSequences( &serialState->ldmState, &seqStore, &serialState->params.ldmParams, src.start, src.size); /* We provide a large enough buffer to never fail. */ assert(!ZSTD_isError(error)); (void)error; /* Update ldmWindow to match the ldmState.window and signal the main * thread if it is waiting for a buffer. 
*/ ZSTD_PTHREAD_MUTEX_LOCK(&serialState->ldmWindowMutex); serialState->ldmWindow = serialState->ldmState.window; ZSTD_pthread_cond_signal(&serialState->ldmWindowCond); ZSTD_pthread_mutex_unlock(&serialState->ldmWindowMutex); } if (serialState->params.fParams.checksumFlag && src.size > 0) XXH64_update(&serialState->xxhState, src.start, src.size); } /* Now it is the next jobs turn */ serialState->nextJobID++; ZSTD_pthread_cond_broadcast(&serialState->cond); ZSTD_pthread_mutex_unlock(&serialState->mutex); if (seqStore.size > 0) { size_t const err = ZSTD_referenceExternalSequences( jobCCtx, seqStore.seq, seqStore.size); assert(serialState->params.ldmParams.enableLdm); assert(!ZSTD_isError(err)); (void)err; } } static void ZSTDMT_serialState_ensureFinished(serialState_t* serialState, unsigned jobID, size_t cSize) { ZSTD_PTHREAD_MUTEX_LOCK(&serialState->mutex); if (serialState->nextJobID <= jobID) { assert(ZSTD_isError(cSize)); (void)cSize; DEBUGLOG(5, "Skipping past job %u because of error", jobID); serialState->nextJobID = jobID + 1; ZSTD_pthread_cond_broadcast(&serialState->cond); ZSTD_PTHREAD_MUTEX_LOCK(&serialState->ldmWindowMutex); ZSTD_window_clear(&serialState->ldmWindow); ZSTD_pthread_cond_signal(&serialState->ldmWindowCond); ZSTD_pthread_mutex_unlock(&serialState->ldmWindowMutex); } ZSTD_pthread_mutex_unlock(&serialState->mutex); } /* ------------------------------------------ */ /* ===== Worker thread ===== */ /* ------------------------------------------ */ static const range_t kNullRange = { NULL, 0 }; typedef struct { size_t consumed; /* SHARED - set0 by mtctx, then modified by worker AND read by mtctx */ size_t cSize; /* SHARED - set0 by mtctx, then modified by worker AND read by mtctx, then set0 by mtctx */ ZSTD_pthread_mutex_t job_mutex; /* Thread-safe - used by mtctx and worker */ ZSTD_pthread_cond_t job_cond; /* Thread-safe - used by mtctx and worker */ ZSTDMT_CCtxPool* cctxPool; /* Thread-safe - used by mtctx and (all) workers */ ZSTDMT_bufferPool* bufPool; /* Thread-safe - used by mtctx and (all) workers */ ZSTDMT_seqPool* seqPool; /* Thread-safe - used by mtctx and (all) workers */ serialState_t* serial; /* Thread-safe - used by mtctx and (all) workers */ buffer_t dstBuff; /* set by worker (or mtctx), then read by worker & mtctx, then modified by mtctx => no barrier */ range_t prefix; /* set by mtctx, then read by worker & mtctx => no barrier */ range_t src; /* set by mtctx, then read by worker & mtctx => no barrier */ unsigned jobID; /* set by mtctx, then read by worker => no barrier */ unsigned firstJob; /* set by mtctx, then read by worker => no barrier */ unsigned lastJob; /* set by mtctx, then read by worker => no barrier */ ZSTD_CCtx_params params; /* set by mtctx, then read by worker => no barrier */ const ZSTD_CDict* cdict; /* set by mtctx, then read by worker => no barrier */ unsigned long long fullFrameSize; /* set by mtctx, then read by worker => no barrier */ size_t dstFlushed; /* used only by mtctx */ unsigned frameChecksumNeeded; /* used only by mtctx */ } ZSTDMT_jobDescription; #define JOB_ERROR(e) { \ ZSTD_PTHREAD_MUTEX_LOCK(&job->job_mutex); \ job->cSize = e; \ ZSTD_pthread_mutex_unlock(&job->job_mutex); \ goto _endJob; \ } /* ZSTDMT_compressionJob() is a POOL_function type */ static void ZSTDMT_compressionJob(void* jobDescription) { ZSTDMT_jobDescription* const job = (ZSTDMT_jobDescription*)jobDescription; ZSTD_CCtx_params jobParams = job->params; /* do not modify job->params ! 
copy it, modify the copy */ ZSTD_CCtx* const cctx = ZSTDMT_getCCtx(job->cctxPool); rawSeqStore_t rawSeqStore = ZSTDMT_getSeq(job->seqPool); buffer_t dstBuff = job->dstBuff; size_t lastCBlockSize = 0; /* resources */ if (cctx==NULL) JOB_ERROR(ERROR(memory_allocation)); if (dstBuff.start == NULL) { /* streaming job : doesn't provide a dstBuffer */ dstBuff = ZSTDMT_getBuffer(job->bufPool); if (dstBuff.start==NULL) JOB_ERROR(ERROR(memory_allocation)); job->dstBuff = dstBuff; /* this value can be read in ZSTDMT_flush, when it copies the whole job */ } if (jobParams.ldmParams.enableLdm && rawSeqStore.seq == NULL) JOB_ERROR(ERROR(memory_allocation)); /* Don't compute the checksum for chunks, since we compute it externally, * but write it in the header. */ if (job->jobID != 0) jobParams.fParams.checksumFlag = 0; /* Don't run LDM for the chunks, since we handle it externally */ jobParams.ldmParams.enableLdm = 0; /* init */ if (job->cdict) { size_t const initError = ZSTD_compressBegin_advanced_internal(cctx, NULL, 0, ZSTD_dct_auto, ZSTD_dtlm_fast, job->cdict, &jobParams, job->fullFrameSize); assert(job->firstJob); /* only allowed for first job */ if (ZSTD_isError(initError)) JOB_ERROR(initError); } else { /* srcStart points at reloaded section */ U64 const pledgedSrcSize = job->firstJob ? job->fullFrameSize : job->src.size; { size_t const forceWindowError = ZSTD_CCtxParams_setParameter(&jobParams, ZSTD_c_forceMaxWindow, !job->firstJob); if (ZSTD_isError(forceWindowError)) JOB_ERROR(forceWindowError); } { size_t const initError = ZSTD_compressBegin_advanced_internal(cctx, job->prefix.start, job->prefix.size, ZSTD_dct_rawContent, /* load dictionary in "content-only" mode (no header analysis) */ ZSTD_dtlm_fast, NULL, /*cdict*/ &jobParams, pledgedSrcSize); if (ZSTD_isError(initError)) JOB_ERROR(initError); } } /* Perform serial step as early as possible, but after CCtx initialization */ ZSTDMT_serialState_update(job->serial, cctx, rawSeqStore, job->src, job->jobID); if (!job->firstJob) { /* flush and overwrite frame header when it's not first job */ size_t const hSize = ZSTD_compressContinue(cctx, dstBuff.start, dstBuff.capacity, job->src.start, 0); if (ZSTD_isError(hSize)) JOB_ERROR(hSize); DEBUGLOG(5, "ZSTDMT_compressionJob: flush and overwrite %u bytes of frame header (not first job)", (U32)hSize); ZSTD_invalidateRepCodes(cctx); } /* compress */ { size_t const chunkSize = 4*ZSTD_BLOCKSIZE_MAX; int const nbChunks = (int)((job->src.size + (chunkSize-1)) / chunkSize); const BYTE* ip = (const BYTE*) job->src.start; BYTE* const ostart = (BYTE*)dstBuff.start; BYTE* op = ostart; BYTE* oend = op + dstBuff.capacity; int chunkNb; if (sizeof(size_t) > sizeof(int)) assert(job->src.size < ((size_t)INT_MAX) * chunkSize); /* check overflow */ DEBUGLOG(5, "ZSTDMT_compressionJob: compress %u bytes in %i blocks", (U32)job->src.size, nbChunks); assert(job->cSize == 0); for (chunkNb = 1; chunkNb < nbChunks; chunkNb++) { size_t const cSize = ZSTD_compressContinue(cctx, op, oend-op, ip, chunkSize); if (ZSTD_isError(cSize)) JOB_ERROR(cSize); ip += chunkSize; op += cSize; assert(op < oend); /* stats */ ZSTD_PTHREAD_MUTEX_LOCK(&job->job_mutex); job->cSize += cSize; job->consumed = chunkSize * chunkNb; DEBUGLOG(5, "ZSTDMT_compressionJob: compress new block : cSize==%u bytes (total: %u)", (U32)cSize, (U32)job->cSize); ZSTD_pthread_cond_signal(&job->job_cond); /* warns some more data is ready to be flushed */ ZSTD_pthread_mutex_unlock(&job->job_mutex); } /* last block */ assert(chunkSize > 0); assert((chunkSize & (chunkSize - 
1)) == 0); /* chunkSize must be power of 2 for mask==(chunkSize-1) to work */ if ((nbChunks > 0) | job->lastJob /*must output a "last block" flag*/ ) { size_t const lastBlockSize1 = job->src.size & (chunkSize-1); size_t const lastBlockSize = ((lastBlockSize1==0) & (job->src.size>=chunkSize)) ? chunkSize : lastBlockSize1; size_t const cSize = (job->lastJob) ? ZSTD_compressEnd (cctx, op, oend-op, ip, lastBlockSize) : ZSTD_compressContinue(cctx, op, oend-op, ip, lastBlockSize); if (ZSTD_isError(cSize)) JOB_ERROR(cSize); lastCBlockSize = cSize; } } _endJob: ZSTDMT_serialState_ensureFinished(job->serial, job->jobID, job->cSize); if (job->prefix.size > 0) DEBUGLOG(5, "Finished with prefix: %zx", (size_t)job->prefix.start); DEBUGLOG(5, "Finished with source: %zx", (size_t)job->src.start); /* release resources */ ZSTDMT_releaseSeq(job->seqPool, rawSeqStore); ZSTDMT_releaseCCtx(job->cctxPool, cctx); /* report */ ZSTD_PTHREAD_MUTEX_LOCK(&job->job_mutex); if (ZSTD_isError(job->cSize)) assert(lastCBlockSize == 0); job->cSize += lastCBlockSize; job->consumed = job->src.size; /* when job->consumed == job->src.size , compression job is presumed completed */ ZSTD_pthread_cond_signal(&job->job_cond); ZSTD_pthread_mutex_unlock(&job->job_mutex); } /* ------------------------------------------ */ /* ===== Multi-threaded compression ===== */ /* ------------------------------------------ */ typedef struct { range_t prefix; /* read-only non-owned prefix buffer */ buffer_t buffer; size_t filled; } inBuff_t; typedef struct { BYTE* buffer; /* The round input buffer. All jobs get references * to pieces of the buffer. ZSTDMT_tryGetInputRange() * handles handing out job input buffers, and makes * sure it doesn't overlap with any pieces still in use. */ size_t capacity; /* The capacity of buffer. */ size_t pos; /* The position of the current inBuff in the round * buffer. Updated past the end if the inBuff once * the inBuff is sent to the worker thread. * pos <= capacity. */ } roundBuff_t; static const roundBuff_t kNullRoundBuff = {NULL, 0, 0}; #define RSYNC_LENGTH 32 typedef struct { U64 hash; U64 hitMask; U64 primePower; } rsyncState_t; struct ZSTDMT_CCtx_s { POOL_ctx* factory; ZSTDMT_jobDescription* jobs; ZSTDMT_bufferPool* bufPool; ZSTDMT_CCtxPool* cctxPool; ZSTDMT_seqPool* seqPool; ZSTD_CCtx_params params; size_t targetSectionSize; size_t targetPrefixSize; int jobReady; /* 1 => one job is already prepared, but pool has shortage of workers. Don't create a new job. 
*/ inBuff_t inBuff; roundBuff_t roundBuff; serialState_t serial; rsyncState_t rsync; unsigned singleBlockingThread; unsigned jobIDMask; unsigned doneJobID; unsigned nextJobID; unsigned frameEnded; unsigned allJobsCompleted; unsigned long long frameContentSize; unsigned long long consumed; unsigned long long produced; ZSTD_customMem cMem; ZSTD_CDict* cdictLocal; const ZSTD_CDict* cdict; }; static void ZSTDMT_freeJobsTable(ZSTDMT_jobDescription* jobTable, U32 nbJobs, ZSTD_customMem cMem) { U32 jobNb; if (jobTable == NULL) return; for (jobNb=0; jobNb mtctx->jobIDMask+1) { /* need more job capacity */ ZSTDMT_freeJobsTable(mtctx->jobs, mtctx->jobIDMask+1, mtctx->cMem); mtctx->jobIDMask = 0; mtctx->jobs = ZSTDMT_createJobsTable(&nbJobs, mtctx->cMem); if (mtctx->jobs==NULL) return ERROR(memory_allocation); assert((nbJobs != 0) && ((nbJobs & (nbJobs - 1)) == 0)); /* ensure nbJobs is a power of 2 */ mtctx->jobIDMask = nbJobs - 1; } return 0; } /* ZSTDMT_CCtxParam_setNbWorkers(): * Internal use only */ size_t ZSTDMT_CCtxParam_setNbWorkers(ZSTD_CCtx_params* params, unsigned nbWorkers) { return ZSTD_CCtxParams_setParameter(params, ZSTD_c_nbWorkers, (int)nbWorkers); } MEM_STATIC ZSTDMT_CCtx* ZSTDMT_createCCtx_advanced_internal(unsigned nbWorkers, ZSTD_customMem cMem) { ZSTDMT_CCtx* mtctx; U32 nbJobs = nbWorkers + 2; int initError; DEBUGLOG(3, "ZSTDMT_createCCtx_advanced (nbWorkers = %u)", nbWorkers); if (nbWorkers < 1) return NULL; nbWorkers = MIN(nbWorkers , ZSTDMT_NBWORKERS_MAX); if ((cMem.customAlloc!=NULL) ^ (cMem.customFree!=NULL)) /* invalid custom allocator */ return NULL; mtctx = (ZSTDMT_CCtx*) ZSTD_calloc(sizeof(ZSTDMT_CCtx), cMem); if (!mtctx) return NULL; ZSTDMT_CCtxParam_setNbWorkers(&mtctx->params, nbWorkers); mtctx->cMem = cMem; mtctx->allJobsCompleted = 1; mtctx->factory = POOL_create_advanced(nbWorkers, 0, cMem); mtctx->jobs = ZSTDMT_createJobsTable(&nbJobs, cMem); assert(nbJobs > 0); assert((nbJobs & (nbJobs - 1)) == 0); /* ensure nbJobs is a power of 2 */ mtctx->jobIDMask = nbJobs - 1; mtctx->bufPool = ZSTDMT_createBufferPool(nbWorkers, cMem); mtctx->cctxPool = ZSTDMT_createCCtxPool(nbWorkers, cMem); mtctx->seqPool = ZSTDMT_createSeqPool(nbWorkers, cMem); initError = ZSTDMT_serialState_init(&mtctx->serial); mtctx->roundBuff = kNullRoundBuff; if (!mtctx->factory | !mtctx->jobs | !mtctx->bufPool | !mtctx->cctxPool | !mtctx->seqPool | initError) { ZSTDMT_freeCCtx(mtctx); return NULL; } DEBUGLOG(3, "mt_cctx created, for %u threads", nbWorkers); return mtctx; } ZSTDMT_CCtx* ZSTDMT_createCCtx_advanced(unsigned nbWorkers, ZSTD_customMem cMem) { #ifdef ZSTD_MULTITHREAD return ZSTDMT_createCCtx_advanced_internal(nbWorkers, cMem); #else (void)nbWorkers; (void)cMem; return NULL; #endif } ZSTDMT_CCtx* ZSTDMT_createCCtx(unsigned nbWorkers) { return ZSTDMT_createCCtx_advanced(nbWorkers, ZSTD_defaultCMem); } /* ZSTDMT_releaseAllJobResources() : * note : ensure all workers are killed first ! 
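 *
 *  A hedged usage sketch, mirroring the teardown order used by ZSTDMT_freeCCtx()
 *  below:
 *
 *    POOL_free(mtctx->factory);              stop and join all worker threads first
 *    ZSTDMT_releaseAllJobResources(mtctx);   only then recycle per-job buffers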
*/ static void ZSTDMT_releaseAllJobResources(ZSTDMT_CCtx* mtctx) { unsigned jobID; DEBUGLOG(3, "ZSTDMT_releaseAllJobResources"); for (jobID=0; jobID <= mtctx->jobIDMask; jobID++) { /* Copy the mutex/cond out */ ZSTD_pthread_mutex_t const mutex = mtctx->jobs[jobID].job_mutex; ZSTD_pthread_cond_t const cond = mtctx->jobs[jobID].job_cond; DEBUGLOG(4, "job%02u: release dst address %08X", jobID, (U32)(size_t)mtctx->jobs[jobID].dstBuff.start); ZSTDMT_releaseBuffer(mtctx->bufPool, mtctx->jobs[jobID].dstBuff); /* Clear the job description, but keep the mutex/cond */ memset(&mtctx->jobs[jobID], 0, sizeof(mtctx->jobs[jobID])); mtctx->jobs[jobID].job_mutex = mutex; mtctx->jobs[jobID].job_cond = cond; } mtctx->inBuff.buffer = g_nullBuffer; mtctx->inBuff.filled = 0; mtctx->allJobsCompleted = 1; } static void ZSTDMT_waitForAllJobsCompleted(ZSTDMT_CCtx* mtctx) { DEBUGLOG(4, "ZSTDMT_waitForAllJobsCompleted"); while (mtctx->doneJobID < mtctx->nextJobID) { unsigned const jobID = mtctx->doneJobID & mtctx->jobIDMask; ZSTD_PTHREAD_MUTEX_LOCK(&mtctx->jobs[jobID].job_mutex); while (mtctx->jobs[jobID].consumed < mtctx->jobs[jobID].src.size) { DEBUGLOG(4, "waiting for jobCompleted signal from job %u", mtctx->doneJobID); /* we want to block when waiting for data to flush */ ZSTD_pthread_cond_wait(&mtctx->jobs[jobID].job_cond, &mtctx->jobs[jobID].job_mutex); } ZSTD_pthread_mutex_unlock(&mtctx->jobs[jobID].job_mutex); mtctx->doneJobID++; } } size_t ZSTDMT_freeCCtx(ZSTDMT_CCtx* mtctx) { if (mtctx==NULL) return 0; /* compatible with free on NULL */ POOL_free(mtctx->factory); /* stop and free worker threads */ ZSTDMT_releaseAllJobResources(mtctx); /* release job resources into pools first */ ZSTDMT_freeJobsTable(mtctx->jobs, mtctx->jobIDMask+1, mtctx->cMem); ZSTDMT_freeBufferPool(mtctx->bufPool); ZSTDMT_freeCCtxPool(mtctx->cctxPool); ZSTDMT_freeSeqPool(mtctx->seqPool); ZSTDMT_serialState_free(&mtctx->serial); ZSTD_freeCDict(mtctx->cdictLocal); if (mtctx->roundBuff.buffer) ZSTD_free(mtctx->roundBuff.buffer, mtctx->cMem); ZSTD_free(mtctx, mtctx->cMem); return 0; } size_t ZSTDMT_sizeof_CCtx(ZSTDMT_CCtx* mtctx) { if (mtctx == NULL) return 0; /* supports sizeof NULL */ return sizeof(*mtctx) + POOL_sizeof(mtctx->factory) + ZSTDMT_sizeof_bufferPool(mtctx->bufPool) + (mtctx->jobIDMask+1) * sizeof(ZSTDMT_jobDescription) + ZSTDMT_sizeof_CCtxPool(mtctx->cctxPool) + ZSTDMT_sizeof_seqPool(mtctx->seqPool) + ZSTD_sizeof_CDict(mtctx->cdictLocal) + mtctx->roundBuff.capacity; } /* Internal only */ size_t ZSTDMT_CCtxParam_setMTCtxParameter(ZSTD_CCtx_params* params, ZSTDMT_parameter parameter, int value) { DEBUGLOG(4, "ZSTDMT_CCtxParam_setMTCtxParameter"); switch(parameter) { case ZSTDMT_p_jobSize : DEBUGLOG(4, "ZSTDMT_CCtxParam_setMTCtxParameter : set jobSize to %i", value); return ZSTD_CCtxParams_setParameter(params, ZSTD_c_jobSize, value); case ZSTDMT_p_overlapLog : DEBUGLOG(4, "ZSTDMT_p_overlapLog : %i", value); return ZSTD_CCtxParams_setParameter(params, ZSTD_c_overlapLog, value); case ZSTDMT_p_rsyncable : DEBUGLOG(4, "ZSTD_p_rsyncable : %i", value); return ZSTD_CCtxParams_setParameter(params, ZSTD_c_rsyncable, value); default : return ERROR(parameter_unsupported); } } size_t ZSTDMT_setMTCtxParameter(ZSTDMT_CCtx* mtctx, ZSTDMT_parameter parameter, int value) { DEBUGLOG(4, "ZSTDMT_setMTCtxParameter"); return ZSTDMT_CCtxParam_setMTCtxParameter(&mtctx->params, parameter, value); } size_t ZSTDMT_getMTCtxParameter(ZSTDMT_CCtx* mtctx, ZSTDMT_parameter parameter, int* value) { switch (parameter) { case ZSTDMT_p_jobSize: return 
ZSTD_CCtxParams_getParameter(&mtctx->params, ZSTD_c_jobSize, value); case ZSTDMT_p_overlapLog: return ZSTD_CCtxParams_getParameter(&mtctx->params, ZSTD_c_overlapLog, value); case ZSTDMT_p_rsyncable: return ZSTD_CCtxParams_getParameter(&mtctx->params, ZSTD_c_rsyncable, value); default: return ERROR(parameter_unsupported); } } /* Sets parameters relevant to the compression job, * initializing others to default values. */ static ZSTD_CCtx_params ZSTDMT_initJobCCtxParams(const ZSTD_CCtx_params* params) { ZSTD_CCtx_params jobParams = *params; /* Clear parameters related to multithreading */ jobParams.forceWindow = 0; jobParams.nbWorkers = 0; jobParams.jobSize = 0; jobParams.overlapLog = 0; jobParams.rsyncable = 0; memset(&jobParams.ldmParams, 0, sizeof(ldmParams_t)); memset(&jobParams.customMem, 0, sizeof(ZSTD_customMem)); return jobParams; } /* ZSTDMT_resize() : * @return : error code if fails, 0 on success */ static size_t ZSTDMT_resize(ZSTDMT_CCtx* mtctx, unsigned nbWorkers) { if (POOL_resize(mtctx->factory, nbWorkers)) return ERROR(memory_allocation); FORWARD_IF_ERROR( ZSTDMT_expandJobsTable(mtctx, nbWorkers) , ""); mtctx->bufPool = ZSTDMT_expandBufferPool(mtctx->bufPool, nbWorkers); if (mtctx->bufPool == NULL) return ERROR(memory_allocation); mtctx->cctxPool = ZSTDMT_expandCCtxPool(mtctx->cctxPool, nbWorkers); if (mtctx->cctxPool == NULL) return ERROR(memory_allocation); mtctx->seqPool = ZSTDMT_expandSeqPool(mtctx->seqPool, nbWorkers); if (mtctx->seqPool == NULL) return ERROR(memory_allocation); ZSTDMT_CCtxParam_setNbWorkers(&mtctx->params, nbWorkers); return 0; } /*! ZSTDMT_updateCParams_whileCompressing() : * Updates a selected set of compression parameters, remaining compatible with currently active frame. * New parameters will be applied to next compression job. */ void ZSTDMT_updateCParams_whileCompressing(ZSTDMT_CCtx* mtctx, const ZSTD_CCtx_params* cctxParams) { U32 const saved_wlog = mtctx->params.cParams.windowLog; /* Do not modify windowLog while compressing */ int const compressionLevel = cctxParams->compressionLevel; DEBUGLOG(5, "ZSTDMT_updateCParams_whileCompressing (level:%i)", compressionLevel); mtctx->params.compressionLevel = compressionLevel; { ZSTD_compressionParameters cParams = ZSTD_getCParamsFromCCtxParams(cctxParams, ZSTD_CONTENTSIZE_UNKNOWN, 0); cParams.windowLog = saved_wlog; mtctx->params.cParams = cParams; } } /* ZSTDMT_getFrameProgression(): * tells how much data has been consumed (input) and produced (output) for current frame. * able to count progression inside worker threads. * Note : mutex will be acquired during statistics collection inside workers. */ ZSTD_frameProgression ZSTDMT_getFrameProgression(ZSTDMT_CCtx* mtctx) { ZSTD_frameProgression fps; DEBUGLOG(5, "ZSTDMT_getFrameProgression"); fps.ingested = mtctx->consumed + mtctx->inBuff.filled; fps.consumed = mtctx->consumed; fps.produced = fps.flushed = mtctx->produced; fps.currentJobID = mtctx->nextJobID; fps.nbActiveWorkers = 0; { unsigned jobNb; unsigned lastJobNb = mtctx->nextJobID + mtctx->jobReady; assert(mtctx->jobReady <= 1); DEBUGLOG(6, "ZSTDMT_getFrameProgression: jobs: from %u to <%u (jobReady:%u)", mtctx->doneJobID, lastJobNb, mtctx->jobReady) for (jobNb = mtctx->doneJobID ; jobNb < lastJobNb ; jobNb++) { unsigned const wJobID = jobNb & mtctx->jobIDMask; ZSTDMT_jobDescription* jobPtr = &mtctx->jobs[wJobID]; ZSTD_pthread_mutex_lock(&jobPtr->job_mutex); { size_t const cResult = jobPtr->cSize; size_t const produced = ZSTD_isError(cResult) ? 
0 : cResult; size_t const flushed = ZSTD_isError(cResult) ? 0 : jobPtr->dstFlushed; assert(flushed <= produced); fps.ingested += jobPtr->src.size; fps.consumed += jobPtr->consumed; fps.produced += produced; fps.flushed += flushed; fps.nbActiveWorkers += (jobPtr->consumed < jobPtr->src.size); } ZSTD_pthread_mutex_unlock(&mtctx->jobs[wJobID].job_mutex); } } return fps; } size_t ZSTDMT_toFlushNow(ZSTDMT_CCtx* mtctx) { size_t toFlush; unsigned const jobID = mtctx->doneJobID; assert(jobID <= mtctx->nextJobID); if (jobID == mtctx->nextJobID) return 0; /* no active job => nothing to flush */ /* look into oldest non-fully-flushed job */ { unsigned const wJobID = jobID & mtctx->jobIDMask; ZSTDMT_jobDescription* const jobPtr = &mtctx->jobs[wJobID]; ZSTD_pthread_mutex_lock(&jobPtr->job_mutex); { size_t const cResult = jobPtr->cSize; size_t const produced = ZSTD_isError(cResult) ? 0 : cResult; size_t const flushed = ZSTD_isError(cResult) ? 0 : jobPtr->dstFlushed; assert(flushed <= produced); assert(jobPtr->consumed <= jobPtr->src.size); toFlush = produced - flushed; /* if toFlush==0, nothing is available to flush. * However, jobID is expected to still be active: * if jobID was already completed and fully flushed, * ZSTDMT_flushProduced() should have already moved onto next job. * Therefore, some input has not yet been consumed. */ if (toFlush==0) { assert(jobPtr->consumed < jobPtr->src.size); } } ZSTD_pthread_mutex_unlock(&mtctx->jobs[wJobID].job_mutex); } return toFlush; } /* ------------------------------------------ */ /* ===== Multi-threaded compression ===== */ /* ------------------------------------------ */ static unsigned ZSTDMT_computeTargetJobLog(const ZSTD_CCtx_params* params) { unsigned jobLog; if (params->ldmParams.enableLdm) { /* In Long Range Mode, the windowLog is typically oversized. * In which case, it's preferable to determine the jobSize * based on chainLog instead. */ jobLog = MAX(21, params->cParams.chainLog + 4); } else { jobLog = MAX(20, params->cParams.windowLog + 2); } return MIN(jobLog, (unsigned)ZSTDMT_JOBLOG_MAX); } static int ZSTDMT_overlapLog_default(ZSTD_strategy strat) { switch(strat) { case ZSTD_btultra2: return 9; case ZSTD_btultra: case ZSTD_btopt: return 8; case ZSTD_btlazy2: case ZSTD_lazy2: return 7; case ZSTD_lazy: case ZSTD_greedy: case ZSTD_dfast: case ZSTD_fast: default:; } return 6; } static int ZSTDMT_overlapLog(int ovlog, ZSTD_strategy strat) { assert(0 <= ovlog && ovlog <= 9); if (ovlog == 0) return ZSTDMT_overlapLog_default(strat); return ovlog; } static size_t ZSTDMT_computeOverlapSize(const ZSTD_CCtx_params* params) { int const overlapRLog = 9 - ZSTDMT_overlapLog(params->overlapLog, params->cParams.strategy); int ovLog = (overlapRLog >= 8) ? 0 : (params->cParams.windowLog - overlapRLog); assert(0 <= overlapRLog && overlapRLog <= 8); if (params->ldmParams.enableLdm) { /* In Long Range Mode, the windowLog is typically oversized. * In which case, it's preferable to determine the jobSize * based on chainLog instead. * Then, ovLog becomes a fraction of the jobSize, rather than windowSize */ ovLog = MIN(params->cParams.windowLog, ZSTDMT_computeTargetJobLog(params) - 2) - overlapRLog; } assert(0 <= ovLog && ovLog <= ZSTD_WINDOWLOG_MAX); DEBUGLOG(4, "overlapLog : %i", params->overlapLog); DEBUGLOG(4, "overlap size : %i", 1 << ovLog); return (ovLog==0) ? 
0 : (size_t)1 << ovLog; } static unsigned ZSTDMT_computeNbJobs(const ZSTD_CCtx_params* params, size_t srcSize, unsigned nbWorkers) { assert(nbWorkers>0); { size_t const jobSizeTarget = (size_t)1 << ZSTDMT_computeTargetJobLog(params); size_t const jobMaxSize = jobSizeTarget << 2; size_t const passSizeMax = jobMaxSize * nbWorkers; unsigned const multiplier = (unsigned)(srcSize / passSizeMax) + 1; unsigned const nbJobsLarge = multiplier * nbWorkers; unsigned const nbJobsMax = (unsigned)(srcSize / jobSizeTarget) + 1; unsigned const nbJobsSmall = MIN(nbJobsMax, nbWorkers); return (multiplier>1) ? nbJobsLarge : nbJobsSmall; } } /* ZSTDMT_compress_advanced_internal() : * This is a blocking function : it will only give back control to caller after finishing its compression job. */ static size_t ZSTDMT_compress_advanced_internal( ZSTDMT_CCtx* mtctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize, const ZSTD_CDict* cdict, ZSTD_CCtx_params params) { ZSTD_CCtx_params const jobParams = ZSTDMT_initJobCCtxParams(¶ms); size_t const overlapSize = ZSTDMT_computeOverlapSize(¶ms); unsigned const nbJobs = ZSTDMT_computeNbJobs(¶ms, srcSize, params.nbWorkers); size_t const proposedJobSize = (srcSize + (nbJobs-1)) / nbJobs; size_t const avgJobSize = (((proposedJobSize-1) & 0x1FFFF) < 0x7FFF) ? proposedJobSize + 0xFFFF : proposedJobSize; /* avoid too small last block */ const char* const srcStart = (const char*)src; size_t remainingSrcSize = srcSize; unsigned const compressWithinDst = (dstCapacity >= ZSTD_compressBound(srcSize)) ? nbJobs : (unsigned)(dstCapacity / ZSTD_compressBound(avgJobSize)); /* presumes avgJobSize >= 256 KB, which should be the case */ size_t frameStartPos = 0, dstBufferPos = 0; assert(jobParams.nbWorkers == 0); assert(mtctx->cctxPool->totalCCtx == params.nbWorkers); params.jobSize = (U32)avgJobSize; DEBUGLOG(4, "ZSTDMT_compress_advanced_internal: nbJobs=%2u (rawSize=%u bytes; fixedSize=%u) ", nbJobs, (U32)proposedJobSize, (U32)avgJobSize); if ((nbJobs==1) | (params.nbWorkers<=1)) { /* fallback to single-thread mode : this is a blocking invocation anyway */ ZSTD_CCtx* const cctx = mtctx->cctxPool->cctx[0]; DEBUGLOG(4, "ZSTDMT_compress_advanced_internal: fallback to single-thread mode"); if (cdict) return ZSTD_compress_usingCDict_advanced(cctx, dst, dstCapacity, src, srcSize, cdict, jobParams.fParams); return ZSTD_compress_advanced_internal(cctx, dst, dstCapacity, src, srcSize, NULL, 0, &jobParams); } assert(avgJobSize >= 256 KB); /* condition for ZSTD_compressBound(A) + ZSTD_compressBound(B) <= ZSTD_compressBound(A+B), required to compress directly into Dst (no additional buffer) */ ZSTDMT_setBufferSize(mtctx->bufPool, ZSTD_compressBound(avgJobSize) ); /* LDM doesn't even try to load the dictionary in single-ingestion mode */ if (ZSTDMT_serialState_reset(&mtctx->serial, mtctx->seqPool, params, avgJobSize, NULL, 0, ZSTD_dct_auto)) return ERROR(memory_allocation); FORWARD_IF_ERROR( ZSTDMT_expandJobsTable(mtctx, nbJobs) , ""); /* only expands if necessary */ { unsigned u; for (u=0; ujobs[u].prefix.start = srcStart + frameStartPos - dictSize; mtctx->jobs[u].prefix.size = dictSize; mtctx->jobs[u].src.start = srcStart + frameStartPos; mtctx->jobs[u].src.size = jobSize; assert(jobSize > 0); /* avoid job.src.size == 0 */ mtctx->jobs[u].consumed = 0; mtctx->jobs[u].cSize = 0; mtctx->jobs[u].cdict = (u==0) ? 
cdict : NULL; mtctx->jobs[u].fullFrameSize = srcSize; mtctx->jobs[u].params = jobParams; /* do not calculate checksum within sections, but write it in header for first section */ mtctx->jobs[u].dstBuff = dstBuffer; mtctx->jobs[u].cctxPool = mtctx->cctxPool; mtctx->jobs[u].bufPool = mtctx->bufPool; mtctx->jobs[u].seqPool = mtctx->seqPool; mtctx->jobs[u].serial = &mtctx->serial; mtctx->jobs[u].jobID = u; mtctx->jobs[u].firstJob = (u==0); mtctx->jobs[u].lastJob = (u==nbJobs-1); DEBUGLOG(5, "ZSTDMT_compress_advanced_internal: posting job %u (%u bytes)", u, (U32)jobSize); DEBUG_PRINTHEX(6, mtctx->jobs[u].prefix.start, 12); POOL_add(mtctx->factory, ZSTDMT_compressionJob, &mtctx->jobs[u]); frameStartPos += jobSize; dstBufferPos += dstBufferCapacity; remainingSrcSize -= jobSize; } } /* collect result */ { size_t error = 0, dstPos = 0; unsigned jobID; for (jobID=0; jobIDjobs[jobID].job_mutex); while (mtctx->jobs[jobID].consumed < mtctx->jobs[jobID].src.size) { DEBUGLOG(5, "waiting for jobCompleted signal from job %u", jobID); ZSTD_pthread_cond_wait(&mtctx->jobs[jobID].job_cond, &mtctx->jobs[jobID].job_mutex); } ZSTD_pthread_mutex_unlock(&mtctx->jobs[jobID].job_mutex); DEBUGLOG(5, "ready to write job %u ", jobID); { size_t const cSize = mtctx->jobs[jobID].cSize; if (ZSTD_isError(cSize)) error = cSize; if ((!error) && (dstPos + cSize > dstCapacity)) error = ERROR(dstSize_tooSmall); if (jobID) { /* note : job 0 is written directly at dst, which is correct position */ if (!error) memmove((char*)dst + dstPos, mtctx->jobs[jobID].dstBuff.start, cSize); /* may overlap when job compressed within dst */ if (jobID >= compressWithinDst) { /* job compressed into its own buffer, which must be released */ DEBUGLOG(5, "releasing buffer %u>=%u", jobID, compressWithinDst); ZSTDMT_releaseBuffer(mtctx->bufPool, mtctx->jobs[jobID].dstBuff); } } mtctx->jobs[jobID].dstBuff = g_nullBuffer; mtctx->jobs[jobID].cSize = 0; dstPos += cSize ; } } /* for (jobID=0; jobIDserial.xxhState); if (dstPos + 4 > dstCapacity) { error = ERROR(dstSize_tooSmall); } else { DEBUGLOG(4, "writing checksum : %08X \n", checksum); MEM_writeLE32((char*)dst + dstPos, checksum); dstPos += 4; } } if (!error) DEBUGLOG(4, "compressed size : %u ", (U32)dstPos); return error ? 
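/* on success, dstPos is the total number of bytes written into dst for the whole frame,
 * including the 4-byte frame checksum when params.fParams.checksumFlag is set */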
error : dstPos; } } size_t ZSTDMT_compress_advanced(ZSTDMT_CCtx* mtctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize, const ZSTD_CDict* cdict, ZSTD_parameters params, int overlapLog) { ZSTD_CCtx_params cctxParams = mtctx->params; cctxParams.cParams = params.cParams; cctxParams.fParams = params.fParams; assert(ZSTD_OVERLAPLOG_MIN <= overlapLog && overlapLog <= ZSTD_OVERLAPLOG_MAX); cctxParams.overlapLog = overlapLog; return ZSTDMT_compress_advanced_internal(mtctx, dst, dstCapacity, src, srcSize, cdict, cctxParams); } size_t ZSTDMT_compressCCtx(ZSTDMT_CCtx* mtctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize, int compressionLevel) { ZSTD_parameters params = ZSTD_getParams(compressionLevel, srcSize, 0); int const overlapLog = ZSTDMT_overlapLog_default(params.cParams.strategy); params.fParams.contentSizeFlag = 1; return ZSTDMT_compress_advanced(mtctx, dst, dstCapacity, src, srcSize, NULL, params, overlapLog); } /* ====================================== */ /* ======= Streaming API ======= */ /* ====================================== */ size_t ZSTDMT_initCStream_internal( ZSTDMT_CCtx* mtctx, const void* dict, size_t dictSize, ZSTD_dictContentType_e dictContentType, const ZSTD_CDict* cdict, ZSTD_CCtx_params params, unsigned long long pledgedSrcSize) { DEBUGLOG(4, "ZSTDMT_initCStream_internal (pledgedSrcSize=%u, nbWorkers=%u, cctxPool=%u)", (U32)pledgedSrcSize, params.nbWorkers, mtctx->cctxPool->totalCCtx); /* params supposed partially fully validated at this point */ assert(!ZSTD_isError(ZSTD_checkCParams(params.cParams))); assert(!((dict) && (cdict))); /* either dict or cdict, not both */ /* init */ if (params.nbWorkers != mtctx->params.nbWorkers) FORWARD_IF_ERROR( ZSTDMT_resize(mtctx, params.nbWorkers) , ""); if (params.jobSize != 0 && params.jobSize < ZSTDMT_JOBSIZE_MIN) params.jobSize = ZSTDMT_JOBSIZE_MIN; if (params.jobSize > (size_t)ZSTDMT_JOBSIZE_MAX) params.jobSize = (size_t)ZSTDMT_JOBSIZE_MAX; mtctx->singleBlockingThread = (pledgedSrcSize <= ZSTDMT_JOBSIZE_MIN); /* do not trigger multi-threading when srcSize is too small */ if (mtctx->singleBlockingThread) { ZSTD_CCtx_params const singleThreadParams = ZSTDMT_initJobCCtxParams(¶ms); DEBUGLOG(5, "ZSTDMT_initCStream_internal: switch to single blocking thread mode"); assert(singleThreadParams.nbWorkers == 0); return ZSTD_initCStream_internal(mtctx->cctxPool->cctx[0], dict, dictSize, cdict, &singleThreadParams, pledgedSrcSize); } DEBUGLOG(4, "ZSTDMT_initCStream_internal: %u workers", params.nbWorkers); if (mtctx->allJobsCompleted == 0) { /* previous compression not correctly finished */ ZSTDMT_waitForAllJobsCompleted(mtctx); ZSTDMT_releaseAllJobResources(mtctx); mtctx->allJobsCompleted = 1; } mtctx->params = params; mtctx->frameContentSize = pledgedSrcSize; if (dict) { ZSTD_freeCDict(mtctx->cdictLocal); mtctx->cdictLocal = ZSTD_createCDict_advanced(dict, dictSize, ZSTD_dlm_byCopy, dictContentType, /* note : a loadPrefix becomes an internal CDict */ params.cParams, mtctx->cMem); mtctx->cdict = mtctx->cdictLocal; if (mtctx->cdictLocal == NULL) return ERROR(memory_allocation); } else { ZSTD_freeCDict(mtctx->cdictLocal); mtctx->cdictLocal = NULL; mtctx->cdict = cdict; } mtctx->targetPrefixSize = ZSTDMT_computeOverlapSize(¶ms); DEBUGLOG(4, "overlapLog=%i => %u KB", params.overlapLog, (U32)(mtctx->targetPrefixSize>>10)); mtctx->targetSectionSize = params.jobSize; if (mtctx->targetSectionSize == 0) { mtctx->targetSectionSize = 1ULL << ZSTDMT_computeTargetJobLog(¶ms); } assert(mtctx->targetSectionSize <= 
(size_t)ZSTDMT_JOBSIZE_MAX); if (params.rsyncable) { /* Aim for the targetsectionSize as the average job size. */ U32 const jobSizeMB = (U32)(mtctx->targetSectionSize >> 20); U32 const rsyncBits = ZSTD_highbit32(jobSizeMB) + 20; assert(jobSizeMB >= 1); DEBUGLOG(4, "rsyncLog = %u", rsyncBits); mtctx->rsync.hash = 0; mtctx->rsync.hitMask = (1ULL << rsyncBits) - 1; mtctx->rsync.primePower = ZSTD_rollingHash_primePower(RSYNC_LENGTH); } if (mtctx->targetSectionSize < mtctx->targetPrefixSize) mtctx->targetSectionSize = mtctx->targetPrefixSize; /* job size must be >= overlap size */ DEBUGLOG(4, "Job Size : %u KB (note : set to %u)", (U32)(mtctx->targetSectionSize>>10), (U32)params.jobSize); DEBUGLOG(4, "inBuff Size : %u KB", (U32)(mtctx->targetSectionSize>>10)); ZSTDMT_setBufferSize(mtctx->bufPool, ZSTD_compressBound(mtctx->targetSectionSize)); { /* If ldm is enabled we need windowSize space. */ size_t const windowSize = mtctx->params.ldmParams.enableLdm ? (1U << mtctx->params.cParams.windowLog) : 0; /* Two buffers of slack, plus extra space for the overlap * This is the minimum slack that LDM works with. One extra because * flush might waste up to targetSectionSize-1 bytes. Another extra * for the overlap (if > 0), then one to fill which doesn't overlap * with the LDM window. */ size_t const nbSlackBuffers = 2 + (mtctx->targetPrefixSize > 0); size_t const slackSize = mtctx->targetSectionSize * nbSlackBuffers; /* Compute the total size, and always have enough slack */ size_t const nbWorkers = MAX(mtctx->params.nbWorkers, 1); size_t const sectionsSize = mtctx->targetSectionSize * nbWorkers; size_t const capacity = MAX(windowSize, sectionsSize) + slackSize; if (mtctx->roundBuff.capacity < capacity) { if (mtctx->roundBuff.buffer) ZSTD_free(mtctx->roundBuff.buffer, mtctx->cMem); mtctx->roundBuff.buffer = (BYTE*)ZSTD_malloc(capacity, mtctx->cMem); if (mtctx->roundBuff.buffer == NULL) { mtctx->roundBuff.capacity = 0; return ERROR(memory_allocation); } mtctx->roundBuff.capacity = capacity; } } DEBUGLOG(4, "roundBuff capacity : %u KB", (U32)(mtctx->roundBuff.capacity>>10)); mtctx->roundBuff.pos = 0; mtctx->inBuff.buffer = g_nullBuffer; mtctx->inBuff.filled = 0; mtctx->inBuff.prefix = kNullRange; mtctx->doneJobID = 0; mtctx->nextJobID = 0; mtctx->frameEnded = 0; mtctx->allJobsCompleted = 0; mtctx->consumed = 0; mtctx->produced = 0; if (ZSTDMT_serialState_reset(&mtctx->serial, mtctx->seqPool, params, mtctx->targetSectionSize, dict, dictSize, dictContentType)) return ERROR(memory_allocation); return 0; } size_t ZSTDMT_initCStream_advanced(ZSTDMT_CCtx* mtctx, const void* dict, size_t dictSize, ZSTD_parameters params, unsigned long long pledgedSrcSize) { ZSTD_CCtx_params cctxParams = mtctx->params; /* retrieve sticky params */ DEBUGLOG(4, "ZSTDMT_initCStream_advanced (pledgedSrcSize=%u)", (U32)pledgedSrcSize); cctxParams.cParams = params.cParams; cctxParams.fParams = params.fParams; return ZSTDMT_initCStream_internal(mtctx, dict, dictSize, ZSTD_dct_auto, NULL, cctxParams, pledgedSrcSize); } size_t ZSTDMT_initCStream_usingCDict(ZSTDMT_CCtx* mtctx, const ZSTD_CDict* cdict, ZSTD_frameParameters fParams, unsigned long long pledgedSrcSize) { ZSTD_CCtx_params cctxParams = mtctx->params; if (cdict==NULL) return ERROR(dictionary_wrong); /* method incompatible with NULL cdict */ cctxParams.cParams = ZSTD_getCParamsFromCDict(cdict); cctxParams.fParams = fParams; return ZSTDMT_initCStream_internal(mtctx, NULL, 0 /*dictSize*/, ZSTD_dct_auto, cdict, cctxParams, pledgedSrcSize); } /* ZSTDMT_resetCStream() : * 
pledgedSrcSize can be zero == unknown (for the time being) * prefer using ZSTD_CONTENTSIZE_UNKNOWN, * as `0` might mean "empty" in the future */ size_t ZSTDMT_resetCStream(ZSTDMT_CCtx* mtctx, unsigned long long pledgedSrcSize) { if (!pledgedSrcSize) pledgedSrcSize = ZSTD_CONTENTSIZE_UNKNOWN; return ZSTDMT_initCStream_internal(mtctx, NULL, 0, ZSTD_dct_auto, 0, mtctx->params, pledgedSrcSize); } size_t ZSTDMT_initCStream(ZSTDMT_CCtx* mtctx, int compressionLevel) { ZSTD_parameters const params = ZSTD_getParams(compressionLevel, ZSTD_CONTENTSIZE_UNKNOWN, 0); ZSTD_CCtx_params cctxParams = mtctx->params; /* retrieve sticky params */ DEBUGLOG(4, "ZSTDMT_initCStream (cLevel=%i)", compressionLevel); cctxParams.cParams = params.cParams; cctxParams.fParams = params.fParams; return ZSTDMT_initCStream_internal(mtctx, NULL, 0, ZSTD_dct_auto, NULL, cctxParams, ZSTD_CONTENTSIZE_UNKNOWN); } /* ZSTDMT_writeLastEmptyBlock() * Write a single empty block with an end-of-frame to finish a frame. * Job must be created from streaming variant. * This function is always successful if expected conditions are fulfilled. */ static void ZSTDMT_writeLastEmptyBlock(ZSTDMT_jobDescription* job) { assert(job->lastJob == 1); assert(job->src.size == 0); /* last job is empty -> will be simplified into a last empty block */ assert(job->firstJob == 0); /* cannot be first job, as it also needs to create frame header */ assert(job->dstBuff.start == NULL); /* invoked from streaming variant only (otherwise, dstBuff might be user's output) */ job->dstBuff = ZSTDMT_getBuffer(job->bufPool); if (job->dstBuff.start == NULL) { job->cSize = ERROR(memory_allocation); return; } assert(job->dstBuff.capacity >= ZSTD_blockHeaderSize); /* no buffer should ever be that small */ job->src = kNullRange; job->cSize = ZSTD_writeLastEmptyBlock(job->dstBuff.start, job->dstBuff.capacity); assert(!ZSTD_isError(job->cSize)); assert(job->consumed == 0); } static size_t ZSTDMT_createCompressionJob(ZSTDMT_CCtx* mtctx, size_t srcSize, ZSTD_EndDirective endOp) { unsigned const jobID = mtctx->nextJobID & mtctx->jobIDMask; int const endFrame = (endOp == ZSTD_e_end); if (mtctx->nextJobID > mtctx->doneJobID + mtctx->jobIDMask) { DEBUGLOG(5, "ZSTDMT_createCompressionJob: will not create new job : table is full"); assert((mtctx->nextJobID & mtctx->jobIDMask) == (mtctx->doneJobID & mtctx->jobIDMask)); return 0; } if (!mtctx->jobReady) { BYTE const* src = (BYTE const*)mtctx->inBuff.buffer.start; DEBUGLOG(5, "ZSTDMT_createCompressionJob: preparing job %u to compress %u bytes with %u preload ", mtctx->nextJobID, (U32)srcSize, (U32)mtctx->inBuff.prefix.size); mtctx->jobs[jobID].src.start = src; mtctx->jobs[jobID].src.size = srcSize; assert(mtctx->inBuff.filled >= srcSize); mtctx->jobs[jobID].prefix = mtctx->inBuff.prefix; mtctx->jobs[jobID].consumed = 0; mtctx->jobs[jobID].cSize = 0; mtctx->jobs[jobID].params = mtctx->params; mtctx->jobs[jobID].cdict = mtctx->nextJobID==0 ? 
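/* only the very first job of a frame uses the dictionary ; later jobs rely on the
 * overlap prefix (job.prefix) captured from the tail of the previous input section */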
mtctx->cdict : NULL; mtctx->jobs[jobID].fullFrameSize = mtctx->frameContentSize; mtctx->jobs[jobID].dstBuff = g_nullBuffer; mtctx->jobs[jobID].cctxPool = mtctx->cctxPool; mtctx->jobs[jobID].bufPool = mtctx->bufPool; mtctx->jobs[jobID].seqPool = mtctx->seqPool; mtctx->jobs[jobID].serial = &mtctx->serial; mtctx->jobs[jobID].jobID = mtctx->nextJobID; mtctx->jobs[jobID].firstJob = (mtctx->nextJobID==0); mtctx->jobs[jobID].lastJob = endFrame; mtctx->jobs[jobID].frameChecksumNeeded = mtctx->params.fParams.checksumFlag && endFrame && (mtctx->nextJobID>0); mtctx->jobs[jobID].dstFlushed = 0; /* Update the round buffer pos and clear the input buffer to be reset */ mtctx->roundBuff.pos += srcSize; mtctx->inBuff.buffer = g_nullBuffer; mtctx->inBuff.filled = 0; /* Set the prefix */ if (!endFrame) { size_t const newPrefixSize = MIN(srcSize, mtctx->targetPrefixSize); mtctx->inBuff.prefix.start = src + srcSize - newPrefixSize; mtctx->inBuff.prefix.size = newPrefixSize; } else { /* endFrame==1 => no need for another input buffer */ mtctx->inBuff.prefix = kNullRange; mtctx->frameEnded = endFrame; if (mtctx->nextJobID == 0) { /* single job exception : checksum is already calculated directly within worker thread */ mtctx->params.fParams.checksumFlag = 0; } } if ( (srcSize == 0) && (mtctx->nextJobID>0)/*single job must also write frame header*/ ) { DEBUGLOG(5, "ZSTDMT_createCompressionJob: creating a last empty block to end frame"); assert(endOp == ZSTD_e_end); /* only possible case : need to end the frame with an empty last block */ ZSTDMT_writeLastEmptyBlock(mtctx->jobs + jobID); mtctx->nextJobID++; return 0; } } DEBUGLOG(5, "ZSTDMT_createCompressionJob: posting job %u : %u bytes (end:%u, jobNb == %u (mod:%u))", mtctx->nextJobID, (U32)mtctx->jobs[jobID].src.size, mtctx->jobs[jobID].lastJob, mtctx->nextJobID, jobID); if (POOL_tryAdd(mtctx->factory, ZSTDMT_compressionJob, &mtctx->jobs[jobID])) { mtctx->nextJobID++; mtctx->jobReady = 0; } else { DEBUGLOG(5, "ZSTDMT_createCompressionJob: no worker available for job %u", mtctx->nextJobID); mtctx->jobReady = 1; } return 0; } /*! ZSTDMT_flushProduced() : * flush whatever data has been produced but not yet flushed in current job. * move to next job if current one is fully flushed. * `output` : `pos` will be updated with amount of data flushed . * `blockToFlush` : if >0, the function will block and wait if there is no data available to flush . 
* @return : amount of data remaining within internal buffer, 0 if no more, 1 if unknown but > 0, or an error code */ static size_t ZSTDMT_flushProduced(ZSTDMT_CCtx* mtctx, ZSTD_outBuffer* output, unsigned blockToFlush, ZSTD_EndDirective end) { unsigned const wJobID = mtctx->doneJobID & mtctx->jobIDMask; DEBUGLOG(5, "ZSTDMT_flushProduced (blocking:%u , job %u <= %u)", blockToFlush, mtctx->doneJobID, mtctx->nextJobID); assert(output->size >= output->pos); ZSTD_PTHREAD_MUTEX_LOCK(&mtctx->jobs[wJobID].job_mutex); if ( blockToFlush && (mtctx->doneJobID < mtctx->nextJobID) ) { assert(mtctx->jobs[wJobID].dstFlushed <= mtctx->jobs[wJobID].cSize); while (mtctx->jobs[wJobID].dstFlushed == mtctx->jobs[wJobID].cSize) { /* nothing to flush */ if (mtctx->jobs[wJobID].consumed == mtctx->jobs[wJobID].src.size) { DEBUGLOG(5, "job %u is completely consumed (%u == %u) => don't wait for cond, there will be none", mtctx->doneJobID, (U32)mtctx->jobs[wJobID].consumed, (U32)mtctx->jobs[wJobID].src.size); break; } DEBUGLOG(5, "waiting for something to flush from job %u (currently flushed: %u bytes)", mtctx->doneJobID, (U32)mtctx->jobs[wJobID].dstFlushed); ZSTD_pthread_cond_wait(&mtctx->jobs[wJobID].job_cond, &mtctx->jobs[wJobID].job_mutex); /* block when nothing to flush but some to come */ } } /* try to flush something */ { size_t cSize = mtctx->jobs[wJobID].cSize; /* shared */ size_t const srcConsumed = mtctx->jobs[wJobID].consumed; /* shared */ size_t const srcSize = mtctx->jobs[wJobID].src.size; /* read-only, could be done after mutex lock, but no-declaration-after-statement */ ZSTD_pthread_mutex_unlock(&mtctx->jobs[wJobID].job_mutex); if (ZSTD_isError(cSize)) { DEBUGLOG(5, "ZSTDMT_flushProduced: job %u : compression error detected : %s", mtctx->doneJobID, ZSTD_getErrorName(cSize)); ZSTDMT_waitForAllJobsCompleted(mtctx); ZSTDMT_releaseAllJobResources(mtctx); return cSize; } /* add frame checksum if necessary (can only happen once) */ assert(srcConsumed <= srcSize); if ( (srcConsumed == srcSize) /* job completed -> worker no longer active */ && mtctx->jobs[wJobID].frameChecksumNeeded ) { U32 const checksum = (U32)XXH64_digest(&mtctx->serial.xxhState); DEBUGLOG(4, "ZSTDMT_flushProduced: writing checksum : %08X \n", checksum); MEM_writeLE32((char*)mtctx->jobs[wJobID].dstBuff.start + mtctx->jobs[wJobID].cSize, checksum); cSize += 4; mtctx->jobs[wJobID].cSize += 4; /* can write this shared value, as worker is no longer active */ mtctx->jobs[wJobID].frameChecksumNeeded = 0; } if (cSize > 0) { /* compression is ongoing or completed */ size_t const toFlush = MIN(cSize - mtctx->jobs[wJobID].dstFlushed, output->size - output->pos); DEBUGLOG(5, "ZSTDMT_flushProduced: Flushing %u bytes from job %u (completion:%u/%u, generated:%u)", (U32)toFlush, mtctx->doneJobID, (U32)srcConsumed, (U32)srcSize, (U32)cSize); assert(mtctx->doneJobID < mtctx->nextJobID); assert(cSize >= mtctx->jobs[wJobID].dstFlushed); assert(mtctx->jobs[wJobID].dstBuff.start != NULL); if (toFlush > 0) { memcpy((char*)output->dst + output->pos, (const char*)mtctx->jobs[wJobID].dstBuff.start + mtctx->jobs[wJobID].dstFlushed, toFlush); } output->pos += toFlush; mtctx->jobs[wJobID].dstFlushed += toFlush; /* can write : this value is only used by mtctx */ if ( (srcConsumed == srcSize) /* job is completed */ && (mtctx->jobs[wJobID].dstFlushed == cSize) ) { /* output buffer fully flushed => free this job position */ DEBUGLOG(5, "Job %u completed (%u bytes), moving to next one", mtctx->doneJobID, (U32)mtctx->jobs[wJobID].dstFlushed); 
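/* job slot fully flushed : release its dst buffer, clear its state, account the
 * consumed/produced totals and advance doneJobID so the slot can be reused */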
ZSTDMT_releaseBuffer(mtctx->bufPool, mtctx->jobs[wJobID].dstBuff); DEBUGLOG(5, "dstBuffer released"); mtctx->jobs[wJobID].dstBuff = g_nullBuffer; mtctx->jobs[wJobID].cSize = 0; /* ensure this job slot is considered "not started" in future check */ mtctx->consumed += srcSize; mtctx->produced += cSize; mtctx->doneJobID++; } } /* return value : how many bytes left in buffer ; fake it to 1 when unknown but >0 */ if (cSize > mtctx->jobs[wJobID].dstFlushed) return (cSize - mtctx->jobs[wJobID].dstFlushed); if (srcSize > srcConsumed) return 1; /* current job not completely compressed */ } if (mtctx->doneJobID < mtctx->nextJobID) return 1; /* some more jobs ongoing */ if (mtctx->jobReady) return 1; /* one job is ready to push, just not yet in the list */ if (mtctx->inBuff.filled > 0) return 1; /* input is not empty, and still needs to be converted into a job */ mtctx->allJobsCompleted = mtctx->frameEnded; /* all jobs are entirely flushed => if this one is last one, frame is completed */ if (end == ZSTD_e_end) return !mtctx->frameEnded; /* for ZSTD_e_end, question becomes : is frame completed ? instead of : are internal buffers fully flushed ? */ return 0; /* internal buffers fully flushed */ } /** * Returns the range of data used by the earliest job that is not yet complete. * If the data of the first job is broken up into two segments, we cover both * sections. */ static range_t ZSTDMT_getInputDataInUse(ZSTDMT_CCtx* mtctx) { unsigned const firstJobID = mtctx->doneJobID; unsigned const lastJobID = mtctx->nextJobID; unsigned jobID; for (jobID = firstJobID; jobID < lastJobID; ++jobID) { unsigned const wJobID = jobID & mtctx->jobIDMask; size_t consumed; ZSTD_PTHREAD_MUTEX_LOCK(&mtctx->jobs[wJobID].job_mutex); consumed = mtctx->jobs[wJobID].consumed; ZSTD_pthread_mutex_unlock(&mtctx->jobs[wJobID].job_mutex); if (consumed < mtctx->jobs[wJobID].src.size) { range_t range = mtctx->jobs[wJobID].prefix; if (range.size == 0) { /* Empty prefix */ range = mtctx->jobs[wJobID].src; } /* Job source in multiple segments not supported yet */ assert(range.start <= mtctx->jobs[wJobID].src.start); return range; } } return kNullRange; } /** * Returns non-zero iff buffer and range overlap. */ static int ZSTDMT_isOverlapped(buffer_t buffer, range_t range) { BYTE const* const bufferStart = (BYTE const*)buffer.start; BYTE const* const bufferEnd = bufferStart + buffer.capacity; BYTE const* const rangeStart = (BYTE const*)range.start; BYTE const* const rangeEnd = range.size != 0 ? 
rangeStart + range.size : rangeStart; if (rangeStart == NULL || bufferStart == NULL) return 0; /* Empty ranges cannot overlap */ if (bufferStart == bufferEnd || rangeStart == rangeEnd) return 0; return bufferStart < rangeEnd && rangeStart < bufferEnd; } static int ZSTDMT_doesOverlapWindow(buffer_t buffer, ZSTD_window_t window) { range_t extDict; range_t prefix; DEBUGLOG(5, "ZSTDMT_doesOverlapWindow"); extDict.start = window.dictBase + window.lowLimit; extDict.size = window.dictLimit - window.lowLimit; prefix.start = window.base + window.dictLimit; prefix.size = window.nextSrc - (window.base + window.dictLimit); DEBUGLOG(5, "extDict [0x%zx, 0x%zx)", (size_t)extDict.start, (size_t)extDict.start + extDict.size); DEBUGLOG(5, "prefix [0x%zx, 0x%zx)", (size_t)prefix.start, (size_t)prefix.start + prefix.size); return ZSTDMT_isOverlapped(buffer, extDict) || ZSTDMT_isOverlapped(buffer, prefix); } static void ZSTDMT_waitForLdmComplete(ZSTDMT_CCtx* mtctx, buffer_t buffer) { if (mtctx->params.ldmParams.enableLdm) { ZSTD_pthread_mutex_t* mutex = &mtctx->serial.ldmWindowMutex; DEBUGLOG(5, "ZSTDMT_waitForLdmComplete"); DEBUGLOG(5, "source [0x%zx, 0x%zx)", (size_t)buffer.start, (size_t)buffer.start + buffer.capacity); ZSTD_PTHREAD_MUTEX_LOCK(mutex); while (ZSTDMT_doesOverlapWindow(buffer, mtctx->serial.ldmWindow)) { DEBUGLOG(5, "Waiting for LDM to finish..."); ZSTD_pthread_cond_wait(&mtctx->serial.ldmWindowCond, mutex); } DEBUGLOG(6, "Done waiting for LDM to finish"); ZSTD_pthread_mutex_unlock(mutex); } } /** * Attempts to set the inBuff to the next section to fill. * If any part of the new section is still in use we give up. * Returns non-zero if the buffer is filled. */ static int ZSTDMT_tryGetInputRange(ZSTDMT_CCtx* mtctx) { range_t const inUse = ZSTDMT_getInputDataInUse(mtctx); size_t const spaceLeft = mtctx->roundBuff.capacity - mtctx->roundBuff.pos; size_t const target = mtctx->targetSectionSize; buffer_t buffer; DEBUGLOG(5, "ZSTDMT_tryGetInputRange"); assert(mtctx->inBuff.buffer.start == NULL); assert(mtctx->roundBuff.capacity >= target); if (spaceLeft < target) { /* ZSTD_invalidateRepCodes() doesn't work for extDict variants. * Simply copy the prefix to the beginning in that case. */ BYTE* const start = (BYTE*)mtctx->roundBuff.buffer; size_t const prefixSize = mtctx->inBuff.prefix.size; buffer.start = start; buffer.capacity = prefixSize; if (ZSTDMT_isOverlapped(buffer, inUse)) { DEBUGLOG(5, "Waiting for buffer..."); return 0; } ZSTDMT_waitForLdmComplete(mtctx, buffer); memmove(start, mtctx->inBuff.prefix.start, prefixSize); mtctx->inBuff.prefix.start = start; mtctx->roundBuff.pos = prefixSize; } buffer.start = mtctx->roundBuff.buffer + mtctx->roundBuff.pos; buffer.capacity = target; if (ZSTDMT_isOverlapped(buffer, inUse)) { DEBUGLOG(5, "Waiting for buffer..."); return 0; } assert(!ZSTDMT_isOverlapped(buffer, mtctx->inBuff.prefix)); ZSTDMT_waitForLdmComplete(mtctx, buffer); DEBUGLOG(5, "Using prefix range [%zx, %zx)", (size_t)mtctx->inBuff.prefix.start, (size_t)mtctx->inBuff.prefix.start + mtctx->inBuff.prefix.size); DEBUGLOG(5, "Using source range [%zx, %zx)", (size_t)buffer.start, (size_t)buffer.start + buffer.capacity); mtctx->inBuff.buffer = buffer; mtctx->inBuff.filled = 0; assert(mtctx->roundBuff.pos + buffer.capacity <= mtctx->roundBuff.capacity); return 1; } typedef struct { size_t toLoad; /* The number of bytes to load from the input. */ int flush; /* Boolean declaring if we must flush because we found a synchronization point. 
*/ } syncPoint_t; /** * Searches through the input for a synchronization point. If one is found, we * will instruct the caller to flush, and return the number of bytes to load. * Otherwise, we will load as many bytes as possible and instruct the caller * to continue as normal. */ static syncPoint_t findSynchronizationPoint(ZSTDMT_CCtx const* mtctx, ZSTD_inBuffer const input) { BYTE const* const istart = (BYTE const*)input.src + input.pos; U64 const primePower = mtctx->rsync.primePower; U64 const hitMask = mtctx->rsync.hitMask; syncPoint_t syncPoint; U64 hash; BYTE const* prev; size_t pos; syncPoint.toLoad = MIN(input.size - input.pos, mtctx->targetSectionSize - mtctx->inBuff.filled); syncPoint.flush = 0; if (!mtctx->params.rsyncable) /* Rsync is disabled. */ return syncPoint; if (mtctx->inBuff.filled + syncPoint.toLoad < RSYNC_LENGTH) /* Not enough to compute the hash. * We will miss any synchronization points in this RSYNC_LENGTH byte * window. However, since it depends only in the internal buffers, if the * state is already synchronized, we will remain synchronized. * Additionally, the probability that we miss a synchronization point is * low: RSYNC_LENGTH / targetSectionSize. */ return syncPoint; /* Initialize the loop variables. */ if (mtctx->inBuff.filled >= RSYNC_LENGTH) { /* We have enough bytes buffered to initialize the hash. * Start scanning at the beginning of the input. */ pos = 0; prev = (BYTE const*)mtctx->inBuff.buffer.start + mtctx->inBuff.filled - RSYNC_LENGTH; hash = ZSTD_rollingHash_compute(prev, RSYNC_LENGTH); } else { /* We don't have enough bytes buffered to initialize the hash, but * we know we have at least RSYNC_LENGTH bytes total. * Start scanning after the first RSYNC_LENGTH bytes less the bytes * already buffered. */ pos = RSYNC_LENGTH - mtctx->inBuff.filled; prev = (BYTE const*)mtctx->inBuff.buffer.start - pos; hash = ZSTD_rollingHash_compute(mtctx->inBuff.buffer.start, mtctx->inBuff.filled); hash = ZSTD_rollingHash_append(hash, istart, pos); } /* Starting with the hash of the previous RSYNC_LENGTH bytes, roll * through the input. If we hit a synchronization point, then cut the * job off, and tell the compressor to flush the job. Otherwise, load * all the bytes and continue as normal. * If we go too long without a synchronization point (targetSectionSize) * then a block will be emitted anyways, but this is okay, since if we * are already synchronized we will remain synchronized. */ for (; pos < syncPoint.toLoad; ++pos) { BYTE const toRemove = pos < RSYNC_LENGTH ? 
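/* while pos < RSYNC_LENGTH, the byte leaving the rolling window is one of the bytes
 * already held in the internal input buffer (via prev) ; afterwards it comes from the
 * freshly provided input (istart) */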
prev[pos] : istart[pos - RSYNC_LENGTH]; /* if (pos >= RSYNC_LENGTH) assert(ZSTD_rollingHash_compute(istart + pos - RSYNC_LENGTH, RSYNC_LENGTH) == hash); */ hash = ZSTD_rollingHash_rotate(hash, toRemove, istart[pos], primePower); if ((hash & hitMask) == hitMask) { syncPoint.toLoad = pos + 1; syncPoint.flush = 1; break; } } return syncPoint; } size_t ZSTDMT_nextInputSizeHint(const ZSTDMT_CCtx* mtctx) { size_t hintInSize = mtctx->targetSectionSize - mtctx->inBuff.filled; if (hintInSize==0) hintInSize = mtctx->targetSectionSize; return hintInSize; } /** ZSTDMT_compressStream_generic() : * internal use only - exposed to be invoked from zstd_compress.c * assumption : output and input are valid (pos <= size) * @return : minimum amount of data remaining to flush, 0 if none */ size_t ZSTDMT_compressStream_generic(ZSTDMT_CCtx* mtctx, ZSTD_outBuffer* output, ZSTD_inBuffer* input, ZSTD_EndDirective endOp) { unsigned forwardInputProgress = 0; DEBUGLOG(5, "ZSTDMT_compressStream_generic (endOp=%u, srcSize=%u)", (U32)endOp, (U32)(input->size - input->pos)); assert(output->pos <= output->size); assert(input->pos <= input->size); if (mtctx->singleBlockingThread) { /* delegate to single-thread (synchronous) */ return ZSTD_compressStream2(mtctx->cctxPool->cctx[0], output, input, endOp); } if ((mtctx->frameEnded) && (endOp==ZSTD_e_continue)) { /* current frame being ended. Only flush/end are allowed */ return ERROR(stage_wrong); } /* single-pass shortcut (note : synchronous-mode) */ if ( (!mtctx->params.rsyncable) /* rsyncable mode is disabled */ && (mtctx->nextJobID == 0) /* just started */ && (mtctx->inBuff.filled == 0) /* nothing buffered */ && (!mtctx->jobReady) /* no job already created */ && (endOp == ZSTD_e_end) /* end order */ && (output->size - output->pos >= ZSTD_compressBound(input->size - input->pos)) ) { /* enough space in dst */ size_t const cSize = ZSTDMT_compress_advanced_internal(mtctx, (char*)output->dst + output->pos, output->size - output->pos, (const char*)input->src + input->pos, input->size - input->pos, mtctx->cdict, mtctx->params); if (ZSTD_isError(cSize)) return cSize; input->pos = input->size; output->pos += cSize; mtctx->allJobsCompleted = 1; mtctx->frameEnded = 1; return 0; } /* fill input buffer */ if ( (!mtctx->jobReady) && (input->size > input->pos) ) { /* support NULL input */ if (mtctx->inBuff.buffer.start == NULL) { assert(mtctx->inBuff.filled == 0); /* Can't fill an empty buffer */ if (!ZSTDMT_tryGetInputRange(mtctx)) { /* It is only possible for this operation to fail if there are * still compression jobs ongoing. 
*/ DEBUGLOG(5, "ZSTDMT_tryGetInputRange failed"); assert(mtctx->doneJobID != mtctx->nextJobID); } else DEBUGLOG(5, "ZSTDMT_tryGetInputRange completed successfully : mtctx->inBuff.buffer.start = %p", mtctx->inBuff.buffer.start); } if (mtctx->inBuff.buffer.start != NULL) { syncPoint_t const syncPoint = findSynchronizationPoint(mtctx, *input); if (syncPoint.flush && endOp == ZSTD_e_continue) { endOp = ZSTD_e_flush; } assert(mtctx->inBuff.buffer.capacity >= mtctx->targetSectionSize); DEBUGLOG(5, "ZSTDMT_compressStream_generic: adding %u bytes on top of %u to buffer of size %u", (U32)syncPoint.toLoad, (U32)mtctx->inBuff.filled, (U32)mtctx->targetSectionSize); memcpy((char*)mtctx->inBuff.buffer.start + mtctx->inBuff.filled, (const char*)input->src + input->pos, syncPoint.toLoad); input->pos += syncPoint.toLoad; mtctx->inBuff.filled += syncPoint.toLoad; forwardInputProgress = syncPoint.toLoad>0; } if ((input->pos < input->size) && (endOp == ZSTD_e_end)) endOp = ZSTD_e_flush; /* can't end now : not all input consumed */ } if ( (mtctx->jobReady) || (mtctx->inBuff.filled >= mtctx->targetSectionSize) /* filled enough : let's compress */ || ((endOp != ZSTD_e_continue) && (mtctx->inBuff.filled > 0)) /* something to flush : let's go */ || ((endOp == ZSTD_e_end) && (!mtctx->frameEnded)) ) { /* must finish the frame with a zero-size block */ size_t const jobSize = mtctx->inBuff.filled; assert(mtctx->inBuff.filled <= mtctx->targetSectionSize); FORWARD_IF_ERROR( ZSTDMT_createCompressionJob(mtctx, jobSize, endOp) , ""); } /* check for potential compressed data ready to be flushed */ { size_t const remainingToFlush = ZSTDMT_flushProduced(mtctx, output, !forwardInputProgress, endOp); /* block if there was no forward input progress */ if (input->pos < input->size) return MAX(remainingToFlush, 1); /* input not consumed : do not end flush yet */ DEBUGLOG(5, "end of ZSTDMT_compressStream_generic: remainingToFlush = %u", (U32)remainingToFlush); return remainingToFlush; } } size_t ZSTDMT_compressStream(ZSTDMT_CCtx* mtctx, ZSTD_outBuffer* output, ZSTD_inBuffer* input) { FORWARD_IF_ERROR( ZSTDMT_compressStream_generic(mtctx, output, input, ZSTD_e_continue) , ""); /* recommended next input size : fill current input buffer */ return mtctx->targetSectionSize - mtctx->inBuff.filled; /* note : could be zero when input buffer is fully filled and no more availability to create new job */ } static size_t ZSTDMT_flushStream_internal(ZSTDMT_CCtx* mtctx, ZSTD_outBuffer* output, ZSTD_EndDirective endFrame) { size_t const srcSize = mtctx->inBuff.filled; DEBUGLOG(5, "ZSTDMT_flushStream_internal"); if ( mtctx->jobReady /* one job ready for a worker to pick up */ || (srcSize > 0) /* still some data within input buffer */ || ((endFrame==ZSTD_e_end) && !mtctx->frameEnded)) { /* need a last 0-size block to end frame */ DEBUGLOG(5, "ZSTDMT_flushStream_internal : create a new job (%u bytes, end:%u)", (U32)srcSize, (U32)endFrame); FORWARD_IF_ERROR( ZSTDMT_createCompressionJob(mtctx, srcSize, endFrame) , ""); } /* check if there is any data available to flush */ return ZSTDMT_flushProduced(mtctx, output, 1 /* blockToFlush */, endFrame); } size_t ZSTDMT_flushStream(ZSTDMT_CCtx* mtctx, ZSTD_outBuffer* output) { DEBUGLOG(5, "ZSTDMT_flushStream"); if (mtctx->singleBlockingThread) return ZSTD_flushStream(mtctx->cctxPool->cctx[0], output); return ZSTDMT_flushStream_internal(mtctx, output, ZSTD_e_flush); } size_t ZSTDMT_endStream(ZSTDMT_CCtx* mtctx, ZSTD_outBuffer* output) { DEBUGLOG(4, "ZSTDMT_endStream"); if (mtctx->singleBlockingThread) return 
ZSTD_endStream(mtctx->cctxPool->cctx[0], output); return ZSTDMT_flushStream_internal(mtctx, output, ZSTD_e_end); } borgbackup-1.1.15/src/borg/algorithms/zstd/lib/compress/zstd_opt.c0000644000175000017500000015454213771325506025210 0ustar useruser00000000000000/* * Copyright (c) 2016-2020, Przemyslaw Skibinski, Yann Collet, Facebook, Inc. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the * LICENSE file in the root directory of this source tree) and the GPLv2 (found * in the COPYING file in the root directory of this source tree). * You may select, at your option, one of the above-listed licenses. */ #include "zstd_compress_internal.h" #include "hist.h" #include "zstd_opt.h" #define ZSTD_LITFREQ_ADD 2 /* scaling factor for litFreq, so that frequencies adapt faster to new stats */ #define ZSTD_FREQ_DIV 4 /* log factor when using previous stats to init next stats */ #define ZSTD_MAX_PRICE (1<<30) #define ZSTD_PREDEF_THRESHOLD 1024 /* if srcSize < ZSTD_PREDEF_THRESHOLD, symbols' cost is assumed static, directly determined by pre-defined distributions */ /*-************************************* * Price functions for optimal parser ***************************************/ #if 0 /* approximation at bit level */ # define BITCOST_ACCURACY 0 # define BITCOST_MULTIPLIER (1 << BITCOST_ACCURACY) # define WEIGHT(stat) ((void)opt, ZSTD_bitWeight(stat)) #elif 0 /* fractional bit accuracy */ # define BITCOST_ACCURACY 8 # define BITCOST_MULTIPLIER (1 << BITCOST_ACCURACY) # define WEIGHT(stat,opt) ((void)opt, ZSTD_fracWeight(stat)) #else /* opt==approx, ultra==accurate */ # define BITCOST_ACCURACY 8 # define BITCOST_MULTIPLIER (1 << BITCOST_ACCURACY) # define WEIGHT(stat,opt) (opt ? ZSTD_fracWeight(stat) : ZSTD_bitWeight(stat)) #endif MEM_STATIC U32 ZSTD_bitWeight(U32 stat) { return (ZSTD_highbit32(stat+1) * BITCOST_MULTIPLIER); } MEM_STATIC U32 ZSTD_fracWeight(U32 rawStat) { U32 const stat = rawStat + 1; U32 const hb = ZSTD_highbit32(stat); U32 const BWeight = hb * BITCOST_MULTIPLIER; U32 const FWeight = (stat << BITCOST_ACCURACY) >> hb; U32 const weight = BWeight + FWeight; assert(hb + BITCOST_ACCURACY < 31); return weight; } #if (DEBUGLEVEL>=2) /* debugging function, * @return price in bytes as fractional value * for debug messages only */ MEM_STATIC double ZSTD_fCost(U32 price) { return (double)price / (BITCOST_MULTIPLIER*8); } #endif static int ZSTD_compressedLiterals(optState_t const* const optPtr) { return optPtr->literalCompressionMode != ZSTD_lcm_uncompressed; } static void ZSTD_setBasePrices(optState_t* optPtr, int optLevel) { if (ZSTD_compressedLiterals(optPtr)) optPtr->litSumBasePrice = WEIGHT(optPtr->litSum, optLevel); optPtr->litLengthSumBasePrice = WEIGHT(optPtr->litLengthSum, optLevel); optPtr->matchLengthSumBasePrice = WEIGHT(optPtr->matchLengthSum, optLevel); optPtr->offCodeSumBasePrice = WEIGHT(optPtr->offCodeSum, optLevel); } /* ZSTD_downscaleStat() : * reduce all elements in table by a factor 2^(ZSTD_FREQ_DIV+malus) * return the resulting sum of elements */ static U32 ZSTD_downscaleStat(unsigned* table, U32 lastEltIndex, int malus) { U32 s, sum=0; DEBUGLOG(5, "ZSTD_downscaleStat (nbElts=%u)", (unsigned)lastEltIndex+1); assert(ZSTD_FREQ_DIV+malus > 0 && ZSTD_FREQ_DIV+malus < 31); for (s=0; s> (ZSTD_FREQ_DIV+malus)); sum += table[s]; } return sum; } /* ZSTD_rescaleFreqs() : * if first block (detected by optPtr->litLengthSum == 0) : init statistics * take hints from dictionary if there is one * or init from zero, using src for literals 
stats, or flat 1 for match symbols * otherwise downscale existing stats, to be used as seed for next block. */ static void ZSTD_rescaleFreqs(optState_t* const optPtr, const BYTE* const src, size_t const srcSize, int const optLevel) { int const compressedLiterals = ZSTD_compressedLiterals(optPtr); DEBUGLOG(5, "ZSTD_rescaleFreqs (srcSize=%u)", (unsigned)srcSize); optPtr->priceType = zop_dynamic; if (optPtr->litLengthSum == 0) { /* first block : init */ if (srcSize <= ZSTD_PREDEF_THRESHOLD) { /* heuristic */ DEBUGLOG(5, "(srcSize <= ZSTD_PREDEF_THRESHOLD) => zop_predef"); optPtr->priceType = zop_predef; } assert(optPtr->symbolCosts != NULL); if (optPtr->symbolCosts->huf.repeatMode == HUF_repeat_valid) { /* huffman table presumed generated by dictionary */ optPtr->priceType = zop_dynamic; if (compressedLiterals) { unsigned lit; assert(optPtr->litFreq != NULL); optPtr->litSum = 0; for (lit=0; lit<=MaxLit; lit++) { U32 const scaleLog = 11; /* scale to 2K */ U32 const bitCost = HUF_getNbBits(optPtr->symbolCosts->huf.CTable, lit); assert(bitCost <= scaleLog); optPtr->litFreq[lit] = bitCost ? 1 << (scaleLog-bitCost) : 1 /*minimum to calculate cost*/; optPtr->litSum += optPtr->litFreq[lit]; } } { unsigned ll; FSE_CState_t llstate; FSE_initCState(&llstate, optPtr->symbolCosts->fse.litlengthCTable); optPtr->litLengthSum = 0; for (ll=0; ll<=MaxLL; ll++) { U32 const scaleLog = 10; /* scale to 1K */ U32 const bitCost = FSE_getMaxNbBits(llstate.symbolTT, ll); assert(bitCost < scaleLog); optPtr->litLengthFreq[ll] = bitCost ? 1 << (scaleLog-bitCost) : 1 /*minimum to calculate cost*/; optPtr->litLengthSum += optPtr->litLengthFreq[ll]; } } { unsigned ml; FSE_CState_t mlstate; FSE_initCState(&mlstate, optPtr->symbolCosts->fse.matchlengthCTable); optPtr->matchLengthSum = 0; for (ml=0; ml<=MaxML; ml++) { U32 const scaleLog = 10; U32 const bitCost = FSE_getMaxNbBits(mlstate.symbolTT, ml); assert(bitCost < scaleLog); optPtr->matchLengthFreq[ml] = bitCost ? 1 << (scaleLog-bitCost) : 1 /*minimum to calculate cost*/; optPtr->matchLengthSum += optPtr->matchLengthFreq[ml]; } } { unsigned of; FSE_CState_t ofstate; FSE_initCState(&ofstate, optPtr->symbolCosts->fse.offcodeCTable); optPtr->offCodeSum = 0; for (of=0; of<=MaxOff; of++) { U32 const scaleLog = 10; U32 const bitCost = FSE_getMaxNbBits(ofstate.symbolTT, of); assert(bitCost < scaleLog); optPtr->offCodeFreq[of] = bitCost ? 
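/* as for the other symbol types above, the seed frequency is 2^(scaleLog - bitCost),
 * so a symbol that the dictionary tables encode cheaply starts with a high frequency */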
1 << (scaleLog-bitCost) : 1 /*minimum to calculate cost*/; optPtr->offCodeSum += optPtr->offCodeFreq[of]; } } } else { /* not a dictionary */ assert(optPtr->litFreq != NULL); if (compressedLiterals) { unsigned lit = MaxLit; HIST_count_simple(optPtr->litFreq, &lit, src, srcSize); /* use raw first block to init statistics */ optPtr->litSum = ZSTD_downscaleStat(optPtr->litFreq, MaxLit, 1); } { unsigned ll; for (ll=0; ll<=MaxLL; ll++) optPtr->litLengthFreq[ll] = 1; } optPtr->litLengthSum = MaxLL+1; { unsigned ml; for (ml=0; ml<=MaxML; ml++) optPtr->matchLengthFreq[ml] = 1; } optPtr->matchLengthSum = MaxML+1; { unsigned of; for (of=0; of<=MaxOff; of++) optPtr->offCodeFreq[of] = 1; } optPtr->offCodeSum = MaxOff+1; } } else { /* new block : re-use previous statistics, scaled down */ if (compressedLiterals) optPtr->litSum = ZSTD_downscaleStat(optPtr->litFreq, MaxLit, 1); optPtr->litLengthSum = ZSTD_downscaleStat(optPtr->litLengthFreq, MaxLL, 0); optPtr->matchLengthSum = ZSTD_downscaleStat(optPtr->matchLengthFreq, MaxML, 0); optPtr->offCodeSum = ZSTD_downscaleStat(optPtr->offCodeFreq, MaxOff, 0); } ZSTD_setBasePrices(optPtr, optLevel); } /* ZSTD_rawLiteralsCost() : * price of literals (only) in specified segment (which length can be 0). * does not include price of literalLength symbol */ static U32 ZSTD_rawLiteralsCost(const BYTE* const literals, U32 const litLength, const optState_t* const optPtr, int optLevel) { if (litLength == 0) return 0; if (!ZSTD_compressedLiterals(optPtr)) return (litLength << 3) * BITCOST_MULTIPLIER; /* Uncompressed - 8 bytes per literal. */ if (optPtr->priceType == zop_predef) return (litLength*6) * BITCOST_MULTIPLIER; /* 6 bit per literal - no statistic used */ /* dynamic statistics */ { U32 price = litLength * optPtr->litSumBasePrice; U32 u; for (u=0; u < litLength; u++) { assert(WEIGHT(optPtr->litFreq[literals[u]], optLevel) <= optPtr->litSumBasePrice); /* literal cost should never be negative */ price -= WEIGHT(optPtr->litFreq[literals[u]], optLevel); } return price; } } /* ZSTD_litLengthPrice() : * cost of literalLength symbol */ static U32 ZSTD_litLengthPrice(U32 const litLength, const optState_t* const optPtr, int optLevel) { if (optPtr->priceType == zop_predef) return WEIGHT(litLength, optLevel); /* dynamic statistics */ { U32 const llCode = ZSTD_LLcode(litLength); return (LL_bits[llCode] * BITCOST_MULTIPLIER) + optPtr->litLengthSumBasePrice - WEIGHT(optPtr->litLengthFreq[llCode], optLevel); } } /* ZSTD_getMatchPrice() : * Provides the cost of the match part (offset + matchLength) of a sequence * Must be combined with ZSTD_fullLiteralsCost() to get the full cost of a sequence. 
* optLevel: when <2, favors small offset for decompression speed (improved cache efficiency) */ FORCE_INLINE_TEMPLATE U32 ZSTD_getMatchPrice(U32 const offset, U32 const matchLength, const optState_t* const optPtr, int const optLevel) { U32 price; U32 const offCode = ZSTD_highbit32(offset+1); U32 const mlBase = matchLength - MINMATCH; assert(matchLength >= MINMATCH); if (optPtr->priceType == zop_predef) /* fixed scheme, do not use statistics */ return WEIGHT(mlBase, optLevel) + ((16 + offCode) * BITCOST_MULTIPLIER); /* dynamic statistics */ price = (offCode * BITCOST_MULTIPLIER) + (optPtr->offCodeSumBasePrice - WEIGHT(optPtr->offCodeFreq[offCode], optLevel)); if ((optLevel<2) /*static*/ && offCode >= 20) price += (offCode-19)*2 * BITCOST_MULTIPLIER; /* handicap for long distance offsets, favor decompression speed */ /* match Length */ { U32 const mlCode = ZSTD_MLcode(mlBase); price += (ML_bits[mlCode] * BITCOST_MULTIPLIER) + (optPtr->matchLengthSumBasePrice - WEIGHT(optPtr->matchLengthFreq[mlCode], optLevel)); } price += BITCOST_MULTIPLIER / 5; /* heuristic : make matches a bit more costly to favor less sequences -> faster decompression speed */ DEBUGLOG(8, "ZSTD_getMatchPrice(ml:%u) = %u", matchLength, price); return price; } /* ZSTD_updateStats() : * assumption : literals + litLengtn <= iend */ static void ZSTD_updateStats(optState_t* const optPtr, U32 litLength, const BYTE* literals, U32 offsetCode, U32 matchLength) { /* literals */ if (ZSTD_compressedLiterals(optPtr)) { U32 u; for (u=0; u < litLength; u++) optPtr->litFreq[literals[u]] += ZSTD_LITFREQ_ADD; optPtr->litSum += litLength*ZSTD_LITFREQ_ADD; } /* literal Length */ { U32 const llCode = ZSTD_LLcode(litLength); optPtr->litLengthFreq[llCode]++; optPtr->litLengthSum++; } /* match offset code (0-2=>repCode; 3+=>offset+2) */ { U32 const offCode = ZSTD_highbit32(offsetCode+1); assert(offCode <= MaxOff); optPtr->offCodeFreq[offCode]++; optPtr->offCodeSum++; } /* match Length */ { U32 const mlBase = matchLength - MINMATCH; U32 const mlCode = ZSTD_MLcode(mlBase); optPtr->matchLengthFreq[mlCode]++; optPtr->matchLengthSum++; } } /* ZSTD_readMINMATCH() : * function safe only for comparisons * assumption : memPtr must be at least 4 bytes before end of buffer */ MEM_STATIC U32 ZSTD_readMINMATCH(const void* memPtr, U32 length) { switch (length) { default : case 4 : return MEM_read32(memPtr); case 3 : if (MEM_isLittleEndian()) return MEM_read32(memPtr)<<8; else return MEM_read32(memPtr)>>8; } } /* Update hashTable3 up to ip (excluded) Assumption : always within prefix (i.e. not within extDict) */ static U32 ZSTD_insertAndFindFirstIndexHash3 (ZSTD_matchState_t* ms, U32* nextToUpdate3, const BYTE* const ip) { U32* const hashTable3 = ms->hashTable3; U32 const hashLog3 = ms->hashLog3; const BYTE* const base = ms->window.base; U32 idx = *nextToUpdate3; U32 const target = (U32)(ip - base); size_t const hash3 = ZSTD_hash3Ptr(ip, hashLog3); assert(hashLog3 > 0); while(idx < target) { hashTable3[ZSTD_hash3Ptr(base+idx, hashLog3)] = idx; idx++; } *nextToUpdate3 = target; return hashTable3[hash3]; } /*-************************************* * Binary Tree search ***************************************/ /** ZSTD_insertBt1() : add one or multiple positions to tree. * ip : assumed <= iend-8 . 
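 *  note : the tree nodes live in ms->chainTable, two slots per position
 *         (smaller-child / larger-child), indexed by (position & btMask).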
* @return : nb of positions added */ static U32 ZSTD_insertBt1( ZSTD_matchState_t* ms, const BYTE* const ip, const BYTE* const iend, U32 const mls, const int extDict) { const ZSTD_compressionParameters* const cParams = &ms->cParams; U32* const hashTable = ms->hashTable; U32 const hashLog = cParams->hashLog; size_t const h = ZSTD_hashPtr(ip, hashLog, mls); U32* const bt = ms->chainTable; U32 const btLog = cParams->chainLog - 1; U32 const btMask = (1 << btLog) - 1; U32 matchIndex = hashTable[h]; size_t commonLengthSmaller=0, commonLengthLarger=0; const BYTE* const base = ms->window.base; const BYTE* const dictBase = ms->window.dictBase; const U32 dictLimit = ms->window.dictLimit; const BYTE* const dictEnd = dictBase + dictLimit; const BYTE* const prefixStart = base + dictLimit; const BYTE* match; const U32 current = (U32)(ip-base); const U32 btLow = btMask >= current ? 0 : current - btMask; U32* smallerPtr = bt + 2*(current&btMask); U32* largerPtr = smallerPtr + 1; U32 dummy32; /* to be nullified at the end */ U32 const windowLow = ms->window.lowLimit; U32 matchEndIdx = current+8+1; size_t bestLength = 8; U32 nbCompares = 1U << cParams->searchLog; #ifdef ZSTD_C_PREDICT U32 predictedSmall = *(bt + 2*((current-1)&btMask) + 0); U32 predictedLarge = *(bt + 2*((current-1)&btMask) + 1); predictedSmall += (predictedSmall>0); predictedLarge += (predictedLarge>0); #endif /* ZSTD_C_PREDICT */ DEBUGLOG(8, "ZSTD_insertBt1 (%u)", current); assert(ip <= iend-8); /* required for h calculation */ hashTable[h] = current; /* Update Hash Table */ assert(windowLow > 0); while (nbCompares-- && (matchIndex >= windowLow)) { U32* const nextPtr = bt + 2*(matchIndex & btMask); size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger); /* guaranteed minimum nb of common bytes */ assert(matchIndex < current); #ifdef ZSTD_C_PREDICT /* note : can create issues when hlog small <= 11 */ const U32* predictPtr = bt + 2*((matchIndex-1) & btMask); /* written this way, as bt is a roll buffer */ if (matchIndex == predictedSmall) { /* no need to check length, result known */ *smallerPtr = matchIndex; if (matchIndex <= btLow) { smallerPtr=&dummy32; break; } /* beyond tree size, stop the search */ smallerPtr = nextPtr+1; /* new "smaller" => larger of match */ matchIndex = nextPtr[1]; /* new matchIndex larger than previous (closer to current) */ predictedSmall = predictPtr[1] + (predictPtr[1]>0); continue; } if (matchIndex == predictedLarge) { *largerPtr = matchIndex; if (matchIndex <= btLow) { largerPtr=&dummy32; break; } /* beyond tree size, stop the search */ largerPtr = nextPtr; matchIndex = nextPtr[0]; predictedLarge = predictPtr[0] + (predictPtr[0]>0); continue; } #endif if (!extDict || (matchIndex+matchLength >= dictLimit)) { assert(matchIndex+matchLength >= dictLimit); /* might be wrong if actually extDict */ match = base + matchIndex; matchLength += ZSTD_count(ip+matchLength, match+matchLength, iend); } else { match = dictBase + matchIndex; matchLength += ZSTD_count_2segments(ip+matchLength, match+matchLength, iend, dictEnd, prefixStart); if (matchIndex+matchLength >= dictLimit) match = base + matchIndex; /* to prepare for next usage of match[matchLength] */ } if (matchLength > bestLength) { bestLength = matchLength; if (matchLength > matchEndIdx - matchIndex) matchEndIdx = matchIndex + (U32)matchLength; } if (ip+matchLength == iend) { /* equal : no way to know if inf or sup */ break; /* drop , to guarantee consistency ; miss a bit of compression, but other solutions can corrupt tree */ } if (match[matchLength] < 
ip[matchLength]) { /* necessarily within buffer */ /* match is smaller than current */ *smallerPtr = matchIndex; /* update smaller idx */ commonLengthSmaller = matchLength; /* all smaller will now have at least this guaranteed common length */ if (matchIndex <= btLow) { smallerPtr=&dummy32; break; } /* beyond tree size, stop searching */ smallerPtr = nextPtr+1; /* new "candidate" => larger than match, which was smaller than target */ matchIndex = nextPtr[1]; /* new matchIndex, larger than previous and closer to current */ } else { /* match is larger than current */ *largerPtr = matchIndex; commonLengthLarger = matchLength; if (matchIndex <= btLow) { largerPtr=&dummy32; break; } /* beyond tree size, stop searching */ largerPtr = nextPtr; matchIndex = nextPtr[0]; } } *smallerPtr = *largerPtr = 0; { U32 positions = 0; if (bestLength > 384) positions = MIN(192, (U32)(bestLength - 384)); /* speed optimization */ assert(matchEndIdx > current + 8); return MAX(positions, matchEndIdx - (current + 8)); } } FORCE_INLINE_TEMPLATE void ZSTD_updateTree_internal( ZSTD_matchState_t* ms, const BYTE* const ip, const BYTE* const iend, const U32 mls, const ZSTD_dictMode_e dictMode) { const BYTE* const base = ms->window.base; U32 const target = (U32)(ip - base); U32 idx = ms->nextToUpdate; DEBUGLOG(6, "ZSTD_updateTree_internal, from %u to %u (dictMode:%u)", idx, target, dictMode); while(idx < target) { U32 const forward = ZSTD_insertBt1(ms, base+idx, iend, mls, dictMode == ZSTD_extDict); assert(idx < (U32)(idx + forward)); idx += forward; } assert((size_t)(ip - base) <= (size_t)(U32)(-1)); assert((size_t)(iend - base) <= (size_t)(U32)(-1)); ms->nextToUpdate = target; } void ZSTD_updateTree(ZSTD_matchState_t* ms, const BYTE* ip, const BYTE* iend) { ZSTD_updateTree_internal(ms, ip, iend, ms->cParams.minMatch, ZSTD_noDict); } FORCE_INLINE_TEMPLATE U32 ZSTD_insertBtAndGetAllMatches ( ZSTD_match_t* matches, /* store result (found matches) in this table (presumed large enough) */ ZSTD_matchState_t* ms, U32* nextToUpdate3, const BYTE* const ip, const BYTE* const iLimit, const ZSTD_dictMode_e dictMode, const U32 rep[ZSTD_REP_NUM], U32 const ll0, /* tells if associated literal length is 0 or not. This value must be 0 or 1 */ const U32 lengthToBeat, U32 const mls /* template */) { const ZSTD_compressionParameters* const cParams = &ms->cParams; U32 const sufficient_len = MIN(cParams->targetLength, ZSTD_OPT_NUM -1); const BYTE* const base = ms->window.base; U32 const current = (U32)(ip-base); U32 const hashLog = cParams->hashLog; U32 const minMatch = (mls==3) ? 3 : 4; U32* const hashTable = ms->hashTable; size_t const h = ZSTD_hashPtr(ip, hashLog, mls); U32 matchIndex = hashTable[h]; U32* const bt = ms->chainTable; U32 const btLog = cParams->chainLog - 1; U32 const btMask= (1U << btLog) - 1; size_t commonLengthSmaller=0, commonLengthLarger=0; const BYTE* const dictBase = ms->window.dictBase; U32 const dictLimit = ms->window.dictLimit; const BYTE* const dictEnd = dictBase + dictLimit; const BYTE* const prefixStart = base + dictLimit; U32 const btLow = (btMask >= current) ? 0 : current - btMask; U32 const windowLow = ZSTD_getLowestMatchIndex(ms, current, cParams->windowLog); U32 const matchLow = windowLow ? 
windowLow : 1; U32* smallerPtr = bt + 2*(current&btMask); U32* largerPtr = bt + 2*(current&btMask) + 1; U32 matchEndIdx = current+8+1; /* farthest referenced position of any match => detects repetitive patterns */ U32 dummy32; /* to be nullified at the end */ U32 mnum = 0; U32 nbCompares = 1U << cParams->searchLog; const ZSTD_matchState_t* dms = dictMode == ZSTD_dictMatchState ? ms->dictMatchState : NULL; const ZSTD_compressionParameters* const dmsCParams = dictMode == ZSTD_dictMatchState ? &dms->cParams : NULL; const BYTE* const dmsBase = dictMode == ZSTD_dictMatchState ? dms->window.base : NULL; const BYTE* const dmsEnd = dictMode == ZSTD_dictMatchState ? dms->window.nextSrc : NULL; U32 const dmsHighLimit = dictMode == ZSTD_dictMatchState ? (U32)(dmsEnd - dmsBase) : 0; U32 const dmsLowLimit = dictMode == ZSTD_dictMatchState ? dms->window.lowLimit : 0; U32 const dmsIndexDelta = dictMode == ZSTD_dictMatchState ? windowLow - dmsHighLimit : 0; U32 const dmsHashLog = dictMode == ZSTD_dictMatchState ? dmsCParams->hashLog : hashLog; U32 const dmsBtLog = dictMode == ZSTD_dictMatchState ? dmsCParams->chainLog - 1 : btLog; U32 const dmsBtMask = dictMode == ZSTD_dictMatchState ? (1U << dmsBtLog) - 1 : 0; U32 const dmsBtLow = dictMode == ZSTD_dictMatchState && dmsBtMask < dmsHighLimit - dmsLowLimit ? dmsHighLimit - dmsBtMask : dmsLowLimit; size_t bestLength = lengthToBeat-1; DEBUGLOG(8, "ZSTD_insertBtAndGetAllMatches: current=%u", current); /* check repCode */ assert(ll0 <= 1); /* necessarily 1 or 0 */ { U32 const lastR = ZSTD_REP_NUM + ll0; U32 repCode; for (repCode = ll0; repCode < lastR; repCode++) { U32 const repOffset = (repCode==ZSTD_REP_NUM) ? (rep[0] - 1) : rep[repCode]; U32 const repIndex = current - repOffset; U32 repLen = 0; assert(current >= dictLimit); if (repOffset-1 /* intentional overflow, discards 0 and -1 */ < current-dictLimit) { /* equivalent to `current > repIndex >= dictLimit` */ /* We must validate the repcode offset because when we're using a dictionary the * valid offset range shrinks when the dictionary goes out of bounds. */ if ((repIndex >= windowLow) & (ZSTD_readMINMATCH(ip, minMatch) == ZSTD_readMINMATCH(ip - repOffset, minMatch))) { repLen = (U32)ZSTD_count(ip+minMatch, ip+minMatch-repOffset, iLimit) + minMatch; } } else { /* repIndex < dictLimit || repIndex >= current */ const BYTE* const repMatch = dictMode == ZSTD_dictMatchState ? 
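/* repIndex is not within the current prefix : the candidate, if valid, lives either in
 * the dictMatchState window (dms) or in the extDict segment, so pick the matching base */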
dmsBase + repIndex - dmsIndexDelta : dictBase + repIndex; assert(current >= windowLow); if ( dictMode == ZSTD_extDict && ( ((repOffset-1) /*intentional overflow*/ < current - windowLow) /* equivalent to `current > repIndex >= windowLow` */ & (((U32)((dictLimit-1) - repIndex) >= 3) ) /* intentional overflow : do not test positions overlapping 2 memory segments */) && (ZSTD_readMINMATCH(ip, minMatch) == ZSTD_readMINMATCH(repMatch, minMatch)) ) { repLen = (U32)ZSTD_count_2segments(ip+minMatch, repMatch+minMatch, iLimit, dictEnd, prefixStart) + minMatch; } if (dictMode == ZSTD_dictMatchState && ( ((repOffset-1) /*intentional overflow*/ < current - (dmsLowLimit + dmsIndexDelta)) /* equivalent to `current > repIndex >= dmsLowLimit` */ & ((U32)((dictLimit-1) - repIndex) >= 3) ) /* intentional overflow : do not test positions overlapping 2 memory segments */ && (ZSTD_readMINMATCH(ip, minMatch) == ZSTD_readMINMATCH(repMatch, minMatch)) ) { repLen = (U32)ZSTD_count_2segments(ip+minMatch, repMatch+minMatch, iLimit, dmsEnd, prefixStart) + minMatch; } } /* save longer solution */ if (repLen > bestLength) { DEBUGLOG(8, "found repCode %u (ll0:%u, offset:%u) of length %u", repCode, ll0, repOffset, repLen); bestLength = repLen; matches[mnum].off = repCode - ll0; matches[mnum].len = (U32)repLen; mnum++; if ( (repLen > sufficient_len) | (ip+repLen == iLimit) ) { /* best possible */ return mnum; } } } } /* HC3 match finder */ if ((mls == 3) /*static*/ && (bestLength < mls)) { U32 const matchIndex3 = ZSTD_insertAndFindFirstIndexHash3(ms, nextToUpdate3, ip); if ((matchIndex3 >= matchLow) & (current - matchIndex3 < (1<<18)) /*heuristic : longer distance likely too expensive*/ ) { size_t mlen; if ((dictMode == ZSTD_noDict) /*static*/ || (dictMode == ZSTD_dictMatchState) /*static*/ || (matchIndex3 >= dictLimit)) { const BYTE* const match = base + matchIndex3; mlen = ZSTD_count(ip, match, iLimit); } else { const BYTE* const match = dictBase + matchIndex3; mlen = ZSTD_count_2segments(ip, match, iLimit, dictEnd, prefixStart); } /* save best solution */ if (mlen >= mls /* == 3 > bestLength */) { DEBUGLOG(8, "found small match with hlog3, of length %u", (U32)mlen); bestLength = mlen; assert(current > matchIndex3); assert(mnum==0); /* no prior solution */ matches[0].off = (current - matchIndex3) + ZSTD_REP_MOVE; matches[0].len = (U32)mlen; mnum = 1; if ( (mlen > sufficient_len) | (ip+mlen == iLimit) ) { /* best possible length */ ms->nextToUpdate = current+1; /* skip insertion */ return 1; } } } /* no dictMatchState lookup: dicts don't have a populated HC3 table */ } hashTable[h] = current; /* Update Hash Table */ while (nbCompares-- && (matchIndex >= matchLow)) { U32* const nextPtr = bt + 2*(matchIndex & btMask); const BYTE* match; size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger); /* guaranteed minimum nb of common bytes */ assert(current > matchIndex); if ((dictMode == ZSTD_noDict) || (dictMode == ZSTD_dictMatchState) || (matchIndex+matchLength >= dictLimit)) { assert(matchIndex+matchLength >= dictLimit); /* ensure the condition is correct when !extDict */ match = base + matchIndex; if (matchIndex >= dictLimit) assert(memcmp(match, ip, matchLength) == 0); /* ensure early section of match is equal as expected */ matchLength += ZSTD_count(ip+matchLength, match+matchLength, iLimit); } else { match = dictBase + matchIndex; assert(memcmp(match, ip, matchLength) == 0); /* ensure early section of match is equal as expected */ matchLength += ZSTD_count_2segments(ip+matchLength, match+matchLength, iLimit, 
dictEnd, prefixStart); if (matchIndex+matchLength >= dictLimit) match = base + matchIndex; /* prepare for match[matchLength] read */ } if (matchLength > bestLength) { DEBUGLOG(8, "found match of length %u at distance %u (offCode=%u)", (U32)matchLength, current - matchIndex, current - matchIndex + ZSTD_REP_MOVE); assert(matchEndIdx > matchIndex); if (matchLength > matchEndIdx - matchIndex) matchEndIdx = matchIndex + (U32)matchLength; bestLength = matchLength; matches[mnum].off = (current - matchIndex) + ZSTD_REP_MOVE; matches[mnum].len = (U32)matchLength; mnum++; if ( (matchLength > ZSTD_OPT_NUM) | (ip+matchLength == iLimit) /* equal : no way to know if inf or sup */) { if (dictMode == ZSTD_dictMatchState) nbCompares = 0; /* break should also skip searching dms */ break; /* drop, to preserve bt consistency (miss a little bit of compression) */ } } if (match[matchLength] < ip[matchLength]) { /* match smaller than current */ *smallerPtr = matchIndex; /* update smaller idx */ commonLengthSmaller = matchLength; /* all smaller will now have at least this guaranteed common length */ if (matchIndex <= btLow) { smallerPtr=&dummy32; break; } /* beyond tree size, stop the search */ smallerPtr = nextPtr+1; /* new candidate => larger than match, which was smaller than current */ matchIndex = nextPtr[1]; /* new matchIndex, larger than previous, closer to current */ } else { *largerPtr = matchIndex; commonLengthLarger = matchLength; if (matchIndex <= btLow) { largerPtr=&dummy32; break; } /* beyond tree size, stop the search */ largerPtr = nextPtr; matchIndex = nextPtr[0]; } } *smallerPtr = *largerPtr = 0; if (dictMode == ZSTD_dictMatchState && nbCompares) { size_t const dmsH = ZSTD_hashPtr(ip, dmsHashLog, mls); U32 dictMatchIndex = dms->hashTable[dmsH]; const U32* const dmsBt = dms->chainTable; commonLengthSmaller = commonLengthLarger = 0; while (nbCompares-- && (dictMatchIndex > dmsLowLimit)) { const U32* const nextPtr = dmsBt + 2*(dictMatchIndex & dmsBtMask); size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger); /* guaranteed minimum nb of common bytes */ const BYTE* match = dmsBase + dictMatchIndex; matchLength += ZSTD_count_2segments(ip+matchLength, match+matchLength, iLimit, dmsEnd, prefixStart); if (dictMatchIndex+matchLength >= dmsHighLimit) match = base + dictMatchIndex + dmsIndexDelta; /* to prepare for next usage of match[matchLength] */ if (matchLength > bestLength) { matchIndex = dictMatchIndex + dmsIndexDelta; DEBUGLOG(8, "found dms match of length %u at distance %u (offCode=%u)", (U32)matchLength, current - matchIndex, current - matchIndex + ZSTD_REP_MOVE); if (matchLength > matchEndIdx - matchIndex) matchEndIdx = matchIndex + (U32)matchLength; bestLength = matchLength; matches[mnum].off = (current - matchIndex) + ZSTD_REP_MOVE; matches[mnum].len = (U32)matchLength; mnum++; if ( (matchLength > ZSTD_OPT_NUM) | (ip+matchLength == iLimit) /* equal : no way to know if inf or sup */) { break; /* drop, to guarantee consistency (miss a little bit of compression) */ } } if (dictMatchIndex <= dmsBtLow) { break; } /* beyond tree size, stop the search */ if (match[matchLength] < ip[matchLength]) { commonLengthSmaller = matchLength; /* all smaller will now have at least this guaranteed common length */ dictMatchIndex = nextPtr[1]; /* new matchIndex larger than previous (closer to current) */ } else { /* match is larger than current */ commonLengthLarger = matchLength; dictMatchIndex = nextPtr[0]; } } } assert(matchEndIdx > current+8); ms->nextToUpdate = matchEndIdx - 8; /* skip repetitive 
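   byte runs: matchEndIdx tracks the farthest position referenced by any match
   found above, so resuming tree updates at matchEndIdx-8 avoids individually
   inserting every position inside such self-overlapping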
patterns */ return mnum; } FORCE_INLINE_TEMPLATE U32 ZSTD_BtGetAllMatches ( ZSTD_match_t* matches, /* store result (match found, increasing size) in this table */ ZSTD_matchState_t* ms, U32* nextToUpdate3, const BYTE* ip, const BYTE* const iHighLimit, const ZSTD_dictMode_e dictMode, const U32 rep[ZSTD_REP_NUM], U32 const ll0, U32 const lengthToBeat) { const ZSTD_compressionParameters* const cParams = &ms->cParams; U32 const matchLengthSearch = cParams->minMatch; DEBUGLOG(8, "ZSTD_BtGetAllMatches"); if (ip < ms->window.base + ms->nextToUpdate) return 0; /* skipped area */ ZSTD_updateTree_internal(ms, ip, iHighLimit, matchLengthSearch, dictMode); switch(matchLengthSearch) { case 3 : return ZSTD_insertBtAndGetAllMatches(matches, ms, nextToUpdate3, ip, iHighLimit, dictMode, rep, ll0, lengthToBeat, 3); default : case 4 : return ZSTD_insertBtAndGetAllMatches(matches, ms, nextToUpdate3, ip, iHighLimit, dictMode, rep, ll0, lengthToBeat, 4); case 5 : return ZSTD_insertBtAndGetAllMatches(matches, ms, nextToUpdate3, ip, iHighLimit, dictMode, rep, ll0, lengthToBeat, 5); case 7 : case 6 : return ZSTD_insertBtAndGetAllMatches(matches, ms, nextToUpdate3, ip, iHighLimit, dictMode, rep, ll0, lengthToBeat, 6); } } /*-******************************* * Optimal parser *********************************/ static U32 ZSTD_totalLen(ZSTD_optimal_t sol) { return sol.litlen + sol.mlen; } #if 0 /* debug */ static void listStats(const U32* table, int lastEltID) { int const nbElts = lastEltID + 1; int enb; for (enb=0; enb < nbElts; enb++) { (void)table; /* RAWLOG(2, "%3i:%3i, ", enb, table[enb]); */ RAWLOG(2, "%4i,", table[enb]); } RAWLOG(2, " \n"); } #endif FORCE_INLINE_TEMPLATE size_t ZSTD_compressBlock_opt_generic(ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], const void* src, size_t srcSize, const int optLevel, const ZSTD_dictMode_e dictMode) { optState_t* const optStatePtr = &ms->opt; const BYTE* const istart = (const BYTE*)src; const BYTE* ip = istart; const BYTE* anchor = istart; const BYTE* const iend = istart + srcSize; const BYTE* const ilimit = iend - 8; const BYTE* const base = ms->window.base; const BYTE* const prefixStart = base + ms->window.dictLimit; const ZSTD_compressionParameters* const cParams = &ms->cParams; U32 const sufficient_len = MIN(cParams->targetLength, ZSTD_OPT_NUM -1); U32 const minMatch = (cParams->minMatch == 3) ? 
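/* overview of the parser below: each outer iteration asks
   ZSTD_BtGetAllMatches() for all match candidates at the current position,
   fills the price table opt[0..last_pos] with the cheapest known way to reach
   each future position, then walks that table backwards from the best end
   point and emits the selected sequences through ZSTD_storeSeq() */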
3 : 4; U32 nextToUpdate3 = ms->nextToUpdate; ZSTD_optimal_t* const opt = optStatePtr->priceTable; ZSTD_match_t* const matches = optStatePtr->matchTable; ZSTD_optimal_t lastSequence; /* init */ DEBUGLOG(5, "ZSTD_compressBlock_opt_generic: current=%u, prefix=%u, nextToUpdate=%u", (U32)(ip - base), ms->window.dictLimit, ms->nextToUpdate); assert(optLevel <= 2); ZSTD_rescaleFreqs(optStatePtr, (const BYTE*)src, srcSize, optLevel); ip += (ip==prefixStart); /* Match Loop */ while (ip < ilimit) { U32 cur, last_pos = 0; /* find first match */ { U32 const litlen = (U32)(ip - anchor); U32 const ll0 = !litlen; U32 const nbMatches = ZSTD_BtGetAllMatches(matches, ms, &nextToUpdate3, ip, iend, dictMode, rep, ll0, minMatch); if (!nbMatches) { ip++; continue; } /* initialize opt[0] */ { U32 i ; for (i=0; i immediate encoding */ { U32 const maxML = matches[nbMatches-1].len; U32 const maxOffset = matches[nbMatches-1].off; DEBUGLOG(6, "found %u matches of maxLength=%u and maxOffCode=%u at cPos=%u => start new series", nbMatches, maxML, maxOffset, (U32)(ip-prefixStart)); if (maxML > sufficient_len) { lastSequence.litlen = litlen; lastSequence.mlen = maxML; lastSequence.off = maxOffset; DEBUGLOG(6, "large match (%u>%u), immediate encoding", maxML, sufficient_len); cur = 0; last_pos = ZSTD_totalLen(lastSequence); goto _shortestPath; } } /* set prices for first matches starting position == 0 */ { U32 const literalsPrice = opt[0].price + ZSTD_litLengthPrice(0, optStatePtr, optLevel); U32 pos; U32 matchNb; for (pos = 1; pos < minMatch; pos++) { opt[pos].price = ZSTD_MAX_PRICE; /* mlen, litlen and price will be fixed during forward scanning */ } for (matchNb = 0; matchNb < nbMatches; matchNb++) { U32 const offset = matches[matchNb].off; U32 const end = matches[matchNb].len; for ( ; pos <= end ; pos++ ) { U32 const matchPrice = ZSTD_getMatchPrice(offset, pos, optStatePtr, optLevel); U32 const sequencePrice = literalsPrice + matchPrice; DEBUGLOG(7, "rPos:%u => set initial price : %.2f", pos, ZSTD_fCost(sequencePrice)); opt[pos].mlen = pos; opt[pos].off = offset; opt[pos].litlen = litlen; opt[pos].price = sequencePrice; } } last_pos = pos-1; } } /* check further positions */ for (cur = 1; cur <= last_pos; cur++) { const BYTE* const inr = ip + cur; assert(cur < ZSTD_OPT_NUM); DEBUGLOG(7, "cPos:%zi==rPos:%u", inr-istart, cur) /* Fix current position with one literal if cheaper */ { U32 const litlen = (opt[cur-1].mlen == 0) ? opt[cur-1].litlen + 1 : 1; int const price = opt[cur-1].price + ZSTD_rawLiteralsCost(ip+cur-1, 1, optStatePtr, optLevel) + ZSTD_litLengthPrice(litlen, optStatePtr, optLevel) - ZSTD_litLengthPrice(litlen-1, optStatePtr, optLevel); assert(price < 1000000000); /* overflow check */ if (price <= opt[cur].price) { DEBUGLOG(7, "cPos:%zi==rPos:%u : better price (%.2f<=%.2f) using literal (ll==%u) (hist:%u,%u,%u)", inr-istart, cur, ZSTD_fCost(price), ZSTD_fCost(opt[cur].price), litlen, opt[cur-1].rep[0], opt[cur-1].rep[1], opt[cur-1].rep[2]); opt[cur].mlen = 0; opt[cur].off = 0; opt[cur].litlen = litlen; opt[cur].price = price; } else { DEBUGLOG(7, "cPos:%zi==rPos:%u : literal would cost more (%.2f>%.2f) (hist:%u,%u,%u)", inr-istart, cur, ZSTD_fCost(price), ZSTD_fCost(opt[cur].price), opt[cur].rep[0], opt[cur].rep[1], opt[cur].rep[2]); } } /* Set the repcodes of the current position. We must do it here * because we rely on the repcodes of the 2nd to last sequence being * correct to set the next chunks repcodes during the backward * traversal. 
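 * As used just below, ZSTD_updateRep() roughly does the following: a real
 * offset (stored as distance + ZSTD_REP_MOVE) pushes the history to
 * { distance, rep[0], rep[1] }, while a repcode only rotates the entry it
 * selects to the front, with the ll0 flag shifting which entry is selected.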
*/ ZSTD_STATIC_ASSERT(sizeof(opt[cur].rep) == sizeof(repcodes_t)); assert(cur >= opt[cur].mlen); if (opt[cur].mlen != 0) { U32 const prev = cur - opt[cur].mlen; repcodes_t newReps = ZSTD_updateRep(opt[prev].rep, opt[cur].off, opt[cur].litlen==0); memcpy(opt[cur].rep, &newReps, sizeof(repcodes_t)); } else { memcpy(opt[cur].rep, opt[cur - 1].rep, sizeof(repcodes_t)); } /* last match must start at a minimum distance of 8 from oend */ if (inr > ilimit) continue; if (cur == last_pos) break; if ( (optLevel==0) /*static_test*/ && (opt[cur+1].price <= opt[cur].price + (BITCOST_MULTIPLIER/2)) ) { DEBUGLOG(7, "move to next rPos:%u : price is <=", cur+1); continue; /* skip unpromising positions; about ~+6% speed, -0.01 ratio */ } { U32 const ll0 = (opt[cur].mlen != 0); U32 const litlen = (opt[cur].mlen == 0) ? opt[cur].litlen : 0; U32 const previousPrice = opt[cur].price; U32 const basePrice = previousPrice + ZSTD_litLengthPrice(0, optStatePtr, optLevel); U32 const nbMatches = ZSTD_BtGetAllMatches(matches, ms, &nextToUpdate3, inr, iend, dictMode, opt[cur].rep, ll0, minMatch); U32 matchNb; if (!nbMatches) { DEBUGLOG(7, "rPos:%u : no match found", cur); continue; } { U32 const maxML = matches[nbMatches-1].len; DEBUGLOG(7, "cPos:%zi==rPos:%u, found %u matches, of maxLength=%u", inr-istart, cur, nbMatches, maxML); if ( (maxML > sufficient_len) || (cur + maxML >= ZSTD_OPT_NUM) ) { lastSequence.mlen = maxML; lastSequence.off = matches[nbMatches-1].off; lastSequence.litlen = litlen; cur -= (opt[cur].mlen==0) ? opt[cur].litlen : 0; /* last sequence is actually only literals, fix cur to last match - note : may underflow, in which case, it's first sequence, and it's okay */ last_pos = cur + ZSTD_totalLen(lastSequence); if (cur > ZSTD_OPT_NUM) cur = 0; /* underflow => first match */ goto _shortestPath; } } /* set prices using matches found at position == cur */ for (matchNb = 0; matchNb < nbMatches; matchNb++) { U32 const offset = matches[matchNb].off; U32 const lastML = matches[matchNb].len; U32 const startML = (matchNb>0) ? matches[matchNb-1].len+1 : minMatch; U32 mlen; DEBUGLOG(7, "testing match %u => offCode=%4u, mlen=%2u, llen=%2u", matchNb, matches[matchNb].off, lastML, litlen); for (mlen = lastML; mlen >= startML; mlen--) { /* scan downward */ U32 const pos = cur + mlen; int const price = basePrice + ZSTD_getMatchPrice(offset, mlen, optStatePtr, optLevel); if ((pos > last_pos) || (price < opt[pos].price)) { DEBUGLOG(7, "rPos:%u (ml=%2u) => new better price (%.2f<%.2f)", pos, mlen, ZSTD_fCost(price), ZSTD_fCost(opt[pos].price)); while (last_pos < pos) { opt[last_pos+1].price = ZSTD_MAX_PRICE; last_pos++; } /* fill empty positions */ opt[pos].mlen = mlen; opt[pos].off = offset; opt[pos].litlen = litlen; opt[pos].price = price; } else { DEBUGLOG(7, "rPos:%u (ml=%2u) => new price is worse (%.2f>=%.2f)", pos, mlen, ZSTD_fCost(price), ZSTD_fCost(opt[pos].price)); if (optLevel==0) break; /* early update abort; gets ~+10% speed for about -0.01 ratio loss */ } } } } } /* for (cur = 1; cur <= last_pos; cur++) */ lastSequence = opt[last_pos]; cur = last_pos > ZSTD_totalLen(lastSequence) ? last_pos - ZSTD_totalLen(lastSequence) : 0; /* single sequence, and it starts before `ip` */ assert(cur < ZSTD_OPT_NUM); /* control overflow*/ _shortestPath: /* cur, last_pos, best_mlen, best_off have to be set */ assert(opt[0].mlen == 0); /* Set the next chunk's repcodes based on the repcodes of the beginning * of the last match, and the last sequence. This avoids us having to * update them while traversing the sequences. 
*/ if (lastSequence.mlen != 0) { repcodes_t reps = ZSTD_updateRep(opt[cur].rep, lastSequence.off, lastSequence.litlen==0); memcpy(rep, &reps, sizeof(reps)); } else { memcpy(rep, opt[cur].rep, sizeof(repcodes_t)); } { U32 const storeEnd = cur + 1; U32 storeStart = storeEnd; U32 seqPos = cur; DEBUGLOG(6, "start reverse traversal (last_pos:%u, cur:%u)", last_pos, cur); (void)last_pos; assert(storeEnd < ZSTD_OPT_NUM); DEBUGLOG(6, "last sequence copied into pos=%u (llen=%u,mlen=%u,ofc=%u)", storeEnd, lastSequence.litlen, lastSequence.mlen, lastSequence.off); opt[storeEnd] = lastSequence; while (seqPos > 0) { U32 const backDist = ZSTD_totalLen(opt[seqPos]); storeStart--; DEBUGLOG(6, "sequence from rPos=%u copied into pos=%u (llen=%u,mlen=%u,ofc=%u)", seqPos, storeStart, opt[seqPos].litlen, opt[seqPos].mlen, opt[seqPos].off); opt[storeStart] = opt[seqPos]; seqPos = (seqPos > backDist) ? seqPos - backDist : 0; } /* save sequences */ DEBUGLOG(6, "sending selected sequences into seqStore") { U32 storePos; for (storePos=storeStart; storePos <= storeEnd; storePos++) { U32 const llen = opt[storePos].litlen; U32 const mlen = opt[storePos].mlen; U32 const offCode = opt[storePos].off; U32 const advance = llen + mlen; DEBUGLOG(6, "considering seq starting at %zi, llen=%u, mlen=%u", anchor - istart, (unsigned)llen, (unsigned)mlen); if (mlen==0) { /* only literals => must be last "sequence", actually starting a new stream of sequences */ assert(storePos == storeEnd); /* must be last sequence */ ip = anchor + llen; /* last "sequence" is a bunch of literals => don't progress anchor */ continue; /* will finish */ } assert(anchor + llen <= iend); ZSTD_updateStats(optStatePtr, llen, anchor, offCode, mlen); ZSTD_storeSeq(seqStore, llen, anchor, iend, offCode, mlen-MINMATCH); anchor += advance; ip = anchor; } } ZSTD_setBasePrices(optStatePtr, optLevel); } } /* while (ip < ilimit) */ /* Return the last literals size */ return (size_t)(iend - anchor); } size_t ZSTD_compressBlock_btopt( ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], const void* src, size_t srcSize) { DEBUGLOG(5, "ZSTD_compressBlock_btopt"); return ZSTD_compressBlock_opt_generic(ms, seqStore, rep, src, srcSize, 0 /*optLevel*/, ZSTD_noDict); } /* used in 2-pass strategy */ static U32 ZSTD_upscaleStat(unsigned* table, U32 lastEltIndex, int bonus) { U32 s, sum=0; assert(ZSTD_FREQ_DIV+bonus >= 0); for (s=0; slitSum = ZSTD_upscaleStat(optPtr->litFreq, MaxLit, 0); optPtr->litLengthSum = ZSTD_upscaleStat(optPtr->litLengthFreq, MaxLL, 0); optPtr->matchLengthSum = ZSTD_upscaleStat(optPtr->matchLengthFreq, MaxML, 0); optPtr->offCodeSum = ZSTD_upscaleStat(optPtr->offCodeFreq, MaxOff, 0); } /* ZSTD_initStats_ultra(): * make a first compression pass, just to seed stats with more accurate starting values. * only works on first block, with no dictionary and no ldm. * this function cannot error, hence its contract must be respected. 
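 * After the seeding pass (see the body below), the seqStore is reset and the
 * window indices are shifted (base -= srcSize; dictLimit, lowLimit and
 * nextToUpdate advanced accordingly), so the trial pass is effectively
 * invalidated and leaves no usable match history behind; only the collected
 * statistics, re-weighted by ZSTD_upscaleStats(), carry over into the real
 * compression pass.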
*/ static void ZSTD_initStats_ultra(ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], const void* src, size_t srcSize) { U32 tmpRep[ZSTD_REP_NUM]; /* updated rep codes will sink here */ memcpy(tmpRep, rep, sizeof(tmpRep)); DEBUGLOG(4, "ZSTD_initStats_ultra (srcSize=%zu)", srcSize); assert(ms->opt.litLengthSum == 0); /* first block */ assert(seqStore->sequences == seqStore->sequencesStart); /* no ldm */ assert(ms->window.dictLimit == ms->window.lowLimit); /* no dictionary */ assert(ms->window.dictLimit - ms->nextToUpdate <= 1); /* no prefix (note: intentional overflow, defined as 2-complement) */ ZSTD_compressBlock_opt_generic(ms, seqStore, tmpRep, src, srcSize, 2 /*optLevel*/, ZSTD_noDict); /* generate stats into ms->opt*/ /* invalidate first scan from history */ ZSTD_resetSeqStore(seqStore); ms->window.base -= srcSize; ms->window.dictLimit += (U32)srcSize; ms->window.lowLimit = ms->window.dictLimit; ms->nextToUpdate = ms->window.dictLimit; /* re-inforce weight of collected statistics */ ZSTD_upscaleStats(&ms->opt); } size_t ZSTD_compressBlock_btultra( ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], const void* src, size_t srcSize) { DEBUGLOG(5, "ZSTD_compressBlock_btultra (srcSize=%zu)", srcSize); return ZSTD_compressBlock_opt_generic(ms, seqStore, rep, src, srcSize, 2 /*optLevel*/, ZSTD_noDict); } size_t ZSTD_compressBlock_btultra2( ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], const void* src, size_t srcSize) { U32 const current = (U32)((const BYTE*)src - ms->window.base); DEBUGLOG(5, "ZSTD_compressBlock_btultra2 (srcSize=%zu)", srcSize); /* 2-pass strategy: * this strategy makes a first pass over first block to collect statistics * and seed next round's statistics with it. * After 1st pass, function forgets everything, and starts a new block. * Consequently, this can only work if no data has been previously loaded in tables, * aka, no dictionary, no prefix, no ldm preprocessing. * The compression ratio gain is generally small (~0.5% on first block), * the cost is 2x cpu time on first block. 
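 * The gain comes from pricing: the optimal parser costs sequences from
 * frequency statistics, and on a very first block those statistics would
 * otherwise start from generic defaults rather than from the data itself,
 * which is why the guards below only enable the extra pass when nothing has
 * been loaded yet and the block is reasonably large.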
*/ assert(srcSize <= ZSTD_BLOCKSIZE_MAX); if ( (ms->opt.litLengthSum==0) /* first block */ && (seqStore->sequences == seqStore->sequencesStart) /* no ldm */ && (ms->window.dictLimit == ms->window.lowLimit) /* no dictionary */ && (current == ms->window.dictLimit) /* start of frame, nothing already loaded nor skipped */ && (srcSize > ZSTD_PREDEF_THRESHOLD) ) { ZSTD_initStats_ultra(ms, seqStore, rep, src, srcSize); } return ZSTD_compressBlock_opt_generic(ms, seqStore, rep, src, srcSize, 2 /*optLevel*/, ZSTD_noDict); } size_t ZSTD_compressBlock_btopt_dictMatchState( ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], const void* src, size_t srcSize) { return ZSTD_compressBlock_opt_generic(ms, seqStore, rep, src, srcSize, 0 /*optLevel*/, ZSTD_dictMatchState); } size_t ZSTD_compressBlock_btultra_dictMatchState( ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], const void* src, size_t srcSize) { return ZSTD_compressBlock_opt_generic(ms, seqStore, rep, src, srcSize, 2 /*optLevel*/, ZSTD_dictMatchState); } size_t ZSTD_compressBlock_btopt_extDict( ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], const void* src, size_t srcSize) { return ZSTD_compressBlock_opt_generic(ms, seqStore, rep, src, srcSize, 0 /*optLevel*/, ZSTD_extDict); } size_t ZSTD_compressBlock_btultra_extDict( ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], const void* src, size_t srcSize) { return ZSTD_compressBlock_opt_generic(ms, seqStore, rep, src, srcSize, 2 /*optLevel*/, ZSTD_extDict); } /* note : no btultra2 variant for extDict nor dictMatchState, * because btultra2 is not meant to work with dictionaries * and is only specific for the first block (no prefix) */ borgbackup-1.1.15/src/borg/algorithms/zstd/lib/compress/zstd_lazy.h0000644000175000017500000000526613771325506025370 0ustar useruser00000000000000/* * Copyright (c) 2016-2020, Yann Collet, Facebook, Inc. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the * LICENSE file in the root directory of this source tree) and the GPLv2 (found * in the COPYING file in the root directory of this source tree). * You may select, at your option, one of the above-listed licenses. */ #ifndef ZSTD_LAZY_H #define ZSTD_LAZY_H #if defined (__cplusplus) extern "C" { #endif #include "zstd_compress_internal.h" U32 ZSTD_insertAndFindFirstIndex(ZSTD_matchState_t* ms, const BYTE* ip); void ZSTD_preserveUnsortedMark (U32* const table, U32 const size, U32 const reducerValue); /*! used in ZSTD_reduceIndex(). 
preemptively increase value of ZSTD_DUBT_UNSORTED_MARK */ size_t ZSTD_compressBlock_btlazy2( ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize); size_t ZSTD_compressBlock_lazy2( ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize); size_t ZSTD_compressBlock_lazy( ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize); size_t ZSTD_compressBlock_greedy( ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize); size_t ZSTD_compressBlock_btlazy2_dictMatchState( ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize); size_t ZSTD_compressBlock_lazy2_dictMatchState( ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize); size_t ZSTD_compressBlock_lazy_dictMatchState( ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize); size_t ZSTD_compressBlock_greedy_dictMatchState( ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize); size_t ZSTD_compressBlock_greedy_extDict( ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize); size_t ZSTD_compressBlock_lazy_extDict( ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize); size_t ZSTD_compressBlock_lazy2_extDict( ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize); size_t ZSTD_compressBlock_btlazy2_extDict( ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize); #if defined (__cplusplus) } #endif #endif /* ZSTD_LAZY_H */ borgbackup-1.1.15/src/borg/algorithms/zstd/lib/compress/zstd_compress_literals.h0000644000175000017500000000230513771325506030132 0ustar useruser00000000000000/* * Copyright (c) 2016-2020, Yann Collet, Facebook, Inc. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the * LICENSE file in the root directory of this source tree) and the GPLv2 (found * in the COPYING file in the root directory of this source tree). * You may select, at your option, one of the above-listed licenses. */ #ifndef ZSTD_COMPRESS_LITERALS_H #define ZSTD_COMPRESS_LITERALS_H #include "zstd_compress_internal.h" /* ZSTD_hufCTables_t, ZSTD_minGain() */ size_t ZSTD_noCompressLiterals (void* dst, size_t dstCapacity, const void* src, size_t srcSize); size_t ZSTD_compressRleLiteralsBlock (void* dst, size_t dstCapacity, const void* src, size_t srcSize); size_t ZSTD_compressLiterals (ZSTD_hufCTables_t const* prevHuf, ZSTD_hufCTables_t* nextHuf, ZSTD_strategy strategy, int disableLiteralCompression, void* dst, size_t dstCapacity, const void* src, size_t srcSize, void* entropyWorkspace, size_t entropyWorkspaceSize, const int bmi2); #endif /* ZSTD_COMPRESS_LITERALS_H */ borgbackup-1.1.15/src/borg/algorithms/zstd/lib/compress/hist.c0000644000175000017500000001652313771325506024305 0ustar useruser00000000000000/* ****************************************************************** * hist : Histogram functions * part of Finite State Entropy project * Copyright (c) 2013-2020, Yann Collet, Facebook, Inc. 
* * You can contact the author at : * - FSE source repository : https://github.com/Cyan4973/FiniteStateEntropy * - Public forum : https://groups.google.com/forum/#!forum/lz4c * * This source code is licensed under both the BSD-style license (found in the * LICENSE file in the root directory of this source tree) and the GPLv2 (found * in the COPYING file in the root directory of this source tree). * You may select, at your option, one of the above-listed licenses. ****************************************************************** */ /* --- dependencies --- */ #include "../common/mem.h" /* U32, BYTE, etc. */ #include "../common/debug.h" /* assert, DEBUGLOG */ #include "../common/error_private.h" /* ERROR */ #include "hist.h" /* --- Error management --- */ unsigned HIST_isError(size_t code) { return ERR_isError(code); } /*-************************************************************** * Histogram functions ****************************************************************/ unsigned HIST_count_simple(unsigned* count, unsigned* maxSymbolValuePtr, const void* src, size_t srcSize) { const BYTE* ip = (const BYTE*)src; const BYTE* const end = ip + srcSize; unsigned maxSymbolValue = *maxSymbolValuePtr; unsigned largestCount=0; memset(count, 0, (maxSymbolValue+1) * sizeof(*count)); if (srcSize==0) { *maxSymbolValuePtr = 0; return 0; } while (ip largestCount) largestCount = count[s]; } return largestCount; } typedef enum { trustInput, checkMaxSymbolValue } HIST_checkInput_e; /* HIST_count_parallel_wksp() : * store histogram into 4 intermediate tables, recombined at the end. * this design makes better use of OoO cpus, * and is noticeably faster when some values are heavily repeated. * But it needs some additional workspace for intermediate tables. * `workSpace` size must be a table of size >= HIST_WKSP_SIZE_U32. * @return : largest histogram frequency, * or an error code (notably when histogram would be larger than *maxSymbolValuePtr). 
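 * In sketch, the hot loop below reads 4 bytes at a time and spreads them
 * over the four tables:
 *     U32 c = cached; cached = MEM_read32(ip); ip += 4;
 *     Counting1[(BYTE) c      ]++;
 *     Counting2[(BYTE)(c>> 8) ]++;
 *     Counting3[(BYTE)(c>>16) ]++;
 *     Counting4[        c>>24 ]++;
 * Four separate tables avoid the store-to-load dependency that a single
 * count[] would create whenever neighbouring bytes are equal; the tables are
 * summed into count[] at the end.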
*/ static size_t HIST_count_parallel_wksp( unsigned* count, unsigned* maxSymbolValuePtr, const void* source, size_t sourceSize, HIST_checkInput_e check, U32* const workSpace) { const BYTE* ip = (const BYTE*)source; const BYTE* const iend = ip+sourceSize; unsigned maxSymbolValue = *maxSymbolValuePtr; unsigned max=0; U32* const Counting1 = workSpace; U32* const Counting2 = Counting1 + 256; U32* const Counting3 = Counting2 + 256; U32* const Counting4 = Counting3 + 256; memset(workSpace, 0, 4*256*sizeof(unsigned)); /* safety checks */ if (!sourceSize) { memset(count, 0, maxSymbolValue + 1); *maxSymbolValuePtr = 0; return 0; } if (!maxSymbolValue) maxSymbolValue = 255; /* 0 == default */ /* by stripes of 16 bytes */ { U32 cached = MEM_read32(ip); ip += 4; while (ip < iend-15) { U32 c = cached; cached = MEM_read32(ip); ip += 4; Counting1[(BYTE) c ]++; Counting2[(BYTE)(c>>8) ]++; Counting3[(BYTE)(c>>16)]++; Counting4[ c>>24 ]++; c = cached; cached = MEM_read32(ip); ip += 4; Counting1[(BYTE) c ]++; Counting2[(BYTE)(c>>8) ]++; Counting3[(BYTE)(c>>16)]++; Counting4[ c>>24 ]++; c = cached; cached = MEM_read32(ip); ip += 4; Counting1[(BYTE) c ]++; Counting2[(BYTE)(c>>8) ]++; Counting3[(BYTE)(c>>16)]++; Counting4[ c>>24 ]++; c = cached; cached = MEM_read32(ip); ip += 4; Counting1[(BYTE) c ]++; Counting2[(BYTE)(c>>8) ]++; Counting3[(BYTE)(c>>16)]++; Counting4[ c>>24 ]++; } ip-=4; } /* finish last symbols */ while (ipmaxSymbolValue; s--) { Counting1[s] += Counting2[s] + Counting3[s] + Counting4[s]; if (Counting1[s]) return ERROR(maxSymbolValue_tooSmall); } } { U32 s; if (maxSymbolValue > 255) maxSymbolValue = 255; for (s=0; s<=maxSymbolValue; s++) { count[s] = Counting1[s] + Counting2[s] + Counting3[s] + Counting4[s]; if (count[s] > max) max = count[s]; } } while (!count[maxSymbolValue]) maxSymbolValue--; *maxSymbolValuePtr = maxSymbolValue; return (size_t)max; } /* HIST_countFast_wksp() : * Same as HIST_countFast(), but using an externally provided scratch buffer. * `workSpace` is a writable buffer which must be 4-bytes aligned, * `workSpaceSize` must be >= HIST_WKSP_SIZE */ size_t HIST_countFast_wksp(unsigned* count, unsigned* maxSymbolValuePtr, const void* source, size_t sourceSize, void* workSpace, size_t workSpaceSize) { if (sourceSize < 1500) /* heuristic threshold */ return HIST_count_simple(count, maxSymbolValuePtr, source, sourceSize); if ((size_t)workSpace & 3) return ERROR(GENERIC); /* must be aligned on 4-bytes boundaries */ if (workSpaceSize < HIST_WKSP_SIZE) return ERROR(workSpace_tooSmall); return HIST_count_parallel_wksp(count, maxSymbolValuePtr, source, sourceSize, trustInput, (U32*)workSpace); } /* fast variant (unsafe : won't check if src contains values beyond count[] limit) */ size_t HIST_countFast(unsigned* count, unsigned* maxSymbolValuePtr, const void* source, size_t sourceSize) { unsigned tmpCounters[HIST_WKSP_SIZE_U32]; return HIST_countFast_wksp(count, maxSymbolValuePtr, source, sourceSize, tmpCounters, sizeof(tmpCounters)); } /* HIST_count_wksp() : * Same as HIST_count(), but using an externally provided scratch buffer. 
* `workSpace` size must be table of >= HIST_WKSP_SIZE_U32 unsigned */ size_t HIST_count_wksp(unsigned* count, unsigned* maxSymbolValuePtr, const void* source, size_t sourceSize, void* workSpace, size_t workSpaceSize) { if ((size_t)workSpace & 3) return ERROR(GENERIC); /* must be aligned on 4-bytes boundaries */ if (workSpaceSize < HIST_WKSP_SIZE) return ERROR(workSpace_tooSmall); if (*maxSymbolValuePtr < 255) return HIST_count_parallel_wksp(count, maxSymbolValuePtr, source, sourceSize, checkMaxSymbolValue, (U32*)workSpace); *maxSymbolValuePtr = 255; return HIST_countFast_wksp(count, maxSymbolValuePtr, source, sourceSize, workSpace, workSpaceSize); } size_t HIST_count(unsigned* count, unsigned* maxSymbolValuePtr, const void* src, size_t srcSize) { unsigned tmpCounters[HIST_WKSP_SIZE_U32]; return HIST_count_wksp(count, maxSymbolValuePtr, src, srcSize, tmpCounters, sizeof(tmpCounters)); } borgbackup-1.1.15/src/borg/algorithms/zstd/lib/compress/zstd_compress_literals.c0000644000175000017500000001415413771325506030132 0ustar useruser00000000000000/* * Copyright (c) 2016-2020, Yann Collet, Facebook, Inc. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the * LICENSE file in the root directory of this source tree) and the GPLv2 (found * in the COPYING file in the root directory of this source tree). * You may select, at your option, one of the above-listed licenses. */ /*-************************************* * Dependencies ***************************************/ #include "zstd_compress_literals.h" size_t ZSTD_noCompressLiterals (void* dst, size_t dstCapacity, const void* src, size_t srcSize) { BYTE* const ostart = (BYTE* const)dst; U32 const flSize = 1 + (srcSize>31) + (srcSize>4095); RETURN_ERROR_IF(srcSize + flSize > dstCapacity, dstSize_tooSmall, ""); switch(flSize) { case 1: /* 2 - 1 - 5 */ ostart[0] = (BYTE)((U32)set_basic + (srcSize<<3)); break; case 2: /* 2 - 2 - 12 */ MEM_writeLE16(ostart, (U16)((U32)set_basic + (1<<2) + (srcSize<<4))); break; case 3: /* 2 - 2 - 20 */ MEM_writeLE32(ostart, (U32)((U32)set_basic + (3<<2) + (srcSize<<4))); break; default: /* not necessary : flSize is {1,2,3} */ assert(0); } memcpy(ostart + flSize, src, srcSize); DEBUGLOG(5, "Raw literals: %u -> %u", (U32)srcSize, (U32)(srcSize + flSize)); return srcSize + flSize; } size_t ZSTD_compressRleLiteralsBlock (void* dst, size_t dstCapacity, const void* src, size_t srcSize) { BYTE* const ostart = (BYTE* const)dst; U32 const flSize = 1 + (srcSize>31) + (srcSize>4095); (void)dstCapacity; /* dstCapacity already guaranteed to be >=4, hence large enough */ switch(flSize) { case 1: /* 2 - 1 - 5 */ ostart[0] = (BYTE)((U32)set_rle + (srcSize<<3)); break; case 2: /* 2 - 2 - 12 */ MEM_writeLE16(ostart, (U16)((U32)set_rle + (1<<2) + (srcSize<<4))); break; case 3: /* 2 - 2 - 20 */ MEM_writeLE32(ostart, (U32)((U32)set_rle + (3<<2) + (srcSize<<4))); break; default: /* not necessary : flSize is {1,2,3} */ assert(0); } ostart[flSize] = *(const BYTE*)src; DEBUGLOG(5, "RLE literals: %u -> %u", (U32)srcSize, (U32)flSize + 1); return flSize+1; } size_t ZSTD_compressLiterals (ZSTD_hufCTables_t const* prevHuf, ZSTD_hufCTables_t* nextHuf, ZSTD_strategy strategy, int disableLiteralCompression, void* dst, size_t dstCapacity, const void* src, size_t srcSize, void* entropyWorkspace, size_t entropyWorkspaceSize, const int bmi2) { size_t const minGain = ZSTD_minGain(srcSize, strategy); size_t const lhSize = 3 + (srcSize >= 1 KB) + (srcSize >= 16 KB); BYTE* const ostart = (BYTE*)dst; U32 
singleStream = srcSize < 256; symbolEncodingType_e hType = set_compressed; size_t cLitSize; DEBUGLOG(5,"ZSTD_compressLiterals (disableLiteralCompression=%i srcSize=%u)", disableLiteralCompression, (U32)srcSize); /* Prepare nextEntropy assuming reusing the existing table */ memcpy(nextHuf, prevHuf, sizeof(*prevHuf)); if (disableLiteralCompression) return ZSTD_noCompressLiterals(dst, dstCapacity, src, srcSize); /* small ? don't even attempt compression (speed opt) */ # define COMPRESS_LITERALS_SIZE_MIN 63 { size_t const minLitSize = (prevHuf->repeatMode == HUF_repeat_valid) ? 6 : COMPRESS_LITERALS_SIZE_MIN; if (srcSize <= minLitSize) return ZSTD_noCompressLiterals(dst, dstCapacity, src, srcSize); } RETURN_ERROR_IF(dstCapacity < lhSize+1, dstSize_tooSmall, "not enough space for compression"); { HUF_repeat repeat = prevHuf->repeatMode; int const preferRepeat = strategy < ZSTD_lazy ? srcSize <= 1024 : 0; if (repeat == HUF_repeat_valid && lhSize == 3) singleStream = 1; cLitSize = singleStream ? HUF_compress1X_repeat( ostart+lhSize, dstCapacity-lhSize, src, srcSize, HUF_SYMBOLVALUE_MAX, HUF_TABLELOG_DEFAULT, entropyWorkspace, entropyWorkspaceSize, (HUF_CElt*)nextHuf->CTable, &repeat, preferRepeat, bmi2) : HUF_compress4X_repeat( ostart+lhSize, dstCapacity-lhSize, src, srcSize, HUF_SYMBOLVALUE_MAX, HUF_TABLELOG_DEFAULT, entropyWorkspace, entropyWorkspaceSize, (HUF_CElt*)nextHuf->CTable, &repeat, preferRepeat, bmi2); if (repeat != HUF_repeat_none) { /* reused the existing table */ DEBUGLOG(5, "Reusing previous huffman table"); hType = set_repeat; } } if ((cLitSize==0) | (cLitSize >= srcSize - minGain) | ERR_isError(cLitSize)) { memcpy(nextHuf, prevHuf, sizeof(*prevHuf)); return ZSTD_noCompressLiterals(dst, dstCapacity, src, srcSize); } if (cLitSize==1) { memcpy(nextHuf, prevHuf, sizeof(*prevHuf)); return ZSTD_compressRleLiteralsBlock(dst, dstCapacity, src, srcSize); } if (hType == set_compressed) { /* using a newly constructed table */ nextHuf->repeatMode = HUF_repeat_check; } /* Build header */ switch(lhSize) { case 3: /* 2 - 2 - 10 - 10 */ { U32 const lhc = hType + ((!singleStream) << 2) + ((U32)srcSize<<4) + ((U32)cLitSize<<14); MEM_writeLE24(ostart, lhc); break; } case 4: /* 2 - 2 - 14 - 14 */ { U32 const lhc = hType + (2 << 2) + ((U32)srcSize<<4) + ((U32)cLitSize<<18); MEM_writeLE32(ostart, lhc); break; } case 5: /* 2 - 2 - 18 - 18 */ { U32 const lhc = hType + (3 << 2) + ((U32)srcSize<<4) + ((U32)cLitSize<<22); MEM_writeLE32(ostart, lhc); ostart[4] = (BYTE)(cLitSize >> 10); break; } default: /* not possible : lhSize is {3,4,5} */ assert(0); } DEBUGLOG(5, "Compressed literals: %u -> %u", (U32)srcSize, (U32)(lhSize+cLitSize)); return lhSize+cLitSize; } borgbackup-1.1.15/src/borg/algorithms/zstd/lib/compress/zstd_double_fast.h0000644000175000017500000000241213771325506026666 0ustar useruser00000000000000/* * Copyright (c) 2016-2020, Yann Collet, Facebook, Inc. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the * LICENSE file in the root directory of this source tree) and the GPLv2 (found * in the COPYING file in the root directory of this source tree). * You may select, at your option, one of the above-listed licenses. 
*/ #ifndef ZSTD_DOUBLE_FAST_H #define ZSTD_DOUBLE_FAST_H #if defined (__cplusplus) extern "C" { #endif #include "../common/mem.h" /* U32 */ #include "zstd_compress_internal.h" /* ZSTD_CCtx, size_t */ void ZSTD_fillDoubleHashTable(ZSTD_matchState_t* ms, void const* end, ZSTD_dictTableLoadMethod_e dtlm); size_t ZSTD_compressBlock_doubleFast( ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize); size_t ZSTD_compressBlock_doubleFast_dictMatchState( ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize); size_t ZSTD_compressBlock_doubleFast_extDict( ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize); #if defined (__cplusplus) } #endif #endif /* ZSTD_DOUBLE_FAST_H */ borgbackup-1.1.15/src/borg/algorithms/zstd/lib/compress/zstd_compress_sequences.h0000644000175000017500000000420113771325506030303 0ustar useruser00000000000000/* * Copyright (c) 2016-2020, Yann Collet, Facebook, Inc. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the * LICENSE file in the root directory of this source tree) and the GPLv2 (found * in the COPYING file in the root directory of this source tree). * You may select, at your option, one of the above-listed licenses. */ #ifndef ZSTD_COMPRESS_SEQUENCES_H #define ZSTD_COMPRESS_SEQUENCES_H #include "../common/fse.h" /* FSE_repeat, FSE_CTable */ #include "../common/zstd_internal.h" /* symbolEncodingType_e, ZSTD_strategy */ typedef enum { ZSTD_defaultDisallowed = 0, ZSTD_defaultAllowed = 1 } ZSTD_defaultPolicy_e; symbolEncodingType_e ZSTD_selectEncodingType( FSE_repeat* repeatMode, unsigned const* count, unsigned const max, size_t const mostFrequent, size_t nbSeq, unsigned const FSELog, FSE_CTable const* prevCTable, short const* defaultNorm, U32 defaultNormLog, ZSTD_defaultPolicy_e const isDefaultAllowed, ZSTD_strategy const strategy); size_t ZSTD_buildCTable(void* dst, size_t dstCapacity, FSE_CTable* nextCTable, U32 FSELog, symbolEncodingType_e type, unsigned* count, U32 max, const BYTE* codeTable, size_t nbSeq, const S16* defaultNorm, U32 defaultNormLog, U32 defaultMax, const FSE_CTable* prevCTable, size_t prevCTableSize, void* entropyWorkspace, size_t entropyWorkspaceSize); size_t ZSTD_encodeSequences( void* dst, size_t dstCapacity, FSE_CTable const* CTable_MatchLength, BYTE const* mlCodeTable, FSE_CTable const* CTable_OffsetBits, BYTE const* ofCodeTable, FSE_CTable const* CTable_LitLength, BYTE const* llCodeTable, seqDef const* sequences, size_t nbSeq, int longOffsets, int bmi2); size_t ZSTD_fseBitCost( FSE_CTable const* ctable, unsigned const* count, unsigned const max); size_t ZSTD_crossEntropyCost(short const* norm, unsigned accuracyLog, unsigned const* count, unsigned const max); #endif /* ZSTD_COMPRESS_SEQUENCES_H */ borgbackup-1.1.15/src/borg/algorithms/zstd/lib/compress/zstd_ldm.c0000644000175000017500000006057013771325506025157 0ustar useruser00000000000000/* * Copyright (c) 2016-2020, Yann Collet, Facebook, Inc. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the * LICENSE file in the root directory of this source tree) and the GPLv2 (found * in the COPYING file in the root directory of this source tree). * You may select, at your option, one of the above-listed licenses. 
*/ #include "zstd_ldm.h" #include "../common/debug.h" #include "zstd_fast.h" /* ZSTD_fillHashTable() */ #include "zstd_double_fast.h" /* ZSTD_fillDoubleHashTable() */ #define LDM_BUCKET_SIZE_LOG 3 #define LDM_MIN_MATCH_LENGTH 64 #define LDM_HASH_RLOG 7 #define LDM_HASH_CHAR_OFFSET 10 void ZSTD_ldm_adjustParameters(ldmParams_t* params, ZSTD_compressionParameters const* cParams) { params->windowLog = cParams->windowLog; ZSTD_STATIC_ASSERT(LDM_BUCKET_SIZE_LOG <= ZSTD_LDM_BUCKETSIZELOG_MAX); DEBUGLOG(4, "ZSTD_ldm_adjustParameters"); if (!params->bucketSizeLog) params->bucketSizeLog = LDM_BUCKET_SIZE_LOG; if (!params->minMatchLength) params->minMatchLength = LDM_MIN_MATCH_LENGTH; if (cParams->strategy >= ZSTD_btopt) { /* Get out of the way of the optimal parser */ U32 const minMatch = MAX(cParams->targetLength, params->minMatchLength); assert(minMatch >= ZSTD_LDM_MINMATCH_MIN); assert(minMatch <= ZSTD_LDM_MINMATCH_MAX); params->minMatchLength = minMatch; } if (params->hashLog == 0) { params->hashLog = MAX(ZSTD_HASHLOG_MIN, params->windowLog - LDM_HASH_RLOG); assert(params->hashLog <= ZSTD_HASHLOG_MAX); } if (params->hashRateLog == 0) { params->hashRateLog = params->windowLog < params->hashLog ? 0 : params->windowLog - params->hashLog; } params->bucketSizeLog = MIN(params->bucketSizeLog, params->hashLog); } size_t ZSTD_ldm_getTableSize(ldmParams_t params) { size_t const ldmHSize = ((size_t)1) << params.hashLog; size_t const ldmBucketSizeLog = MIN(params.bucketSizeLog, params.hashLog); size_t const ldmBucketSize = ((size_t)1) << (params.hashLog - ldmBucketSizeLog); size_t const totalSize = ZSTD_cwksp_alloc_size(ldmBucketSize) + ZSTD_cwksp_alloc_size(ldmHSize * sizeof(ldmEntry_t)); return params.enableLdm ? totalSize : 0; } size_t ZSTD_ldm_getMaxNbSeq(ldmParams_t params, size_t maxChunkSize) { return params.enableLdm ? (maxChunkSize / params.minMatchLength) : 0; } /** ZSTD_ldm_getSmallHash() : * numBits should be <= 32 * If numBits==0, returns 0. * @return : the most significant numBits of value. */ static U32 ZSTD_ldm_getSmallHash(U64 value, U32 numBits) { assert(numBits <= 32); return numBits == 0 ? 0 : (U32)(value >> (64 - numBits)); } /** ZSTD_ldm_getChecksum() : * numBitsToDiscard should be <= 32 * @return : the next most significant 32 bits after numBitsToDiscard */ static U32 ZSTD_ldm_getChecksum(U64 hash, U32 numBitsToDiscard) { assert(numBitsToDiscard <= 32); return (hash >> (64 - 32 - numBitsToDiscard)) & 0xFFFFFFFF; } /** ZSTD_ldm_getTag() ; * Given the hash, returns the most significant numTagBits bits * after (32 + hbits) bits. * * If there are not enough bits remaining, return the last * numTagBits bits. */ static U32 ZSTD_ldm_getTag(U64 hash, U32 hbits, U32 numTagBits) { assert(numTagBits < 32 && hbits <= 32); if (32 - hbits < numTagBits) { return hash & (((U32)1 << numTagBits) - 1); } else { return (hash >> (32 - hbits - numTagBits)) & (((U32)1 << numTagBits) - 1); } } /** ZSTD_ldm_getBucket() : * Returns a pointer to the start of the bucket associated with hash. 
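 * The table holds 2^hashLog ldmEntry_t slots grouped into buckets of
 * 2^bucketSizeLog entries: the small hash selects a bucket starting at
 * hashTable + (hash << bucketSizeLog), and bucketOffsets[hash] rotates
 * insertions round-robin within that bucket (see ZSTD_ldm_insertEntry below).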
*/ static ldmEntry_t* ZSTD_ldm_getBucket( ldmState_t* ldmState, size_t hash, ldmParams_t const ldmParams) { return ldmState->hashTable + (hash << ldmParams.bucketSizeLog); } /** ZSTD_ldm_insertEntry() : * Insert the entry with corresponding hash into the hash table */ static void ZSTD_ldm_insertEntry(ldmState_t* ldmState, size_t const hash, const ldmEntry_t entry, ldmParams_t const ldmParams) { BYTE* const bucketOffsets = ldmState->bucketOffsets; *(ZSTD_ldm_getBucket(ldmState, hash, ldmParams) + bucketOffsets[hash]) = entry; bucketOffsets[hash]++; bucketOffsets[hash] &= ((U32)1 << ldmParams.bucketSizeLog) - 1; } /** ZSTD_ldm_makeEntryAndInsertByTag() : * * Gets the small hash, checksum, and tag from the rollingHash. * * If the tag matches (1 << ldmParams.hashRateLog)-1, then * creates an ldmEntry from the offset, and inserts it into the hash table. * * hBits is the length of the small hash, which is the most significant hBits * of rollingHash. The checksum is the next 32 most significant bits, followed * by ldmParams.hashRateLog bits that make up the tag. */ static void ZSTD_ldm_makeEntryAndInsertByTag(ldmState_t* ldmState, U64 const rollingHash, U32 const hBits, U32 const offset, ldmParams_t const ldmParams) { U32 const tag = ZSTD_ldm_getTag(rollingHash, hBits, ldmParams.hashRateLog); U32 const tagMask = ((U32)1 << ldmParams.hashRateLog) - 1; if (tag == tagMask) { U32 const hash = ZSTD_ldm_getSmallHash(rollingHash, hBits); U32 const checksum = ZSTD_ldm_getChecksum(rollingHash, hBits); ldmEntry_t entry; entry.offset = offset; entry.checksum = checksum; ZSTD_ldm_insertEntry(ldmState, hash, entry, ldmParams); } } /** ZSTD_ldm_countBackwardsMatch() : * Returns the number of bytes that match backwards before pIn and pMatch. * * We count only bytes where pMatch >= pBase and pIn >= pAnchor. */ static size_t ZSTD_ldm_countBackwardsMatch( const BYTE* pIn, const BYTE* pAnchor, const BYTE* pMatch, const BYTE* pBase) { size_t matchLength = 0; while (pIn > pAnchor && pMatch > pBase && pIn[-1] == pMatch[-1]) { pIn--; pMatch--; matchLength++; } return matchLength; } /** ZSTD_ldm_fillFastTables() : * * Fills the relevant tables for the ZSTD_fast and ZSTD_dfast strategies. * This is similar to ZSTD_loadDictionaryContent. * * The tables for the other strategies are filled within their * block compressors. */ static size_t ZSTD_ldm_fillFastTables(ZSTD_matchState_t* ms, void const* end) { const BYTE* const iend = (const BYTE*)end; switch(ms->cParams.strategy) { case ZSTD_fast: ZSTD_fillHashTable(ms, iend, ZSTD_dtlm_fast); break; case ZSTD_dfast: ZSTD_fillDoubleHashTable(ms, iend, ZSTD_dtlm_fast); break; case ZSTD_greedy: case ZSTD_lazy: case ZSTD_lazy2: case ZSTD_btlazy2: case ZSTD_btopt: case ZSTD_btultra: case ZSTD_btultra2: break; default: assert(0); /* not possible : not a valid strategy id */ } return 0; } /** ZSTD_ldm_fillLdmHashTable() : * * Fills hashTable from (lastHashed + 1) to iend (non-inclusive). * lastHash is the rolling hash that corresponds to lastHashed. * * Returns the rolling hash corresponding to position iend-1. 
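 * ZSTD_rollingHash_rotate(hash, byteOut, byteIn, hashPower), as used below,
 * performs a Rabin-Karp style update: the contribution of the byte leaving
 * the minMatchLength window is removed and the entering byte is mixed in, so
 * hashing each position costs O(1).  Only positions whose tag bits (see
 * ZSTD_ldm_getTag) equal the all-ones hashRateLog mask are inserted, which
 * subsamples candidate positions roughly 1 in 2^hashRateLog.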
*/ static U64 ZSTD_ldm_fillLdmHashTable(ldmState_t* state, U64 lastHash, const BYTE* lastHashed, const BYTE* iend, const BYTE* base, U32 hBits, ldmParams_t const ldmParams) { U64 rollingHash = lastHash; const BYTE* cur = lastHashed + 1; while (cur < iend) { rollingHash = ZSTD_rollingHash_rotate(rollingHash, cur[-1], cur[ldmParams.minMatchLength-1], state->hashPower); ZSTD_ldm_makeEntryAndInsertByTag(state, rollingHash, hBits, (U32)(cur - base), ldmParams); ++cur; } return rollingHash; } void ZSTD_ldm_fillHashTable( ldmState_t* state, const BYTE* ip, const BYTE* iend, ldmParams_t const* params) { DEBUGLOG(5, "ZSTD_ldm_fillHashTable"); if ((size_t)(iend - ip) >= params->minMatchLength) { U64 startingHash = ZSTD_rollingHash_compute(ip, params->minMatchLength); ZSTD_ldm_fillLdmHashTable( state, startingHash, ip, iend - params->minMatchLength, state->window.base, params->hashLog - params->bucketSizeLog, *params); } } /** ZSTD_ldm_limitTableUpdate() : * * Sets cctx->nextToUpdate to a position corresponding closer to anchor * if it is far way * (after a long match, only update tables a limited amount). */ static void ZSTD_ldm_limitTableUpdate(ZSTD_matchState_t* ms, const BYTE* anchor) { U32 const current = (U32)(anchor - ms->window.base); if (current > ms->nextToUpdate + 1024) { ms->nextToUpdate = current - MIN(512, current - ms->nextToUpdate - 1024); } } static size_t ZSTD_ldm_generateSequences_internal( ldmState_t* ldmState, rawSeqStore_t* rawSeqStore, ldmParams_t const* params, void const* src, size_t srcSize) { /* LDM parameters */ int const extDict = ZSTD_window_hasExtDict(ldmState->window); U32 const minMatchLength = params->minMatchLength; U64 const hashPower = ldmState->hashPower; U32 const hBits = params->hashLog - params->bucketSizeLog; U32 const ldmBucketSize = 1U << params->bucketSizeLog; U32 const hashRateLog = params->hashRateLog; U32 const ldmTagMask = (1U << params->hashRateLog) - 1; /* Prefix and extDict parameters */ U32 const dictLimit = ldmState->window.dictLimit; U32 const lowestIndex = extDict ? ldmState->window.lowLimit : dictLimit; BYTE const* const base = ldmState->window.base; BYTE const* const dictBase = extDict ? ldmState->window.dictBase : NULL; BYTE const* const dictStart = extDict ? dictBase + lowestIndex : NULL; BYTE const* const dictEnd = extDict ? 
dictBase + dictLimit : NULL; BYTE const* const lowPrefixPtr = base + dictLimit; /* Input bounds */ BYTE const* const istart = (BYTE const*)src; BYTE const* const iend = istart + srcSize; BYTE const* const ilimit = iend - MAX(minMatchLength, HASH_READ_SIZE); /* Input positions */ BYTE const* anchor = istart; BYTE const* ip = istart; /* Rolling hash */ BYTE const* lastHashed = NULL; U64 rollingHash = 0; while (ip <= ilimit) { size_t mLength; U32 const current = (U32)(ip - base); size_t forwardMatchLength = 0, backwardMatchLength = 0; ldmEntry_t* bestEntry = NULL; if (ip != istart) { rollingHash = ZSTD_rollingHash_rotate(rollingHash, lastHashed[0], lastHashed[minMatchLength], hashPower); } else { rollingHash = ZSTD_rollingHash_compute(ip, minMatchLength); } lastHashed = ip; /* Do not insert and do not look for a match */ if (ZSTD_ldm_getTag(rollingHash, hBits, hashRateLog) != ldmTagMask) { ip++; continue; } /* Get the best entry and compute the match lengths */ { ldmEntry_t* const bucket = ZSTD_ldm_getBucket(ldmState, ZSTD_ldm_getSmallHash(rollingHash, hBits), *params); ldmEntry_t* cur; size_t bestMatchLength = 0; U32 const checksum = ZSTD_ldm_getChecksum(rollingHash, hBits); for (cur = bucket; cur < bucket + ldmBucketSize; ++cur) { size_t curForwardMatchLength, curBackwardMatchLength, curTotalMatchLength; if (cur->checksum != checksum || cur->offset <= lowestIndex) { continue; } if (extDict) { BYTE const* const curMatchBase = cur->offset < dictLimit ? dictBase : base; BYTE const* const pMatch = curMatchBase + cur->offset; BYTE const* const matchEnd = cur->offset < dictLimit ? dictEnd : iend; BYTE const* const lowMatchPtr = cur->offset < dictLimit ? dictStart : lowPrefixPtr; curForwardMatchLength = ZSTD_count_2segments( ip, pMatch, iend, matchEnd, lowPrefixPtr); if (curForwardMatchLength < minMatchLength) { continue; } curBackwardMatchLength = ZSTD_ldm_countBackwardsMatch(ip, anchor, pMatch, lowMatchPtr); curTotalMatchLength = curForwardMatchLength + curBackwardMatchLength; } else { /* !extDict */ BYTE const* const pMatch = base + cur->offset; curForwardMatchLength = ZSTD_count(ip, pMatch, iend); if (curForwardMatchLength < minMatchLength) { continue; } curBackwardMatchLength = ZSTD_ldm_countBackwardsMatch(ip, anchor, pMatch, lowPrefixPtr); curTotalMatchLength = curForwardMatchLength + curBackwardMatchLength; } if (curTotalMatchLength > bestMatchLength) { bestMatchLength = curTotalMatchLength; forwardMatchLength = curForwardMatchLength; backwardMatchLength = curBackwardMatchLength; bestEntry = cur; } } } /* No match found -- continue searching */ if (bestEntry == NULL) { ZSTD_ldm_makeEntryAndInsertByTag(ldmState, rollingHash, hBits, current, *params); ip++; continue; } /* Match found */ mLength = forwardMatchLength + backwardMatchLength; ip -= backwardMatchLength; { /* Store the sequence: * ip = current - backwardMatchLength * The match is at (bestEntry->offset - backwardMatchLength) */ U32 const matchIndex = bestEntry->offset; U32 const offset = current - matchIndex; rawSeq* const seq = rawSeqStore->seq + rawSeqStore->size; /* Out of sequence storage */ if (rawSeqStore->size == rawSeqStore->capacity) return ERROR(dstSize_tooSmall); seq->litLength = (U32)(ip - anchor); seq->matchLength = (U32)mLength; seq->offset = offset; rawSeqStore->size++; } /* Insert the current entry into the hash table */ ZSTD_ldm_makeEntryAndInsertByTag(ldmState, rollingHash, hBits, (U32)(lastHashed - base), *params); assert(ip + backwardMatchLength == lastHashed); /* Fill the hash table from lastHashed+1 to 
ip+mLength*/ /* Heuristic: don't need to fill the entire table at end of block */ if (ip + mLength <= ilimit) { rollingHash = ZSTD_ldm_fillLdmHashTable( ldmState, rollingHash, lastHashed, ip + mLength, base, hBits, *params); lastHashed = ip + mLength - 1; } ip += mLength; anchor = ip; } return iend - anchor; } /*! ZSTD_ldm_reduceTable() : * reduce table indexes by `reducerValue` */ static void ZSTD_ldm_reduceTable(ldmEntry_t* const table, U32 const size, U32 const reducerValue) { U32 u; for (u = 0; u < size; u++) { if (table[u].offset < reducerValue) table[u].offset = 0; else table[u].offset -= reducerValue; } } size_t ZSTD_ldm_generateSequences( ldmState_t* ldmState, rawSeqStore_t* sequences, ldmParams_t const* params, void const* src, size_t srcSize) { U32 const maxDist = 1U << params->windowLog; BYTE const* const istart = (BYTE const*)src; BYTE const* const iend = istart + srcSize; size_t const kMaxChunkSize = 1 << 20; size_t const nbChunks = (srcSize / kMaxChunkSize) + ((srcSize % kMaxChunkSize) != 0); size_t chunk; size_t leftoverSize = 0; assert(ZSTD_CHUNKSIZE_MAX >= kMaxChunkSize); /* Check that ZSTD_window_update() has been called for this chunk prior * to passing it to this function. */ assert(ldmState->window.nextSrc >= (BYTE const*)src + srcSize); /* The input could be very large (in zstdmt), so it must be broken up into * chunks to enforce the maximum distance and handle overflow correction. */ assert(sequences->pos <= sequences->size); assert(sequences->size <= sequences->capacity); for (chunk = 0; chunk < nbChunks && sequences->size < sequences->capacity; ++chunk) { BYTE const* const chunkStart = istart + chunk * kMaxChunkSize; size_t const remaining = (size_t)(iend - chunkStart); BYTE const *const chunkEnd = (remaining < kMaxChunkSize) ? iend : chunkStart + kMaxChunkSize; size_t const chunkSize = chunkEnd - chunkStart; size_t newLeftoverSize; size_t const prevSize = sequences->size; assert(chunkStart < iend); /* 1. Perform overflow correction if necessary. */ if (ZSTD_window_needOverflowCorrection(ldmState->window, chunkEnd)) { U32 const ldmHSize = 1U << params->hashLog; U32 const correction = ZSTD_window_correctOverflow( &ldmState->window, /* cycleLog */ 0, maxDist, chunkStart); ZSTD_ldm_reduceTable(ldmState->hashTable, ldmHSize, correction); /* invalidate dictionaries on overflow correction */ ldmState->loadedDictEnd = 0; } /* 2. We enforce the maximum offset allowed. * * kMaxChunkSize should be small enough that we don't lose too much of * the window through early invalidation. * TODO: * Test the chunk size. * * Try invalidation after the sequence generation and test the * the offset against maxDist directly. * * NOTE: Because of dictionaries + sequence splitting we MUST make sure * that any offset used is valid at the END of the sequence, since it may * be split into two sequences. This condition holds when using * ZSTD_window_enforceMaxDist(), but if we move to checking offsets * against maxDist directly, we'll have to carefully handle that case. */ ZSTD_window_enforceMaxDist(&ldmState->window, chunkEnd, maxDist, &ldmState->loadedDictEnd, NULL); /* 3. Generate the sequences for the chunk, and get newLeftoverSize. */ newLeftoverSize = ZSTD_ldm_generateSequences_internal( ldmState, sequences, params, chunkStart, chunkSize); if (ZSTD_isError(newLeftoverSize)) return newLeftoverSize; /* 4. We add the leftover literals from previous iterations to the first * newly generated sequence, or add the `newLeftoverSize` if none are * generated. 
*/ /* Prepend the leftover literals from the last call */ if (prevSize < sequences->size) { sequences->seq[prevSize].litLength += (U32)leftoverSize; leftoverSize = newLeftoverSize; } else { assert(newLeftoverSize == chunkSize); leftoverSize += chunkSize; } } return 0; } void ZSTD_ldm_skipSequences(rawSeqStore_t* rawSeqStore, size_t srcSize, U32 const minMatch) { while (srcSize > 0 && rawSeqStore->pos < rawSeqStore->size) { rawSeq* seq = rawSeqStore->seq + rawSeqStore->pos; if (srcSize <= seq->litLength) { /* Skip past srcSize literals */ seq->litLength -= (U32)srcSize; return; } srcSize -= seq->litLength; seq->litLength = 0; if (srcSize < seq->matchLength) { /* Skip past the first srcSize of the match */ seq->matchLength -= (U32)srcSize; if (seq->matchLength < minMatch) { /* The match is too short, omit it */ if (rawSeqStore->pos + 1 < rawSeqStore->size) { seq[1].litLength += seq[0].matchLength; } rawSeqStore->pos++; } return; } srcSize -= seq->matchLength; seq->matchLength = 0; rawSeqStore->pos++; } } /** * If the sequence length is longer than remaining then the sequence is split * between this block and the next. * * Returns the current sequence to handle, or if the rest of the block should * be literals, it returns a sequence with offset == 0. */ static rawSeq maybeSplitSequence(rawSeqStore_t* rawSeqStore, U32 const remaining, U32 const minMatch) { rawSeq sequence = rawSeqStore->seq[rawSeqStore->pos]; assert(sequence.offset > 0); /* Likely: No partial sequence */ if (remaining >= sequence.litLength + sequence.matchLength) { rawSeqStore->pos++; return sequence; } /* Cut the sequence short (offset == 0 ==> rest is literals). */ if (remaining <= sequence.litLength) { sequence.offset = 0; } else if (remaining < sequence.litLength + sequence.matchLength) { sequence.matchLength = remaining - sequence.litLength; if (sequence.matchLength < minMatch) { sequence.offset = 0; } } /* Skip past `remaining` bytes for the future sequences. 
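      The part of a straddling sequence that spills into the next block stays
      queued in rawSeqStore, and a truncated match that falls below minMatch
      is demoted to literals by returning offset == 0, as handled above.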
*/ ZSTD_ldm_skipSequences(rawSeqStore, remaining, minMatch); return sequence; } size_t ZSTD_ldm_blockCompress(rawSeqStore_t* rawSeqStore, ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize) { const ZSTD_compressionParameters* const cParams = &ms->cParams; unsigned const minMatch = cParams->minMatch; ZSTD_blockCompressor const blockCompressor = ZSTD_selectBlockCompressor(cParams->strategy, ZSTD_matchState_dictMode(ms)); /* Input bounds */ BYTE const* const istart = (BYTE const*)src; BYTE const* const iend = istart + srcSize; /* Input positions */ BYTE const* ip = istart; DEBUGLOG(5, "ZSTD_ldm_blockCompress: srcSize=%zu", srcSize); assert(rawSeqStore->pos <= rawSeqStore->size); assert(rawSeqStore->size <= rawSeqStore->capacity); /* Loop through each sequence and apply the block compressor to the lits */ while (rawSeqStore->pos < rawSeqStore->size && ip < iend) { /* maybeSplitSequence updates rawSeqStore->pos */ rawSeq const sequence = maybeSplitSequence(rawSeqStore, (U32)(iend - ip), minMatch); int i; /* End signal */ if (sequence.offset == 0) break; assert(ip + sequence.litLength + sequence.matchLength <= iend); /* Fill tables for block compressor */ ZSTD_ldm_limitTableUpdate(ms, ip); ZSTD_ldm_fillFastTables(ms, ip); /* Run the block compressor */ DEBUGLOG(5, "pos %u : calling block compressor on segment of size %u", (unsigned)(ip-istart), sequence.litLength); { size_t const newLitLength = blockCompressor(ms, seqStore, rep, ip, sequence.litLength); ip += sequence.litLength; /* Update the repcodes */ for (i = ZSTD_REP_NUM - 1; i > 0; i--) rep[i] = rep[i-1]; rep[0] = sequence.offset; /* Store the sequence */ ZSTD_storeSeq(seqStore, newLitLength, ip - newLitLength, iend, sequence.offset + ZSTD_REP_MOVE, sequence.matchLength - MINMATCH); ip += sequence.matchLength; } } /* Fill the tables for the block compressor */ ZSTD_ldm_limitTableUpdate(ms, ip); ZSTD_ldm_fillFastTables(ms, ip); /* Compress the last literals */ return blockCompressor(ms, seqStore, rep, ip, iend - ip); } borgbackup-1.1.15/src/borg/algorithms/zstd/lib/compress/zstd_fast.c0000644000175000017500000005360413771325506025340 0ustar useruser00000000000000/* * Copyright (c) 2016-2020, Yann Collet, Facebook, Inc. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the * LICENSE file in the root directory of this source tree) and the GPLv2 (found * in the COPYING file in the root directory of this source tree). * You may select, at your option, one of the above-listed licenses. */ #include "zstd_compress_internal.h" /* ZSTD_hashPtr, ZSTD_count, ZSTD_storeSeq */ #include "zstd_fast.h" void ZSTD_fillHashTable(ZSTD_matchState_t* ms, const void* const end, ZSTD_dictTableLoadMethod_e dtlm) { const ZSTD_compressionParameters* const cParams = &ms->cParams; U32* const hashTable = ms->hashTable; U32 const hBits = cParams->hashLog; U32 const mls = cParams->minMatch; const BYTE* const base = ms->window.base; const BYTE* ip = base + ms->nextToUpdate; const BYTE* const iend = ((const BYTE*)end) - HASH_READ_SIZE; const U32 fastHashFillStep = 3; /* Always insert every fastHashFillStep position into the hash table. * Insert the other positions if their hash entry is empty. 
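 * With fastHashFillStep == 3 this keeps dictionary loading cheap: every 3rd
 * position is recorded unconditionally, and in ZSTD_dtlm_full mode the two
 * positions in between are written only when their slot is still empty, so
 * they never overwrite an existing entry.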
*/ for ( ; ip + fastHashFillStep < iend + 2; ip += fastHashFillStep) { U32 const current = (U32)(ip - base); size_t const hash0 = ZSTD_hashPtr(ip, hBits, mls); hashTable[hash0] = current; if (dtlm == ZSTD_dtlm_fast) continue; /* Only load extra positions for ZSTD_dtlm_full */ { U32 p; for (p = 1; p < fastHashFillStep; ++p) { size_t const hash = ZSTD_hashPtr(ip + p, hBits, mls); if (hashTable[hash] == 0) { /* not yet filled */ hashTable[hash] = current + p; } } } } } FORCE_INLINE_TEMPLATE size_t ZSTD_compressBlock_fast_generic( ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize, U32 const mls) { const ZSTD_compressionParameters* const cParams = &ms->cParams; U32* const hashTable = ms->hashTable; U32 const hlog = cParams->hashLog; /* support stepSize of 0 */ size_t const stepSize = cParams->targetLength + !(cParams->targetLength) + 1; const BYTE* const base = ms->window.base; const BYTE* const istart = (const BYTE*)src; /* We check ip0 (ip + 0) and ip1 (ip + 1) each loop */ const BYTE* ip0 = istart; const BYTE* ip1; const BYTE* anchor = istart; const U32 endIndex = (U32)((size_t)(istart - base) + srcSize); const U32 prefixStartIndex = ZSTD_getLowestPrefixIndex(ms, endIndex, cParams->windowLog); const BYTE* const prefixStart = base + prefixStartIndex; const BYTE* const iend = istart + srcSize; const BYTE* const ilimit = iend - HASH_READ_SIZE; U32 offset_1=rep[0], offset_2=rep[1]; U32 offsetSaved = 0; /* init */ DEBUGLOG(5, "ZSTD_compressBlock_fast_generic"); ip0 += (ip0 == prefixStart); ip1 = ip0 + 1; { U32 const current = (U32)(ip0 - base); U32 const windowLow = ZSTD_getLowestPrefixIndex(ms, current, cParams->windowLog); U32 const maxRep = current - windowLow; if (offset_2 > maxRep) offsetSaved = offset_2, offset_2 = 0; if (offset_1 > maxRep) offsetSaved = offset_1, offset_1 = 0; } /* Main Search Loop */ #ifdef __INTEL_COMPILER /* From intel 'The vector pragma indicates that the loop should be * vectorized if it is legal to do so'. Can be used together with * #pragma ivdep (but have opted to exclude that because intel * warns against using it).*/ #pragma vector always #endif while (ip1 < ilimit) { /* < instead of <=, because check at ip0+2 */ size_t mLength; BYTE const* ip2 = ip0 + 2; size_t const h0 = ZSTD_hashPtr(ip0, hlog, mls); U32 const val0 = MEM_read32(ip0); size_t const h1 = ZSTD_hashPtr(ip1, hlog, mls); U32 const val1 = MEM_read32(ip1); U32 const current0 = (U32)(ip0-base); U32 const current1 = (U32)(ip1-base); U32 const matchIndex0 = hashTable[h0]; U32 const matchIndex1 = hashTable[h1]; BYTE const* repMatch = ip2 - offset_1; const BYTE* match0 = base + matchIndex0; const BYTE* match1 = base + matchIndex1; U32 offcode; #if defined(__aarch64__) PREFETCH_L1(ip0+256); #endif hashTable[h0] = current0; /* update hash table */ hashTable[h1] = current1; /* update hash table */ assert(ip0 + 1 == ip1); if ((offset_1 > 0) & (MEM_read32(repMatch) == MEM_read32(ip2))) { mLength = (ip2[-1] == repMatch[-1]) ? 
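/* added note: when the byte just before ip2 also matches, start the repcode match one byte earlier (mLength 1, then +4 below for the verified bytes) */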
1 : 0; ip0 = ip2 - mLength; match0 = repMatch - mLength; mLength += 4; offcode = 0; goto _match; } if ((matchIndex0 > prefixStartIndex) && MEM_read32(match0) == val0) { /* found a regular match */ goto _offset; } if ((matchIndex1 > prefixStartIndex) && MEM_read32(match1) == val1) { /* found a regular match after one literal */ ip0 = ip1; match0 = match1; goto _offset; } { size_t const step = ((size_t)(ip0-anchor) >> (kSearchStrength - 1)) + stepSize; assert(step >= 2); ip0 += step; ip1 += step; continue; } _offset: /* Requires: ip0, match0 */ /* Compute the offset code */ offset_2 = offset_1; offset_1 = (U32)(ip0-match0); offcode = offset_1 + ZSTD_REP_MOVE; mLength = 4; /* Count the backwards match length */ while (((ip0>anchor) & (match0>prefixStart)) && (ip0[-1] == match0[-1])) { ip0--; match0--; mLength++; } /* catch up */ _match: /* Requires: ip0, match0, offcode */ /* Count the forward length */ mLength += ZSTD_count(ip0+mLength, match0+mLength, iend); ZSTD_storeSeq(seqStore, (size_t)(ip0-anchor), anchor, iend, offcode, mLength-MINMATCH); /* match found */ ip0 += mLength; anchor = ip0; if (ip0 <= ilimit) { /* Fill Table */ assert(base+current0+2 > istart); /* check base overflow */ hashTable[ZSTD_hashPtr(base+current0+2, hlog, mls)] = current0+2; /* here because current+2 could be > iend-8 */ hashTable[ZSTD_hashPtr(ip0-2, hlog, mls)] = (U32)(ip0-2-base); if (offset_2 > 0) { /* offset_2==0 means offset_2 is invalidated */ while ( (ip0 <= ilimit) && (MEM_read32(ip0) == MEM_read32(ip0 - offset_2)) ) { /* store sequence */ size_t const rLength = ZSTD_count(ip0+4, ip0+4-offset_2, iend) + 4; { U32 const tmpOff = offset_2; offset_2 = offset_1; offset_1 = tmpOff; } /* swap offset_2 <=> offset_1 */ hashTable[ZSTD_hashPtr(ip0, hlog, mls)] = (U32)(ip0-base); ip0 += rLength; ZSTD_storeSeq(seqStore, 0 /*litLen*/, anchor, iend, 0 /*offCode*/, rLength-MINMATCH); anchor = ip0; continue; /* faster when present (confirmed on gcc-8) ... (?) */ } } } ip1 = ip0 + 1; } /* save reps for next block */ rep[0] = offset_1 ? offset_1 : offsetSaved; rep[1] = offset_2 ? 
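/* added note: offsetSaved holds a repcode that was out of range at the start of the block and was zeroed during init; it is restored here if it was never overwritten */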
offset_2 : offsetSaved; /* Return the last literals size */ return (size_t)(iend - anchor); } size_t ZSTD_compressBlock_fast( ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize) { U32 const mls = ms->cParams.minMatch; assert(ms->dictMatchState == NULL); switch(mls) { default: /* includes case 3 */ case 4 : return ZSTD_compressBlock_fast_generic(ms, seqStore, rep, src, srcSize, 4); case 5 : return ZSTD_compressBlock_fast_generic(ms, seqStore, rep, src, srcSize, 5); case 6 : return ZSTD_compressBlock_fast_generic(ms, seqStore, rep, src, srcSize, 6); case 7 : return ZSTD_compressBlock_fast_generic(ms, seqStore, rep, src, srcSize, 7); } } FORCE_INLINE_TEMPLATE size_t ZSTD_compressBlock_fast_dictMatchState_generic( ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize, U32 const mls) { const ZSTD_compressionParameters* const cParams = &ms->cParams; U32* const hashTable = ms->hashTable; U32 const hlog = cParams->hashLog; /* support stepSize of 0 */ U32 const stepSize = cParams->targetLength + !(cParams->targetLength); const BYTE* const base = ms->window.base; const BYTE* const istart = (const BYTE*)src; const BYTE* ip = istart; const BYTE* anchor = istart; const U32 prefixStartIndex = ms->window.dictLimit; const BYTE* const prefixStart = base + prefixStartIndex; const BYTE* const iend = istart + srcSize; const BYTE* const ilimit = iend - HASH_READ_SIZE; U32 offset_1=rep[0], offset_2=rep[1]; U32 offsetSaved = 0; const ZSTD_matchState_t* const dms = ms->dictMatchState; const ZSTD_compressionParameters* const dictCParams = &dms->cParams ; const U32* const dictHashTable = dms->hashTable; const U32 dictStartIndex = dms->window.dictLimit; const BYTE* const dictBase = dms->window.base; const BYTE* const dictStart = dictBase + dictStartIndex; const BYTE* const dictEnd = dms->window.nextSrc; const U32 dictIndexDelta = prefixStartIndex - (U32)(dictEnd - dictBase); const U32 dictAndPrefixLength = (U32)(ip - prefixStart + dictEnd - dictStart); const U32 dictHLog = dictCParams->hashLog; /* if a dictionary is still attached, it necessarily means that * it is within window size. So we just check it. */ const U32 maxDistance = 1U << cParams->windowLog; const U32 endIndex = (U32)((size_t)(ip - base) + srcSize); assert(endIndex - prefixStartIndex <= maxDistance); (void)maxDistance; (void)endIndex; /* these variables are not used when assert() is disabled */ /* ensure there will be no no underflow * when translating a dict index into a local index */ assert(prefixStartIndex >= (U32)(dictEnd - dictBase)); /* init */ DEBUGLOG(5, "ZSTD_compressBlock_fast_dictMatchState_generic"); ip += (dictAndPrefixLength == 0); /* dictMatchState repCode checks don't currently handle repCode == 0 * disabling. */ assert(offset_1 <= dictAndPrefixLength); assert(offset_2 <= dictAndPrefixLength); /* Main Search Loop */ while (ip < ilimit) { /* < instead of <=, because repcode check at (ip+1) */ size_t mLength; size_t const h = ZSTD_hashPtr(ip, hlog, mls); U32 const current = (U32)(ip-base); U32 const matchIndex = hashTable[h]; const BYTE* match = base + matchIndex; const U32 repIndex = current + 1 - offset_1; const BYTE* repMatch = (repIndex < prefixStartIndex) ? 
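/* added note: a repIndex below prefixStartIndex means the candidate lies in the attached dictionary, so the index is translated into the dictionary's own referential via dictIndexDelta */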
dictBase + (repIndex - dictIndexDelta) : base + repIndex; hashTable[h] = current; /* update hash table */ if ( ((U32)((prefixStartIndex-1) - repIndex) >= 3) /* intentional underflow : ensure repIndex isn't overlapping dict + prefix */ && (MEM_read32(repMatch) == MEM_read32(ip+1)) ) { const BYTE* const repMatchEnd = repIndex < prefixStartIndex ? dictEnd : iend; mLength = ZSTD_count_2segments(ip+1+4, repMatch+4, iend, repMatchEnd, prefixStart) + 4; ip++; ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, 0, mLength-MINMATCH); } else if ( (matchIndex <= prefixStartIndex) ) { size_t const dictHash = ZSTD_hashPtr(ip, dictHLog, mls); U32 const dictMatchIndex = dictHashTable[dictHash]; const BYTE* dictMatch = dictBase + dictMatchIndex; if (dictMatchIndex <= dictStartIndex || MEM_read32(dictMatch) != MEM_read32(ip)) { assert(stepSize >= 1); ip += ((ip-anchor) >> kSearchStrength) + stepSize; continue; } else { /* found a dict match */ U32 const offset = (U32)(current-dictMatchIndex-dictIndexDelta); mLength = ZSTD_count_2segments(ip+4, dictMatch+4, iend, dictEnd, prefixStart) + 4; while (((ip>anchor) & (dictMatch>dictStart)) && (ip[-1] == dictMatch[-1])) { ip--; dictMatch--; mLength++; } /* catch up */ offset_2 = offset_1; offset_1 = offset; ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, offset + ZSTD_REP_MOVE, mLength-MINMATCH); } } else if (MEM_read32(match) != MEM_read32(ip)) { /* it's not a match, and we're not going to check the dictionary */ assert(stepSize >= 1); ip += ((ip-anchor) >> kSearchStrength) + stepSize; continue; } else { /* found a regular match */ U32 const offset = (U32)(ip-match); mLength = ZSTD_count(ip+4, match+4, iend) + 4; while (((ip>anchor) & (match>prefixStart)) && (ip[-1] == match[-1])) { ip--; match--; mLength++; } /* catch up */ offset_2 = offset_1; offset_1 = offset; ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, offset + ZSTD_REP_MOVE, mLength-MINMATCH); } /* match found */ ip += mLength; anchor = ip; if (ip <= ilimit) { /* Fill Table */ assert(base+current+2 > istart); /* check base overflow */ hashTable[ZSTD_hashPtr(base+current+2, hlog, mls)] = current+2; /* here because current+2 could be > iend-8 */ hashTable[ZSTD_hashPtr(ip-2, hlog, mls)] = (U32)(ip-2-base); /* check immediate repcode */ while (ip <= ilimit) { U32 const current2 = (U32)(ip-base); U32 const repIndex2 = current2 - offset_2; const BYTE* repMatch2 = repIndex2 < prefixStartIndex ? dictBase - dictIndexDelta + repIndex2 : base + repIndex2; if ( ((U32)((prefixStartIndex-1) - (U32)repIndex2) >= 3 /* intentional overflow */) && (MEM_read32(repMatch2) == MEM_read32(ip)) ) { const BYTE* const repEnd2 = repIndex2 < prefixStartIndex ? dictEnd : iend; size_t const repLength2 = ZSTD_count_2segments(ip+4, repMatch2+4, iend, repEnd2, prefixStart) + 4; U32 tmpOffset = offset_2; offset_2 = offset_1; offset_1 = tmpOffset; /* swap offset_2 <=> offset_1 */ ZSTD_storeSeq(seqStore, 0, anchor, iend, 0, repLength2-MINMATCH); hashTable[ZSTD_hashPtr(ip, hlog, mls)] = current2; ip += repLength2; anchor = ip; continue; } break; } } } /* save reps for next block */ rep[0] = offset_1 ? offset_1 : offsetSaved; rep[1] = offset_2 ? 
offset_2 : offsetSaved; /* Return the last literals size */ return (size_t)(iend - anchor); } size_t ZSTD_compressBlock_fast_dictMatchState( ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize) { U32 const mls = ms->cParams.minMatch; assert(ms->dictMatchState != NULL); switch(mls) { default: /* includes case 3 */ case 4 : return ZSTD_compressBlock_fast_dictMatchState_generic(ms, seqStore, rep, src, srcSize, 4); case 5 : return ZSTD_compressBlock_fast_dictMatchState_generic(ms, seqStore, rep, src, srcSize, 5); case 6 : return ZSTD_compressBlock_fast_dictMatchState_generic(ms, seqStore, rep, src, srcSize, 6); case 7 : return ZSTD_compressBlock_fast_dictMatchState_generic(ms, seqStore, rep, src, srcSize, 7); } } static size_t ZSTD_compressBlock_fast_extDict_generic( ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize, U32 const mls) { const ZSTD_compressionParameters* const cParams = &ms->cParams; U32* const hashTable = ms->hashTable; U32 const hlog = cParams->hashLog; /* support stepSize of 0 */ U32 const stepSize = cParams->targetLength + !(cParams->targetLength); const BYTE* const base = ms->window.base; const BYTE* const dictBase = ms->window.dictBase; const BYTE* const istart = (const BYTE*)src; const BYTE* ip = istart; const BYTE* anchor = istart; const U32 endIndex = (U32)((size_t)(istart - base) + srcSize); const U32 lowLimit = ZSTD_getLowestMatchIndex(ms, endIndex, cParams->windowLog); const U32 dictStartIndex = lowLimit; const BYTE* const dictStart = dictBase + dictStartIndex; const U32 dictLimit = ms->window.dictLimit; const U32 prefixStartIndex = dictLimit < lowLimit ? lowLimit : dictLimit; const BYTE* const prefixStart = base + prefixStartIndex; const BYTE* const dictEnd = dictBase + prefixStartIndex; const BYTE* const iend = istart + srcSize; const BYTE* const ilimit = iend - 8; U32 offset_1=rep[0], offset_2=rep[1]; DEBUGLOG(5, "ZSTD_compressBlock_fast_extDict_generic (offset_1=%u)", offset_1); /* switch to "regular" variant if extDict is invalidated due to maxDistance */ if (prefixStartIndex == dictStartIndex) return ZSTD_compressBlock_fast_generic(ms, seqStore, rep, src, srcSize, mls); /* Search Loop */ while (ip < ilimit) { /* < instead of <=, because (ip+1) */ const size_t h = ZSTD_hashPtr(ip, hlog, mls); const U32 matchIndex = hashTable[h]; const BYTE* const matchBase = matchIndex < prefixStartIndex ? dictBase : base; const BYTE* match = matchBase + matchIndex; const U32 current = (U32)(ip-base); const U32 repIndex = current + 1 - offset_1; const BYTE* const repBase = repIndex < prefixStartIndex ? dictBase : base; const BYTE* const repMatch = repBase + repIndex; hashTable[h] = current; /* update hash table */ DEBUGLOG(7, "offset_1 = %u , current = %u", offset_1, current); assert(offset_1 <= current +1); /* check repIndex */ if ( (((U32)((prefixStartIndex-1) - repIndex) >= 3) /* intentional underflow */ & (repIndex > dictStartIndex)) && (MEM_read32(repMatch) == MEM_read32(ip+1)) ) { const BYTE* const repMatchEnd = repIndex < prefixStartIndex ? 
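/* added note: the repcode match may begin in the old (extDict) segment; it is then counted up to dictEnd and ZSTD_count_2segments continues the comparison from prefixStart */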
dictEnd : iend; size_t const rLength = ZSTD_count_2segments(ip+1 +4, repMatch +4, iend, repMatchEnd, prefixStart) + 4; ip++; ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, 0, rLength-MINMATCH); ip += rLength; anchor = ip; } else { if ( (matchIndex < dictStartIndex) || (MEM_read32(match) != MEM_read32(ip)) ) { assert(stepSize >= 1); ip += ((ip-anchor) >> kSearchStrength) + stepSize; continue; } { const BYTE* const matchEnd = matchIndex < prefixStartIndex ? dictEnd : iend; const BYTE* const lowMatchPtr = matchIndex < prefixStartIndex ? dictStart : prefixStart; U32 const offset = current - matchIndex; size_t mLength = ZSTD_count_2segments(ip+4, match+4, iend, matchEnd, prefixStart) + 4; while (((ip>anchor) & (match>lowMatchPtr)) && (ip[-1] == match[-1])) { ip--; match--; mLength++; } /* catch up */ offset_2 = offset_1; offset_1 = offset; /* update offset history */ ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, offset + ZSTD_REP_MOVE, mLength-MINMATCH); ip += mLength; anchor = ip; } } if (ip <= ilimit) { /* Fill Table */ hashTable[ZSTD_hashPtr(base+current+2, hlog, mls)] = current+2; hashTable[ZSTD_hashPtr(ip-2, hlog, mls)] = (U32)(ip-2-base); /* check immediate repcode */ while (ip <= ilimit) { U32 const current2 = (U32)(ip-base); U32 const repIndex2 = current2 - offset_2; const BYTE* const repMatch2 = repIndex2 < prefixStartIndex ? dictBase + repIndex2 : base + repIndex2; if ( (((U32)((prefixStartIndex-1) - repIndex2) >= 3) & (repIndex2 > dictStartIndex)) /* intentional overflow */ && (MEM_read32(repMatch2) == MEM_read32(ip)) ) { const BYTE* const repEnd2 = repIndex2 < prefixStartIndex ? dictEnd : iend; size_t const repLength2 = ZSTD_count_2segments(ip+4, repMatch2+4, iend, repEnd2, prefixStart) + 4; { U32 const tmpOffset = offset_2; offset_2 = offset_1; offset_1 = tmpOffset; } /* swap offset_2 <=> offset_1 */ ZSTD_storeSeq(seqStore, 0 /*litlen*/, anchor, iend, 0 /*offcode*/, repLength2-MINMATCH); hashTable[ZSTD_hashPtr(ip, hlog, mls)] = current2; ip += repLength2; anchor = ip; continue; } break; } } } /* save reps for next block */ rep[0] = offset_1; rep[1] = offset_2; /* Return the last literals size */ return (size_t)(iend - anchor); } size_t ZSTD_compressBlock_fast_extDict( ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize) { U32 const mls = ms->cParams.minMatch; switch(mls) { default: /* includes case 3 */ case 4 : return ZSTD_compressBlock_fast_extDict_generic(ms, seqStore, rep, src, srcSize, 4); case 5 : return ZSTD_compressBlock_fast_extDict_generic(ms, seqStore, rep, src, srcSize, 5); case 6 : return ZSTD_compressBlock_fast_extDict_generic(ms, seqStore, rep, src, srcSize, 6); case 7 : return ZSTD_compressBlock_fast_extDict_generic(ms, seqStore, rep, src, srcSize, 7); } } borgbackup-1.1.15/src/borg/algorithms/zstd/lib/compress/zstd_ldm.h0000644000175000017500000000772613771325506025170 0ustar useruser00000000000000/* * Copyright (c) 2016-2020, Yann Collet, Facebook, Inc. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the * LICENSE file in the root directory of this source tree) and the GPLv2 (found * in the COPYING file in the root directory of this source tree). * You may select, at your option, one of the above-listed licenses. 
*/ #ifndef ZSTD_LDM_H #define ZSTD_LDM_H #if defined (__cplusplus) extern "C" { #endif #include "zstd_compress_internal.h" /* ldmParams_t, U32 */ #include "../zstd.h" /* ZSTD_CCtx, size_t */ /*-************************************* * Long distance matching ***************************************/ #define ZSTD_LDM_DEFAULT_WINDOW_LOG ZSTD_WINDOWLOG_LIMIT_DEFAULT void ZSTD_ldm_fillHashTable( ldmState_t* state, const BYTE* ip, const BYTE* iend, ldmParams_t const* params); /** * ZSTD_ldm_generateSequences(): * * Generates the sequences using the long distance match finder. * Generates long range matching sequences in `sequences`, which parse a prefix * of the source. `sequences` must be large enough to store every sequence, * which can be checked with `ZSTD_ldm_getMaxNbSeq()`. * @returns 0 or an error code. * * NOTE: The user must have called ZSTD_window_update() for all of the input * they have, even if they pass it to ZSTD_ldm_generateSequences() in chunks. * NOTE: This function returns an error if it runs out of space to store * sequences. */ size_t ZSTD_ldm_generateSequences( ldmState_t* ldms, rawSeqStore_t* sequences, ldmParams_t const* params, void const* src, size_t srcSize); /** * ZSTD_ldm_blockCompress(): * * Compresses a block using the predefined sequences, along with a secondary * block compressor. The literals section of every sequence is passed to the * secondary block compressor, and those sequences are interspersed with the * predefined sequences. Returns the length of the last literals. * Updates `rawSeqStore.pos` to indicate how many sequences have been consumed. * `rawSeqStore.seq` may also be updated to split the last sequence between two * blocks. * @return The length of the last literals. * * NOTE: The source must be at most the maximum block size, but the predefined * sequences can be any size, and may be longer than the block. In the case that * they are longer than the block, the last sequences may need to be split into * two. We handle that case correctly, and update `rawSeqStore` appropriately. * NOTE: This function does not return any errors. */ size_t ZSTD_ldm_blockCompress(rawSeqStore_t* rawSeqStore, ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize); /** * ZSTD_ldm_skipSequences(): * * Skip past `srcSize` bytes worth of sequences in `rawSeqStore`. * Avoids emitting matches less than `minMatch` bytes. * Must be called for data with is not passed to ZSTD_ldm_blockCompress(). */ void ZSTD_ldm_skipSequences(rawSeqStore_t* rawSeqStore, size_t srcSize, U32 const minMatch); /** ZSTD_ldm_getTableSize() : * Estimate the space needed for long distance matching tables or 0 if LDM is * disabled. */ size_t ZSTD_ldm_getTableSize(ldmParams_t params); /** ZSTD_ldm_getSeqSpace() : * Return an upper bound on the number of sequences that can be produced by * the long distance matcher, or 0 if LDM is disabled. */ size_t ZSTD_ldm_getMaxNbSeq(ldmParams_t params, size_t maxChunkSize); /** ZSTD_ldm_adjustParameters() : * If the params->hashRateLog is not set, set it to its default value based on * windowLog and params->hashLog. * * Ensures that params->bucketSizeLog is <= params->hashLog (setting it to * params->hashLog if it is not). * * Ensures that the minMatchLength >= targetLength during optimal parsing. 
*/ void ZSTD_ldm_adjustParameters(ldmParams_t* params, ZSTD_compressionParameters const* cParams); #if defined (__cplusplus) } #endif #endif /* ZSTD_FAST_H */ borgbackup-1.1.15/src/borg/algorithms/zstd/lib/compress/fse_compress.c0000644000175000017500000006345213771325506026031 0ustar useruser00000000000000/* ****************************************************************** * FSE : Finite State Entropy encoder * Copyright (c) 2013-2020, Yann Collet, Facebook, Inc. * * You can contact the author at : * - FSE source repository : https://github.com/Cyan4973/FiniteStateEntropy * - Public forum : https://groups.google.com/forum/#!forum/lz4c * * This source code is licensed under both the BSD-style license (found in the * LICENSE file in the root directory of this source tree) and the GPLv2 (found * in the COPYING file in the root directory of this source tree). * You may select, at your option, one of the above-listed licenses. ****************************************************************** */ /* ************************************************************** * Includes ****************************************************************/ #include /* malloc, free, qsort */ #include /* memcpy, memset */ #include "../common/compiler.h" #include "../common/mem.h" /* U32, U16, etc. */ #include "../common/debug.h" /* assert, DEBUGLOG */ #include "hist.h" /* HIST_count_wksp */ #include "../common/bitstream.h" #define FSE_STATIC_LINKING_ONLY #include "../common/fse.h" #include "../common/error_private.h" /* ************************************************************** * Error Management ****************************************************************/ #define FSE_isError ERR_isError /* ************************************************************** * Templates ****************************************************************/ /* designed to be included for type-specific functions (template emulation in C) Objective is to write these functions only once, for improved maintenance */ /* safety checks */ #ifndef FSE_FUNCTION_EXTENSION # error "FSE_FUNCTION_EXTENSION must be defined" #endif #ifndef FSE_FUNCTION_TYPE # error "FSE_FUNCTION_TYPE must be defined" #endif /* Function names */ #define FSE_CAT(X,Y) X##Y #define FSE_FUNCTION_NAME(X,Y) FSE_CAT(X,Y) #define FSE_TYPE_NAME(X,Y) FSE_CAT(X,Y) /* Function templates */ /* FSE_buildCTable_wksp() : * Same as FSE_buildCTable(), but using an externally allocated scratch buffer (`workSpace`). 
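 * (added note) The size check in the function body requires
 * wkspSize >= ((size_t)1 << tableLog) * sizeof(FSE_FUNCTION_TYPE), otherwise tableLog_tooLarge is returned.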
* wkspSize should be sized to handle worst case situation, which is `1<>1 : 1) ; FSE_symbolCompressionTransform* const symbolTT = (FSE_symbolCompressionTransform*) (FSCT); U32 const step = FSE_TABLESTEP(tableSize); U32 cumul[FSE_MAX_SYMBOL_VALUE+2]; FSE_FUNCTION_TYPE* const tableSymbol = (FSE_FUNCTION_TYPE*)workSpace; U32 highThreshold = tableSize-1; /* CTable header */ if (((size_t)1 << tableLog) * sizeof(FSE_FUNCTION_TYPE) > wkspSize) return ERROR(tableLog_tooLarge); tableU16[-2] = (U16) tableLog; tableU16[-1] = (U16) maxSymbolValue; assert(tableLog < 16); /* required for threshold strategy to work */ /* For explanations on how to distribute symbol values over the table : * http://fastcompression.blogspot.fr/2014/02/fse-distributing-symbol-values.html */ #ifdef __clang_analyzer__ memset(tableSymbol, 0, sizeof(*tableSymbol) * tableSize); /* useless initialization, just to keep scan-build happy */ #endif /* symbol start positions */ { U32 u; cumul[0] = 0; for (u=1; u <= maxSymbolValue+1; u++) { if (normalizedCounter[u-1]==-1) { /* Low proba symbol */ cumul[u] = cumul[u-1] + 1; tableSymbol[highThreshold--] = (FSE_FUNCTION_TYPE)(u-1); } else { cumul[u] = cumul[u-1] + normalizedCounter[u-1]; } } cumul[maxSymbolValue+1] = tableSize+1; } /* Spread symbols */ { U32 position = 0; U32 symbol; for (symbol=0; symbol<=maxSymbolValue; symbol++) { int nbOccurrences; int const freq = normalizedCounter[symbol]; for (nbOccurrences=0; nbOccurrences highThreshold) position = (position + step) & tableMask; /* Low proba area */ } } assert(position==0); /* Must have initialized all positions */ } /* Build table */ { U32 u; for (u=0; u> 3) + 3; return maxSymbolValue ? maxHeaderSize : FSE_NCOUNTBOUND; /* maxSymbolValue==0 ? use default */ } static size_t FSE_writeNCount_generic (void* header, size_t headerBufferSize, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog, unsigned writeIsSafe) { BYTE* const ostart = (BYTE*) header; BYTE* out = ostart; BYTE* const oend = ostart + headerBufferSize; int nbBits; const int tableSize = 1 << tableLog; int remaining; int threshold; U32 bitStream = 0; int bitCount = 0; unsigned symbol = 0; unsigned const alphabetSize = maxSymbolValue + 1; int previousIs0 = 0; /* Table Size */ bitStream += (tableLog-FSE_MIN_TABLELOG) << bitCount; bitCount += 4; /* Init */ remaining = tableSize+1; /* +1 for extra accuracy */ threshold = tableSize; nbBits = tableLog+1; while ((symbol < alphabetSize) && (remaining>1)) { /* stops at 1 */ if (previousIs0) { unsigned start = symbol; while ((symbol < alphabetSize) && !normalizedCounter[symbol]) symbol++; if (symbol == alphabetSize) break; /* incorrect distribution */ while (symbol >= start+24) { start+=24; bitStream += 0xFFFFU << bitCount; if ((!writeIsSafe) && (out > oend-2)) return ERROR(dstSize_tooSmall); /* Buffer overflow */ out[0] = (BYTE) bitStream; out[1] = (BYTE)(bitStream>>8); out+=2; bitStream>>=16; } while (symbol >= start+3) { start+=3; bitStream += 3 << bitCount; bitCount += 2; } bitStream += (symbol-start) << bitCount; bitCount += 2; if (bitCount>16) { if ((!writeIsSafe) && (out > oend - 2)) return ERROR(dstSize_tooSmall); /* Buffer overflow */ out[0] = (BYTE)bitStream; out[1] = (BYTE)(bitStream>>8); out += 2; bitStream >>= 16; bitCount -= 16; } } { int count = normalizedCounter[symbol++]; int const max = (2*threshold-1) - remaining; remaining -= count < 0 ? -count : count; count++; /* +1 for extra accuracy */ if (count>=threshold) count += max; /* [0..max[ [max..threshold[ (...) 
[threshold+max 2*threshold[ */ bitStream += count << bitCount; bitCount += nbBits; bitCount -= (count>=1; } } if (bitCount>16) { if ((!writeIsSafe) && (out > oend - 2)) return ERROR(dstSize_tooSmall); /* Buffer overflow */ out[0] = (BYTE)bitStream; out[1] = (BYTE)(bitStream>>8); out += 2; bitStream >>= 16; bitCount -= 16; } } if (remaining != 1) return ERROR(GENERIC); /* incorrect normalized distribution */ assert(symbol <= alphabetSize); /* flush remaining bitStream */ if ((!writeIsSafe) && (out > oend - 2)) return ERROR(dstSize_tooSmall); /* Buffer overflow */ out[0] = (BYTE)bitStream; out[1] = (BYTE)(bitStream>>8); out+= (bitCount+7) /8; return (out-ostart); } size_t FSE_writeNCount (void* buffer, size_t bufferSize, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog) { if (tableLog > FSE_MAX_TABLELOG) return ERROR(tableLog_tooLarge); /* Unsupported */ if (tableLog < FSE_MIN_TABLELOG) return ERROR(GENERIC); /* Unsupported */ if (bufferSize < FSE_NCountWriteBound(maxSymbolValue, tableLog)) return FSE_writeNCount_generic(buffer, bufferSize, normalizedCounter, maxSymbolValue, tableLog, 0); return FSE_writeNCount_generic(buffer, bufferSize, normalizedCounter, maxSymbolValue, tableLog, 1 /* write in buffer is safe */); } /*-************************************************************** * FSE Compression Code ****************************************************************/ FSE_CTable* FSE_createCTable (unsigned maxSymbolValue, unsigned tableLog) { size_t size; if (tableLog > FSE_TABLELOG_ABSOLUTE_MAX) tableLog = FSE_TABLELOG_ABSOLUTE_MAX; size = FSE_CTABLE_SIZE_U32 (tableLog, maxSymbolValue) * sizeof(U32); return (FSE_CTable*)malloc(size); } void FSE_freeCTable (FSE_CTable* ct) { free(ct); } /* provides the minimum logSize to safely represent a distribution */ static unsigned FSE_minTableLog(size_t srcSize, unsigned maxSymbolValue) { U32 minBitsSrc = BIT_highbit32((U32)(srcSize)) + 1; U32 minBitsSymbols = BIT_highbit32(maxSymbolValue) + 2; U32 minBits = minBitsSrc < minBitsSymbols ? minBitsSrc : minBitsSymbols; assert(srcSize > 1); /* Not supported, RLE should be used instead */ return minBits; } unsigned FSE_optimalTableLog_internal(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue, unsigned minus) { U32 maxBitsSrc = BIT_highbit32((U32)(srcSize - 1)) - minus; U32 tableLog = maxTableLog; U32 minBits = FSE_minTableLog(srcSize, maxSymbolValue); assert(srcSize > 1); /* Not supported, RLE should be used instead */ if (tableLog==0) tableLog = FSE_DEFAULT_TABLELOG; if (maxBitsSrc < tableLog) tableLog = maxBitsSrc; /* Accuracy can be reduced */ if (minBits > tableLog) tableLog = minBits; /* Need a minimum to safely represent all symbol values */ if (tableLog < FSE_MIN_TABLELOG) tableLog = FSE_MIN_TABLELOG; if (tableLog > FSE_MAX_TABLELOG) tableLog = FSE_MAX_TABLELOG; return tableLog; } unsigned FSE_optimalTableLog(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue) { return FSE_optimalTableLog_internal(maxTableLog, srcSize, maxSymbolValue, 2); } /* Secondary normalization method. To be used when primary method fails. 
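 * (added clarification) FSE_normalizeCount() below falls back to this method when the rounding
 * deficit reaches at least half of the largest symbol's normalized count. Here, rare symbols are
 * pinned to probability -1 or 1 first, and the remaining table slots are spread over the other
 * symbols proportionally to their counts.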
*/ static size_t FSE_normalizeM2(short* norm, U32 tableLog, const unsigned* count, size_t total, U32 maxSymbolValue) { short const NOT_YET_ASSIGNED = -2; U32 s; U32 distributed = 0; U32 ToDistribute; /* Init */ U32 const lowThreshold = (U32)(total >> tableLog); U32 lowOne = (U32)((total * 3) >> (tableLog + 1)); for (s=0; s<=maxSymbolValue; s++) { if (count[s] == 0) { norm[s]=0; continue; } if (count[s] <= lowThreshold) { norm[s] = -1; distributed++; total -= count[s]; continue; } if (count[s] <= lowOne) { norm[s] = 1; distributed++; total -= count[s]; continue; } norm[s]=NOT_YET_ASSIGNED; } ToDistribute = (1 << tableLog) - distributed; if (ToDistribute == 0) return 0; if ((total / ToDistribute) > lowOne) { /* risk of rounding to zero */ lowOne = (U32)((total * 3) / (ToDistribute * 2)); for (s=0; s<=maxSymbolValue; s++) { if ((norm[s] == NOT_YET_ASSIGNED) && (count[s] <= lowOne)) { norm[s] = 1; distributed++; total -= count[s]; continue; } } ToDistribute = (1 << tableLog) - distributed; } if (distributed == maxSymbolValue+1) { /* all values are pretty poor; probably incompressible data (should have already been detected); find max, then give all remaining points to max */ U32 maxV = 0, maxC = 0; for (s=0; s<=maxSymbolValue; s++) if (count[s] > maxC) { maxV=s; maxC=count[s]; } norm[maxV] += (short)ToDistribute; return 0; } if (total == 0) { /* all of the symbols were low enough for the lowOne or lowThreshold */ for (s=0; ToDistribute > 0; s = (s+1)%(maxSymbolValue+1)) if (norm[s] > 0) { ToDistribute--; norm[s]++; } return 0; } { U64 const vStepLog = 62 - tableLog; U64 const mid = (1ULL << (vStepLog-1)) - 1; U64 const rStep = ((((U64)1<> vStepLog); U32 const sEnd = (U32)(end >> vStepLog); U32 const weight = sEnd - sStart; if (weight < 1) return ERROR(GENERIC); norm[s] = (short)weight; tmpTotal = end; } } } return 0; } size_t FSE_normalizeCount (short* normalizedCounter, unsigned tableLog, const unsigned* count, size_t total, unsigned maxSymbolValue) { /* Sanity checks */ if (tableLog==0) tableLog = FSE_DEFAULT_TABLELOG; if (tableLog < FSE_MIN_TABLELOG) return ERROR(GENERIC); /* Unsupported size */ if (tableLog > FSE_MAX_TABLELOG) return ERROR(tableLog_tooLarge); /* Unsupported size */ if (tableLog < FSE_minTableLog(total, maxSymbolValue)) return ERROR(GENERIC); /* Too small tableLog, compression potentially impossible */ { static U32 const rtbTable[] = { 0, 473195, 504333, 520860, 550000, 700000, 750000, 830000 }; U64 const scale = 62 - tableLog; U64 const step = ((U64)1<<62) / total; /* <== here, one division ! 
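 * (added note: each symbol's probability below is then derived from `step` with a multiply and a
 * shift, so the primary normalization loop performs no further divisions)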
*/ U64 const vStep = 1ULL<<(scale-20); int stillToDistribute = 1<> tableLog); for (s=0; s<=maxSymbolValue; s++) { if (count[s] == total) return 0; /* rle special case */ if (count[s] == 0) { normalizedCounter[s]=0; continue; } if (count[s] <= lowThreshold) { normalizedCounter[s] = -1; stillToDistribute--; } else { short proba = (short)((count[s]*step) >> scale); if (proba<8) { U64 restToBeat = vStep * rtbTable[proba]; proba += (count[s]*step) - ((U64)proba< restToBeat; } if (proba > largestP) { largestP=proba; largest=s; } normalizedCounter[s] = proba; stillToDistribute -= proba; } } if (-stillToDistribute >= (normalizedCounter[largest] >> 1)) { /* corner case, need another normalization method */ size_t const errorCode = FSE_normalizeM2(normalizedCounter, tableLog, count, total, maxSymbolValue); if (FSE_isError(errorCode)) return errorCode; } else normalizedCounter[largest] += (short)stillToDistribute; } #if 0 { /* Print Table (debug) */ U32 s; U32 nTotal = 0; for (s=0; s<=maxSymbolValue; s++) RAWLOG(2, "%3i: %4i \n", s, normalizedCounter[s]); for (s=0; s<=maxSymbolValue; s++) nTotal += abs(normalizedCounter[s]); if (nTotal != (1U<>1); /* assumption : tableLog >= 1 */ FSE_symbolCompressionTransform* const symbolTT = (FSE_symbolCompressionTransform*) (FSCT); unsigned s; /* Sanity checks */ if (nbBits < 1) return ERROR(GENERIC); /* min size */ /* header */ tableU16[-2] = (U16) nbBits; tableU16[-1] = (U16) maxSymbolValue; /* Build table */ for (s=0; s FSE_MAX_TABLELOG*4+7 ) && (srcSize & 2)) { /* test bit 2 */ FSE_encodeSymbol(&bitC, &CState2, *--ip); FSE_encodeSymbol(&bitC, &CState1, *--ip); FSE_FLUSHBITS(&bitC); } /* 2 or 4 encoding per loop */ while ( ip>istart ) { FSE_encodeSymbol(&bitC, &CState2, *--ip); if (sizeof(bitC.bitContainer)*8 < FSE_MAX_TABLELOG*2+7 ) /* this test must be static */ FSE_FLUSHBITS(&bitC); FSE_encodeSymbol(&bitC, &CState1, *--ip); if (sizeof(bitC.bitContainer)*8 > FSE_MAX_TABLELOG*4+7 ) { /* this test must be static */ FSE_encodeSymbol(&bitC, &CState2, *--ip); FSE_encodeSymbol(&bitC, &CState1, *--ip); } FSE_FLUSHBITS(&bitC); } FSE_flushCState(&bitC, &CState2); FSE_flushCState(&bitC, &CState1); return BIT_closeCStream(&bitC); } size_t FSE_compress_usingCTable (void* dst, size_t dstSize, const void* src, size_t srcSize, const FSE_CTable* ct) { unsigned const fast = (dstSize >= FSE_BLOCKBOUND(srcSize)); if (fast) return FSE_compress_usingCTable_generic(dst, dstSize, src, srcSize, ct, 1); else return FSE_compress_usingCTable_generic(dst, dstSize, src, srcSize, ct, 0); } size_t FSE_compressBound(size_t size) { return FSE_COMPRESSBOUND(size); } /* FSE_compress_wksp() : * Same as FSE_compress2(), but using an externally allocated scratch buffer (`workSpace`). 
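 * (added illustrative sketch, mirroring how FSE_compress2() further down calls this function; it is
 * not part of the upstream documentation)
 *
 *     fseWkspMax_t wksp;   // worst-case workspace, defined just before FSE_compress2()
 *     size_t const cSize = FSE_compress_wksp(dst, dstCapacity, src, srcSize,
 *                                            FSE_MAX_SYMBOL_VALUE, FSE_DEFAULT_TABLELOG,
 *                                            &wksp, sizeof(wksp));
 *     // cSize == 0 means the input was judged not compressible; FSE_isError(cSize) signals a real error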
* `wkspSize` size must be `(1< not compressible */ if (maxCount < (srcSize >> 7)) return 0; /* Heuristic : not compressible enough */ } tableLog = FSE_optimalTableLog(tableLog, srcSize, maxSymbolValue); CHECK_F( FSE_normalizeCount(norm, tableLog, count, srcSize, maxSymbolValue) ); /* Write table description header */ { CHECK_V_F(nc_err, FSE_writeNCount(op, oend-op, norm, maxSymbolValue, tableLog) ); op += nc_err; } /* Compress */ CHECK_F( FSE_buildCTable_wksp(CTable, norm, maxSymbolValue, tableLog, scratchBuffer, scratchBufferSize) ); { CHECK_V_F(cSize, FSE_compress_usingCTable(op, oend - op, src, srcSize, CTable) ); if (cSize == 0) return 0; /* not enough space for compressed data */ op += cSize; } /* check compressibility */ if ( (size_t)(op-ostart) >= srcSize-1 ) return 0; return op-ostart; } typedef struct { FSE_CTable CTable_max[FSE_CTABLE_SIZE_U32(FSE_MAX_TABLELOG, FSE_MAX_SYMBOL_VALUE)]; BYTE scratchBuffer[1 << FSE_MAX_TABLELOG]; } fseWkspMax_t; size_t FSE_compress2 (void* dst, size_t dstCapacity, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog) { fseWkspMax_t scratchBuffer; DEBUG_STATIC_ASSERT(sizeof(scratchBuffer) >= FSE_WKSP_SIZE_U32(FSE_MAX_TABLELOG, FSE_MAX_SYMBOL_VALUE)); /* compilation failures here means scratchBuffer is not large enough */ if (tableLog > FSE_MAX_TABLELOG) return ERROR(tableLog_tooLarge); return FSE_compress_wksp(dst, dstCapacity, src, srcSize, maxSymbolValue, tableLog, &scratchBuffer, sizeof(scratchBuffer)); } size_t FSE_compress (void* dst, size_t dstCapacity, const void* src, size_t srcSize) { return FSE_compress2(dst, dstCapacity, src, srcSize, FSE_MAX_SYMBOL_VALUE, FSE_DEFAULT_TABLELOG); } #endif /* FSE_COMMONDEFS_ONLY */ borgbackup-1.1.15/src/borg/algorithms/zstd/lib/compress/zstd_compress_sequences.c0000644000175000017500000004535013771325506030310 0ustar useruser00000000000000/* * Copyright (c) 2016-2020, Yann Collet, Facebook, Inc. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the * LICENSE file in the root directory of this source tree) and the GPLv2 (found * in the COPYING file in the root directory of this source tree). * You may select, at your option, one of the above-listed licenses. */ /*-************************************* * Dependencies ***************************************/ #include "zstd_compress_sequences.h" /** * -log2(x / 256) lookup table for x in [0, 256). 
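 * (added example) Values are 8-bit fixed point: entry 128 is 256, i.e. -log2(128/256) * 256 = 1.0 bit,
 * and entry 64 is 512, i.e. 2.0 bits.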
* If x == 0: Return 0 * Else: Return floor(-log2(x / 256) * 256) */ static unsigned const kInverseProbabilityLog256[256] = { 0, 2048, 1792, 1642, 1536, 1453, 1386, 1329, 1280, 1236, 1197, 1162, 1130, 1100, 1073, 1047, 1024, 1001, 980, 960, 941, 923, 906, 889, 874, 859, 844, 830, 817, 804, 791, 779, 768, 756, 745, 734, 724, 714, 704, 694, 685, 676, 667, 658, 650, 642, 633, 626, 618, 610, 603, 595, 588, 581, 574, 567, 561, 554, 548, 542, 535, 529, 523, 517, 512, 506, 500, 495, 489, 484, 478, 473, 468, 463, 458, 453, 448, 443, 438, 434, 429, 424, 420, 415, 411, 407, 402, 398, 394, 390, 386, 382, 377, 373, 370, 366, 362, 358, 354, 350, 347, 343, 339, 336, 332, 329, 325, 322, 318, 315, 311, 308, 305, 302, 298, 295, 292, 289, 286, 282, 279, 276, 273, 270, 267, 264, 261, 258, 256, 253, 250, 247, 244, 241, 239, 236, 233, 230, 228, 225, 222, 220, 217, 215, 212, 209, 207, 204, 202, 199, 197, 194, 192, 190, 187, 185, 182, 180, 178, 175, 173, 171, 168, 166, 164, 162, 159, 157, 155, 153, 151, 149, 146, 144, 142, 140, 138, 136, 134, 132, 130, 128, 126, 123, 121, 119, 117, 115, 114, 112, 110, 108, 106, 104, 102, 100, 98, 96, 94, 93, 91, 89, 87, 85, 83, 82, 80, 78, 76, 74, 73, 71, 69, 67, 66, 64, 62, 61, 59, 57, 55, 54, 52, 50, 49, 47, 46, 44, 42, 41, 39, 37, 36, 34, 33, 31, 30, 28, 26, 25, 23, 22, 20, 19, 17, 16, 14, 13, 11, 10, 8, 7, 5, 4, 2, 1, }; static unsigned ZSTD_getFSEMaxSymbolValue(FSE_CTable const* ctable) { void const* ptr = ctable; U16 const* u16ptr = (U16 const*)ptr; U32 const maxSymbolValue = MEM_read16(u16ptr + 1); return maxSymbolValue; } /** * Returns the cost in bytes of encoding the normalized count header. * Returns an error if any of the helper functions return an error. */ static size_t ZSTD_NCountCost(unsigned const* count, unsigned const max, size_t const nbSeq, unsigned const FSELog) { BYTE wksp[FSE_NCOUNTBOUND]; S16 norm[MaxSeq + 1]; const U32 tableLog = FSE_optimalTableLog(FSELog, nbSeq, max); FORWARD_IF_ERROR(FSE_normalizeCount(norm, tableLog, count, nbSeq, max), ""); return FSE_writeNCount(wksp, sizeof(wksp), norm, max, tableLog); } /** * Returns the cost in bits of encoding the distribution described by count * using the entropy bound. */ static size_t ZSTD_entropyCost(unsigned const* count, unsigned const max, size_t const total) { unsigned cost = 0; unsigned s; for (s = 0; s <= max; ++s) { unsigned norm = (unsigned)((256 * count[s]) / total); if (count[s] != 0 && norm == 0) norm = 1; assert(count[s] < total); cost += count[s] * kInverseProbabilityLog256[norm]; } return cost >> 8; } /** * Returns the cost in bits of encoding the distribution in count using ctable. * Returns an error if ctable cannot represent all the symbols in count. 
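 * (added note) ZSTD_selectEncodingType() below uses this as the cost of set_repeat, comparing it
 * against the set_basic and set_compressed estimates.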
*/ size_t ZSTD_fseBitCost( FSE_CTable const* ctable, unsigned const* count, unsigned const max) { unsigned const kAccuracyLog = 8; size_t cost = 0; unsigned s; FSE_CState_t cstate; FSE_initCState(&cstate, ctable); if (ZSTD_getFSEMaxSymbolValue(ctable) < max) { DEBUGLOG(5, "Repeat FSE_CTable has maxSymbolValue %u < %u", ZSTD_getFSEMaxSymbolValue(ctable), max); return ERROR(GENERIC); } for (s = 0; s <= max; ++s) { unsigned const tableLog = cstate.stateLog; unsigned const badCost = (tableLog + 1) << kAccuracyLog; unsigned const bitCost = FSE_bitCost(cstate.symbolTT, tableLog, s, kAccuracyLog); if (count[s] == 0) continue; if (bitCost >= badCost) { DEBUGLOG(5, "Repeat FSE_CTable has Prob[%u] == 0", s); return ERROR(GENERIC); } cost += (size_t)count[s] * bitCost; } return cost >> kAccuracyLog; } /** * Returns the cost in bits of encoding the distribution in count using the * table described by norm. The max symbol support by norm is assumed >= max. * norm must be valid for every symbol with non-zero probability in count. */ size_t ZSTD_crossEntropyCost(short const* norm, unsigned accuracyLog, unsigned const* count, unsigned const max) { unsigned const shift = 8 - accuracyLog; size_t cost = 0; unsigned s; assert(accuracyLog <= 8); for (s = 0; s <= max; ++s) { unsigned const normAcc = (norm[s] != -1) ? (unsigned)norm[s] : 1; unsigned const norm256 = normAcc << shift; assert(norm256 > 0); assert(norm256 < 256); cost += count[s] * kInverseProbabilityLog256[norm256]; } return cost >> 8; } symbolEncodingType_e ZSTD_selectEncodingType( FSE_repeat* repeatMode, unsigned const* count, unsigned const max, size_t const mostFrequent, size_t nbSeq, unsigned const FSELog, FSE_CTable const* prevCTable, short const* defaultNorm, U32 defaultNormLog, ZSTD_defaultPolicy_e const isDefaultAllowed, ZSTD_strategy const strategy) { ZSTD_STATIC_ASSERT(ZSTD_defaultDisallowed == 0 && ZSTD_defaultAllowed != 0); if (mostFrequent == nbSeq) { *repeatMode = FSE_repeat_none; if (isDefaultAllowed && nbSeq <= 2) { /* Prefer set_basic over set_rle when there are 2 or less symbols, * since RLE uses 1 byte, but set_basic uses 5-6 bits per symbol. * If basic encoding isn't possible, always choose RLE. */ DEBUGLOG(5, "Selected set_basic"); return set_basic; } DEBUGLOG(5, "Selected set_rle"); return set_rle; } if (strategy < ZSTD_lazy) { if (isDefaultAllowed) { size_t const staticFse_nbSeq_max = 1000; size_t const mult = 10 - strategy; size_t const baseLog = 3; size_t const dynamicFse_nbSeq_min = (((size_t)1 << defaultNormLog) * mult) >> baseLog; /* 28-36 for offset, 56-72 for lengths */ assert(defaultNormLog >= 5 && defaultNormLog <= 6); /* xx_DEFAULTNORMLOG */ assert(mult <= 9 && mult >= 7); if ( (*repeatMode == FSE_repeat_valid) && (nbSeq < staticFse_nbSeq_max) ) { DEBUGLOG(5, "Selected set_repeat"); return set_repeat; } if ( (nbSeq < dynamicFse_nbSeq_min) || (mostFrequent < (nbSeq >> (defaultNormLog-1))) ) { DEBUGLOG(5, "Selected set_basic"); /* The format allows default tables to be repeated, but it isn't useful. * When using simple heuristics to select encoding type, we don't want * to confuse these tables with dictionaries. When running more careful * analysis, we don't need to waste time checking both repeating tables * and default tables. */ *repeatMode = FSE_repeat_none; return set_basic; } } } else { size_t const basicCost = isDefaultAllowed ? ZSTD_crossEntropyCost(defaultNorm, defaultNormLog, count, max) : ERROR(GENERIC); size_t const repeatCost = *repeatMode != FSE_repeat_none ? 
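/* added note: with no previous table to repeat, an error code stands in as an effectively infinite cost, so set_repeat can never win the comparison below */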
ZSTD_fseBitCost(prevCTable, count, max) : ERROR(GENERIC); size_t const NCountCost = ZSTD_NCountCost(count, max, nbSeq, FSELog); size_t const compressedCost = (NCountCost << 3) + ZSTD_entropyCost(count, max, nbSeq); if (isDefaultAllowed) { assert(!ZSTD_isError(basicCost)); assert(!(*repeatMode == FSE_repeat_valid && ZSTD_isError(repeatCost))); } assert(!ZSTD_isError(NCountCost)); assert(compressedCost < ERROR(maxCode)); DEBUGLOG(5, "Estimated bit costs: basic=%u\trepeat=%u\tcompressed=%u", (unsigned)basicCost, (unsigned)repeatCost, (unsigned)compressedCost); if (basicCost <= repeatCost && basicCost <= compressedCost) { DEBUGLOG(5, "Selected set_basic"); assert(isDefaultAllowed); *repeatMode = FSE_repeat_none; return set_basic; } if (repeatCost <= compressedCost) { DEBUGLOG(5, "Selected set_repeat"); assert(!ZSTD_isError(repeatCost)); return set_repeat; } assert(compressedCost < basicCost && compressedCost < repeatCost); } DEBUGLOG(5, "Selected set_compressed"); *repeatMode = FSE_repeat_check; return set_compressed; } size_t ZSTD_buildCTable(void* dst, size_t dstCapacity, FSE_CTable* nextCTable, U32 FSELog, symbolEncodingType_e type, unsigned* count, U32 max, const BYTE* codeTable, size_t nbSeq, const S16* defaultNorm, U32 defaultNormLog, U32 defaultMax, const FSE_CTable* prevCTable, size_t prevCTableSize, void* entropyWorkspace, size_t entropyWorkspaceSize) { BYTE* op = (BYTE*)dst; const BYTE* const oend = op + dstCapacity; DEBUGLOG(6, "ZSTD_buildCTable (dstCapacity=%u)", (unsigned)dstCapacity); switch (type) { case set_rle: FORWARD_IF_ERROR(FSE_buildCTable_rle(nextCTable, (BYTE)max), ""); RETURN_ERROR_IF(dstCapacity==0, dstSize_tooSmall, "not enough space"); *op = codeTable[0]; return 1; case set_repeat: memcpy(nextCTable, prevCTable, prevCTableSize); return 0; case set_basic: FORWARD_IF_ERROR(FSE_buildCTable_wksp(nextCTable, defaultNorm, defaultMax, defaultNormLog, entropyWorkspace, entropyWorkspaceSize), ""); /* note : could be pre-calculated */ return 0; case set_compressed: { S16 norm[MaxSeq + 1]; size_t nbSeq_1 = nbSeq; const U32 tableLog = FSE_optimalTableLog(FSELog, nbSeq, max); if (count[codeTable[nbSeq-1]] > 1) { count[codeTable[nbSeq-1]]--; nbSeq_1--; } assert(nbSeq_1 > 1); FORWARD_IF_ERROR(FSE_normalizeCount(norm, tableLog, count, nbSeq_1, max), ""); { size_t const NCountSize = FSE_writeNCount(op, oend - op, norm, max, tableLog); /* overflow protected */ FORWARD_IF_ERROR(NCountSize, "FSE_writeNCount failed"); FORWARD_IF_ERROR(FSE_buildCTable_wksp(nextCTable, norm, max, tableLog, entropyWorkspace, entropyWorkspaceSize), ""); return NCountSize; } } default: assert(0); RETURN_ERROR(GENERIC, "impossible to reach"); } } FORCE_INLINE_TEMPLATE size_t ZSTD_encodeSequences_body( void* dst, size_t dstCapacity, FSE_CTable const* CTable_MatchLength, BYTE const* mlCodeTable, FSE_CTable const* CTable_OffsetBits, BYTE const* ofCodeTable, FSE_CTable const* CTable_LitLength, BYTE const* llCodeTable, seqDef const* sequences, size_t nbSeq, int longOffsets) { BIT_CStream_t blockStream; FSE_CState_t stateMatchLength; FSE_CState_t stateOffsetBits; FSE_CState_t stateLitLength; RETURN_ERROR_IF( ERR_isError(BIT_initCStream(&blockStream, dst, dstCapacity)), dstSize_tooSmall, "not enough space remaining"); DEBUGLOG(6, "available space for bitstream : %i (dstCapacity=%u)", (int)(blockStream.endPtr - blockStream.startPtr), (unsigned)dstCapacity); /* first symbols */ FSE_initCState2(&stateMatchLength, CTable_MatchLength, mlCodeTable[nbSeq-1]); FSE_initCState2(&stateOffsetBits, CTable_OffsetBits, 
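/* added note: sequences are encoded back to front, so all three FSE states are primed with the codes of the last sequence, and the loop below walks from nbSeq-2 down to 0 */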
ofCodeTable[nbSeq-1]); FSE_initCState2(&stateLitLength, CTable_LitLength, llCodeTable[nbSeq-1]); BIT_addBits(&blockStream, sequences[nbSeq-1].litLength, LL_bits[llCodeTable[nbSeq-1]]); if (MEM_32bits()) BIT_flushBits(&blockStream); BIT_addBits(&blockStream, sequences[nbSeq-1].matchLength, ML_bits[mlCodeTable[nbSeq-1]]); if (MEM_32bits()) BIT_flushBits(&blockStream); if (longOffsets) { U32 const ofBits = ofCodeTable[nbSeq-1]; unsigned const extraBits = ofBits - MIN(ofBits, STREAM_ACCUMULATOR_MIN-1); if (extraBits) { BIT_addBits(&blockStream, sequences[nbSeq-1].offset, extraBits); BIT_flushBits(&blockStream); } BIT_addBits(&blockStream, sequences[nbSeq-1].offset >> extraBits, ofBits - extraBits); } else { BIT_addBits(&blockStream, sequences[nbSeq-1].offset, ofCodeTable[nbSeq-1]); } BIT_flushBits(&blockStream); { size_t n; for (n=nbSeq-2 ; n= 64-7-(LLFSELog+MLFSELog+OffFSELog))) BIT_flushBits(&blockStream); /* (7)*/ BIT_addBits(&blockStream, sequences[n].litLength, llBits); if (MEM_32bits() && ((llBits+mlBits)>24)) BIT_flushBits(&blockStream); BIT_addBits(&blockStream, sequences[n].matchLength, mlBits); if (MEM_32bits() || (ofBits+mlBits+llBits > 56)) BIT_flushBits(&blockStream); if (longOffsets) { unsigned const extraBits = ofBits - MIN(ofBits, STREAM_ACCUMULATOR_MIN-1); if (extraBits) { BIT_addBits(&blockStream, sequences[n].offset, extraBits); BIT_flushBits(&blockStream); /* (7)*/ } BIT_addBits(&blockStream, sequences[n].offset >> extraBits, ofBits - extraBits); /* 31 */ } else { BIT_addBits(&blockStream, sequences[n].offset, ofBits); /* 31 */ } BIT_flushBits(&blockStream); /* (7)*/ DEBUGLOG(7, "remaining space : %i", (int)(blockStream.endPtr - blockStream.ptr)); } } DEBUGLOG(6, "ZSTD_encodeSequences: flushing ML state with %u bits", stateMatchLength.stateLog); FSE_flushCState(&blockStream, &stateMatchLength); DEBUGLOG(6, "ZSTD_encodeSequences: flushing Off state with %u bits", stateOffsetBits.stateLog); FSE_flushCState(&blockStream, &stateOffsetBits); DEBUGLOG(6, "ZSTD_encodeSequences: flushing LL state with %u bits", stateLitLength.stateLog); FSE_flushCState(&blockStream, &stateLitLength); { size_t const streamSize = BIT_closeCStream(&blockStream); RETURN_ERROR_IF(streamSize==0, dstSize_tooSmall, "not enough space"); return streamSize; } } static size_t ZSTD_encodeSequences_default( void* dst, size_t dstCapacity, FSE_CTable const* CTable_MatchLength, BYTE const* mlCodeTable, FSE_CTable const* CTable_OffsetBits, BYTE const* ofCodeTable, FSE_CTable const* CTable_LitLength, BYTE const* llCodeTable, seqDef const* sequences, size_t nbSeq, int longOffsets) { return ZSTD_encodeSequences_body(dst, dstCapacity, CTable_MatchLength, mlCodeTable, CTable_OffsetBits, ofCodeTable, CTable_LitLength, llCodeTable, sequences, nbSeq, longOffsets); } #if DYNAMIC_BMI2 static TARGET_ATTRIBUTE("bmi2") size_t ZSTD_encodeSequences_bmi2( void* dst, size_t dstCapacity, FSE_CTable const* CTable_MatchLength, BYTE const* mlCodeTable, FSE_CTable const* CTable_OffsetBits, BYTE const* ofCodeTable, FSE_CTable const* CTable_LitLength, BYTE const* llCodeTable, seqDef const* sequences, size_t nbSeq, int longOffsets) { return ZSTD_encodeSequences_body(dst, dstCapacity, CTable_MatchLength, mlCodeTable, CTable_OffsetBits, ofCodeTable, CTable_LitLength, llCodeTable, sequences, nbSeq, longOffsets); } #endif size_t ZSTD_encodeSequences( void* dst, size_t dstCapacity, FSE_CTable const* CTable_MatchLength, BYTE const* mlCodeTable, FSE_CTable const* CTable_OffsetBits, BYTE const* ofCodeTable, FSE_CTable const* CTable_LitLength, BYTE 
const* llCodeTable, seqDef const* sequences, size_t nbSeq, int longOffsets, int bmi2) { DEBUGLOG(5, "ZSTD_encodeSequences: dstCapacity = %u", (unsigned)dstCapacity); #if DYNAMIC_BMI2 if (bmi2) { return ZSTD_encodeSequences_bmi2(dst, dstCapacity, CTable_MatchLength, mlCodeTable, CTable_OffsetBits, ofCodeTable, CTable_LitLength, llCodeTable, sequences, nbSeq, longOffsets); } #endif (void)bmi2; return ZSTD_encodeSequences_default(dst, dstCapacity, CTable_MatchLength, mlCodeTable, CTable_OffsetBits, ofCodeTable, CTable_LitLength, llCodeTable, sequences, nbSeq, longOffsets); } borgbackup-1.1.15/src/borg/algorithms/zstd/lib/compress/zstd_compress_internal.h0000644000175000017500000013165213771325506030137 0ustar useruser00000000000000/* * Copyright (c) 2016-2020, Yann Collet, Facebook, Inc. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the * LICENSE file in the root directory of this source tree) and the GPLv2 (found * in the COPYING file in the root directory of this source tree). * You may select, at your option, one of the above-listed licenses. */ /* This header contains definitions * that shall **only** be used by modules within lib/compress. */ #ifndef ZSTD_COMPRESS_H #define ZSTD_COMPRESS_H /*-************************************* * Dependencies ***************************************/ #include "../common/zstd_internal.h" #include "zstd_cwksp.h" #ifdef ZSTD_MULTITHREAD # include "zstdmt_compress.h" #endif #if defined (__cplusplus) extern "C" { #endif /*-************************************* * Constants ***************************************/ #define kSearchStrength 8 #define HASH_READ_SIZE 8 #define ZSTD_DUBT_UNSORTED_MARK 1 /* For btlazy2 strategy, index ZSTD_DUBT_UNSORTED_MARK==1 means "unsorted". It could be confused for a real successor at index "1", if sorted as larger than its predecessor. It's not a big deal though : candidate will just be sorted again. Additionally, candidate position 1 will be lost. But candidate 1 cannot hide a large tree of candidates, so it's a minimal loss. The benefit is that ZSTD_DUBT_UNSORTED_MARK cannot be mishandled after table re-use with a different strategy. 
This constant is required by ZSTD_compressBlock_btlazy2() and ZSTD_reduceTable_internal() */ /*-************************************* * Context memory management ***************************************/ typedef enum { ZSTDcs_created=0, ZSTDcs_init, ZSTDcs_ongoing, ZSTDcs_ending } ZSTD_compressionStage_e; typedef enum { zcss_init=0, zcss_load, zcss_flush } ZSTD_cStreamStage; typedef struct ZSTD_prefixDict_s { const void* dict; size_t dictSize; ZSTD_dictContentType_e dictContentType; } ZSTD_prefixDict; typedef struct { void* dictBuffer; void const* dict; size_t dictSize; ZSTD_dictContentType_e dictContentType; ZSTD_CDict* cdict; } ZSTD_localDict; typedef struct { U32 CTable[HUF_CTABLE_SIZE_U32(255)]; HUF_repeat repeatMode; } ZSTD_hufCTables_t; typedef struct { FSE_CTable offcodeCTable[FSE_CTABLE_SIZE_U32(OffFSELog, MaxOff)]; FSE_CTable matchlengthCTable[FSE_CTABLE_SIZE_U32(MLFSELog, MaxML)]; FSE_CTable litlengthCTable[FSE_CTABLE_SIZE_U32(LLFSELog, MaxLL)]; FSE_repeat offcode_repeatMode; FSE_repeat matchlength_repeatMode; FSE_repeat litlength_repeatMode; } ZSTD_fseCTables_t; typedef struct { ZSTD_hufCTables_t huf; ZSTD_fseCTables_t fse; } ZSTD_entropyCTables_t; typedef struct { U32 off; U32 len; } ZSTD_match_t; typedef struct { int price; U32 off; U32 mlen; U32 litlen; U32 rep[ZSTD_REP_NUM]; } ZSTD_optimal_t; typedef enum { zop_dynamic=0, zop_predef } ZSTD_OptPrice_e; typedef struct { /* All tables are allocated inside cctx->workspace by ZSTD_resetCCtx_internal() */ unsigned* litFreq; /* table of literals statistics, of size 256 */ unsigned* litLengthFreq; /* table of litLength statistics, of size (MaxLL+1) */ unsigned* matchLengthFreq; /* table of matchLength statistics, of size (MaxML+1) */ unsigned* offCodeFreq; /* table of offCode statistics, of size (MaxOff+1) */ ZSTD_match_t* matchTable; /* list of found matches, of size ZSTD_OPT_NUM+1 */ ZSTD_optimal_t* priceTable; /* All positions tracked by optimal parser, of size ZSTD_OPT_NUM+1 */ U32 litSum; /* nb of literals */ U32 litLengthSum; /* nb of litLength codes */ U32 matchLengthSum; /* nb of matchLength codes */ U32 offCodeSum; /* nb of offset codes */ U32 litSumBasePrice; /* to compare to log2(litfreq) */ U32 litLengthSumBasePrice; /* to compare to log2(llfreq) */ U32 matchLengthSumBasePrice;/* to compare to log2(mlfreq) */ U32 offCodeSumBasePrice; /* to compare to log2(offreq) */ ZSTD_OptPrice_e priceType; /* prices can be determined dynamically, or follow a pre-defined cost structure */ const ZSTD_entropyCTables_t* symbolCosts; /* pre-calculated dictionary statistics */ ZSTD_literalCompressionMode_e literalCompressionMode; } optState_t; typedef struct { ZSTD_entropyCTables_t entropy; U32 rep[ZSTD_REP_NUM]; } ZSTD_compressedBlockState_t; typedef struct { BYTE const* nextSrc; /* next block here to continue on current prefix */ BYTE const* base; /* All regular indexes relative to this position */ BYTE const* dictBase; /* extDict indexes relative to this position */ U32 dictLimit; /* below that point, need extDict */ U32 lowLimit; /* below that point, no more valid data */ } ZSTD_window_t; typedef struct ZSTD_matchState_t ZSTD_matchState_t; struct ZSTD_matchState_t { ZSTD_window_t window; /* State for window round buffer management */ U32 loadedDictEnd; /* index of end of dictionary, within context's referential. * When loadedDictEnd != 0, a dictionary is in use, and still valid. * This relies on a mechanism to set loadedDictEnd=0 when dictionary is no longer within distance. 
* Such mechanism is provided within ZSTD_window_enforceMaxDist() and ZSTD_checkDictValidity(). * When dict referential is copied into active context (i.e. not attached), * loadedDictEnd == dictSize, since referential starts from zero. */ U32 nextToUpdate; /* index from which to continue table update */ U32 hashLog3; /* dispatch table for matches of len==3 : larger == faster, more memory */ U32* hashTable; U32* hashTable3; U32* chainTable; optState_t opt; /* optimal parser state */ const ZSTD_matchState_t* dictMatchState; ZSTD_compressionParameters cParams; }; typedef struct { ZSTD_compressedBlockState_t* prevCBlock; ZSTD_compressedBlockState_t* nextCBlock; ZSTD_matchState_t matchState; } ZSTD_blockState_t; typedef struct { U32 offset; U32 checksum; } ldmEntry_t; typedef struct { ZSTD_window_t window; /* State for the window round buffer management */ ldmEntry_t* hashTable; U32 loadedDictEnd; BYTE* bucketOffsets; /* Next position in bucket to insert entry */ U64 hashPower; /* Used to compute the rolling hash. * Depends on ldmParams.minMatchLength */ } ldmState_t; typedef struct { U32 enableLdm; /* 1 if enable long distance matching */ U32 hashLog; /* Log size of hashTable */ U32 bucketSizeLog; /* Log bucket size for collision resolution, at most 8 */ U32 minMatchLength; /* Minimum match length */ U32 hashRateLog; /* Log number of entries to skip */ U32 windowLog; /* Window log for the LDM */ } ldmParams_t; typedef struct { U32 offset; U32 litLength; U32 matchLength; } rawSeq; typedef struct { rawSeq* seq; /* The start of the sequences */ size_t pos; /* The position where reading stopped. <= size. */ size_t size; /* The number of sequences. <= capacity. */ size_t capacity; /* The capacity starting from `seq` pointer */ } rawSeqStore_t; typedef struct { int collectSequences; ZSTD_Sequence* seqStart; size_t seqIndex; size_t maxSequences; } SeqCollector; struct ZSTD_CCtx_params_s { ZSTD_format_e format; ZSTD_compressionParameters cParams; ZSTD_frameParameters fParams; int compressionLevel; int forceWindow; /* force back-references to respect limit of * 1< 63) ? ZSTD_highbit32(litLength) + LL_deltaCode : LL_Code[litLength]; } /* ZSTD_MLcode() : * note : mlBase = matchLength - MINMATCH; * because it's the format it's stored in seqStore->sequences */ MEM_STATIC U32 ZSTD_MLcode(U32 mlBase) { static const BYTE ML_Code[128] = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 32, 33, 33, 34, 34, 35, 35, 36, 36, 36, 36, 37, 37, 37, 37, 38, 38, 38, 38, 38, 38, 38, 38, 39, 39, 39, 39, 39, 39, 39, 39, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42 }; static const U32 ML_deltaCode = 36; return (mlBase > 127) ? ZSTD_highbit32(mlBase) + ML_deltaCode : ML_Code[mlBase]; } typedef struct repcodes_s { U32 rep[3]; } repcodes_t; MEM_STATIC repcodes_t ZSTD_updateRep(U32 const rep[3], U32 const offset, U32 const ll0) { repcodes_t newReps; if (offset >= ZSTD_REP_NUM) { /* full offset */ newReps.rep[2] = rep[1]; newReps.rep[1] = rep[0]; newReps.rep[0] = offset - ZSTD_REP_MOVE; } else { /* repcode */ U32 const repCode = offset + ll0; if (repCode > 0) { /* note : if repCode==0, no change */ U32 const currentOffset = (repCode==ZSTD_REP_NUM) ? (rep[0] - 1) : rep[repCode]; newReps.rep[2] = (repCode >= 2) ? 
rep[1] : rep[2]; newReps.rep[1] = rep[0]; newReps.rep[0] = currentOffset; } else { /* repCode == 0 */ memcpy(&newReps, rep, sizeof(newReps)); } } return newReps; } /* ZSTD_cParam_withinBounds: * @return 1 if value is within cParam bounds, * 0 otherwise */ MEM_STATIC int ZSTD_cParam_withinBounds(ZSTD_cParameter cParam, int value) { ZSTD_bounds const bounds = ZSTD_cParam_getBounds(cParam); if (ZSTD_isError(bounds.error)) return 0; if (value < bounds.lowerBound) return 0; if (value > bounds.upperBound) return 0; return 1; } /* ZSTD_noCompressBlock() : * Writes uncompressed block to dst buffer from given src. * Returns the size of the block */ MEM_STATIC size_t ZSTD_noCompressBlock (void* dst, size_t dstCapacity, const void* src, size_t srcSize, U32 lastBlock) { U32 const cBlockHeader24 = lastBlock + (((U32)bt_raw)<<1) + (U32)(srcSize << 3); RETURN_ERROR_IF(srcSize + ZSTD_blockHeaderSize > dstCapacity, dstSize_tooSmall, "dst buf too small for uncompressed block"); MEM_writeLE24(dst, cBlockHeader24); memcpy((BYTE*)dst + ZSTD_blockHeaderSize, src, srcSize); return ZSTD_blockHeaderSize + srcSize; } MEM_STATIC size_t ZSTD_rleCompressBlock (void* dst, size_t dstCapacity, BYTE src, size_t srcSize, U32 lastBlock) { BYTE* const op = (BYTE*)dst; U32 const cBlockHeader = lastBlock + (((U32)bt_rle)<<1) + (U32)(srcSize << 3); RETURN_ERROR_IF(dstCapacity < 4, dstSize_tooSmall, ""); MEM_writeLE24(op, cBlockHeader); op[3] = src; return 4; } /* ZSTD_minGain() : * minimum compression required * to generate a compress block or a compressed literals section. * note : use same formula for both situations */ MEM_STATIC size_t ZSTD_minGain(size_t srcSize, ZSTD_strategy strat) { U32 const minlog = (strat>=ZSTD_btultra) ? (U32)(strat) - 1 : 6; ZSTD_STATIC_ASSERT(ZSTD_btultra == 8); assert(ZSTD_cParam_withinBounds(ZSTD_c_strategy, strat)); return (srcSize >> minlog) + 2; } MEM_STATIC int ZSTD_disableLiteralsCompression(const ZSTD_CCtx_params* cctxParams) { switch (cctxParams->literalCompressionMode) { case ZSTD_lcm_huffman: return 0; case ZSTD_lcm_uncompressed: return 1; default: assert(0 /* impossible: pre-validated */); /* fall-through */ case ZSTD_lcm_auto: return (cctxParams->cParams.strategy == ZSTD_fast) && (cctxParams->cParams.targetLength > 0); } } /*! ZSTD_safecopyLiterals() : * memcpy() function that won't read beyond more than WILDCOPY_OVERLENGTH bytes past ilimit_w. * Only called when the sequence ends past ilimit_w, so it only needs to be optimized for single * large copies. */ static void ZSTD_safecopyLiterals(BYTE* op, BYTE const* ip, BYTE const* const iend, BYTE const* ilimit_w) { assert(iend > ilimit_w); if (ip <= ilimit_w) { ZSTD_wildcopy(op, ip, ilimit_w - ip, ZSTD_no_overlap); op += ilimit_w - ip; ip = ilimit_w; } while (ip < iend) *op++ = *ip++; } /*! ZSTD_storeSeq() : * Store a sequence (litlen, litPtr, offCode and mlBase) into seqStore_t. * `offCode` : distance to match + ZSTD_REP_MOVE (values <= ZSTD_REP_MOVE are repCodes). * `mlBase` : matchLength - MINMATCH * Allowed to overread literals up to litLimit. 
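/* Illustrative sketch (not part of upstream zstd) -----------------------------
 * ZSTD_updateRep() above maintains the 3-slot history of recent match offsets
 * ("repcodes") that ZSTD_storeSeq()'s offCode convention refers to. The
 * standalone copy below mirrors that logic with plain integers so the shift
 * behaviour can be checked in isolation; it assumes REP_MOVE == REP_NUM - 1,
 * as in upstream zstd_internal.h, and every toy_* name is made up.
 */
#include <assert.h>
#include <string.h>

#define TOY_REP_NUM  3
#define TOY_REP_MOVE (TOY_REP_NUM - 1)

static void toy_update_rep(unsigned rep[TOY_REP_NUM], unsigned offCode, unsigned ll0)
{
    unsigned next[TOY_REP_NUM];
    if (offCode >= TOY_REP_NUM) {                 /* "real" offset: push the history */
        next[2] = rep[1]; next[1] = rep[0]; next[0] = offCode - TOY_REP_MOVE;
    } else {
        unsigned const repCode = offCode + ll0;   /* empty literal run shifts the index */
        if (repCode > 0) {
            unsigned const cur = (repCode == TOY_REP_NUM) ? rep[0] - 1 : rep[repCode];
            next[2] = (repCode >= 2) ? rep[1] : rep[2];
            next[1] = rep[0];
            next[0] = cur;                        /* the reused offset moves to the front */
        } else {
            memcpy(next, rep, sizeof(next));      /* repCode 0: history unchanged */
        }
    }
    memcpy(rep, next, sizeof(next));
}

static void toy_repcode_demo(void)
{
    unsigned rep[TOY_REP_NUM] = { 8, 4, 2 };      /* most recent offset first */
    toy_update_rep(rep, 20 + TOY_REP_MOVE, 0);    /* new offset 20 pushes the others back */
    assert(rep[0] == 20 && rep[1] == 8 && rep[2] == 4);
    toy_update_rep(rep, 1, 0);                    /* reuse rep[1]: it moves up front */
    assert(rep[0] == 8 && rep[1] == 20 && rep[2] == 4);
}
/* end of illustrative sketch ------------------------------------------------ */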
*/ HINT_INLINE UNUSED_ATTR void ZSTD_storeSeq(seqStore_t* seqStorePtr, size_t litLength, const BYTE* literals, const BYTE* litLimit, U32 offCode, size_t mlBase) { BYTE const* const litLimit_w = litLimit - WILDCOPY_OVERLENGTH; BYTE const* const litEnd = literals + litLength; #if defined(DEBUGLEVEL) && (DEBUGLEVEL >= 6) static const BYTE* g_start = NULL; if (g_start==NULL) g_start = (const BYTE*)literals; /* note : index only works for compression within a single segment */ { U32 const pos = (U32)((const BYTE*)literals - g_start); DEBUGLOG(6, "Cpos%7u :%3u literals, match%4u bytes at offCode%7u", pos, (U32)litLength, (U32)mlBase+MINMATCH, (U32)offCode); } #endif assert((size_t)(seqStorePtr->sequences - seqStorePtr->sequencesStart) < seqStorePtr->maxNbSeq); /* copy Literals */ assert(seqStorePtr->maxNbLit <= 128 KB); assert(seqStorePtr->lit + litLength <= seqStorePtr->litStart + seqStorePtr->maxNbLit); assert(literals + litLength <= litLimit); if (litEnd <= litLimit_w) { /* Common case we can use wildcopy. * First copy 16 bytes, because literals are likely short. */ assert(WILDCOPY_OVERLENGTH >= 16); ZSTD_copy16(seqStorePtr->lit, literals); if (litLength > 16) { ZSTD_wildcopy(seqStorePtr->lit+16, literals+16, (ptrdiff_t)litLength-16, ZSTD_no_overlap); } } else { ZSTD_safecopyLiterals(seqStorePtr->lit, literals, litEnd, litLimit_w); } seqStorePtr->lit += litLength; /* literal Length */ if (litLength>0xFFFF) { assert(seqStorePtr->longLengthID == 0); /* there can only be a single long length */ seqStorePtr->longLengthID = 1; seqStorePtr->longLengthPos = (U32)(seqStorePtr->sequences - seqStorePtr->sequencesStart); } seqStorePtr->sequences[0].litLength = (U16)litLength; /* match offset */ seqStorePtr->sequences[0].offset = offCode + 1; /* match Length */ if (mlBase>0xFFFF) { assert(seqStorePtr->longLengthID == 0); /* there can only be a single long length */ seqStorePtr->longLengthID = 2; seqStorePtr->longLengthPos = (U32)(seqStorePtr->sequences - seqStorePtr->sequencesStart); } seqStorePtr->sequences[0].matchLength = (U16)mlBase; seqStorePtr->sequences++; } /*-************************************* * Match length counter ***************************************/ static unsigned ZSTD_NbCommonBytes (size_t val) { if (MEM_isLittleEndian()) { if (MEM_64bits()) { # if defined(_MSC_VER) && defined(_WIN64) unsigned long r = 0; return _BitScanForward64( &r, (U64)val ) ? (unsigned)(r >> 3) : 0; # elif defined(__GNUC__) && (__GNUC__ >= 4) return (__builtin_ctzll((U64)val) >> 3); # else static const int DeBruijnBytePos[64] = { 0, 0, 0, 0, 0, 1, 1, 2, 0, 3, 1, 3, 1, 4, 2, 7, 0, 2, 3, 6, 1, 5, 3, 5, 1, 3, 4, 4, 2, 5, 6, 7, 7, 0, 1, 2, 3, 3, 4, 6, 2, 6, 5, 5, 3, 4, 5, 6, 7, 1, 2, 4, 6, 4, 4, 5, 7, 2, 6, 5, 7, 6, 7, 7 }; return DeBruijnBytePos[((U64)((val & -(long long)val) * 0x0218A392CDABBD3FULL)) >> 58]; # endif } else { /* 32 bits */ # if defined(_MSC_VER) unsigned long r=0; return _BitScanForward( &r, (U32)val ) ? (unsigned)(r >> 3) : 0; # elif defined(__GNUC__) && (__GNUC__ >= 3) return (__builtin_ctz((U32)val) >> 3); # else static const int DeBruijnBytePos[32] = { 0, 0, 3, 0, 3, 1, 3, 0, 3, 2, 2, 1, 3, 2, 0, 1, 3, 3, 1, 2, 2, 2, 2, 0, 3, 1, 2, 0, 1, 0, 1, 1 }; return DeBruijnBytePos[((U32)((val & -(S32)val) * 0x077CB531U)) >> 27]; # endif } } else { /* Big Endian CPU */ if (MEM_64bits()) { # if defined(_MSC_VER) && defined(_WIN64) unsigned long r = 0; return _BitScanReverse64( &r, val ) ? 
(unsigned)(r >> 3) : 0; # elif defined(__GNUC__) && (__GNUC__ >= 4) return (__builtin_clzll(val) >> 3); # else unsigned r; const unsigned n32 = sizeof(size_t)*4; /* calculate this way due to compiler complaining in 32-bits mode */ if (!(val>>n32)) { r=4; } else { r=0; val>>=n32; } if (!(val>>16)) { r+=2; val>>=8; } else { val>>=24; } r += (!val); return r; # endif } else { /* 32 bits */ # if defined(_MSC_VER) unsigned long r = 0; return _BitScanReverse( &r, (unsigned long)val ) ? (unsigned)(r >> 3) : 0; # elif defined(__GNUC__) && (__GNUC__ >= 3) return (__builtin_clz((U32)val) >> 3); # else unsigned r; if (!(val>>16)) { r=2; val>>=8; } else { r=0; val>>=24; } r += (!val); return r; # endif } } } MEM_STATIC size_t ZSTD_count(const BYTE* pIn, const BYTE* pMatch, const BYTE* const pInLimit) { const BYTE* const pStart = pIn; const BYTE* const pInLoopLimit = pInLimit - (sizeof(size_t)-1); if (pIn < pInLoopLimit) { { size_t const diff = MEM_readST(pMatch) ^ MEM_readST(pIn); if (diff) return ZSTD_NbCommonBytes(diff); } pIn+=sizeof(size_t); pMatch+=sizeof(size_t); while (pIn < pInLoopLimit) { size_t const diff = MEM_readST(pMatch) ^ MEM_readST(pIn); if (!diff) { pIn+=sizeof(size_t); pMatch+=sizeof(size_t); continue; } pIn += ZSTD_NbCommonBytes(diff); return (size_t)(pIn - pStart); } } if (MEM_64bits() && (pIn<(pInLimit-3)) && (MEM_read32(pMatch) == MEM_read32(pIn))) { pIn+=4; pMatch+=4; } if ((pIn<(pInLimit-1)) && (MEM_read16(pMatch) == MEM_read16(pIn))) { pIn+=2; pMatch+=2; } if ((pIn> (32-h) ; } MEM_STATIC size_t ZSTD_hash3Ptr(const void* ptr, U32 h) { return ZSTD_hash3(MEM_readLE32(ptr), h); } /* only in zstd_opt.h */ static const U32 prime4bytes = 2654435761U; static U32 ZSTD_hash4(U32 u, U32 h) { return (u * prime4bytes) >> (32-h) ; } static size_t ZSTD_hash4Ptr(const void* ptr, U32 h) { return ZSTD_hash4(MEM_read32(ptr), h); } static const U64 prime5bytes = 889523592379ULL; static size_t ZSTD_hash5(U64 u, U32 h) { return (size_t)(((u << (64-40)) * prime5bytes) >> (64-h)) ; } static size_t ZSTD_hash5Ptr(const void* p, U32 h) { return ZSTD_hash5(MEM_readLE64(p), h); } static const U64 prime6bytes = 227718039650203ULL; static size_t ZSTD_hash6(U64 u, U32 h) { return (size_t)(((u << (64-48)) * prime6bytes) >> (64-h)) ; } static size_t ZSTD_hash6Ptr(const void* p, U32 h) { return ZSTD_hash6(MEM_readLE64(p), h); } static const U64 prime7bytes = 58295818150454627ULL; static size_t ZSTD_hash7(U64 u, U32 h) { return (size_t)(((u << (64-56)) * prime7bytes) >> (64-h)) ; } static size_t ZSTD_hash7Ptr(const void* p, U32 h) { return ZSTD_hash7(MEM_readLE64(p), h); } static const U64 prime8bytes = 0xCF1BBCDCB7A56463ULL; static size_t ZSTD_hash8(U64 u, U32 h) { return (size_t)(((u) * prime8bytes) >> (64-h)) ; } static size_t ZSTD_hash8Ptr(const void* p, U32 h) { return ZSTD_hash8(MEM_readLE64(p), h); } MEM_STATIC size_t ZSTD_hashPtr(const void* p, U32 hBits, U32 mls) { switch(mls) { default: case 4: return ZSTD_hash4Ptr(p, hBits); case 5: return ZSTD_hash5Ptr(p, hBits); case 6: return ZSTD_hash6Ptr(p, hBits); case 7: return ZSTD_hash7Ptr(p, hBits); case 8: return ZSTD_hash8Ptr(p, hBits); } } /** ZSTD_ipow() : * Return base^exponent. */ static U64 ZSTD_ipow(U64 base, U64 exponent) { U64 power = 1; while (exponent) { if (exponent & 1) power *= base; exponent >>= 1; base *= base; } return power; } #define ZSTD_ROLL_HASH_CHAR_OFFSET 10 /** ZSTD_rollingHash_append() : * Add the buffer to the hash value. 
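/* Illustrative sketch (not part of upstream zstd) -----------------------------
 * The multiplicative hashes above (ZSTD_hash4 .. ZSTD_hash8, dispatched by
 * ZSTD_hashPtr) turn the next few input bytes into a small table index; the
 * match finders then remember the last position that hashed to each slot and
 * use ZSTD_count() to measure how long a candidate match really is. Below is
 * a self-contained toy version of that idea; the table size and every toy_*
 * name are invented for illustration only.
 */
#include <stddef.h>
#include <string.h>

#define TOY_HASH_LOG 12
static size_t toy_table[1 << TOY_HASH_LOG];        /* stores position + 1; 0 = empty */

static unsigned toy_hash4(const unsigned char* p)  /* same shape as ZSTD_hash4() */
{
    unsigned v; memcpy(&v, p, 4);                  /* unaligned-safe 4-byte read */
    return (unsigned)((v * 2654435761U) >> (32 - TOY_HASH_LOG));
}

static size_t toy_match_len(const unsigned char* a, const unsigned char* b,
                            const unsigned char* aEnd)
{   /* length of the common prefix - what ZSTD_count() computes, byte by byte */
    size_t n = 0;
    while (a + n < aEnd && a[n] == b[n]) n++;
    return n;
}

/* Look up the previous position whose first 4 bytes hashed to the same slot,
 * record the current position, and return the match length found (0 if none).
 * The caller guarantees pos + 4 <= srcSize. */
static size_t toy_find_match(const unsigned char* src, size_t srcSize, size_t pos)
{
    unsigned const h = toy_hash4(src + pos);
    size_t const cand = toy_table[h];
    toy_table[h] = pos + 1;
    if (cand == 0) return 0;
    return toy_match_len(src + pos, src + (cand - 1), src + srcSize);
}
/* end of illustrative sketch ------------------------------------------------ */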
*/ static U64 ZSTD_rollingHash_append(U64 hash, void const* buf, size_t size) { BYTE const* istart = (BYTE const*)buf; size_t pos; for (pos = 0; pos < size; ++pos) { hash *= prime8bytes; hash += istart[pos] + ZSTD_ROLL_HASH_CHAR_OFFSET; } return hash; } /** ZSTD_rollingHash_compute() : * Compute the rolling hash value of the buffer. */ MEM_STATIC U64 ZSTD_rollingHash_compute(void const* buf, size_t size) { return ZSTD_rollingHash_append(0, buf, size); } /** ZSTD_rollingHash_primePower() : * Compute the primePower to be passed to ZSTD_rollingHash_rotate() for a hash * over a window of length bytes. */ MEM_STATIC U64 ZSTD_rollingHash_primePower(U32 length) { return ZSTD_ipow(prime8bytes, length - 1); } /** ZSTD_rollingHash_rotate() : * Rotate the rolling hash by one byte. */ MEM_STATIC U64 ZSTD_rollingHash_rotate(U64 hash, BYTE toRemove, BYTE toAdd, U64 primePower) { hash -= (toRemove + ZSTD_ROLL_HASH_CHAR_OFFSET) * primePower; hash *= prime8bytes; hash += toAdd + ZSTD_ROLL_HASH_CHAR_OFFSET; return hash; } /*-************************************* * Round buffer management ***************************************/ #if (ZSTD_WINDOWLOG_MAX_64 > 31) # error "ZSTD_WINDOWLOG_MAX is too large : would overflow ZSTD_CURRENT_MAX" #endif /* Max current allowed */ #define ZSTD_CURRENT_MAX ((3U << 29) + (1U << ZSTD_WINDOWLOG_MAX)) /* Maximum chunk size before overflow correction needs to be called again */ #define ZSTD_CHUNKSIZE_MAX \ ( ((U32)-1) /* Maximum ending current index */ \ - ZSTD_CURRENT_MAX) /* Maximum beginning lowLimit */ /** * ZSTD_window_clear(): * Clears the window containing the history by simply setting it to empty. */ MEM_STATIC void ZSTD_window_clear(ZSTD_window_t* window) { size_t const endT = (size_t)(window->nextSrc - window->base); U32 const end = (U32)endT; window->lowLimit = end; window->dictLimit = end; } /** * ZSTD_window_hasExtDict(): * Returns non-zero if the window has a non-empty extDict. */ MEM_STATIC U32 ZSTD_window_hasExtDict(ZSTD_window_t const window) { return window.lowLimit < window.dictLimit; } /** * ZSTD_matchState_dictMode(): * Inspects the provided matchState and figures out what dictMode should be * passed to the compressor. */ MEM_STATIC ZSTD_dictMode_e ZSTD_matchState_dictMode(const ZSTD_matchState_t *ms) { return ZSTD_window_hasExtDict(ms->window) ? ZSTD_extDict : ms->dictMatchState != NULL ? ZSTD_dictMatchState : ZSTD_noDict; } /** * ZSTD_window_needOverflowCorrection(): * Returns non-zero if the indices are getting too large and need overflow * protection. */ MEM_STATIC U32 ZSTD_window_needOverflowCorrection(ZSTD_window_t const window, void const* srcEnd) { U32 const current = (U32)((BYTE const*)srcEnd - window.base); return current > ZSTD_CURRENT_MAX; } /** * ZSTD_window_correctOverflow(): * Reduces the indices to protect from index overflow. * Returns the correction made to the indices, which must be applied to every * stored index. * * The least significant cycleLog bits of the indices must remain the same, * which may be 0. Every index up to maxDist in the past must be valid. * NOTE: (maxDist & cycleMask) must be zero. */ MEM_STATIC U32 ZSTD_window_correctOverflow(ZSTD_window_t* window, U32 cycleLog, U32 maxDist, void const* src) { /* preemptive overflow correction: * 1. correction is large enough: * lowLimit > (3<<29) ==> current > 3<<29 + 1< (3<<29 + 1< (3<<29) - (1< (3<<29) - (1<<30) (NOTE: chainLog <= 30) * > 1<<29 * * 2. (ip+ZSTD_CHUNKSIZE_MAX - cctx->base) doesn't overflow: * After correction, current is less than (1<base < 1<<32. * 3. 
(cctx->lowLimit + 1< 3<<29 + 1<base); U32 const currentCycle0 = current & cycleMask; /* Exclude zero so that newCurrent - maxDist >= 1. */ U32 const currentCycle1 = currentCycle0 == 0 ? (1U << cycleLog) : currentCycle0; U32 const newCurrent = currentCycle1 + maxDist; U32 const correction = current - newCurrent; assert((maxDist & cycleMask) == 0); assert(current > newCurrent); /* Loose bound, should be around 1<<29 (see above) */ assert(correction > 1<<28); window->base += correction; window->dictBase += correction; if (window->lowLimit <= correction) window->lowLimit = 1; else window->lowLimit -= correction; if (window->dictLimit <= correction) window->dictLimit = 1; else window->dictLimit -= correction; /* Ensure we can still reference the full window. */ assert(newCurrent >= maxDist); assert(newCurrent - maxDist >= 1); /* Ensure that lowLimit and dictLimit didn't underflow. */ assert(window->lowLimit <= newCurrent); assert(window->dictLimit <= newCurrent); DEBUGLOG(4, "Correction of 0x%x bytes to lowLimit=0x%x", correction, window->lowLimit); return correction; } /** * ZSTD_window_enforceMaxDist(): * Updates lowLimit so that: * (srcEnd - base) - lowLimit == maxDist + loadedDictEnd * * It ensures index is valid as long as index >= lowLimit. * This must be called before a block compression call. * * loadedDictEnd is only defined if a dictionary is in use for current compression. * As the name implies, loadedDictEnd represents the index at end of dictionary. * The value lies within context's referential, it can be directly compared to blockEndIdx. * * If loadedDictEndPtr is NULL, no dictionary is in use, and we use loadedDictEnd == 0. * If loadedDictEndPtr is not NULL, we set it to zero after updating lowLimit. * This is because dictionaries are allowed to be referenced fully * as long as the last byte of the dictionary is in the window. * Once input has progressed beyond window size, dictionary cannot be referenced anymore. * * In normal dict mode, the dictionary lies between lowLimit and dictLimit. * In dictMatchState mode, lowLimit and dictLimit are the same, * and the dictionary is below them. * forceWindow and dictMatchState are therefore incompatible. */ MEM_STATIC void ZSTD_window_enforceMaxDist(ZSTD_window_t* window, const void* blockEnd, U32 maxDist, U32* loadedDictEndPtr, const ZSTD_matchState_t** dictMatchStatePtr) { U32 const blockEndIdx = (U32)((BYTE const*)blockEnd - window->base); U32 const loadedDictEnd = (loadedDictEndPtr != NULL) ? *loadedDictEndPtr : 0; DEBUGLOG(5, "ZSTD_window_enforceMaxDist: blockEndIdx=%u, maxDist=%u, loadedDictEnd=%u", (unsigned)blockEndIdx, (unsigned)maxDist, (unsigned)loadedDictEnd); /* - When there is no dictionary : loadedDictEnd == 0. In which case, the test (blockEndIdx > maxDist) is merely to avoid overflowing next operation `newLowLimit = blockEndIdx - maxDist`. - When there is a standard dictionary : Index referential is copied from the dictionary, which means it starts from 0. In which case, loadedDictEnd == dictSize, and it makes sense to compare `blockEndIdx > maxDist + dictSize` since `blockEndIdx` also starts from zero. - When there is an attached dictionary : loadedDictEnd is expressed within the referential of the context, so it can be directly compared against blockEndIdx. 
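/* Illustrative sketch (not part of upstream zstd) -----------------------------
 * A worked example of the rule described above, with made-up numbers: with
 * maxDist = 1<<20 and no dictionary, once the block end reaches index
 * 5,000,000, every index below 5,000,000 - 1,048,576 = 3,951,424 has left the
 * window, so lowLimit (and, if needed, dictLimit) is raised to that value.
 * The toy_* helper below just restates that update for those numbers.
 */
#include <assert.h>

static void toy_enforce_max_dist(unsigned* lowLimit, unsigned* dictLimit,
                                 unsigned blockEndIdx, unsigned maxDist,
                                 unsigned loadedDictEnd)
{
    if (blockEndIdx > maxDist + loadedDictEnd) {     /* window has filled up */
        unsigned const newLowLimit = blockEndIdx - maxDist;
        if (*lowLimit  < newLowLimit) *lowLimit  = newLowLimit;
        if (*dictLimit < *lowLimit)   *dictLimit = *lowLimit;  /* keep dictLimit >= lowLimit */
    }
}

static void toy_enforce_demo(void)
{
    unsigned lowLimit = 1, dictLimit = 1;
    toy_enforce_max_dist(&lowLimit, &dictLimit, 5000000u, 1u << 20, 0u);
    assert(lowLimit == 3951424u && dictLimit == 3951424u);
}
/* end of illustrative sketch ------------------------------------------------ */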
*/ if (blockEndIdx > maxDist + loadedDictEnd) { U32 const newLowLimit = blockEndIdx - maxDist; if (window->lowLimit < newLowLimit) window->lowLimit = newLowLimit; if (window->dictLimit < window->lowLimit) { DEBUGLOG(5, "Update dictLimit to match lowLimit, from %u to %u", (unsigned)window->dictLimit, (unsigned)window->lowLimit); window->dictLimit = window->lowLimit; } /* On reaching window size, dictionaries are invalidated */ if (loadedDictEndPtr) *loadedDictEndPtr = 0; if (dictMatchStatePtr) *dictMatchStatePtr = NULL; } } /* Similar to ZSTD_window_enforceMaxDist(), * but only invalidates dictionary * when input progresses beyond window size. * assumption : loadedDictEndPtr and dictMatchStatePtr are valid (non NULL) * loadedDictEnd uses same referential as window->base * maxDist is the window size */ MEM_STATIC void ZSTD_checkDictValidity(const ZSTD_window_t* window, const void* blockEnd, U32 maxDist, U32* loadedDictEndPtr, const ZSTD_matchState_t** dictMatchStatePtr) { assert(loadedDictEndPtr != NULL); assert(dictMatchStatePtr != NULL); { U32 const blockEndIdx = (U32)((BYTE const*)blockEnd - window->base); U32 const loadedDictEnd = *loadedDictEndPtr; DEBUGLOG(5, "ZSTD_checkDictValidity: blockEndIdx=%u, maxDist=%u, loadedDictEnd=%u", (unsigned)blockEndIdx, (unsigned)maxDist, (unsigned)loadedDictEnd); assert(blockEndIdx >= loadedDictEnd); if (blockEndIdx > loadedDictEnd + maxDist) { /* On reaching window size, dictionaries are invalidated. * For simplification, if window size is reached anywhere within next block, * the dictionary is invalidated for the full block. */ DEBUGLOG(6, "invalidating dictionary for current block (distance > windowSize)"); *loadedDictEndPtr = 0; *dictMatchStatePtr = NULL; } else { if (*loadedDictEndPtr != 0) { DEBUGLOG(6, "dictionary considered valid for current block"); } } } } MEM_STATIC void ZSTD_window_init(ZSTD_window_t* window) { memset(window, 0, sizeof(*window)); window->base = (BYTE const*)""; window->dictBase = (BYTE const*)""; window->dictLimit = 1; /* start from 1, so that 1st position is valid */ window->lowLimit = 1; /* it ensures first and later CCtx usages compress the same */ window->nextSrc = window->base + 1; /* see issue #1241 */ } /** * ZSTD_window_update(): * Updates the window by appending [src, src + srcSize) to the window. * If it is not contiguous, the current prefix becomes the extDict, and we * forget about the extDict. Handles overlap of the prefix and extDict. * Returns non-zero if the segment is contiguous. 
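/* Illustrative sketch (not part of upstream zstd) -----------------------------
 * ZSTD_window_update(), documented above and defined just after, keeps a single
 * monotonically growing index space even when the caller supplies buffers that
 * are not contiguous in memory: the old prefix stays reachable through dictBase
 * (the "extDict"), and base is rebased so that the new buffer simply continues
 * the index range. The toy_* structure below reproduces only that bookkeeping
 * (it omits the HASH_READ_SIZE and overlap adjustments of the real function).
 */
#include <assert.h>
#include <stddef.h>

typedef struct {
    const unsigned char* base;      /* index i >= dictLimit lives at base + i       */
    const unsigned char* dictBase;  /* index i <  dictLimit lives at dictBase + i   */
    const unsigned char* nextSrc;   /* expected start of the next contiguous block  */
    size_t dictLimit, lowLimit;
} toy_window;

static void toy_window_append(toy_window* w, const unsigned char* src, size_t srcSize)
{
    if (src != w->nextSrc) {                          /* non-contiguous block */
        size_t const distanceFromBase = (size_t)(w->nextSrc - w->base);
        w->lowLimit  = w->dictLimit;
        w->dictLimit = distanceFromBase;              /* old prefix becomes the extDict */
        w->dictBase  = w->base;
        w->base      = src - distanceFromBase;        /* new block continues the indices */
    }
    w->nextSrc = src + srcSize;
}

static void toy_window_demo(void)
{
    static const unsigned char buf[32] = { 0 };
    toy_window w = { buf, buf, buf, 0, 0 };
    toy_window_append(&w, buf, 8);           /* indices 0..7 -> buf[0..7]           */
    toy_window_append(&w, buf + 16, 8);      /* gap of 8 bytes: not contiguous      */
    assert(w.dictLimit == 8);                /* old prefix is now the extDict       */
    assert(w.base + 8 == buf + 16);          /* index 8 maps to the new block       */
    assert(w.dictBase + 3 == buf + 3);       /* index 3 still reachable via extDict */
}
/* end of illustrative sketch ------------------------------------------------ */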
*/ MEM_STATIC U32 ZSTD_window_update(ZSTD_window_t* window, void const* src, size_t srcSize) { BYTE const* const ip = (BYTE const*)src; U32 contiguous = 1; DEBUGLOG(5, "ZSTD_window_update"); if (srcSize == 0) return contiguous; assert(window->base != NULL); assert(window->dictBase != NULL); /* Check if blocks follow each other */ if (src != window->nextSrc) { /* not contiguous */ size_t const distanceFromBase = (size_t)(window->nextSrc - window->base); DEBUGLOG(5, "Non contiguous blocks, new segment starts at %u", window->dictLimit); window->lowLimit = window->dictLimit; assert(distanceFromBase == (size_t)(U32)distanceFromBase); /* should never overflow */ window->dictLimit = (U32)distanceFromBase; window->dictBase = window->base; window->base = ip - distanceFromBase; /* ms->nextToUpdate = window->dictLimit; */ if (window->dictLimit - window->lowLimit < HASH_READ_SIZE) window->lowLimit = window->dictLimit; /* too small extDict */ contiguous = 0; } window->nextSrc = ip + srcSize; /* if input and dictionary overlap : reduce dictionary (area presumed modified by input) */ if ( (ip+srcSize > window->dictBase + window->lowLimit) & (ip < window->dictBase + window->dictLimit)) { ptrdiff_t const highInputIdx = (ip + srcSize) - window->dictBase; U32 const lowLimitMax = (highInputIdx > (ptrdiff_t)window->dictLimit) ? window->dictLimit : (U32)highInputIdx; window->lowLimit = lowLimitMax; DEBUGLOG(5, "Overlapping extDict and input : new lowLimit = %u", window->lowLimit); } return contiguous; } /** * Returns the lowest allowed match index. It may either be in the ext-dict or the prefix. */ MEM_STATIC U32 ZSTD_getLowestMatchIndex(const ZSTD_matchState_t* ms, U32 current, unsigned windowLog) { U32 const maxDistance = 1U << windowLog; U32 const lowestValid = ms->window.lowLimit; U32 const withinWindow = (current - lowestValid > maxDistance) ? current - maxDistance : lowestValid; U32 const isDictionary = (ms->loadedDictEnd != 0); U32 const matchLowest = isDictionary ? lowestValid : withinWindow; return matchLowest; } /** * Returns the lowest allowed match index in the prefix. */ MEM_STATIC U32 ZSTD_getLowestPrefixIndex(const ZSTD_matchState_t* ms, U32 current, unsigned windowLog) { U32 const maxDistance = 1U << windowLog; U32 const lowestValid = ms->window.dictLimit; U32 const withinWindow = (current - lowestValid > maxDistance) ? current - maxDistance : lowestValid; U32 const isDictionary = (ms->loadedDictEnd != 0); U32 const matchLowest = isDictionary ? 
lowestValid : withinWindow; return matchLowest; } /* debug functions */ #if (DEBUGLEVEL>=2) MEM_STATIC double ZSTD_fWeight(U32 rawStat) { U32 const fp_accuracy = 8; U32 const fp_multiplier = (1 << fp_accuracy); U32 const newStat = rawStat + 1; U32 const hb = ZSTD_highbit32(newStat); U32 const BWeight = hb * fp_multiplier; U32 const FWeight = (newStat << fp_accuracy) >> hb; U32 const weight = BWeight + FWeight; assert(hb + fp_accuracy < 31); return (double)weight / fp_multiplier; } /* display a table content, * listing each element, its frequency, and its predicted bit cost */ MEM_STATIC void ZSTD_debugTable(const U32* table, U32 max) { unsigned u, sum; for (u=0, sum=0; u<=max; u++) sum += table[u]; DEBUGLOG(2, "total nb elts: %u", sum); for (u=0; u<=max; u++) { DEBUGLOG(2, "%2u: %5u (%.2f)", u, table[u], ZSTD_fWeight(sum) - ZSTD_fWeight(table[u]) ); } } #endif #if defined (__cplusplus) } #endif /* =============================================================== * Shared internal declarations * These prototypes may be called from sources not in lib/compress * =============================================================== */ /* ZSTD_loadCEntropy() : * dict : must point at beginning of a valid zstd dictionary. * return : size of dictionary header (size of magic number + dict ID + entropy tables) * assumptions : magic number supposed already checked * and dictSize >= 8 */ size_t ZSTD_loadCEntropy(ZSTD_compressedBlockState_t* bs, void* workspace, short* offcodeNCount, unsigned* offcodeMaxValue, const void* const dict, size_t dictSize); void ZSTD_reset_compressedBlockState(ZSTD_compressedBlockState_t* bs); /* ============================================================== * Private declarations * These prototypes shall only be called from within lib/compress * ============================================================== */ /* ZSTD_getCParamsFromCCtxParams() : * cParams are built depending on compressionLevel, src size hints, * LDM and manually set compression parameters. * Note: srcSizeHint == 0 means 0! */ ZSTD_compressionParameters ZSTD_getCParamsFromCCtxParams( const ZSTD_CCtx_params* CCtxParams, U64 srcSizeHint, size_t dictSize); /*! ZSTD_initCStream_internal() : * Private use only. Init streaming operation. * expects params to be valid. * must receive dict, or cdict, or none, but not both. * @return : 0, or an error code */ size_t ZSTD_initCStream_internal(ZSTD_CStream* zcs, const void* dict, size_t dictSize, const ZSTD_CDict* cdict, const ZSTD_CCtx_params* params, unsigned long long pledgedSrcSize); void ZSTD_resetSeqStore(seqStore_t* ssPtr); /*! ZSTD_getCParamsFromCDict() : * as the name implies */ ZSTD_compressionParameters ZSTD_getCParamsFromCDict(const ZSTD_CDict* cdict); /* ZSTD_compressBegin_advanced_internal() : * Private use only. To be called from zstdmt_compress.c. */ size_t ZSTD_compressBegin_advanced_internal(ZSTD_CCtx* cctx, const void* dict, size_t dictSize, ZSTD_dictContentType_e dictContentType, ZSTD_dictTableLoadMethod_e dtlm, const ZSTD_CDict* cdict, const ZSTD_CCtx_params* params, unsigned long long pledgedSrcSize); /* ZSTD_compress_advanced_internal() : * Private use only. To be called from zstdmt_compress.c. 
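/* Illustrative sketch (not part of upstream zstd) -----------------------------
 * ZSTD_fWeight() above is a fixed-point log2 approximation with 8 fractional
 * bits, so ZSTD_debugTable()'s "fWeight(sum) - fWeight(count)" is roughly the
 * ideal bit cost log2(sum/count) of a symbol seen `count` times out of `sum`
 * (both arguments are +1-smoothed). Worked numbers, chosen arbitrarily:
 * count = 3, sum = 1000 gives about 10.95 - 3.00 = 7.95 bits, close to the
 * exact log2(1001/4) ~= 7.97. The toy_* code below just recomputes this.
 */
#include <assert.h>
#include <math.h>

static double toy_fweight(unsigned rawStat)
{
    unsigned const fp_accuracy = 8;
    unsigned const fp_multiplier = 1u << fp_accuracy;
    unsigned const newStat = rawStat + 1;
    unsigned hb = 0;                                     /* position of the highest set bit */
    while ((newStat >> (hb + 1)) != 0) hb++;
    {   unsigned const BWeight = hb * fp_multiplier;              /* integer part    */
        unsigned const FWeight = (newStat << fp_accuracy) >> hb;  /* fractional part */
        return (double)(BWeight + FWeight) / fp_multiplier;
    }
}

static void toy_fweight_demo(void)
{
    double const bits = toy_fweight(1000) - toy_fweight(3);   /* ~7.95 bits */
    assert(fabs(bits - log2(1001.0 / 4.0)) < 0.1);            /* near the exact cost */
}
/* end of illustrative sketch ------------------------------------------------ */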
*/ size_t ZSTD_compress_advanced_internal(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize, const void* dict,size_t dictSize, const ZSTD_CCtx_params* params); /* ZSTD_writeLastEmptyBlock() : * output an empty Block with end-of-frame mark to complete a frame * @return : size of data written into `dst` (== ZSTD_blockHeaderSize (defined in zstd_internal.h)) * or an error code if `dstCapacity` is too small ( 1 */ U32 ZSTD_cycleLog(U32 hashLog, ZSTD_strategy strat); #endif /* ZSTD_COMPRESS_H */ borgbackup-1.1.15/src/borg/algorithms/zstd/lib/compress/huf_compress.c0000644000175000017500000007766113771325506026045 0ustar useruser00000000000000/* ****************************************************************** * Huffman encoder, part of New Generation Entropy library * Copyright (c) 2013-2020, Yann Collet, Facebook, Inc. * * You can contact the author at : * - FSE+HUF source repository : https://github.com/Cyan4973/FiniteStateEntropy * - Public forum : https://groups.google.com/forum/#!forum/lz4c * * This source code is licensed under both the BSD-style license (found in the * LICENSE file in the root directory of this source tree) and the GPLv2 (found * in the COPYING file in the root directory of this source tree). * You may select, at your option, one of the above-listed licenses. ****************************************************************** */ /* ************************************************************** * Compiler specifics ****************************************************************/ #ifdef _MSC_VER /* Visual Studio */ # pragma warning(disable : 4127) /* disable: C4127: conditional expression is constant */ #endif /* ************************************************************** * Includes ****************************************************************/ #include /* memcpy, memset */ #include /* printf (debug) */ #include "../common/compiler.h" #include "../common/bitstream.h" #include "hist.h" #define FSE_STATIC_LINKING_ONLY /* FSE_optimalTableLog_internal */ #include "../common/fse.h" /* header compression */ #define HUF_STATIC_LINKING_ONLY #include "../common/huf.h" #include "../common/error_private.h" /* ************************************************************** * Error Management ****************************************************************/ #define HUF_isError ERR_isError #define HUF_STATIC_ASSERT(c) DEBUG_STATIC_ASSERT(c) /* use only *after* variable declarations */ /* ************************************************************** * Utils ****************************************************************/ unsigned HUF_optimalTableLog(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue) { return FSE_optimalTableLog_internal(maxTableLog, srcSize, maxSymbolValue, 1); } /* ******************************************************* * HUF : Huffman block compression *********************************************************/ /* HUF_compressWeights() : * Same as FSE_compress(), but dedicated to huff0's weights compression. * The use case needs much less stack memory. * Note : all elements within weightTable are supposed to be <= HUF_TABLELOG_MAX. 
*/ #define MAX_FSE_TABLELOG_FOR_HUFF_HEADER 6 static size_t HUF_compressWeights (void* dst, size_t dstSize, const void* weightTable, size_t wtSize) { BYTE* const ostart = (BYTE*) dst; BYTE* op = ostart; BYTE* const oend = ostart + dstSize; unsigned maxSymbolValue = HUF_TABLELOG_MAX; U32 tableLog = MAX_FSE_TABLELOG_FOR_HUFF_HEADER; FSE_CTable CTable[FSE_CTABLE_SIZE_U32(MAX_FSE_TABLELOG_FOR_HUFF_HEADER, HUF_TABLELOG_MAX)]; BYTE scratchBuffer[1< not compressible */ } tableLog = FSE_optimalTableLog(tableLog, wtSize, maxSymbolValue); CHECK_F( FSE_normalizeCount(norm, tableLog, count, wtSize, maxSymbolValue) ); /* Write table description header */ { CHECK_V_F(hSize, FSE_writeNCount(op, (size_t)(oend-op), norm, maxSymbolValue, tableLog) ); op += hSize; } /* Compress */ CHECK_F( FSE_buildCTable_wksp(CTable, norm, maxSymbolValue, tableLog, scratchBuffer, sizeof(scratchBuffer)) ); { CHECK_V_F(cSize, FSE_compress_usingCTable(op, (size_t)(oend - op), weightTable, wtSize, CTable) ); if (cSize == 0) return 0; /* not enough space for compressed data */ op += cSize; } return (size_t)(op-ostart); } struct HUF_CElt_s { U16 val; BYTE nbBits; }; /* typedef'd to HUF_CElt within "huf.h" */ /*! HUF_writeCTable() : `CTable` : Huffman tree to save, using huf representation. @return : size of saved CTable */ size_t HUF_writeCTable (void* dst, size_t maxDstSize, const HUF_CElt* CTable, unsigned maxSymbolValue, unsigned huffLog) { BYTE bitsToWeight[HUF_TABLELOG_MAX + 1]; /* precomputed conversion table */ BYTE huffWeight[HUF_SYMBOLVALUE_MAX]; BYTE* op = (BYTE*)dst; U32 n; /* check conditions */ if (maxSymbolValue > HUF_SYMBOLVALUE_MAX) return ERROR(maxSymbolValue_tooLarge); /* convert to weight */ bitsToWeight[0] = 0; for (n=1; n1) & (hSize < maxSymbolValue/2)) { /* FSE compressed */ op[0] = (BYTE)hSize; return hSize+1; } } /* write raw values as 4-bits (max : 15) */ if (maxSymbolValue > (256-128)) return ERROR(GENERIC); /* should not happen : likely means source cannot be compressed */ if (((maxSymbolValue+1)/2) + 1 > maxDstSize) return ERROR(dstSize_tooSmall); /* not enough space within dst buffer */ op[0] = (BYTE)(128 /*special case*/ + (maxSymbolValue-1)); huffWeight[maxSymbolValue] = 0; /* to be sure it doesn't cause msan issue in final combination */ for (n=0; n HUF_TABLELOG_MAX) return ERROR(tableLog_tooLarge); if (nbSymbols > *maxSymbolValuePtr+1) return ERROR(maxSymbolValue_tooSmall); /* Prepare base value per rank */ { U32 n, nextRankStart = 0; for (n=1; n<=tableLog; n++) { U32 current = nextRankStart; nextRankStart += (rankVal[n] << (n-1)); rankVal[n] = current; } } /* fill nbBits */ *hasZeroWeights = 0; { U32 n; for (n=0; nn=tableLog+1 */ U16 valPerRank[HUF_TABLELOG_MAX+2] = {0}; { U32 n; for (n=0; n0; n--) { /* start at n=tablelog <-> w=1 */ valPerRank[n] = min; /* get starting value within each rank */ min += nbPerRank[n]; min >>= 1; } } /* assign value within rank, symbol order */ { U32 n; for (n=0; n maxNbBits */ /* there are several too large elements (at least >= 2) */ { int totalCost = 0; const U32 baseCost = 1 << (largestBits - maxNbBits); int n = (int)lastNonNull; while (huffNode[n].nbBits > maxNbBits) { totalCost += baseCost - (1 << (largestBits - huffNode[n].nbBits)); huffNode[n].nbBits = (BYTE)maxNbBits; n --; } /* n stops at huffNode[n].nbBits <= maxNbBits */ while (huffNode[n].nbBits == maxNbBits) n--; /* n end at index of smallest symbol using < maxNbBits */ /* renorm totalCost */ totalCost >>= (largestBits - maxNbBits); /* note : totalCost is necessarily a multiple of baseCost */ /* repay 
normalized cost */ { U32 const noSymbol = 0xF0F0F0F0; U32 rankLast[HUF_TABLELOG_MAX+2]; /* Get pos of last (smallest) symbol per rank */ memset(rankLast, 0xF0, sizeof(rankLast)); { U32 currentNbBits = maxNbBits; int pos; for (pos=n ; pos >= 0; pos--) { if (huffNode[pos].nbBits >= currentNbBits) continue; currentNbBits = huffNode[pos].nbBits; /* < maxNbBits */ rankLast[maxNbBits-currentNbBits] = (U32)pos; } } while (totalCost > 0) { U32 nBitsToDecrease = BIT_highbit32((U32)totalCost) + 1; for ( ; nBitsToDecrease > 1; nBitsToDecrease--) { U32 const highPos = rankLast[nBitsToDecrease]; U32 const lowPos = rankLast[nBitsToDecrease-1]; if (highPos == noSymbol) continue; if (lowPos == noSymbol) break; { U32 const highTotal = huffNode[highPos].count; U32 const lowTotal = 2 * huffNode[lowPos].count; if (highTotal <= lowTotal) break; } } /* only triggered when no more rank 1 symbol left => find closest one (note : there is necessarily at least one !) */ /* HUF_MAX_TABLELOG test just to please gcc 5+; but it should not be necessary */ while ((nBitsToDecrease<=HUF_TABLELOG_MAX) && (rankLast[nBitsToDecrease] == noSymbol)) nBitsToDecrease ++; totalCost -= 1 << (nBitsToDecrease-1); if (rankLast[nBitsToDecrease-1] == noSymbol) rankLast[nBitsToDecrease-1] = rankLast[nBitsToDecrease]; /* this rank is no longer empty */ huffNode[rankLast[nBitsToDecrease]].nbBits ++; if (rankLast[nBitsToDecrease] == 0) /* special case, reached largest symbol */ rankLast[nBitsToDecrease] = noSymbol; else { rankLast[nBitsToDecrease]--; if (huffNode[rankLast[nBitsToDecrease]].nbBits != maxNbBits-nBitsToDecrease) rankLast[nBitsToDecrease] = noSymbol; /* this rank is now empty */ } } /* while (totalCost > 0) */ while (totalCost < 0) { /* Sometimes, cost correction overshoot */ if (rankLast[1] == noSymbol) { /* special case : no rank 1 symbol (using maxNbBits-1); let's create one from largest rank 0 (using maxNbBits) */ while (huffNode[n].nbBits == maxNbBits) n--; huffNode[n+1].nbBits--; assert(n >= 0); rankLast[1] = (U32)(n+1); totalCost++; continue; } huffNode[ rankLast[1] + 1 ].nbBits--; rankLast[1]++; totalCost ++; } } } /* there are several too large elements (at least >= 2) */ return maxNbBits; } typedef struct { U32 base; U32 current; } rankPos; typedef nodeElt huffNodeTable[HUF_CTABLE_WORKSPACE_SIZE_U32]; #define RANK_POSITION_TABLE_SIZE 32 typedef struct { huffNodeTable huffNodeTbl; rankPos rankPosition[RANK_POSITION_TABLE_SIZE]; } HUF_buildCTable_wksp_tables; static void HUF_sort(nodeElt* huffNode, const unsigned* count, U32 maxSymbolValue, rankPos* rankPosition) { U32 n; memset(rankPosition, 0, sizeof(*rankPosition) * RANK_POSITION_TABLE_SIZE); for (n=0; n<=maxSymbolValue; n++) { U32 r = BIT_highbit32(count[n] + 1); rankPosition[r].base ++; } for (n=30; n>0; n--) rankPosition[n-1].base += rankPosition[n].base; for (n=0; n<32; n++) rankPosition[n].current = rankPosition[n].base; for (n=0; n<=maxSymbolValue; n++) { U32 const c = count[n]; U32 const r = BIT_highbit32(c+1) + 1; U32 pos = rankPosition[r].current++; while ((pos > rankPosition[r].base) && (c > huffNode[pos-1].count)) { huffNode[pos] = huffNode[pos-1]; pos--; } huffNode[pos].count = c; huffNode[pos].byte = (BYTE)n; } } /** HUF_buildCTable_wksp() : * Same as HUF_buildCTable(), but using externally allocated scratch buffer. * `workSpace` must be aligned on 4-bytes boundaries, and be at least as large as sizeof(HUF_buildCTable_wksp_tables). 
*/ #define STARTNODE (HUF_SYMBOLVALUE_MAX+1) size_t HUF_buildCTable_wksp (HUF_CElt* tree, const unsigned* count, U32 maxSymbolValue, U32 maxNbBits, void* workSpace, size_t wkspSize) { HUF_buildCTable_wksp_tables* const wksp_tables = (HUF_buildCTable_wksp_tables*)workSpace; nodeElt* const huffNode0 = wksp_tables->huffNodeTbl; nodeElt* const huffNode = huffNode0+1; int nonNullRank; int lowS, lowN; int nodeNb = STARTNODE; int n, nodeRoot; /* safety checks */ if (((size_t)workSpace & 3) != 0) return ERROR(GENERIC); /* must be aligned on 4-bytes boundaries */ if (wkspSize < sizeof(HUF_buildCTable_wksp_tables)) return ERROR(workSpace_tooSmall); if (maxNbBits == 0) maxNbBits = HUF_TABLELOG_DEFAULT; if (maxSymbolValue > HUF_SYMBOLVALUE_MAX) return ERROR(maxSymbolValue_tooLarge); memset(huffNode0, 0, sizeof(huffNodeTable)); /* sort, decreasing order */ HUF_sort(huffNode, count, maxSymbolValue, wksp_tables->rankPosition); /* init for parents */ nonNullRank = (int)maxSymbolValue; while(huffNode[nonNullRank].count == 0) nonNullRank--; lowS = nonNullRank; nodeRoot = nodeNb + lowS - 1; lowN = nodeNb; huffNode[nodeNb].count = huffNode[lowS].count + huffNode[lowS-1].count; huffNode[lowS].parent = huffNode[lowS-1].parent = (U16)nodeNb; nodeNb++; lowS-=2; for (n=nodeNb; n<=nodeRoot; n++) huffNode[n].count = (U32)(1U<<30); huffNode0[0].count = (U32)(1U<<31); /* fake entry, strong barrier */ /* create parents */ while (nodeNb <= nodeRoot) { int const n1 = (huffNode[lowS].count < huffNode[lowN].count) ? lowS-- : lowN++; int const n2 = (huffNode[lowS].count < huffNode[lowN].count) ? lowS-- : lowN++; huffNode[nodeNb].count = huffNode[n1].count + huffNode[n2].count; huffNode[n1].parent = huffNode[n2].parent = (U16)nodeNb; nodeNb++; } /* distribute weights (unlimited tree height) */ huffNode[nodeRoot].nbBits = 0; for (n=nodeRoot-1; n>=STARTNODE; n--) huffNode[n].nbBits = huffNode[ huffNode[n].parent ].nbBits + 1; for (n=0; n<=nonNullRank; n++) huffNode[n].nbBits = huffNode[ huffNode[n].parent ].nbBits + 1; /* enforce maxTableLog */ maxNbBits = HUF_setMaxHeight(huffNode, (U32)nonNullRank, maxNbBits); /* fill result into tree (val, nbBits) */ { U16 nbPerRank[HUF_TABLELOG_MAX+1] = {0}; U16 valPerRank[HUF_TABLELOG_MAX+1] = {0}; int const alphabetSize = (int)(maxSymbolValue + 1); if (maxNbBits > HUF_TABLELOG_MAX) return ERROR(GENERIC); /* check fit into table */ for (n=0; n<=nonNullRank; n++) nbPerRank[huffNode[n].nbBits]++; /* determine stating value per rank */ { U16 min = 0; for (n=(int)maxNbBits; n>0; n--) { valPerRank[n] = min; /* get starting value within each rank */ min += nbPerRank[n]; min >>= 1; } } for (n=0; n> 3; } int HUF_validateCTable(const HUF_CElt* CTable, const unsigned* count, unsigned maxSymbolValue) { int bad = 0; int s; for (s = 0; s <= (int)maxSymbolValue; ++s) { bad |= (count[s] != 0) & (CTable[s].nbBits == 0); } return !bad; } size_t HUF_compressBound(size_t size) { return HUF_COMPRESSBOUND(size); } FORCE_INLINE_TEMPLATE void HUF_encodeSymbol(BIT_CStream_t* bitCPtr, U32 symbol, const HUF_CElt* CTable) { BIT_addBitsFast(bitCPtr, CTable[symbol].val, CTable[symbol].nbBits); } #define HUF_FLUSHBITS(s) BIT_flushBits(s) #define HUF_FLUSHBITS_1(stream) \ if (sizeof((stream)->bitContainer)*8 < HUF_TABLELOG_MAX*2+7) HUF_FLUSHBITS(stream) #define HUF_FLUSHBITS_2(stream) \ if (sizeof((stream)->bitContainer)*8 < HUF_TABLELOG_MAX*4+7) HUF_FLUSHBITS(stream) FORCE_INLINE_TEMPLATE size_t HUF_compress1X_usingCTable_internal_body(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable) 
{ const BYTE* ip = (const BYTE*) src; BYTE* const ostart = (BYTE*)dst; BYTE* const oend = ostart + dstSize; BYTE* op = ostart; size_t n; BIT_CStream_t bitC; /* init */ if (dstSize < 8) return 0; /* not enough space to compress */ { size_t const initErr = BIT_initCStream(&bitC, op, (size_t)(oend-op)); if (HUF_isError(initErr)) return 0; } n = srcSize & ~3; /* join to mod 4 */ switch (srcSize & 3) { case 3 : HUF_encodeSymbol(&bitC, ip[n+ 2], CTable); HUF_FLUSHBITS_2(&bitC); /* fall-through */ case 2 : HUF_encodeSymbol(&bitC, ip[n+ 1], CTable); HUF_FLUSHBITS_1(&bitC); /* fall-through */ case 1 : HUF_encodeSymbol(&bitC, ip[n+ 0], CTable); HUF_FLUSHBITS(&bitC); /* fall-through */ case 0 : /* fall-through */ default: break; } for (; n>0; n-=4) { /* note : n&3==0 at this stage */ HUF_encodeSymbol(&bitC, ip[n- 1], CTable); HUF_FLUSHBITS_1(&bitC); HUF_encodeSymbol(&bitC, ip[n- 2], CTable); HUF_FLUSHBITS_2(&bitC); HUF_encodeSymbol(&bitC, ip[n- 3], CTable); HUF_FLUSHBITS_1(&bitC); HUF_encodeSymbol(&bitC, ip[n- 4], CTable); HUF_FLUSHBITS(&bitC); } return BIT_closeCStream(&bitC); } #if DYNAMIC_BMI2 static TARGET_ATTRIBUTE("bmi2") size_t HUF_compress1X_usingCTable_internal_bmi2(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable) { return HUF_compress1X_usingCTable_internal_body(dst, dstSize, src, srcSize, CTable); } static size_t HUF_compress1X_usingCTable_internal_default(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable) { return HUF_compress1X_usingCTable_internal_body(dst, dstSize, src, srcSize, CTable); } static size_t HUF_compress1X_usingCTable_internal(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable, const int bmi2) { if (bmi2) { return HUF_compress1X_usingCTable_internal_bmi2(dst, dstSize, src, srcSize, CTable); } return HUF_compress1X_usingCTable_internal_default(dst, dstSize, src, srcSize, CTable); } #else static size_t HUF_compress1X_usingCTable_internal(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable, const int bmi2) { (void)bmi2; return HUF_compress1X_usingCTable_internal_body(dst, dstSize, src, srcSize, CTable); } #endif size_t HUF_compress1X_usingCTable(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable) { return HUF_compress1X_usingCTable_internal(dst, dstSize, src, srcSize, CTable, /* bmi2 */ 0); } static size_t HUF_compress4X_usingCTable_internal(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable, int bmi2) { size_t const segmentSize = (srcSize+3)/4; /* first 3 segments */ const BYTE* ip = (const BYTE*) src; const BYTE* const iend = ip + srcSize; BYTE* const ostart = (BYTE*) dst; BYTE* const oend = ostart + dstSize; BYTE* op = ostart; if (dstSize < 6 + 1 + 1 + 1 + 8) return 0; /* minimum space to compress successfully */ if (srcSize < 12) return 0; /* no saving possible : too small input */ op += 6; /* jumpTable */ assert(op <= oend); { CHECK_V_F(cSize, HUF_compress1X_usingCTable_internal(op, (size_t)(oend-op), ip, segmentSize, CTable, bmi2) ); if (cSize==0) return 0; assert(cSize <= 65535); MEM_writeLE16(ostart, (U16)cSize); op += cSize; } ip += segmentSize; assert(op <= oend); { CHECK_V_F(cSize, HUF_compress1X_usingCTable_internal(op, (size_t)(oend-op), ip, segmentSize, CTable, bmi2) ); if (cSize==0) return 0; assert(cSize <= 65535); MEM_writeLE16(ostart+2, (U16)cSize); op += cSize; } ip += segmentSize; assert(op <= oend); { CHECK_V_F(cSize, HUF_compress1X_usingCTable_internal(op, 
(size_t)(oend-op), ip, segmentSize, CTable, bmi2) ); if (cSize==0) return 0; assert(cSize <= 65535); MEM_writeLE16(ostart+4, (U16)cSize); op += cSize; } ip += segmentSize; assert(op <= oend); assert(ip <= iend); { CHECK_V_F(cSize, HUF_compress1X_usingCTable_internal(op, (size_t)(oend-op), ip, (size_t)(iend-ip), CTable, bmi2) ); if (cSize==0) return 0; op += cSize; } return (size_t)(op-ostart); } size_t HUF_compress4X_usingCTable(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable) { return HUF_compress4X_usingCTable_internal(dst, dstSize, src, srcSize, CTable, /* bmi2 */ 0); } typedef enum { HUF_singleStream, HUF_fourStreams } HUF_nbStreams_e; static size_t HUF_compressCTable_internal( BYTE* const ostart, BYTE* op, BYTE* const oend, const void* src, size_t srcSize, HUF_nbStreams_e nbStreams, const HUF_CElt* CTable, const int bmi2) { size_t const cSize = (nbStreams==HUF_singleStream) ? HUF_compress1X_usingCTable_internal(op, (size_t)(oend - op), src, srcSize, CTable, bmi2) : HUF_compress4X_usingCTable_internal(op, (size_t)(oend - op), src, srcSize, CTable, bmi2); if (HUF_isError(cSize)) { return cSize; } if (cSize==0) { return 0; } /* uncompressible */ op += cSize; /* check compressibility */ assert(op >= ostart); if ((size_t)(op-ostart) >= srcSize-1) { return 0; } return (size_t)(op-ostart); } typedef struct { unsigned count[HUF_SYMBOLVALUE_MAX + 1]; HUF_CElt CTable[HUF_SYMBOLVALUE_MAX + 1]; HUF_buildCTable_wksp_tables buildCTable_wksp; } HUF_compress_tables_t; /* HUF_compress_internal() : * `workSpace` must a table of at least HUF_WORKSPACE_SIZE_U32 unsigned */ static size_t HUF_compress_internal (void* dst, size_t dstSize, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned huffLog, HUF_nbStreams_e nbStreams, void* workSpace, size_t wkspSize, HUF_CElt* oldHufTable, HUF_repeat* repeat, int preferRepeat, const int bmi2) { HUF_compress_tables_t* const table = (HUF_compress_tables_t*)workSpace; BYTE* const ostart = (BYTE*)dst; BYTE* const oend = ostart + dstSize; BYTE* op = ostart; HUF_STATIC_ASSERT(sizeof(*table) <= HUF_WORKSPACE_SIZE); /* checks & inits */ if (((size_t)workSpace & 3) != 0) return ERROR(GENERIC); /* must be aligned on 4-bytes boundaries */ if (wkspSize < HUF_WORKSPACE_SIZE) return ERROR(workSpace_tooSmall); if (!srcSize) return 0; /* Uncompressed */ if (!dstSize) return 0; /* cannot fit anything within dst budget */ if (srcSize > HUF_BLOCKSIZE_MAX) return ERROR(srcSize_wrong); /* current block size limit */ if (huffLog > HUF_TABLELOG_MAX) return ERROR(tableLog_tooLarge); if (maxSymbolValue > HUF_SYMBOLVALUE_MAX) return ERROR(maxSymbolValue_tooLarge); if (!maxSymbolValue) maxSymbolValue = HUF_SYMBOLVALUE_MAX; if (!huffLog) huffLog = HUF_TABLELOG_DEFAULT; /* Heuristic : If old table is valid, use it for small inputs */ if (preferRepeat && repeat && *repeat == HUF_repeat_valid) { return HUF_compressCTable_internal(ostart, op, oend, src, srcSize, nbStreams, oldHufTable, bmi2); } /* Scan input and build symbol stats */ { CHECK_V_F(largest, HIST_count_wksp (table->count, &maxSymbolValue, (const BYTE*)src, srcSize, workSpace, wkspSize) ); if (largest == srcSize) { *ostart = ((const BYTE*)src)[0]; return 1; } /* single symbol, rle */ if (largest <= (srcSize >> 7)+4) return 0; /* heuristic : probably not compressible enough */ } /* Check validity of previous table */ if ( repeat && *repeat == HUF_repeat_check && !HUF_validateCTable(oldHufTable, table->count, maxSymbolValue)) { *repeat = HUF_repeat_none; } /* Heuristic : use existing 
table for small inputs */ if (preferRepeat && repeat && *repeat != HUF_repeat_none) { return HUF_compressCTable_internal(ostart, op, oend, src, srcSize, nbStreams, oldHufTable, bmi2); } /* Build Huffman Tree */ huffLog = HUF_optimalTableLog(huffLog, srcSize, maxSymbolValue); { size_t const maxBits = HUF_buildCTable_wksp(table->CTable, table->count, maxSymbolValue, huffLog, &table->buildCTable_wksp, sizeof(table->buildCTable_wksp)); CHECK_F(maxBits); huffLog = (U32)maxBits; /* Zero unused symbols in CTable, so we can check it for validity */ memset(table->CTable + (maxSymbolValue + 1), 0, sizeof(table->CTable) - ((maxSymbolValue + 1) * sizeof(HUF_CElt))); } /* Write table description header */ { CHECK_V_F(hSize, HUF_writeCTable (op, dstSize, table->CTable, maxSymbolValue, huffLog) ); /* Check if using previous huffman table is beneficial */ if (repeat && *repeat != HUF_repeat_none) { size_t const oldSize = HUF_estimateCompressedSize(oldHufTable, table->count, maxSymbolValue); size_t const newSize = HUF_estimateCompressedSize(table->CTable, table->count, maxSymbolValue); if (oldSize <= hSize + newSize || hSize + 12 >= srcSize) { return HUF_compressCTable_internal(ostart, op, oend, src, srcSize, nbStreams, oldHufTable, bmi2); } } /* Use the new huffman table */ if (hSize + 12ul >= srcSize) { return 0; } op += hSize; if (repeat) { *repeat = HUF_repeat_none; } if (oldHufTable) memcpy(oldHufTable, table->CTable, sizeof(table->CTable)); /* Save new table */ } return HUF_compressCTable_internal(ostart, op, oend, src, srcSize, nbStreams, table->CTable, bmi2); } size_t HUF_compress1X_wksp (void* dst, size_t dstSize, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned huffLog, void* workSpace, size_t wkspSize) { return HUF_compress_internal(dst, dstSize, src, srcSize, maxSymbolValue, huffLog, HUF_singleStream, workSpace, wkspSize, NULL, NULL, 0, 0 /*bmi2*/); } size_t HUF_compress1X_repeat (void* dst, size_t dstSize, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned huffLog, void* workSpace, size_t wkspSize, HUF_CElt* hufTable, HUF_repeat* repeat, int preferRepeat, int bmi2) { return HUF_compress_internal(dst, dstSize, src, srcSize, maxSymbolValue, huffLog, HUF_singleStream, workSpace, wkspSize, hufTable, repeat, preferRepeat, bmi2); } size_t HUF_compress1X (void* dst, size_t dstSize, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned huffLog) { unsigned workSpace[HUF_WORKSPACE_SIZE_U32]; return HUF_compress1X_wksp(dst, dstSize, src, srcSize, maxSymbolValue, huffLog, workSpace, sizeof(workSpace)); } /* HUF_compress4X_repeat(): * compress input using 4 streams. * provide workspace to generate compression tables */ size_t HUF_compress4X_wksp (void* dst, size_t dstSize, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned huffLog, void* workSpace, size_t wkspSize) { return HUF_compress_internal(dst, dstSize, src, srcSize, maxSymbolValue, huffLog, HUF_fourStreams, workSpace, wkspSize, NULL, NULL, 0, 0 /*bmi2*/); } /* HUF_compress4X_repeat(): * compress input using 4 streams. 
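/* Illustrative sketch (not part of upstream zstd) -----------------------------
 * Minimal caller-side usage of HUF_compress1X() as defined just above, written
 * as it might look from another translation unit inside lib/ (the include path,
 * buffer handling and toy_* name are illustrative). Per HUF_compress_internal():
 * srcSize must not exceed HUF_BLOCKSIZE_MAX (128 KB), a return value of 0 means
 * "not compressible / does not fit - store the block raw", 1 means the input was
 * a single repeated byte (the caller's framing must record that), and genuine
 * failures are reported through HUF_isError().
 */
#define HUF_STATIC_LINKING_ONLY
#include "../common/huf.h"
#include <stdlib.h>

/* Returns 0 and fills *cDst/*cSize on success; returns 1 when the caller should
 * fall back to raw or RLE framing instead of a Huffman-compressed block. */
static int toy_huf_compress_block(const void* src, size_t srcSize,
                                  void** cDst, size_t* cSize)
{
    size_t const bound = HUF_compressBound(srcSize);   /* worst-case dst budget */
    void* const dst = malloc(bound);
    size_t r;
    if (dst == NULL) return 1;
    /* maxSymbolValue = 0 and huffLog = 0 select the defaults (255, HUF_TABLELOG_DEFAULT) */
    r = HUF_compress1X(dst, bound, src, srcSize, 0, 0);
    if (HUF_isError(r) || r <= 1) { free(dst); return 1; }
    *cDst = dst; *cSize = r;
    return 0;
}
/* end of illustrative sketch ------------------------------------------------ */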
* re-use an existing huffman compression table */ size_t HUF_compress4X_repeat (void* dst, size_t dstSize, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned huffLog, void* workSpace, size_t wkspSize, HUF_CElt* hufTable, HUF_repeat* repeat, int preferRepeat, int bmi2) { return HUF_compress_internal(dst, dstSize, src, srcSize, maxSymbolValue, huffLog, HUF_fourStreams, workSpace, wkspSize, hufTable, repeat, preferRepeat, bmi2); } size_t HUF_compress2 (void* dst, size_t dstSize, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned huffLog) { unsigned workSpace[HUF_WORKSPACE_SIZE_U32]; return HUF_compress4X_wksp(dst, dstSize, src, srcSize, maxSymbolValue, huffLog, workSpace, sizeof(workSpace)); } size_t HUF_compress (void* dst, size_t maxDstSize, const void* src, size_t srcSize) { return HUF_compress2(dst, maxDstSize, src, srcSize, 255, HUF_TABLELOG_DEFAULT); } borgbackup-1.1.15/src/borg/algorithms/zstd/lib/compress/zstd_opt.h0000644000175000017500000000373513771325506025212 0ustar useruser00000000000000/* * Copyright (c) 2016-2020, Yann Collet, Facebook, Inc. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the * LICENSE file in the root directory of this source tree) and the GPLv2 (found * in the COPYING file in the root directory of this source tree). * You may select, at your option, one of the above-listed licenses. */ #ifndef ZSTD_OPT_H #define ZSTD_OPT_H #if defined (__cplusplus) extern "C" { #endif #include "zstd_compress_internal.h" /* used in ZSTD_loadDictionaryContent() */ void ZSTD_updateTree(ZSTD_matchState_t* ms, const BYTE* ip, const BYTE* iend); size_t ZSTD_compressBlock_btopt( ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize); size_t ZSTD_compressBlock_btultra( ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize); size_t ZSTD_compressBlock_btultra2( ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize); size_t ZSTD_compressBlock_btopt_dictMatchState( ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize); size_t ZSTD_compressBlock_btultra_dictMatchState( ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize); size_t ZSTD_compressBlock_btopt_extDict( ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize); size_t ZSTD_compressBlock_btultra_extDict( ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize); /* note : no btultra2 variant for extDict nor dictMatchState, * because btultra2 is not meant to work with dictionaries * and is only specific for the first block (no prefix) */ #if defined (__cplusplus) } #endif #endif /* ZSTD_OPT_H */ borgbackup-1.1.15/src/borg/algorithms/zstd/lib/compress/zstd_compress.c0000644000175000017500000054504613771325506026244 0ustar useruser00000000000000/* * Copyright (c) 2016-2020, Yann Collet, Facebook, Inc. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the * LICENSE file in the root directory of this source tree) and the GPLv2 (found * in the COPYING file in the root directory of this source tree). * You may select, at your option, one of the above-listed licenses. 
*/ /*-************************************* * Dependencies ***************************************/ #include /* INT_MAX */ #include /* memset */ #include "../common/cpu.h" #include "../common/mem.h" #include "hist.h" /* HIST_countFast_wksp */ #define FSE_STATIC_LINKING_ONLY /* FSE_encodeSymbol */ #include "../common/fse.h" #define HUF_STATIC_LINKING_ONLY #include "../common/huf.h" #include "zstd_compress_internal.h" #include "zstd_compress_sequences.h" #include "zstd_compress_literals.h" #include "zstd_fast.h" #include "zstd_double_fast.h" #include "zstd_lazy.h" #include "zstd_opt.h" #include "zstd_ldm.h" #include "zstd_compress_superblock.h" /*-************************************* * Helper functions ***************************************/ /* ZSTD_compressBound() * Note that the result from this function is only compatible with the "normal" * full-block strategy. * When there are a lot of small blocks due to frequent flush in streaming mode * the overhead of headers can make the compressed data to be larger than the * return value of ZSTD_compressBound(). */ size_t ZSTD_compressBound(size_t srcSize) { return ZSTD_COMPRESSBOUND(srcSize); } /*-************************************* * Context memory management ***************************************/ struct ZSTD_CDict_s { const void* dictContent; size_t dictContentSize; U32* entropyWorkspace; /* entropy workspace of HUF_WORKSPACE_SIZE bytes */ ZSTD_cwksp workspace; ZSTD_matchState_t matchState; ZSTD_compressedBlockState_t cBlockState; ZSTD_customMem customMem; U32 dictID; int compressionLevel; /* 0 indicates that advanced API was used to select CDict params */ }; /* typedef'd to ZSTD_CDict within "zstd.h" */ ZSTD_CCtx* ZSTD_createCCtx(void) { return ZSTD_createCCtx_advanced(ZSTD_defaultCMem); } static void ZSTD_initCCtx(ZSTD_CCtx* cctx, ZSTD_customMem memManager) { assert(cctx != NULL); memset(cctx, 0, sizeof(*cctx)); cctx->customMem = memManager; cctx->bmi2 = ZSTD_cpuid_bmi2(ZSTD_cpuid()); { size_t const err = ZSTD_CCtx_reset(cctx, ZSTD_reset_parameters); assert(!ZSTD_isError(err)); (void)err; } } ZSTD_CCtx* ZSTD_createCCtx_advanced(ZSTD_customMem customMem) { ZSTD_STATIC_ASSERT(zcss_init==0); ZSTD_STATIC_ASSERT(ZSTD_CONTENTSIZE_UNKNOWN==(0ULL - 1)); if (!customMem.customAlloc ^ !customMem.customFree) return NULL; { ZSTD_CCtx* const cctx = (ZSTD_CCtx*)ZSTD_malloc(sizeof(ZSTD_CCtx), customMem); if (!cctx) return NULL; ZSTD_initCCtx(cctx, customMem); return cctx; } } ZSTD_CCtx* ZSTD_initStaticCCtx(void* workspace, size_t workspaceSize) { ZSTD_cwksp ws; ZSTD_CCtx* cctx; if (workspaceSize <= sizeof(ZSTD_CCtx)) return NULL; /* minimum size */ if ((size_t)workspace & 7) return NULL; /* must be 8-aligned */ ZSTD_cwksp_init(&ws, workspace, workspaceSize); cctx = (ZSTD_CCtx*)ZSTD_cwksp_reserve_object(&ws, sizeof(ZSTD_CCtx)); if (cctx == NULL) return NULL; memset(cctx, 0, sizeof(ZSTD_CCtx)); ZSTD_cwksp_move(&cctx->workspace, &ws); cctx->staticSize = workspaceSize; /* statically sized space. 
entropyWorkspace never moves (but prev/next block swap places) */ if (!ZSTD_cwksp_check_available(&cctx->workspace, HUF_WORKSPACE_SIZE + 2 * sizeof(ZSTD_compressedBlockState_t))) return NULL; cctx->blockState.prevCBlock = (ZSTD_compressedBlockState_t*)ZSTD_cwksp_reserve_object(&cctx->workspace, sizeof(ZSTD_compressedBlockState_t)); cctx->blockState.nextCBlock = (ZSTD_compressedBlockState_t*)ZSTD_cwksp_reserve_object(&cctx->workspace, sizeof(ZSTD_compressedBlockState_t)); cctx->entropyWorkspace = (U32*)ZSTD_cwksp_reserve_object(&cctx->workspace, HUF_WORKSPACE_SIZE); cctx->bmi2 = ZSTD_cpuid_bmi2(ZSTD_cpuid()); return cctx; } /** * Clears and frees all of the dictionaries in the CCtx. */ static void ZSTD_clearAllDicts(ZSTD_CCtx* cctx) { ZSTD_free(cctx->localDict.dictBuffer, cctx->customMem); ZSTD_freeCDict(cctx->localDict.cdict); memset(&cctx->localDict, 0, sizeof(cctx->localDict)); memset(&cctx->prefixDict, 0, sizeof(cctx->prefixDict)); cctx->cdict = NULL; } static size_t ZSTD_sizeof_localDict(ZSTD_localDict dict) { size_t const bufferSize = dict.dictBuffer != NULL ? dict.dictSize : 0; size_t const cdictSize = ZSTD_sizeof_CDict(dict.cdict); return bufferSize + cdictSize; } static void ZSTD_freeCCtxContent(ZSTD_CCtx* cctx) { assert(cctx != NULL); assert(cctx->staticSize == 0); ZSTD_clearAllDicts(cctx); #ifdef ZSTD_MULTITHREAD ZSTDMT_freeCCtx(cctx->mtctx); cctx->mtctx = NULL; #endif ZSTD_cwksp_free(&cctx->workspace, cctx->customMem); } size_t ZSTD_freeCCtx(ZSTD_CCtx* cctx) { if (cctx==NULL) return 0; /* support free on NULL */ RETURN_ERROR_IF(cctx->staticSize, memory_allocation, "not compatible with static CCtx"); { int cctxInWorkspace = ZSTD_cwksp_owns_buffer(&cctx->workspace, cctx); ZSTD_freeCCtxContent(cctx); if (!cctxInWorkspace) { ZSTD_free(cctx, cctx->customMem); } } return 0; } static size_t ZSTD_sizeof_mtctx(const ZSTD_CCtx* cctx) { #ifdef ZSTD_MULTITHREAD return ZSTDMT_sizeof_CCtx(cctx->mtctx); #else (void)cctx; return 0; #endif } size_t ZSTD_sizeof_CCtx(const ZSTD_CCtx* cctx) { if (cctx==NULL) return 0; /* support sizeof on NULL */ /* cctx may be in the workspace */ return (cctx->workspace.workspace == cctx ? 
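/* Illustrative sketch, not from the upstream zstd sources: ZSTD_initStaticCCtx() above
 * lets a caller run a compression context inside its own buffer, with no internal
 * allocation. The sketch assumes the public zstd.h API (ZSTD_estimateCCtxSize,
 * ZSTD_compressCCtx, ZSTD_isError); wksp, dst and src are hypothetical caller buffers,
 * level 3 is an arbitrary choice, and error handling is elided.
 *
 *   size_t const wkspSize = ZSTD_estimateCCtxSize(3);
 *   void* const wksp = malloc(wkspSize);                  // must be 8-byte aligned
 *   ZSTD_CCtx* const cctx = ZSTD_initStaticCCtx(wksp, wkspSize);
 *   if (cctx != NULL) {
 *       size_t const cSize = ZSTD_compressCCtx(cctx, dst, dstCapacity, src, srcSize, 3);
 *       if (!ZSTD_isError(cSize)) { ... dst holds cSize bytes ... }
 *   }
 *   free(wksp);   // a static cctx is released by freeing the workspace, not ZSTD_freeCCtx()
 */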
0 : sizeof(*cctx)) + ZSTD_cwksp_sizeof(&cctx->workspace) + ZSTD_sizeof_localDict(cctx->localDict) + ZSTD_sizeof_mtctx(cctx); } size_t ZSTD_sizeof_CStream(const ZSTD_CStream* zcs) { return ZSTD_sizeof_CCtx(zcs); /* same object */ } /* private API call, for dictBuilder only */ const seqStore_t* ZSTD_getSeqStore(const ZSTD_CCtx* ctx) { return &(ctx->seqStore); } static ZSTD_CCtx_params ZSTD_makeCCtxParamsFromCParams( ZSTD_compressionParameters cParams) { ZSTD_CCtx_params cctxParams; memset(&cctxParams, 0, sizeof(cctxParams)); cctxParams.cParams = cParams; cctxParams.compressionLevel = ZSTD_CLEVEL_DEFAULT; /* should not matter, as all cParams are presumed properly defined */ assert(!ZSTD_checkCParams(cParams)); cctxParams.fParams.contentSizeFlag = 1; return cctxParams; } static ZSTD_CCtx_params* ZSTD_createCCtxParams_advanced( ZSTD_customMem customMem) { ZSTD_CCtx_params* params; if (!customMem.customAlloc ^ !customMem.customFree) return NULL; params = (ZSTD_CCtx_params*)ZSTD_calloc( sizeof(ZSTD_CCtx_params), customMem); if (!params) { return NULL; } params->customMem = customMem; params->compressionLevel = ZSTD_CLEVEL_DEFAULT; params->fParams.contentSizeFlag = 1; return params; } ZSTD_CCtx_params* ZSTD_createCCtxParams(void) { return ZSTD_createCCtxParams_advanced(ZSTD_defaultCMem); } size_t ZSTD_freeCCtxParams(ZSTD_CCtx_params* params) { if (params == NULL) { return 0; } ZSTD_free(params, params->customMem); return 0; } size_t ZSTD_CCtxParams_reset(ZSTD_CCtx_params* params) { return ZSTD_CCtxParams_init(params, ZSTD_CLEVEL_DEFAULT); } size_t ZSTD_CCtxParams_init(ZSTD_CCtx_params* cctxParams, int compressionLevel) { RETURN_ERROR_IF(!cctxParams, GENERIC, "NULL pointer!"); memset(cctxParams, 0, sizeof(*cctxParams)); cctxParams->compressionLevel = compressionLevel; cctxParams->fParams.contentSizeFlag = 1; return 0; } size_t ZSTD_CCtxParams_init_advanced(ZSTD_CCtx_params* cctxParams, ZSTD_parameters params) { RETURN_ERROR_IF(!cctxParams, GENERIC, "NULL pointer!"); FORWARD_IF_ERROR( ZSTD_checkCParams(params.cParams) , ""); memset(cctxParams, 0, sizeof(*cctxParams)); assert(!ZSTD_checkCParams(params.cParams)); cctxParams->cParams = params.cParams; cctxParams->fParams = params.fParams; cctxParams->compressionLevel = ZSTD_CLEVEL_DEFAULT; /* should not matter, as all cParams are presumed properly defined */ return 0; } /* ZSTD_assignParamsToCCtxParams() : * params is presumed valid at this stage */ static ZSTD_CCtx_params ZSTD_assignParamsToCCtxParams( const ZSTD_CCtx_params* cctxParams, const ZSTD_parameters* params) { ZSTD_CCtx_params ret = *cctxParams; assert(!ZSTD_checkCParams(params->cParams)); ret.cParams = params->cParams; ret.fParams = params->fParams; ret.compressionLevel = ZSTD_CLEVEL_DEFAULT; /* should not matter, as all cParams are presumed properly defined */ return ret; } ZSTD_bounds ZSTD_cParam_getBounds(ZSTD_cParameter param) { ZSTD_bounds bounds = { 0, 0, 0 }; switch(param) { case ZSTD_c_compressionLevel: bounds.lowerBound = ZSTD_minCLevel(); bounds.upperBound = ZSTD_maxCLevel(); return bounds; case ZSTD_c_windowLog: bounds.lowerBound = ZSTD_WINDOWLOG_MIN; bounds.upperBound = ZSTD_WINDOWLOG_MAX; return bounds; case ZSTD_c_hashLog: bounds.lowerBound = ZSTD_HASHLOG_MIN; bounds.upperBound = ZSTD_HASHLOG_MAX; return bounds; case ZSTD_c_chainLog: bounds.lowerBound = ZSTD_CHAINLOG_MIN; bounds.upperBound = ZSTD_CHAINLOG_MAX; return bounds; case ZSTD_c_searchLog: bounds.lowerBound = ZSTD_SEARCHLOG_MIN; bounds.upperBound = ZSTD_SEARCHLOG_MAX; return bounds; case ZSTD_c_minMatch: 
bounds.lowerBound = ZSTD_MINMATCH_MIN; bounds.upperBound = ZSTD_MINMATCH_MAX; return bounds; case ZSTD_c_targetLength: bounds.lowerBound = ZSTD_TARGETLENGTH_MIN; bounds.upperBound = ZSTD_TARGETLENGTH_MAX; return bounds; case ZSTD_c_strategy: bounds.lowerBound = ZSTD_STRATEGY_MIN; bounds.upperBound = ZSTD_STRATEGY_MAX; return bounds; case ZSTD_c_contentSizeFlag: bounds.lowerBound = 0; bounds.upperBound = 1; return bounds; case ZSTD_c_checksumFlag: bounds.lowerBound = 0; bounds.upperBound = 1; return bounds; case ZSTD_c_dictIDFlag: bounds.lowerBound = 0; bounds.upperBound = 1; return bounds; case ZSTD_c_nbWorkers: bounds.lowerBound = 0; #ifdef ZSTD_MULTITHREAD bounds.upperBound = ZSTDMT_NBWORKERS_MAX; #else bounds.upperBound = 0; #endif return bounds; case ZSTD_c_jobSize: bounds.lowerBound = 0; #ifdef ZSTD_MULTITHREAD bounds.upperBound = ZSTDMT_JOBSIZE_MAX; #else bounds.upperBound = 0; #endif return bounds; case ZSTD_c_overlapLog: #ifdef ZSTD_MULTITHREAD bounds.lowerBound = ZSTD_OVERLAPLOG_MIN; bounds.upperBound = ZSTD_OVERLAPLOG_MAX; #else bounds.lowerBound = 0; bounds.upperBound = 0; #endif return bounds; case ZSTD_c_enableLongDistanceMatching: bounds.lowerBound = 0; bounds.upperBound = 1; return bounds; case ZSTD_c_ldmHashLog: bounds.lowerBound = ZSTD_LDM_HASHLOG_MIN; bounds.upperBound = ZSTD_LDM_HASHLOG_MAX; return bounds; case ZSTD_c_ldmMinMatch: bounds.lowerBound = ZSTD_LDM_MINMATCH_MIN; bounds.upperBound = ZSTD_LDM_MINMATCH_MAX; return bounds; case ZSTD_c_ldmBucketSizeLog: bounds.lowerBound = ZSTD_LDM_BUCKETSIZELOG_MIN; bounds.upperBound = ZSTD_LDM_BUCKETSIZELOG_MAX; return bounds; case ZSTD_c_ldmHashRateLog: bounds.lowerBound = ZSTD_LDM_HASHRATELOG_MIN; bounds.upperBound = ZSTD_LDM_HASHRATELOG_MAX; return bounds; /* experimental parameters */ case ZSTD_c_rsyncable: bounds.lowerBound = 0; bounds.upperBound = 1; return bounds; case ZSTD_c_forceMaxWindow : bounds.lowerBound = 0; bounds.upperBound = 1; return bounds; case ZSTD_c_format: ZSTD_STATIC_ASSERT(ZSTD_f_zstd1 < ZSTD_f_zstd1_magicless); bounds.lowerBound = ZSTD_f_zstd1; bounds.upperBound = ZSTD_f_zstd1_magicless; /* note : how to ensure at compile time that this is the highest value enum ? */ return bounds; case ZSTD_c_forceAttachDict: ZSTD_STATIC_ASSERT(ZSTD_dictDefaultAttach < ZSTD_dictForceCopy); bounds.lowerBound = ZSTD_dictDefaultAttach; bounds.upperBound = ZSTD_dictForceLoad; /* note : how to ensure at compile time that this is the highest value enum ? */ return bounds; case ZSTD_c_literalCompressionMode: ZSTD_STATIC_ASSERT(ZSTD_lcm_auto < ZSTD_lcm_huffman && ZSTD_lcm_huffman < ZSTD_lcm_uncompressed); bounds.lowerBound = ZSTD_lcm_auto; bounds.upperBound = ZSTD_lcm_uncompressed; return bounds; case ZSTD_c_targetCBlockSize: bounds.lowerBound = ZSTD_TARGETCBLOCKSIZE_MIN; bounds.upperBound = ZSTD_TARGETCBLOCKSIZE_MAX; return bounds; case ZSTD_c_srcSizeHint: bounds.lowerBound = ZSTD_SRCSIZEHINT_MIN; bounds.upperBound = ZSTD_SRCSIZEHINT_MAX; return bounds; default: bounds.error = ERROR(parameter_unsupported); return bounds; } } /* ZSTD_cParam_clampBounds: * Clamps the value into the bounded range. 
*/ static size_t ZSTD_cParam_clampBounds(ZSTD_cParameter cParam, int* value) { ZSTD_bounds const bounds = ZSTD_cParam_getBounds(cParam); if (ZSTD_isError(bounds.error)) return bounds.error; if (*value < bounds.lowerBound) *value = bounds.lowerBound; if (*value > bounds.upperBound) *value = bounds.upperBound; return 0; } #define BOUNDCHECK(cParam, val) { \ RETURN_ERROR_IF(!ZSTD_cParam_withinBounds(cParam,val), \ parameter_outOfBound, "Param out of bounds"); \ } static int ZSTD_isUpdateAuthorized(ZSTD_cParameter param) { switch(param) { case ZSTD_c_compressionLevel: case ZSTD_c_hashLog: case ZSTD_c_chainLog: case ZSTD_c_searchLog: case ZSTD_c_minMatch: case ZSTD_c_targetLength: case ZSTD_c_strategy: return 1; case ZSTD_c_format: case ZSTD_c_windowLog: case ZSTD_c_contentSizeFlag: case ZSTD_c_checksumFlag: case ZSTD_c_dictIDFlag: case ZSTD_c_forceMaxWindow : case ZSTD_c_nbWorkers: case ZSTD_c_jobSize: case ZSTD_c_overlapLog: case ZSTD_c_rsyncable: case ZSTD_c_enableLongDistanceMatching: case ZSTD_c_ldmHashLog: case ZSTD_c_ldmMinMatch: case ZSTD_c_ldmBucketSizeLog: case ZSTD_c_ldmHashRateLog: case ZSTD_c_forceAttachDict: case ZSTD_c_literalCompressionMode: case ZSTD_c_targetCBlockSize: case ZSTD_c_srcSizeHint: default: return 0; } } size_t ZSTD_CCtx_setParameter(ZSTD_CCtx* cctx, ZSTD_cParameter param, int value) { DEBUGLOG(4, "ZSTD_CCtx_setParameter (%i, %i)", (int)param, value); if (cctx->streamStage != zcss_init) { if (ZSTD_isUpdateAuthorized(param)) { cctx->cParamsChanged = 1; } else { RETURN_ERROR(stage_wrong, "can only set params in ctx init stage"); } } switch(param) { case ZSTD_c_nbWorkers: RETURN_ERROR_IF((value!=0) && cctx->staticSize, parameter_unsupported, "MT not compatible with static alloc"); break; case ZSTD_c_compressionLevel: case ZSTD_c_windowLog: case ZSTD_c_hashLog: case ZSTD_c_chainLog: case ZSTD_c_searchLog: case ZSTD_c_minMatch: case ZSTD_c_targetLength: case ZSTD_c_strategy: case ZSTD_c_ldmHashRateLog: case ZSTD_c_format: case ZSTD_c_contentSizeFlag: case ZSTD_c_checksumFlag: case ZSTD_c_dictIDFlag: case ZSTD_c_forceMaxWindow: case ZSTD_c_forceAttachDict: case ZSTD_c_literalCompressionMode: case ZSTD_c_jobSize: case ZSTD_c_overlapLog: case ZSTD_c_rsyncable: case ZSTD_c_enableLongDistanceMatching: case ZSTD_c_ldmHashLog: case ZSTD_c_ldmMinMatch: case ZSTD_c_ldmBucketSizeLog: case ZSTD_c_targetCBlockSize: case ZSTD_c_srcSizeHint: break; default: RETURN_ERROR(parameter_unsupported, "unknown parameter"); } return ZSTD_CCtxParams_setParameter(&cctx->requestedParams, param, value); } size_t ZSTD_CCtxParams_setParameter(ZSTD_CCtx_params* CCtxParams, ZSTD_cParameter param, int value) { DEBUGLOG(4, "ZSTD_CCtxParams_setParameter (%i, %i)", (int)param, value); switch(param) { case ZSTD_c_format : BOUNDCHECK(ZSTD_c_format, value); CCtxParams->format = (ZSTD_format_e)value; return (size_t)CCtxParams->format; case ZSTD_c_compressionLevel : { FORWARD_IF_ERROR(ZSTD_cParam_clampBounds(param, &value), ""); if (value) { /* 0 : does not change current level */ CCtxParams->compressionLevel = value; } if (CCtxParams->compressionLevel >= 0) return (size_t)CCtxParams->compressionLevel; return 0; /* return type (size_t) cannot represent negative values */ } case ZSTD_c_windowLog : if (value!=0) /* 0 => use default */ BOUNDCHECK(ZSTD_c_windowLog, value); CCtxParams->cParams.windowLog = (U32)value; return CCtxParams->cParams.windowLog; case ZSTD_c_hashLog : if (value!=0) /* 0 => use default */ BOUNDCHECK(ZSTD_c_hashLog, value); CCtxParams->cParams.hashLog = (U32)value; return 
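/* Illustrative sketch, not from the upstream zstd sources: ZSTD_CCtx_setParameter()
 * above is the entry point of the advanced parameter API. A typical caller sets a few
 * parameters on a fresh context, then compresses with ZSTD_compress2() (public zstd.h
 * API). dst and src are hypothetical caller buffers; error checks are elided.
 *
 *   ZSTD_CCtx* const cctx = ZSTD_createCCtx();
 *   ZSTD_CCtx_setParameter(cctx, ZSTD_c_compressionLevel, 19);
 *   ZSTD_CCtx_setParameter(cctx, ZSTD_c_checksumFlag, 1);
 *   ZSTD_CCtx_setParameter(cctx, ZSTD_c_nbWorkers, 4);    // needs ZSTD_MULTITHREAD
 *   { size_t const cSize = ZSTD_compress2(cctx, dst, dstCapacity, src, srcSize); (void)cSize; }
 *   ZSTD_freeCCtx(cctx);
 */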
CCtxParams->cParams.hashLog; case ZSTD_c_chainLog : if (value!=0) /* 0 => use default */ BOUNDCHECK(ZSTD_c_chainLog, value); CCtxParams->cParams.chainLog = (U32)value; return CCtxParams->cParams.chainLog; case ZSTD_c_searchLog : if (value!=0) /* 0 => use default */ BOUNDCHECK(ZSTD_c_searchLog, value); CCtxParams->cParams.searchLog = (U32)value; return (size_t)value; case ZSTD_c_minMatch : if (value!=0) /* 0 => use default */ BOUNDCHECK(ZSTD_c_minMatch, value); CCtxParams->cParams.minMatch = value; return CCtxParams->cParams.minMatch; case ZSTD_c_targetLength : BOUNDCHECK(ZSTD_c_targetLength, value); CCtxParams->cParams.targetLength = value; return CCtxParams->cParams.targetLength; case ZSTD_c_strategy : if (value!=0) /* 0 => use default */ BOUNDCHECK(ZSTD_c_strategy, value); CCtxParams->cParams.strategy = (ZSTD_strategy)value; return (size_t)CCtxParams->cParams.strategy; case ZSTD_c_contentSizeFlag : /* Content size written in frame header _when known_ (default:1) */ DEBUGLOG(4, "set content size flag = %u", (value!=0)); CCtxParams->fParams.contentSizeFlag = value != 0; return CCtxParams->fParams.contentSizeFlag; case ZSTD_c_checksumFlag : /* A 32-bits content checksum will be calculated and written at end of frame (default:0) */ CCtxParams->fParams.checksumFlag = value != 0; return CCtxParams->fParams.checksumFlag; case ZSTD_c_dictIDFlag : /* When applicable, dictionary's dictID is provided in frame header (default:1) */ DEBUGLOG(4, "set dictIDFlag = %u", (value!=0)); CCtxParams->fParams.noDictIDFlag = !value; return !CCtxParams->fParams.noDictIDFlag; case ZSTD_c_forceMaxWindow : CCtxParams->forceWindow = (value != 0); return CCtxParams->forceWindow; case ZSTD_c_forceAttachDict : { const ZSTD_dictAttachPref_e pref = (ZSTD_dictAttachPref_e)value; BOUNDCHECK(ZSTD_c_forceAttachDict, pref); CCtxParams->attachDictPref = pref; return CCtxParams->attachDictPref; } case ZSTD_c_literalCompressionMode : { const ZSTD_literalCompressionMode_e lcm = (ZSTD_literalCompressionMode_e)value; BOUNDCHECK(ZSTD_c_literalCompressionMode, lcm); CCtxParams->literalCompressionMode = lcm; return CCtxParams->literalCompressionMode; } case ZSTD_c_nbWorkers : #ifndef ZSTD_MULTITHREAD RETURN_ERROR_IF(value!=0, parameter_unsupported, "not compiled with multithreading"); return 0; #else FORWARD_IF_ERROR(ZSTD_cParam_clampBounds(param, &value), ""); CCtxParams->nbWorkers = value; return CCtxParams->nbWorkers; #endif case ZSTD_c_jobSize : #ifndef ZSTD_MULTITHREAD RETURN_ERROR_IF(value!=0, parameter_unsupported, "not compiled with multithreading"); return 0; #else /* Adjust to the minimum non-default value. 
*/ if (value != 0 && value < ZSTDMT_JOBSIZE_MIN) value = ZSTDMT_JOBSIZE_MIN; FORWARD_IF_ERROR(ZSTD_cParam_clampBounds(param, &value), ""); assert(value >= 0); CCtxParams->jobSize = value; return CCtxParams->jobSize; #endif case ZSTD_c_overlapLog : #ifndef ZSTD_MULTITHREAD RETURN_ERROR_IF(value!=0, parameter_unsupported, "not compiled with multithreading"); return 0; #else FORWARD_IF_ERROR(ZSTD_cParam_clampBounds(ZSTD_c_overlapLog, &value), ""); CCtxParams->overlapLog = value; return CCtxParams->overlapLog; #endif case ZSTD_c_rsyncable : #ifndef ZSTD_MULTITHREAD RETURN_ERROR_IF(value!=0, parameter_unsupported, "not compiled with multithreading"); return 0; #else FORWARD_IF_ERROR(ZSTD_cParam_clampBounds(ZSTD_c_overlapLog, &value), ""); CCtxParams->rsyncable = value; return CCtxParams->rsyncable; #endif case ZSTD_c_enableLongDistanceMatching : CCtxParams->ldmParams.enableLdm = (value!=0); return CCtxParams->ldmParams.enableLdm; case ZSTD_c_ldmHashLog : if (value!=0) /* 0 ==> auto */ BOUNDCHECK(ZSTD_c_ldmHashLog, value); CCtxParams->ldmParams.hashLog = value; return CCtxParams->ldmParams.hashLog; case ZSTD_c_ldmMinMatch : if (value!=0) /* 0 ==> default */ BOUNDCHECK(ZSTD_c_ldmMinMatch, value); CCtxParams->ldmParams.minMatchLength = value; return CCtxParams->ldmParams.minMatchLength; case ZSTD_c_ldmBucketSizeLog : if (value!=0) /* 0 ==> default */ BOUNDCHECK(ZSTD_c_ldmBucketSizeLog, value); CCtxParams->ldmParams.bucketSizeLog = value; return CCtxParams->ldmParams.bucketSizeLog; case ZSTD_c_ldmHashRateLog : RETURN_ERROR_IF(value > ZSTD_WINDOWLOG_MAX - ZSTD_HASHLOG_MIN, parameter_outOfBound, "Param out of bounds!"); CCtxParams->ldmParams.hashRateLog = value; return CCtxParams->ldmParams.hashRateLog; case ZSTD_c_targetCBlockSize : if (value!=0) /* 0 ==> default */ BOUNDCHECK(ZSTD_c_targetCBlockSize, value); CCtxParams->targetCBlockSize = value; return CCtxParams->targetCBlockSize; case ZSTD_c_srcSizeHint : if (value!=0) /* 0 ==> default */ BOUNDCHECK(ZSTD_c_srcSizeHint, value); CCtxParams->srcSizeHint = value; return CCtxParams->srcSizeHint; default: RETURN_ERROR(parameter_unsupported, "unknown parameter"); } } size_t ZSTD_CCtx_getParameter(ZSTD_CCtx* cctx, ZSTD_cParameter param, int* value) { return ZSTD_CCtxParams_getParameter(&cctx->requestedParams, param, value); } size_t ZSTD_CCtxParams_getParameter( ZSTD_CCtx_params* CCtxParams, ZSTD_cParameter param, int* value) { switch(param) { case ZSTD_c_format : *value = CCtxParams->format; break; case ZSTD_c_compressionLevel : *value = CCtxParams->compressionLevel; break; case ZSTD_c_windowLog : *value = (int)CCtxParams->cParams.windowLog; break; case ZSTD_c_hashLog : *value = (int)CCtxParams->cParams.hashLog; break; case ZSTD_c_chainLog : *value = (int)CCtxParams->cParams.chainLog; break; case ZSTD_c_searchLog : *value = CCtxParams->cParams.searchLog; break; case ZSTD_c_minMatch : *value = CCtxParams->cParams.minMatch; break; case ZSTD_c_targetLength : *value = CCtxParams->cParams.targetLength; break; case ZSTD_c_strategy : *value = (unsigned)CCtxParams->cParams.strategy; break; case ZSTD_c_contentSizeFlag : *value = CCtxParams->fParams.contentSizeFlag; break; case ZSTD_c_checksumFlag : *value = CCtxParams->fParams.checksumFlag; break; case ZSTD_c_dictIDFlag : *value = !CCtxParams->fParams.noDictIDFlag; break; case ZSTD_c_forceMaxWindow : *value = CCtxParams->forceWindow; break; case ZSTD_c_forceAttachDict : *value = CCtxParams->attachDictPref; break; case ZSTD_c_literalCompressionMode : *value = CCtxParams->literalCompressionMode; break; case 
ZSTD_c_nbWorkers : #ifndef ZSTD_MULTITHREAD assert(CCtxParams->nbWorkers == 0); #endif *value = CCtxParams->nbWorkers; break; case ZSTD_c_jobSize : #ifndef ZSTD_MULTITHREAD RETURN_ERROR(parameter_unsupported, "not compiled with multithreading"); #else assert(CCtxParams->jobSize <= INT_MAX); *value = (int)CCtxParams->jobSize; break; #endif case ZSTD_c_overlapLog : #ifndef ZSTD_MULTITHREAD RETURN_ERROR(parameter_unsupported, "not compiled with multithreading"); #else *value = CCtxParams->overlapLog; break; #endif case ZSTD_c_rsyncable : #ifndef ZSTD_MULTITHREAD RETURN_ERROR(parameter_unsupported, "not compiled with multithreading"); #else *value = CCtxParams->rsyncable; break; #endif case ZSTD_c_enableLongDistanceMatching : *value = CCtxParams->ldmParams.enableLdm; break; case ZSTD_c_ldmHashLog : *value = CCtxParams->ldmParams.hashLog; break; case ZSTD_c_ldmMinMatch : *value = CCtxParams->ldmParams.minMatchLength; break; case ZSTD_c_ldmBucketSizeLog : *value = CCtxParams->ldmParams.bucketSizeLog; break; case ZSTD_c_ldmHashRateLog : *value = CCtxParams->ldmParams.hashRateLog; break; case ZSTD_c_targetCBlockSize : *value = (int)CCtxParams->targetCBlockSize; break; case ZSTD_c_srcSizeHint : *value = (int)CCtxParams->srcSizeHint; break; default: RETURN_ERROR(parameter_unsupported, "unknown parameter"); } return 0; } /** ZSTD_CCtx_setParametersUsingCCtxParams() : * just applies `params` into `cctx` * no action is performed, parameters are merely stored. * If ZSTDMT is enabled, parameters are pushed to cctx->mtctx. * This is possible even if a compression is ongoing. * In which case, new parameters will be applied on the fly, starting with next compression job. */ size_t ZSTD_CCtx_setParametersUsingCCtxParams( ZSTD_CCtx* cctx, const ZSTD_CCtx_params* params) { DEBUGLOG(4, "ZSTD_CCtx_setParametersUsingCCtxParams"); RETURN_ERROR_IF(cctx->streamStage != zcss_init, stage_wrong, "The context is in the wrong stage!"); RETURN_ERROR_IF(cctx->cdict, stage_wrong, "Can't override parameters with cdict attached (some must " "be inherited from the cdict)."); cctx->requestedParams = *params; return 0; } ZSTDLIB_API size_t ZSTD_CCtx_setPledgedSrcSize(ZSTD_CCtx* cctx, unsigned long long pledgedSrcSize) { DEBUGLOG(4, "ZSTD_CCtx_setPledgedSrcSize to %u bytes", (U32)pledgedSrcSize); RETURN_ERROR_IF(cctx->streamStage != zcss_init, stage_wrong, "Can't set pledgedSrcSize when not in init stage."); cctx->pledgedSrcSizePlusOne = pledgedSrcSize+1; return 0; } /** * Initializes the local dict using the requested parameters. * NOTE: This does not use the pledged src size, because it may be used for more * than one compression. */ static size_t ZSTD_initLocalDict(ZSTD_CCtx* cctx) { ZSTD_localDict* const dl = &cctx->localDict; ZSTD_compressionParameters const cParams = ZSTD_getCParamsFromCCtxParams( &cctx->requestedParams, ZSTD_CONTENTSIZE_UNKNOWN, dl->dictSize); if (dl->dict == NULL) { /* No local dictionary. */ assert(dl->dictBuffer == NULL); assert(dl->cdict == NULL); assert(dl->dictSize == 0); return 0; } if (dl->cdict != NULL) { assert(cctx->cdict == dl->cdict); /* Local dictionary already initialized. 
*/ return 0; } assert(dl->dictSize > 0); assert(cctx->cdict == NULL); assert(cctx->prefixDict.dict == NULL); dl->cdict = ZSTD_createCDict_advanced( dl->dict, dl->dictSize, ZSTD_dlm_byRef, dl->dictContentType, cParams, cctx->customMem); RETURN_ERROR_IF(!dl->cdict, memory_allocation, "ZSTD_createCDict_advanced failed"); cctx->cdict = dl->cdict; return 0; } size_t ZSTD_CCtx_loadDictionary_advanced( ZSTD_CCtx* cctx, const void* dict, size_t dictSize, ZSTD_dictLoadMethod_e dictLoadMethod, ZSTD_dictContentType_e dictContentType) { RETURN_ERROR_IF(cctx->streamStage != zcss_init, stage_wrong, "Can't load a dictionary when ctx is not in init stage."); RETURN_ERROR_IF(cctx->staticSize, memory_allocation, "no malloc for static CCtx"); DEBUGLOG(4, "ZSTD_CCtx_loadDictionary_advanced (size: %u)", (U32)dictSize); ZSTD_clearAllDicts(cctx); /* in case one already exists */ if (dict == NULL || dictSize == 0) /* no dictionary mode */ return 0; if (dictLoadMethod == ZSTD_dlm_byRef) { cctx->localDict.dict = dict; } else { void* dictBuffer = ZSTD_malloc(dictSize, cctx->customMem); RETURN_ERROR_IF(!dictBuffer, memory_allocation, "NULL pointer!"); memcpy(dictBuffer, dict, dictSize); cctx->localDict.dictBuffer = dictBuffer; cctx->localDict.dict = dictBuffer; } cctx->localDict.dictSize = dictSize; cctx->localDict.dictContentType = dictContentType; return 0; } ZSTDLIB_API size_t ZSTD_CCtx_loadDictionary_byReference( ZSTD_CCtx* cctx, const void* dict, size_t dictSize) { return ZSTD_CCtx_loadDictionary_advanced( cctx, dict, dictSize, ZSTD_dlm_byRef, ZSTD_dct_auto); } ZSTDLIB_API size_t ZSTD_CCtx_loadDictionary(ZSTD_CCtx* cctx, const void* dict, size_t dictSize) { return ZSTD_CCtx_loadDictionary_advanced( cctx, dict, dictSize, ZSTD_dlm_byCopy, ZSTD_dct_auto); } size_t ZSTD_CCtx_refCDict(ZSTD_CCtx* cctx, const ZSTD_CDict* cdict) { RETURN_ERROR_IF(cctx->streamStage != zcss_init, stage_wrong, "Can't ref a dict when ctx not in init stage."); /* Free the existing local cdict (if any) to save memory. */ ZSTD_clearAllDicts(cctx); cctx->cdict = cdict; return 0; } size_t ZSTD_CCtx_refPrefix(ZSTD_CCtx* cctx, const void* prefix, size_t prefixSize) { return ZSTD_CCtx_refPrefix_advanced(cctx, prefix, prefixSize, ZSTD_dct_rawContent); } size_t ZSTD_CCtx_refPrefix_advanced( ZSTD_CCtx* cctx, const void* prefix, size_t prefixSize, ZSTD_dictContentType_e dictContentType) { RETURN_ERROR_IF(cctx->streamStage != zcss_init, stage_wrong, "Can't ref a prefix when ctx not in init stage."); ZSTD_clearAllDicts(cctx); if (prefix != NULL && prefixSize > 0) { cctx->prefixDict.dict = prefix; cctx->prefixDict.dictSize = prefixSize; cctx->prefixDict.dictContentType = dictContentType; } return 0; } /*! ZSTD_CCtx_reset() : * Also dumps dictionary */ size_t ZSTD_CCtx_reset(ZSTD_CCtx* cctx, ZSTD_ResetDirective reset) { if ( (reset == ZSTD_reset_session_only) || (reset == ZSTD_reset_session_and_parameters) ) { cctx->streamStage = zcss_init; cctx->pledgedSrcSizePlusOne = 0; } if ( (reset == ZSTD_reset_parameters) || (reset == ZSTD_reset_session_and_parameters) ) { RETURN_ERROR_IF(cctx->streamStage != zcss_init, stage_wrong, "Can't reset parameters only when not in init stage."); ZSTD_clearAllDicts(cctx); return ZSTD_CCtxParams_reset(&cctx->requestedParams); } return 0; } /** ZSTD_checkCParams() : control CParam values remain within authorized range. 
@return : 0, or an error code if one value is beyond authorized range */ size_t ZSTD_checkCParams(ZSTD_compressionParameters cParams) { BOUNDCHECK(ZSTD_c_windowLog, (int)cParams.windowLog); BOUNDCHECK(ZSTD_c_chainLog, (int)cParams.chainLog); BOUNDCHECK(ZSTD_c_hashLog, (int)cParams.hashLog); BOUNDCHECK(ZSTD_c_searchLog, (int)cParams.searchLog); BOUNDCHECK(ZSTD_c_minMatch, (int)cParams.minMatch); BOUNDCHECK(ZSTD_c_targetLength,(int)cParams.targetLength); BOUNDCHECK(ZSTD_c_strategy, cParams.strategy); return 0; } /** ZSTD_clampCParams() : * make CParam values within valid range. * @return : valid CParams */ static ZSTD_compressionParameters ZSTD_clampCParams(ZSTD_compressionParameters cParams) { # define CLAMP_TYPE(cParam, val, type) { \ ZSTD_bounds const bounds = ZSTD_cParam_getBounds(cParam); \ if ((int)val<bounds.lowerBound) val=(type)bounds.lowerBound; \ else if ((int)val>bounds.upperBound) val=(type)bounds.upperBound; \ } # define CLAMP(cParam, val) CLAMP_TYPE(cParam, val, unsigned) CLAMP(ZSTD_c_windowLog, cParams.windowLog); CLAMP(ZSTD_c_chainLog, cParams.chainLog); CLAMP(ZSTD_c_hashLog, cParams.hashLog); CLAMP(ZSTD_c_searchLog, cParams.searchLog); CLAMP(ZSTD_c_minMatch, cParams.minMatch); CLAMP(ZSTD_c_targetLength,cParams.targetLength); CLAMP_TYPE(ZSTD_c_strategy,cParams.strategy, ZSTD_strategy); return cParams; } /** ZSTD_cycleLog() : * condition for correct operation : hashLog > 1 */ U32 ZSTD_cycleLog(U32 hashLog, ZSTD_strategy strat) { U32 const btScale = ((U32)strat >= (U32)ZSTD_btlazy2); return hashLog - btScale; } /** ZSTD_adjustCParams_internal() : * optimize `cPar` for a specified input (`srcSize` and `dictSize`). * mostly downsize to reduce memory consumption and initialization latency. * `srcSize` can be ZSTD_CONTENTSIZE_UNKNOWN when not known. * note : `srcSize==0` means 0! * condition : cPar is presumed validated (can be checked using ZSTD_checkCParams()). */ static ZSTD_compressionParameters ZSTD_adjustCParams_internal(ZSTD_compressionParameters cPar, unsigned long long srcSize, size_t dictSize) { static const U64 minSrcSize = 513; /* (1<<9) + 1 */ static const U64 maxWindowResize = 1ULL << (ZSTD_WINDOWLOG_MAX-1); assert(ZSTD_checkCParams(cPar)==0); if (dictSize && srcSize == ZSTD_CONTENTSIZE_UNKNOWN) srcSize = minSrcSize; /* resize windowLog if input is small enough, to use less memory */ if ( (srcSize < maxWindowResize) && (dictSize < maxWindowResize) ) { U32 const tSize = (U32)(srcSize + dictSize); static U32 const hashSizeMin = 1 << ZSTD_HASHLOG_MIN; U32 const srcLog = (tSize < hashSizeMin) ?
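/* Worked example with a hypothetical input: for srcSize = 100 KB and no dictionary,
 * tSize = 102400, so srcLog = ZSTD_highbit32(102399) + 1 = 16 + 1 = 17. A requested
 * windowLog of 23 would therefore be reduced to 17 here, and hashLog/chainLog shrink
 * with it just below, which is how small inputs end up with much smaller match-finder
 * tables. */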
ZSTD_HASHLOG_MIN : ZSTD_highbit32(tSize-1) + 1; if (cPar.windowLog > srcLog) cPar.windowLog = srcLog; } if (cPar.hashLog > cPar.windowLog+1) cPar.hashLog = cPar.windowLog+1; { U32 const cycleLog = ZSTD_cycleLog(cPar.chainLog, cPar.strategy); if (cycleLog > cPar.windowLog) cPar.chainLog -= (cycleLog - cPar.windowLog); } if (cPar.windowLog < ZSTD_WINDOWLOG_ABSOLUTEMIN) cPar.windowLog = ZSTD_WINDOWLOG_ABSOLUTEMIN; /* minimum wlog required for valid frame header */ return cPar; } ZSTD_compressionParameters ZSTD_adjustCParams(ZSTD_compressionParameters cPar, unsigned long long srcSize, size_t dictSize) { cPar = ZSTD_clampCParams(cPar); /* resulting cPar is necessarily valid (all parameters within range) */ if (srcSize == 0) srcSize = ZSTD_CONTENTSIZE_UNKNOWN; return ZSTD_adjustCParams_internal(cPar, srcSize, dictSize); } static ZSTD_compressionParameters ZSTD_getCParams_internal(int compressionLevel, unsigned long long srcSizeHint, size_t dictSize); static ZSTD_parameters ZSTD_getParams_internal(int compressionLevel, unsigned long long srcSizeHint, size_t dictSize); ZSTD_compressionParameters ZSTD_getCParamsFromCCtxParams( const ZSTD_CCtx_params* CCtxParams, U64 srcSizeHint, size_t dictSize) { ZSTD_compressionParameters cParams; if (srcSizeHint == ZSTD_CONTENTSIZE_UNKNOWN && CCtxParams->srcSizeHint > 0) { srcSizeHint = CCtxParams->srcSizeHint; } cParams = ZSTD_getCParams_internal(CCtxParams->compressionLevel, srcSizeHint, dictSize); if (CCtxParams->ldmParams.enableLdm) cParams.windowLog = ZSTD_LDM_DEFAULT_WINDOW_LOG; if (CCtxParams->cParams.windowLog) cParams.windowLog = CCtxParams->cParams.windowLog; if (CCtxParams->cParams.hashLog) cParams.hashLog = CCtxParams->cParams.hashLog; if (CCtxParams->cParams.chainLog) cParams.chainLog = CCtxParams->cParams.chainLog; if (CCtxParams->cParams.searchLog) cParams.searchLog = CCtxParams->cParams.searchLog; if (CCtxParams->cParams.minMatch) cParams.minMatch = CCtxParams->cParams.minMatch; if (CCtxParams->cParams.targetLength) cParams.targetLength = CCtxParams->cParams.targetLength; if (CCtxParams->cParams.strategy) cParams.strategy = CCtxParams->cParams.strategy; assert(!ZSTD_checkCParams(cParams)); /* srcSizeHint == 0 means 0 */ return ZSTD_adjustCParams_internal(cParams, srcSizeHint, dictSize); } static size_t ZSTD_sizeof_matchState(const ZSTD_compressionParameters* const cParams, const U32 forCCtx) { size_t const chainSize = (cParams->strategy == ZSTD_fast) ? 0 : ((size_t)1 << cParams->chainLog); size_t const hSize = ((size_t)1) << cParams->hashLog; U32 const hashLog3 = (forCCtx && cParams->minMatch==3) ? MIN(ZSTD_HASHLOG3_MAX, cParams->windowLog) : 0; size_t const h3Size = hashLog3 ? ((size_t)1) << hashLog3 : 0; /* We don't use ZSTD_cwksp_alloc_size() here because the tables aren't * surrounded by redzones in ASAN. */ size_t const tableSpace = chainSize * sizeof(U32) + hSize * sizeof(U32) + h3Size * sizeof(U32); size_t const optPotentialSpace = ZSTD_cwksp_alloc_size((MaxML+1) * sizeof(U32)) + ZSTD_cwksp_alloc_size((MaxLL+1) * sizeof(U32)) + ZSTD_cwksp_alloc_size((MaxOff+1) * sizeof(U32)) + ZSTD_cwksp_alloc_size((1<<Litbits) * sizeof(U32)) + ZSTD_cwksp_alloc_size((ZSTD_OPT_NUM+1) * sizeof(ZSTD_match_t)) + ZSTD_cwksp_alloc_size((ZSTD_OPT_NUM+1) * sizeof(ZSTD_optimal_t)); size_t const optSpace = (forCCtx && (cParams->strategy >= ZSTD_btopt)) ?
optPotentialSpace : 0; DEBUGLOG(4, "chainSize: %u - hSize: %u - h3Size: %u", (U32)chainSize, (U32)hSize, (U32)h3Size); return tableSpace + optSpace; } size_t ZSTD_estimateCCtxSize_usingCCtxParams(const ZSTD_CCtx_params* params) { RETURN_ERROR_IF(params->nbWorkers > 0, GENERIC, "Estimate CCtx size is supported for single-threaded compression only."); { ZSTD_compressionParameters const cParams = ZSTD_getCParamsFromCCtxParams(params, ZSTD_CONTENTSIZE_UNKNOWN, 0); size_t const blockSize = MIN(ZSTD_BLOCKSIZE_MAX, (size_t)1 << cParams.windowLog); U32 const divider = (cParams.minMatch==3) ? 3 : 4; size_t const maxNbSeq = blockSize / divider; size_t const tokenSpace = ZSTD_cwksp_alloc_size(WILDCOPY_OVERLENGTH + blockSize) + ZSTD_cwksp_alloc_size(maxNbSeq * sizeof(seqDef)) + 3 * ZSTD_cwksp_alloc_size(maxNbSeq * sizeof(BYTE)); size_t const entropySpace = ZSTD_cwksp_alloc_size(HUF_WORKSPACE_SIZE); size_t const blockStateSpace = 2 * ZSTD_cwksp_alloc_size(sizeof(ZSTD_compressedBlockState_t)); size_t const matchStateSize = ZSTD_sizeof_matchState(&cParams, /* forCCtx */ 1); size_t const ldmSpace = ZSTD_ldm_getTableSize(params->ldmParams); size_t const ldmSeqSpace = ZSTD_cwksp_alloc_size(ZSTD_ldm_getMaxNbSeq(params->ldmParams, blockSize) * sizeof(rawSeq)); /* estimateCCtxSize is for one-shot compression. So no buffers should * be needed. However, we still allocate two 0-sized buffers, which can * take space under ASAN. */ size_t const bufferSpace = ZSTD_cwksp_alloc_size(0) + ZSTD_cwksp_alloc_size(0); size_t const cctxSpace = ZSTD_cwksp_alloc_size(sizeof(ZSTD_CCtx)); size_t const neededSpace = cctxSpace + entropySpace + blockStateSpace + ldmSpace + ldmSeqSpace + matchStateSize + tokenSpace + bufferSpace; DEBUGLOG(5, "estimate workspace : %u", (U32)neededSpace); return neededSpace; } } size_t ZSTD_estimateCCtxSize_usingCParams(ZSTD_compressionParameters cParams) { ZSTD_CCtx_params const params = ZSTD_makeCCtxParamsFromCParams(cParams); return ZSTD_estimateCCtxSize_usingCCtxParams(&params); } static size_t ZSTD_estimateCCtxSize_internal(int compressionLevel) { ZSTD_compressionParameters const cParams = ZSTD_getCParams_internal(compressionLevel, ZSTD_CONTENTSIZE_UNKNOWN, 0); return ZSTD_estimateCCtxSize_usingCParams(cParams); } size_t ZSTD_estimateCCtxSize(int compressionLevel) { int level; size_t memBudget = 0; for (level=MIN(compressionLevel, 1); level<=compressionLevel; level++) { size_t const newMB = ZSTD_estimateCCtxSize_internal(level); if (newMB > memBudget) memBudget = newMB; } return memBudget; } size_t ZSTD_estimateCStreamSize_usingCCtxParams(const ZSTD_CCtx_params* params) { RETURN_ERROR_IF(params->nbWorkers > 0, GENERIC, "Estimate CCtx size is supported for single-threaded compression only."); { ZSTD_compressionParameters const cParams = ZSTD_getCParamsFromCCtxParams(params, ZSTD_CONTENTSIZE_UNKNOWN, 0); size_t const CCtxSize = ZSTD_estimateCCtxSize_usingCCtxParams(params); size_t const blockSize = MIN(ZSTD_BLOCKSIZE_MAX, (size_t)1 << cParams.windowLog); size_t const inBuffSize = ((size_t)1 << cParams.windowLog) + blockSize; size_t const outBuffSize = ZSTD_compressBound(blockSize) + 1; size_t const streamingSize = ZSTD_cwksp_alloc_size(inBuffSize) + ZSTD_cwksp_alloc_size(outBuffSize); return CCtxSize + streamingSize; } } size_t ZSTD_estimateCStreamSize_usingCParams(ZSTD_compressionParameters cParams) { ZSTD_CCtx_params const params = ZSTD_makeCCtxParamsFromCParams(cParams); return ZSTD_estimateCStreamSize_usingCCtxParams(&params); } static size_t ZSTD_estimateCStreamSize_internal(int compressionLevel) {
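/* Note (illustrative, not from upstream zstd): the streaming estimate above equals the
 * one-shot estimate plus the two streaming buffers, an input buffer of
 * (windowSize + blockSize) bytes and an output buffer of ZSTD_compressBound(blockSize) + 1
 * bytes. A quick sanity check a caller could run, with level 3 chosen arbitrarily:
 *
 *   size_t const oneShot   = ZSTD_estimateCCtxSize(3);
 *   size_t const streaming = ZSTD_estimateCStreamSize(3);
 *   assert(streaming >= oneShot);
 */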
ZSTD_compressionParameters const cParams = ZSTD_getCParams_internal(compressionLevel, ZSTD_CONTENTSIZE_UNKNOWN, 0); return ZSTD_estimateCStreamSize_usingCParams(cParams); } size_t ZSTD_estimateCStreamSize(int compressionLevel) { int level; size_t memBudget = 0; for (level=MIN(compressionLevel, 1); level<=compressionLevel; level++) { size_t const newMB = ZSTD_estimateCStreamSize_internal(level); if (newMB > memBudget) memBudget = newMB; } return memBudget; } /* ZSTD_getFrameProgression(): * tells how much data has been consumed (input) and produced (output) for current frame. * able to count progression inside worker threads (non-blocking mode). */ ZSTD_frameProgression ZSTD_getFrameProgression(const ZSTD_CCtx* cctx) { #ifdef ZSTD_MULTITHREAD if (cctx->appliedParams.nbWorkers > 0) { return ZSTDMT_getFrameProgression(cctx->mtctx); } #endif { ZSTD_frameProgression fp; size_t const buffered = (cctx->inBuff == NULL) ? 0 : cctx->inBuffPos - cctx->inToCompress; if (buffered) assert(cctx->inBuffPos >= cctx->inToCompress); assert(buffered <= ZSTD_BLOCKSIZE_MAX); fp.ingested = cctx->consumedSrcSize + buffered; fp.consumed = cctx->consumedSrcSize; fp.produced = cctx->producedCSize; fp.flushed = cctx->producedCSize; /* simplified; some data might still be left within streaming output buffer */ fp.currentJobID = 0; fp.nbActiveWorkers = 0; return fp; } } /*! ZSTD_toFlushNow() * Only useful for multithreading scenarios currently (nbWorkers >= 1). */ size_t ZSTD_toFlushNow(ZSTD_CCtx* cctx) { #ifdef ZSTD_MULTITHREAD if (cctx->appliedParams.nbWorkers > 0) { return ZSTDMT_toFlushNow(cctx->mtctx); } #endif (void)cctx; return 0; /* over-simplification; could also check if context is currently running in streaming mode, and in which case, report how many bytes are left to be flushed within output buffer */ } static void ZSTD_assertEqualCParams(ZSTD_compressionParameters cParams1, ZSTD_compressionParameters cParams2) { (void)cParams1; (void)cParams2; assert(cParams1.windowLog == cParams2.windowLog); assert(cParams1.chainLog == cParams2.chainLog); assert(cParams1.hashLog == cParams2.hashLog); assert(cParams1.searchLog == cParams2.searchLog); assert(cParams1.minMatch == cParams2.minMatch); assert(cParams1.targetLength == cParams2.targetLength); assert(cParams1.strategy == cParams2.strategy); } void ZSTD_reset_compressedBlockState(ZSTD_compressedBlockState_t* bs) { int i; for (i = 0; i < ZSTD_REP_NUM; ++i) bs->rep[i] = repStartValue[i]; bs->entropy.huf.repeatMode = HUF_repeat_none; bs->entropy.fse.offcode_repeatMode = FSE_repeat_none; bs->entropy.fse.matchlength_repeatMode = FSE_repeat_none; bs->entropy.fse.litlength_repeatMode = FSE_repeat_none; } /*! ZSTD_invalidateMatchState() * Invalidate all the matches in the match finder tables. * Requires nextSrc and base to be set (can be NULL). */ static void ZSTD_invalidateMatchState(ZSTD_matchState_t* ms) { ZSTD_window_clear(&ms->window); ms->nextToUpdate = ms->window.dictLimit; ms->loadedDictEnd = 0; ms->opt.litLengthSum = 0; /* force reset of btopt stats */ ms->dictMatchState = NULL; } /** * Indicates whether this compression proceeds directly from user-provided * source buffer to user-provided destination buffer (ZSTDb_not_buffered), or * whether the context needs to buffer the input/output (ZSTDb_buffered). 
*/ typedef enum { ZSTDb_not_buffered, ZSTDb_buffered } ZSTD_buffered_policy_e; /** * Controls, for this matchState reset, whether the tables need to be cleared / * prepared for the coming compression (ZSTDcrp_makeClean), or whether the * tables can be left unclean (ZSTDcrp_leaveDirty), because we know that a * subsequent operation will overwrite the table space anyways (e.g., copying * the matchState contents in from a CDict). */ typedef enum { ZSTDcrp_makeClean, ZSTDcrp_leaveDirty } ZSTD_compResetPolicy_e; /** * Controls, for this matchState reset, whether indexing can continue where it * left off (ZSTDirp_continue), or whether it needs to be restarted from zero * (ZSTDirp_reset). */ typedef enum { ZSTDirp_continue, ZSTDirp_reset } ZSTD_indexResetPolicy_e; typedef enum { ZSTD_resetTarget_CDict, ZSTD_resetTarget_CCtx } ZSTD_resetTarget_e; static size_t ZSTD_reset_matchState(ZSTD_matchState_t* ms, ZSTD_cwksp* ws, const ZSTD_compressionParameters* cParams, const ZSTD_compResetPolicy_e crp, const ZSTD_indexResetPolicy_e forceResetIndex, const ZSTD_resetTarget_e forWho) { size_t const chainSize = (cParams->strategy == ZSTD_fast) ? 0 : ((size_t)1 << cParams->chainLog); size_t const hSize = ((size_t)1) << cParams->hashLog; U32 const hashLog3 = ((forWho == ZSTD_resetTarget_CCtx) && cParams->minMatch==3) ? MIN(ZSTD_HASHLOG3_MAX, cParams->windowLog) : 0; size_t const h3Size = hashLog3 ? ((size_t)1) << hashLog3 : 0; DEBUGLOG(4, "reset indices : %u", forceResetIndex == ZSTDirp_reset); if (forceResetIndex == ZSTDirp_reset) { ZSTD_window_init(&ms->window); ZSTD_cwksp_mark_tables_dirty(ws); } ms->hashLog3 = hashLog3; ZSTD_invalidateMatchState(ms); assert(!ZSTD_cwksp_reserve_failed(ws)); /* check that allocation hasn't already failed */ ZSTD_cwksp_clear_tables(ws); DEBUGLOG(5, "reserving table space"); /* table Space */ ms->hashTable = (U32*)ZSTD_cwksp_reserve_table(ws, hSize * sizeof(U32)); ms->chainTable = (U32*)ZSTD_cwksp_reserve_table(ws, chainSize * sizeof(U32)); ms->hashTable3 = (U32*)ZSTD_cwksp_reserve_table(ws, h3Size * sizeof(U32)); RETURN_ERROR_IF(ZSTD_cwksp_reserve_failed(ws), memory_allocation, "failed a workspace allocation in ZSTD_reset_matchState"); DEBUGLOG(4, "reset table : %u", crp!=ZSTDcrp_leaveDirty); if (crp!=ZSTDcrp_leaveDirty) { /* reset tables only */ ZSTD_cwksp_clean_tables(ws); } /* opt parser space */ if ((forWho == ZSTD_resetTarget_CCtx) && (cParams->strategy >= ZSTD_btopt)) { DEBUGLOG(4, "reserving optimal parser space"); ms->opt.litFreq = (unsigned*)ZSTD_cwksp_reserve_aligned(ws, (1<<Litbits) * sizeof(unsigned)); ms->opt.litLengthFreq = (unsigned*)ZSTD_cwksp_reserve_aligned(ws, (MaxLL+1) * sizeof(unsigned)); ms->opt.matchLengthFreq = (unsigned*)ZSTD_cwksp_reserve_aligned(ws, (MaxML+1) * sizeof(unsigned)); ms->opt.offCodeFreq = (unsigned*)ZSTD_cwksp_reserve_aligned(ws, (MaxOff+1) * sizeof(unsigned)); ms->opt.matchTable = (ZSTD_match_t*)ZSTD_cwksp_reserve_aligned(ws, (ZSTD_OPT_NUM+1) * sizeof(ZSTD_match_t)); ms->opt.priceTable = (ZSTD_optimal_t*)ZSTD_cwksp_reserve_aligned(ws, (ZSTD_OPT_NUM+1) * sizeof(ZSTD_optimal_t)); } ms->cParams = *cParams; RETURN_ERROR_IF(ZSTD_cwksp_reserve_failed(ws), memory_allocation, "failed a workspace allocation in ZSTD_reset_matchState"); return 0; } /* ZSTD_indexTooCloseToMax() : * minor optimization : prefer memset() rather than reduceIndex() * which is measurably slow in some circumstances (reported for Visual Studio).
* Works when re-using a context for a lot of smallish inputs : * if all inputs are smaller than ZSTD_INDEXOVERFLOW_MARGIN, * memset() will be triggered before reduceIndex(). */ #define ZSTD_INDEXOVERFLOW_MARGIN (16 MB) static int ZSTD_indexTooCloseToMax(ZSTD_window_t w) { return (size_t)(w.nextSrc - w.base) > (ZSTD_CURRENT_MAX - ZSTD_INDEXOVERFLOW_MARGIN); } /*! ZSTD_resetCCtx_internal() : note : `params` are assumed fully validated at this stage */ static size_t ZSTD_resetCCtx_internal(ZSTD_CCtx* zc, ZSTD_CCtx_params params, U64 const pledgedSrcSize, ZSTD_compResetPolicy_e const crp, ZSTD_buffered_policy_e const zbuff) { ZSTD_cwksp* const ws = &zc->workspace; DEBUGLOG(4, "ZSTD_resetCCtx_internal: pledgedSrcSize=%u, wlog=%u", (U32)pledgedSrcSize, params.cParams.windowLog); assert(!ZSTD_isError(ZSTD_checkCParams(params.cParams))); zc->isFirstBlock = 1; if (params.ldmParams.enableLdm) { /* Adjust long distance matching parameters */ ZSTD_ldm_adjustParameters(&params.ldmParams, &params.cParams); assert(params.ldmParams.hashLog >= params.ldmParams.bucketSizeLog); assert(params.ldmParams.hashRateLog < 32); zc->ldmState.hashPower = ZSTD_rollingHash_primePower(params.ldmParams.minMatchLength); } { size_t const windowSize = MAX(1, (size_t)MIN(((U64)1 << params.cParams.windowLog), pledgedSrcSize)); size_t const blockSize = MIN(ZSTD_BLOCKSIZE_MAX, windowSize); U32 const divider = (params.cParams.minMatch==3) ? 3 : 4; size_t const maxNbSeq = blockSize / divider; size_t const tokenSpace = ZSTD_cwksp_alloc_size(WILDCOPY_OVERLENGTH + blockSize) + ZSTD_cwksp_alloc_size(maxNbSeq * sizeof(seqDef)) + 3 * ZSTD_cwksp_alloc_size(maxNbSeq * sizeof(BYTE)); size_t const buffOutSize = (zbuff==ZSTDb_buffered) ? ZSTD_compressBound(blockSize)+1 : 0; size_t const buffInSize = (zbuff==ZSTDb_buffered) ? windowSize + blockSize : 0; size_t const matchStateSize = ZSTD_sizeof_matchState(&params.cParams, /* forCCtx */ 1); size_t const maxNbLdmSeq = ZSTD_ldm_getMaxNbSeq(params.ldmParams, blockSize); ZSTD_indexResetPolicy_e needsIndexReset = zc->initialized ? ZSTDirp_continue : ZSTDirp_reset; if (ZSTD_indexTooCloseToMax(zc->blockState.matchState.window)) { needsIndexReset = ZSTDirp_reset; } if (!zc->staticSize) ZSTD_cwksp_bump_oversized_duration(ws, 0); /* Check if workspace is large enough, alloc a new one if needed */ { size_t const cctxSpace = zc->staticSize ?
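/* Worked example with hypothetical parameters: for windowLog = 22 and an unknown
 * pledged source size, windowSize = 4 MB and blockSize = MIN(ZSTD_BLOCKSIZE_MAX, 4 MB)
 * = 128 KB, so a buffered (streaming) reset reserves buffInSize = 4 MB + 128 KB for
 * input and ZSTD_compressBound(128 KB) + 1 for output inside the workspace sized just
 * below. */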
ZSTD_cwksp_alloc_size(sizeof(ZSTD_CCtx)) : 0; size_t const entropySpace = ZSTD_cwksp_alloc_size(HUF_WORKSPACE_SIZE); size_t const blockStateSpace = 2 * ZSTD_cwksp_alloc_size(sizeof(ZSTD_compressedBlockState_t)); size_t const bufferSpace = ZSTD_cwksp_alloc_size(buffInSize) + ZSTD_cwksp_alloc_size(buffOutSize); size_t const ldmSpace = ZSTD_ldm_getTableSize(params.ldmParams); size_t const ldmSeqSpace = ZSTD_cwksp_alloc_size(maxNbLdmSeq * sizeof(rawSeq)); size_t const neededSpace = cctxSpace + entropySpace + blockStateSpace + ldmSpace + ldmSeqSpace + matchStateSize + tokenSpace + bufferSpace; int const workspaceTooSmall = ZSTD_cwksp_sizeof(ws) < neededSpace; int const workspaceWasteful = ZSTD_cwksp_check_wasteful(ws, neededSpace); DEBUGLOG(4, "Need %zuKB workspace, including %zuKB for match state, and %zuKB for buffers", neededSpace>>10, matchStateSize>>10, bufferSpace>>10); DEBUGLOG(4, "windowSize: %zu - blockSize: %zu", windowSize, blockSize); if (workspaceTooSmall || workspaceWasteful) { DEBUGLOG(4, "Resize workspaceSize from %zuKB to %zuKB", ZSTD_cwksp_sizeof(ws) >> 10, neededSpace >> 10); RETURN_ERROR_IF(zc->staticSize, memory_allocation, "static cctx : no resize"); needsIndexReset = ZSTDirp_reset; ZSTD_cwksp_free(ws, zc->customMem); FORWARD_IF_ERROR(ZSTD_cwksp_create(ws, neededSpace, zc->customMem), ""); DEBUGLOG(5, "reserving object space"); /* Statically sized space. * entropyWorkspace never moves, * though prev/next block swap places */ assert(ZSTD_cwksp_check_available(ws, 2 * sizeof(ZSTD_compressedBlockState_t))); zc->blockState.prevCBlock = (ZSTD_compressedBlockState_t*) ZSTD_cwksp_reserve_object(ws, sizeof(ZSTD_compressedBlockState_t)); RETURN_ERROR_IF(zc->blockState.prevCBlock == NULL, memory_allocation, "couldn't allocate prevCBlock"); zc->blockState.nextCBlock = (ZSTD_compressedBlockState_t*) ZSTD_cwksp_reserve_object(ws, sizeof(ZSTD_compressedBlockState_t)); RETURN_ERROR_IF(zc->blockState.nextCBlock == NULL, memory_allocation, "couldn't allocate nextCBlock"); zc->entropyWorkspace = (U32*) ZSTD_cwksp_reserve_object(ws, HUF_WORKSPACE_SIZE); RETURN_ERROR_IF(zc->blockState.nextCBlock == NULL, memory_allocation, "couldn't allocate entropyWorkspace"); } } ZSTD_cwksp_clear(ws); /* init params */ zc->appliedParams = params; zc->blockState.matchState.cParams = params.cParams; zc->pledgedSrcSizePlusOne = pledgedSrcSize+1; zc->consumedSrcSize = 0; zc->producedCSize = 0; if (pledgedSrcSize == ZSTD_CONTENTSIZE_UNKNOWN) zc->appliedParams.fParams.contentSizeFlag = 0; DEBUGLOG(4, "pledged content size : %u ; flag : %u", (unsigned)pledgedSrcSize, zc->appliedParams.fParams.contentSizeFlag); zc->blockSize = blockSize; XXH64_reset(&zc->xxhState, 0); zc->stage = ZSTDcs_init; zc->dictID = 0; ZSTD_reset_compressedBlockState(zc->blockState.prevCBlock); /* ZSTD_wildcopy() is used to copy into the literals buffer, * so we have to oversize the buffer by WILDCOPY_OVERLENGTH bytes. */ zc->seqStore.litStart = ZSTD_cwksp_reserve_buffer(ws, blockSize + WILDCOPY_OVERLENGTH); zc->seqStore.maxNbLit = blockSize; /* buffers */ zc->inBuffSize = buffInSize; zc->inBuff = (char*)ZSTD_cwksp_reserve_buffer(ws, buffInSize); zc->outBuffSize = buffOutSize; zc->outBuff = (char*)ZSTD_cwksp_reserve_buffer(ws, buffOutSize); /* ldm bucketOffsets table */ if (params.ldmParams.enableLdm) { /* TODO: avoid memset? 
*/ size_t const ldmBucketSize = ((size_t)1) << (params.ldmParams.hashLog - params.ldmParams.bucketSizeLog); zc->ldmState.bucketOffsets = ZSTD_cwksp_reserve_buffer(ws, ldmBucketSize); memset(zc->ldmState.bucketOffsets, 0, ldmBucketSize); } /* sequences storage */ ZSTD_referenceExternalSequences(zc, NULL, 0); zc->seqStore.maxNbSeq = maxNbSeq; zc->seqStore.llCode = ZSTD_cwksp_reserve_buffer(ws, maxNbSeq * sizeof(BYTE)); zc->seqStore.mlCode = ZSTD_cwksp_reserve_buffer(ws, maxNbSeq * sizeof(BYTE)); zc->seqStore.ofCode = ZSTD_cwksp_reserve_buffer(ws, maxNbSeq * sizeof(BYTE)); zc->seqStore.sequencesStart = (seqDef*)ZSTD_cwksp_reserve_aligned(ws, maxNbSeq * sizeof(seqDef)); FORWARD_IF_ERROR(ZSTD_reset_matchState( &zc->blockState.matchState, ws, &params.cParams, crp, needsIndexReset, ZSTD_resetTarget_CCtx), ""); /* ldm hash table */ if (params.ldmParams.enableLdm) { /* TODO: avoid memset? */ size_t const ldmHSize = ((size_t)1) << params.ldmParams.hashLog; zc->ldmState.hashTable = (ldmEntry_t*)ZSTD_cwksp_reserve_aligned(ws, ldmHSize * sizeof(ldmEntry_t)); memset(zc->ldmState.hashTable, 0, ldmHSize * sizeof(ldmEntry_t)); zc->ldmSequences = (rawSeq*)ZSTD_cwksp_reserve_aligned(ws, maxNbLdmSeq * sizeof(rawSeq)); zc->maxNbLdmSequences = maxNbLdmSeq; ZSTD_window_init(&zc->ldmState.window); ZSTD_window_clear(&zc->ldmState.window); zc->ldmState.loadedDictEnd = 0; } DEBUGLOG(3, "wksp: finished allocating, %zd bytes remain available", ZSTD_cwksp_available_space(ws)); zc->initialized = 1; return 0; } } /* ZSTD_invalidateRepCodes() : * ensures next compression will not use repcodes from previous block. * Note : only works with regular variant; * do not use with extDict variant ! */ void ZSTD_invalidateRepCodes(ZSTD_CCtx* cctx) { int i; for (i=0; i<ZSTD_REP_NUM; i++) cctx->blockState.prevCBlock->rep[i] = 0; assert(!ZSTD_window_hasExtDict(cctx->blockState.matchState.window)); } /* These are the approximate sizes for each strategy past which copying the * dictionary tables into the working context is faster than using them * in-place. */ static const size_t attachDictSizeCutoffs[ZSTD_STRATEGY_MAX+1] = { 8 KB, /* unused */ 8 KB, /* ZSTD_fast */ 16 KB, /* ZSTD_dfast */ 32 KB, /* ZSTD_greedy */ 32 KB, /* ZSTD_lazy */ 32 KB, /* ZSTD_lazy2 */ 32 KB, /* ZSTD_btlazy2 */ 32 KB, /* ZSTD_btopt */ 8 KB, /* ZSTD_btultra */ 8 KB /* ZSTD_btultra2 */ }; static int ZSTD_shouldAttachDict(const ZSTD_CDict* cdict, const ZSTD_CCtx_params* params, U64 pledgedSrcSize) { size_t cutoff = attachDictSizeCutoffs[cdict->matchState.cParams.strategy]; return ( pledgedSrcSize <= cutoff || pledgedSrcSize == ZSTD_CONTENTSIZE_UNKNOWN || params->attachDictPref == ZSTD_dictForceAttach ) && params->attachDictPref != ZSTD_dictForceCopy && !params->forceWindow; /* dictMatchState isn't correctly * handled in _enforceMaxDist */ } static size_t ZSTD_resetCCtx_byAttachingCDict(ZSTD_CCtx* cctx, const ZSTD_CDict* cdict, ZSTD_CCtx_params params, U64 pledgedSrcSize, ZSTD_buffered_policy_e zbuff) { { const ZSTD_compressionParameters* const cdict_cParams = &cdict->matchState.cParams; unsigned const windowLog = params.cParams.windowLog; assert(windowLog != 0); /* Resize working context table params for input only, since the dict * has its own tables. */ /* pledgeSrcSize == 0 means 0!
*/ params.cParams = ZSTD_adjustCParams_internal(*cdict_cParams, pledgedSrcSize, 0); params.cParams.windowLog = windowLog; FORWARD_IF_ERROR(ZSTD_resetCCtx_internal(cctx, params, pledgedSrcSize, ZSTDcrp_makeClean, zbuff), ""); assert(cctx->appliedParams.cParams.strategy == cdict_cParams->strategy); } { const U32 cdictEnd = (U32)( cdict->matchState.window.nextSrc - cdict->matchState.window.base); const U32 cdictLen = cdictEnd - cdict->matchState.window.dictLimit; if (cdictLen == 0) { /* don't even attach dictionaries with no contents */ DEBUGLOG(4, "skipping attaching empty dictionary"); } else { DEBUGLOG(4, "attaching dictionary into context"); cctx->blockState.matchState.dictMatchState = &cdict->matchState; /* prep working match state so dict matches never have negative indices * when they are translated to the working context's index space. */ if (cctx->blockState.matchState.window.dictLimit < cdictEnd) { cctx->blockState.matchState.window.nextSrc = cctx->blockState.matchState.window.base + cdictEnd; ZSTD_window_clear(&cctx->blockState.matchState.window); } /* loadedDictEnd is expressed within the referential of the active context */ cctx->blockState.matchState.loadedDictEnd = cctx->blockState.matchState.window.dictLimit; } } cctx->dictID = cdict->dictID; /* copy block state */ memcpy(cctx->blockState.prevCBlock, &cdict->cBlockState, sizeof(cdict->cBlockState)); return 0; } static size_t ZSTD_resetCCtx_byCopyingCDict(ZSTD_CCtx* cctx, const ZSTD_CDict* cdict, ZSTD_CCtx_params params, U64 pledgedSrcSize, ZSTD_buffered_policy_e zbuff) { const ZSTD_compressionParameters *cdict_cParams = &cdict->matchState.cParams; DEBUGLOG(4, "copying dictionary into context"); { unsigned const windowLog = params.cParams.windowLog; assert(windowLog != 0); /* Copy only compression parameters related to tables. */ params.cParams = *cdict_cParams; params.cParams.windowLog = windowLog; FORWARD_IF_ERROR(ZSTD_resetCCtx_internal(cctx, params, pledgedSrcSize, ZSTDcrp_leaveDirty, zbuff), ""); assert(cctx->appliedParams.cParams.strategy == cdict_cParams->strategy); assert(cctx->appliedParams.cParams.hashLog == cdict_cParams->hashLog); assert(cctx->appliedParams.cParams.chainLog == cdict_cParams->chainLog); } ZSTD_cwksp_mark_tables_dirty(&cctx->workspace); /* copy tables */ { size_t const chainSize = (cdict_cParams->strategy == ZSTD_fast) ? 0 : ((size_t)1 << cdict_cParams->chainLog); size_t const hSize = (size_t)1 << cdict_cParams->hashLog; memcpy(cctx->blockState.matchState.hashTable, cdict->matchState.hashTable, hSize * sizeof(U32)); memcpy(cctx->blockState.matchState.chainTable, cdict->matchState.chainTable, chainSize * sizeof(U32)); } /* Zero the hashTable3, since the cdict never fills it */ { int const h3log = cctx->blockState.matchState.hashLog3; size_t const h3Size = h3log ? 
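/* Illustrative sketch, not from the upstream zstd sources: the attach-vs-copy choice
 * above is internal. From the API side, a caller builds a CDict once and references it,
 * and may force one behaviour through the experimental ZSTD_c_forceAttachDict parameter
 * (requires ZSTD_STATIC_LINKING_ONLY). dictBuffer, dst and src are hypothetical caller
 * buffers; error checks are elided.
 *
 *   ZSTD_CDict* const cdict = ZSTD_createCDict(dictBuffer, dictSize, 3);
 *   ZSTD_CCtx*  const cctx  = ZSTD_createCCtx();
 *   ZSTD_CCtx_setParameter(cctx, ZSTD_c_forceAttachDict, ZSTD_dictForceCopy);
 *   ZSTD_CCtx_refCDict(cctx, cdict);
 *   { size_t const cSize = ZSTD_compress2(cctx, dst, dstCapacity, src, srcSize); (void)cSize; }
 *   ZSTD_freeCCtx(cctx);
 *   ZSTD_freeCDict(cdict);
 */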
((size_t)1 << h3log) : 0; assert(cdict->matchState.hashLog3 == 0); memset(cctx->blockState.matchState.hashTable3, 0, h3Size * sizeof(U32)); } ZSTD_cwksp_mark_tables_clean(&cctx->workspace); /* copy dictionary offsets */ { ZSTD_matchState_t const* srcMatchState = &cdict->matchState; ZSTD_matchState_t* dstMatchState = &cctx->blockState.matchState; dstMatchState->window = srcMatchState->window; dstMatchState->nextToUpdate = srcMatchState->nextToUpdate; dstMatchState->loadedDictEnd= srcMatchState->loadedDictEnd; } cctx->dictID = cdict->dictID; /* copy block state */ memcpy(cctx->blockState.prevCBlock, &cdict->cBlockState, sizeof(cdict->cBlockState)); return 0; } /* We have a choice between copying the dictionary context into the working * context, or referencing the dictionary context from the working context * in-place. We decide here which strategy to use. */ static size_t ZSTD_resetCCtx_usingCDict(ZSTD_CCtx* cctx, const ZSTD_CDict* cdict, const ZSTD_CCtx_params* params, U64 pledgedSrcSize, ZSTD_buffered_policy_e zbuff) { DEBUGLOG(4, "ZSTD_resetCCtx_usingCDict (pledgedSrcSize=%u)", (unsigned)pledgedSrcSize); if (ZSTD_shouldAttachDict(cdict, params, pledgedSrcSize)) { return ZSTD_resetCCtx_byAttachingCDict( cctx, cdict, *params, pledgedSrcSize, zbuff); } else { return ZSTD_resetCCtx_byCopyingCDict( cctx, cdict, *params, pledgedSrcSize, zbuff); } } /*! ZSTD_copyCCtx_internal() : * Duplicate an existing context `srcCCtx` into another one `dstCCtx`. * Only works during stage ZSTDcs_init (i.e. after creation, but before first call to ZSTD_compressContinue()). * The "context", in this case, refers to the hash and chain tables, * entropy tables, and dictionary references. * `windowLog` value is enforced if != 0, otherwise value is copied from srcCCtx. * @return : 0, or an error code */ static size_t ZSTD_copyCCtx_internal(ZSTD_CCtx* dstCCtx, const ZSTD_CCtx* srcCCtx, ZSTD_frameParameters fParams, U64 pledgedSrcSize, ZSTD_buffered_policy_e zbuff) { DEBUGLOG(5, "ZSTD_copyCCtx_internal"); RETURN_ERROR_IF(srcCCtx->stage!=ZSTDcs_init, stage_wrong, "Can't copy a ctx that's not in init stage."); memcpy(&dstCCtx->customMem, &srcCCtx->customMem, sizeof(ZSTD_customMem)); { ZSTD_CCtx_params params = dstCCtx->requestedParams; /* Copy only compression parameters related to tables. */ params.cParams = srcCCtx->appliedParams.cParams; params.fParams = fParams; ZSTD_resetCCtx_internal(dstCCtx, params, pledgedSrcSize, ZSTDcrp_leaveDirty, zbuff); assert(dstCCtx->appliedParams.cParams.windowLog == srcCCtx->appliedParams.cParams.windowLog); assert(dstCCtx->appliedParams.cParams.strategy == srcCCtx->appliedParams.cParams.strategy); assert(dstCCtx->appliedParams.cParams.hashLog == srcCCtx->appliedParams.cParams.hashLog); assert(dstCCtx->appliedParams.cParams.chainLog == srcCCtx->appliedParams.cParams.chainLog); assert(dstCCtx->blockState.matchState.hashLog3 == srcCCtx->blockState.matchState.hashLog3); } ZSTD_cwksp_mark_tables_dirty(&dstCCtx->workspace); /* copy tables */ { size_t const chainSize = (srcCCtx->appliedParams.cParams.strategy == ZSTD_fast) ? 0 : ((size_t)1 << srcCCtx->appliedParams.cParams.chainLog); size_t const hSize = (size_t)1 << srcCCtx->appliedParams.cParams.hashLog; int const h3log = srcCCtx->blockState.matchState.hashLog3; size_t const h3Size = h3log ? 
((size_t)1 << h3log) : 0; memcpy(dstCCtx->blockState.matchState.hashTable, srcCCtx->blockState.matchState.hashTable, hSize * sizeof(U32)); memcpy(dstCCtx->blockState.matchState.chainTable, srcCCtx->blockState.matchState.chainTable, chainSize * sizeof(U32)); memcpy(dstCCtx->blockState.matchState.hashTable3, srcCCtx->blockState.matchState.hashTable3, h3Size * sizeof(U32)); } ZSTD_cwksp_mark_tables_clean(&dstCCtx->workspace); /* copy dictionary offsets */ { const ZSTD_matchState_t* srcMatchState = &srcCCtx->blockState.matchState; ZSTD_matchState_t* dstMatchState = &dstCCtx->blockState.matchState; dstMatchState->window = srcMatchState->window; dstMatchState->nextToUpdate = srcMatchState->nextToUpdate; dstMatchState->loadedDictEnd= srcMatchState->loadedDictEnd; } dstCCtx->dictID = srcCCtx->dictID; /* copy block state */ memcpy(dstCCtx->blockState.prevCBlock, srcCCtx->blockState.prevCBlock, sizeof(*srcCCtx->blockState.prevCBlock)); return 0; } /*! ZSTD_copyCCtx() : * Duplicate an existing context `srcCCtx` into another one `dstCCtx`. * Only works during stage ZSTDcs_init (i.e. after creation, but before first call to ZSTD_compressContinue()). * pledgedSrcSize==0 means "unknown". * @return : 0, or an error code */ size_t ZSTD_copyCCtx(ZSTD_CCtx* dstCCtx, const ZSTD_CCtx* srcCCtx, unsigned long long pledgedSrcSize) { ZSTD_frameParameters fParams = { 1 /*content*/, 0 /*checksum*/, 0 /*noDictID*/ }; ZSTD_buffered_policy_e const zbuff = (ZSTD_buffered_policy_e)(srcCCtx->inBuffSize>0); ZSTD_STATIC_ASSERT((U32)ZSTDb_buffered==1); if (pledgedSrcSize==0) pledgedSrcSize = ZSTD_CONTENTSIZE_UNKNOWN; fParams.contentSizeFlag = (pledgedSrcSize != ZSTD_CONTENTSIZE_UNKNOWN); return ZSTD_copyCCtx_internal(dstCCtx, srcCCtx, fParams, pledgedSrcSize, zbuff); } #define ZSTD_ROWSIZE 16 /*! ZSTD_reduceTable() : * reduce table indexes by `reducerValue`, or squash to zero. * PreserveMark preserves "unsorted mark" for btlazy2 strategy. * It must be set to a clear 0/1 value, to remove branch during inlining. * Presume table size is a multiple of ZSTD_ROWSIZE * to help auto-vectorization */ FORCE_INLINE_TEMPLATE void ZSTD_reduceTable_internal (U32* const table, U32 const size, U32 const reducerValue, int const preserveMark) { int const nbRows = (int)size / ZSTD_ROWSIZE; int cellNb = 0; int rowNb; assert((size & (ZSTD_ROWSIZE-1)) == 0); /* multiple of ZSTD_ROWSIZE */ assert(size < (1U<<31)); /* can be casted to int */ #if defined (MEMORY_SANITIZER) && !defined (ZSTD_MSAN_DONT_POISON_WORKSPACE) /* To validate that the table re-use logic is sound, and that we don't * access table space that we haven't cleaned, we re-"poison" the table * space every time we mark it dirty. * * This function however is intended to operate on those dirty tables and * re-clean them. So when this function is used correctly, we can unpoison * the memory it operated on. This introduces a blind spot though, since * if we now try to operate on __actually__ poisoned memory, we will not * detect that. 
     */
    __msan_unpoison(table, size * sizeof(U32));
#endif

    for (rowNb=0 ; rowNb < nbRows ; rowNb++) {
        int column;
        for (column=0; column<ZSTD_ROWSIZE; column++) {
            if (preserveMark) {
                U32 const adder = (table[cellNb] == ZSTD_DUBT_UNSORTED_MARK) ? reducerValue : 0;
                table[cellNb] += adder;
            }
            if (table[cellNb] < reducerValue) table[cellNb] = 0;
            else table[cellNb] -= reducerValue;
            cellNb++;
    }   }
}

static void ZSTD_reduceTable(U32* const table, U32 const size, U32 const reducerValue)
{
    ZSTD_reduceTable_internal(table, size, reducerValue, 0);
}

static void ZSTD_reduceTable_btlazy2(U32* const table, U32 const size, U32 const reducerValue)
{
    ZSTD_reduceTable_internal(table, size, reducerValue, 1);
}

/*! ZSTD_reduceIndex() :
*   rescale all indexes to avoid future overflow (indexes are U32) */
static void ZSTD_reduceIndex (ZSTD_matchState_t* ms, ZSTD_CCtx_params const* params, const U32 reducerValue)
{
    {   U32 const hSize = (U32)1 << params->cParams.hashLog;
        ZSTD_reduceTable(ms->hashTable, hSize, reducerValue);
    }

    if (params->cParams.strategy != ZSTD_fast) {
        U32 const chainSize = (U32)1 << params->cParams.chainLog;
        if (params->cParams.strategy == ZSTD_btlazy2)
            ZSTD_reduceTable_btlazy2(ms->chainTable, chainSize, reducerValue);
        else
            ZSTD_reduceTable(ms->chainTable, chainSize, reducerValue);
    }

    if (ms->hashLog3) {
        U32 const h3Size = (U32)1 << ms->hashLog3;
        ZSTD_reduceTable(ms->hashTable3, h3Size, reducerValue);
    }
}


/*-*******************************************************
*  Block entropic compression
*********************************************************/

/* See doc/zstd_compression_format.md for detailed format description */

void ZSTD_seqToCodes(const seqStore_t* seqStorePtr)
{
    const seqDef* const sequences = seqStorePtr->sequencesStart;
    BYTE* const llCodeTable = seqStorePtr->llCode;
    BYTE* const ofCodeTable = seqStorePtr->ofCode;
    BYTE* const mlCodeTable = seqStorePtr->mlCode;
    U32 const nbSeq = (U32)(seqStorePtr->sequences - seqStorePtr->sequencesStart);
    U32 u;
    assert(nbSeq <= seqStorePtr->maxNbSeq);
    for (u=0; u<nbSeq; u++) {
        U32 const llv = sequences[u].litLength;
        U32 const mlv = sequences[u].matchLength;
        llCodeTable[u] = (BYTE)ZSTD_LLcode(llv);
        ofCodeTable[u] = (BYTE)ZSTD_highbit32(sequences[u].offset);
        mlCodeTable[u] = (BYTE)ZSTD_MLcode(mlv);
    }
    if (seqStorePtr->longLengthID==1)
        llCodeTable[seqStorePtr->longLengthPos] = MaxLL;
    if (seqStorePtr->longLengthID==2)
        mlCodeTable[seqStorePtr->longLengthPos] = MaxML;
}

/* ZSTD_useTargetCBlockSize():
 * Returns if target compressed block size param is being used.
 * If used, compression will do best effort to make a compressed block size to be around targetCBlockSize.
 * Returns 1 if true, 0 otherwise. */
static int ZSTD_useTargetCBlockSize(const ZSTD_CCtx_params* cctxParams)
{
    DEBUGLOG(5, "ZSTD_useTargetCBlockSize (targetCBlockSize=%zu)", cctxParams->targetCBlockSize);
    return (cctxParams->targetCBlockSize != 0);
}

/* ZSTD_compressSequences_internal():
 * actually compresses both literals and sequences */
MEM_STATIC size_t
ZSTD_compressSequences_internal(seqStore_t* seqStorePtr,
                          const ZSTD_entropyCTables_t* prevEntropy,
                                ZSTD_entropyCTables_t* nextEntropy,
                          const ZSTD_CCtx_params* cctxParams,
                                void* dst, size_t dstCapacity,
                                void* entropyWorkspace, size_t entropyWkspSize,
                          const int bmi2)
{
    const int longOffsets = cctxParams->cParams.windowLog > STREAM_ACCUMULATOR_MIN;
    ZSTD_strategy const strategy = cctxParams->cParams.strategy;
    unsigned count[MaxSeq+1];
    FSE_CTable* CTable_LitLength = nextEntropy->fse.litlengthCTable;
    FSE_CTable* CTable_OffsetBits = nextEntropy->fse.offcodeCTable;
    FSE_CTable* CTable_MatchLength = nextEntropy->fse.matchlengthCTable;
    U32 LLtype, Offtype, MLtype;   /* compressed, raw or rle */
    const seqDef* const sequences = seqStorePtr->sequencesStart;
    const BYTE* const ofCodeTable = seqStorePtr->ofCode;
    const BYTE* const llCodeTable = seqStorePtr->llCode;
    const BYTE* const mlCodeTable = seqStorePtr->mlCode;
    BYTE* const ostart = (BYTE*)dst;
    BYTE* const oend = ostart + dstCapacity;
    BYTE* op = ostart;
    size_t const nbSeq = (size_t)(seqStorePtr->sequences - seqStorePtr->sequencesStart);
    BYTE* seqHead;
    BYTE* lastNCount = NULL;

    DEBUGLOG(5, "ZSTD_compressSequences_internal (nbSeq=%zu)", nbSeq);
    ZSTD_STATIC_ASSERT(HUF_WORKSPACE_SIZE >= (1<<MAX(MLFSELog,LLFSELog)));

    /* Compress literals */
    {   const BYTE* const literals = seqStorePtr->litStart;
        size_t const litSize = (size_t)(seqStorePtr->lit - literals);
        size_t const cSize = ZSTD_compressLiterals(
                                    &prevEntropy->huf, &nextEntropy->huf,
                                    cctxParams->cParams.strategy,
                                    ZSTD_disableLiteralsCompression(cctxParams),
                                    op, dstCapacity,
                                    literals, litSize,
                                    entropyWorkspace, entropyWkspSize,
                                    bmi2);
        FORWARD_IF_ERROR(cSize, "ZSTD_compressLiterals failed");
        assert(cSize <=
dstCapacity); op += cSize; } /* Sequences Header */ RETURN_ERROR_IF((oend-op) < 3 /*max nbSeq Size*/ + 1 /*seqHead*/, dstSize_tooSmall, "Can't fit seq hdr in output buf!"); if (nbSeq < 128) { *op++ = (BYTE)nbSeq; } else if (nbSeq < LONGNBSEQ) { op[0] = (BYTE)((nbSeq>>8) + 0x80); op[1] = (BYTE)nbSeq; op+=2; } else { op[0]=0xFF; MEM_writeLE16(op+1, (U16)(nbSeq - LONGNBSEQ)); op+=3; } assert(op <= oend); if (nbSeq==0) { /* Copy the old tables over as if we repeated them */ memcpy(&nextEntropy->fse, &prevEntropy->fse, sizeof(prevEntropy->fse)); return (size_t)(op - ostart); } /* seqHead : flags for FSE encoding type */ seqHead = op++; assert(op <= oend); /* convert length/distances into codes */ ZSTD_seqToCodes(seqStorePtr); /* build CTable for Literal Lengths */ { unsigned max = MaxLL; size_t const mostFrequent = HIST_countFast_wksp(count, &max, llCodeTable, nbSeq, entropyWorkspace, entropyWkspSize); /* can't fail */ DEBUGLOG(5, "Building LL table"); nextEntropy->fse.litlength_repeatMode = prevEntropy->fse.litlength_repeatMode; LLtype = ZSTD_selectEncodingType(&nextEntropy->fse.litlength_repeatMode, count, max, mostFrequent, nbSeq, LLFSELog, prevEntropy->fse.litlengthCTable, LL_defaultNorm, LL_defaultNormLog, ZSTD_defaultAllowed, strategy); assert(set_basic < set_compressed && set_rle < set_compressed); assert(!(LLtype < set_compressed && nextEntropy->fse.litlength_repeatMode != FSE_repeat_none)); /* We don't copy tables */ { size_t const countSize = ZSTD_buildCTable( op, (size_t)(oend - op), CTable_LitLength, LLFSELog, (symbolEncodingType_e)LLtype, count, max, llCodeTable, nbSeq, LL_defaultNorm, LL_defaultNormLog, MaxLL, prevEntropy->fse.litlengthCTable, sizeof(prevEntropy->fse.litlengthCTable), entropyWorkspace, entropyWkspSize); FORWARD_IF_ERROR(countSize, "ZSTD_buildCTable for LitLens failed"); if (LLtype == set_compressed) lastNCount = op; op += countSize; assert(op <= oend); } } /* build CTable for Offsets */ { unsigned max = MaxOff; size_t const mostFrequent = HIST_countFast_wksp( count, &max, ofCodeTable, nbSeq, entropyWorkspace, entropyWkspSize); /* can't fail */ /* We can only use the basic table if max <= DefaultMaxOff, otherwise the offsets are too large */ ZSTD_defaultPolicy_e const defaultPolicy = (max <= DefaultMaxOff) ? 
ZSTD_defaultAllowed : ZSTD_defaultDisallowed; DEBUGLOG(5, "Building OF table"); nextEntropy->fse.offcode_repeatMode = prevEntropy->fse.offcode_repeatMode; Offtype = ZSTD_selectEncodingType(&nextEntropy->fse.offcode_repeatMode, count, max, mostFrequent, nbSeq, OffFSELog, prevEntropy->fse.offcodeCTable, OF_defaultNorm, OF_defaultNormLog, defaultPolicy, strategy); assert(!(Offtype < set_compressed && nextEntropy->fse.offcode_repeatMode != FSE_repeat_none)); /* We don't copy tables */ { size_t const countSize = ZSTD_buildCTable( op, (size_t)(oend - op), CTable_OffsetBits, OffFSELog, (symbolEncodingType_e)Offtype, count, max, ofCodeTable, nbSeq, OF_defaultNorm, OF_defaultNormLog, DefaultMaxOff, prevEntropy->fse.offcodeCTable, sizeof(prevEntropy->fse.offcodeCTable), entropyWorkspace, entropyWkspSize); FORWARD_IF_ERROR(countSize, "ZSTD_buildCTable for Offsets failed"); if (Offtype == set_compressed) lastNCount = op; op += countSize; assert(op <= oend); } } /* build CTable for MatchLengths */ { unsigned max = MaxML; size_t const mostFrequent = HIST_countFast_wksp( count, &max, mlCodeTable, nbSeq, entropyWorkspace, entropyWkspSize); /* can't fail */ DEBUGLOG(5, "Building ML table (remaining space : %i)", (int)(oend-op)); nextEntropy->fse.matchlength_repeatMode = prevEntropy->fse.matchlength_repeatMode; MLtype = ZSTD_selectEncodingType(&nextEntropy->fse.matchlength_repeatMode, count, max, mostFrequent, nbSeq, MLFSELog, prevEntropy->fse.matchlengthCTable, ML_defaultNorm, ML_defaultNormLog, ZSTD_defaultAllowed, strategy); assert(!(MLtype < set_compressed && nextEntropy->fse.matchlength_repeatMode != FSE_repeat_none)); /* We don't copy tables */ { size_t const countSize = ZSTD_buildCTable( op, (size_t)(oend - op), CTable_MatchLength, MLFSELog, (symbolEncodingType_e)MLtype, count, max, mlCodeTable, nbSeq, ML_defaultNorm, ML_defaultNormLog, MaxML, prevEntropy->fse.matchlengthCTable, sizeof(prevEntropy->fse.matchlengthCTable), entropyWorkspace, entropyWkspSize); FORWARD_IF_ERROR(countSize, "ZSTD_buildCTable for MatchLengths failed"); if (MLtype == set_compressed) lastNCount = op; op += countSize; assert(op <= oend); } } *seqHead = (BYTE)((LLtype<<6) + (Offtype<<4) + (MLtype<<2)); { size_t const bitstreamSize = ZSTD_encodeSequences( op, (size_t)(oend - op), CTable_MatchLength, mlCodeTable, CTable_OffsetBits, ofCodeTable, CTable_LitLength, llCodeTable, sequences, nbSeq, longOffsets, bmi2); FORWARD_IF_ERROR(bitstreamSize, "ZSTD_encodeSequences failed"); op += bitstreamSize; assert(op <= oend); /* zstd versions <= 1.3.4 mistakenly report corruption when * FSE_readNCount() receives a buffer < 4 bytes. * Fixed by https://github.com/facebook/zstd/pull/1146. * This can happen when the last set_compressed table present is 2 * bytes and the bitstream is only one byte. * In this exceedingly rare case, we will simply emit an uncompressed * block, since it isn't worth optimizing. 
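         * Concretely (following the assert just below): a set_compressed NCount
         * description occupies at least 2 bytes and the FSE bitstream at least
         * 1 byte, so if the distance from the last NCount to the end of the
         * block is under 4 bytes, it can only be exactly 3 - precisely the
         * undersized buffer those old decoders mishandle.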
*/ if (lastNCount && (op - lastNCount) < 4) { /* NCountSize >= 2 && bitstreamSize > 0 ==> lastCountSize == 3 */ assert(op - lastNCount == 3); DEBUGLOG(5, "Avoiding bug in zstd decoder in versions <= 1.3.4 by " "emitting an uncompressed block."); return 0; } } DEBUGLOG(5, "compressed block size : %u", (unsigned)(op - ostart)); return (size_t)(op - ostart); } MEM_STATIC size_t ZSTD_compressSequences(seqStore_t* seqStorePtr, const ZSTD_entropyCTables_t* prevEntropy, ZSTD_entropyCTables_t* nextEntropy, const ZSTD_CCtx_params* cctxParams, void* dst, size_t dstCapacity, size_t srcSize, void* entropyWorkspace, size_t entropyWkspSize, int bmi2) { size_t const cSize = ZSTD_compressSequences_internal( seqStorePtr, prevEntropy, nextEntropy, cctxParams, dst, dstCapacity, entropyWorkspace, entropyWkspSize, bmi2); if (cSize == 0) return 0; /* When srcSize <= dstCapacity, there is enough space to write a raw uncompressed block. * Since we ran out of space, block must be not compressible, so fall back to raw uncompressed block. */ if ((cSize == ERROR(dstSize_tooSmall)) & (srcSize <= dstCapacity)) return 0; /* block not compressed */ FORWARD_IF_ERROR(cSize, "ZSTD_compressSequences_internal failed"); /* Check compressibility */ { size_t const maxCSize = srcSize - ZSTD_minGain(srcSize, cctxParams->cParams.strategy); if (cSize >= maxCSize) return 0; /* block not compressed */ } return cSize; } /* ZSTD_selectBlockCompressor() : * Not static, but internal use only (used by long distance matcher) * assumption : strat is a valid strategy */ ZSTD_blockCompressor ZSTD_selectBlockCompressor(ZSTD_strategy strat, ZSTD_dictMode_e dictMode) { static const ZSTD_blockCompressor blockCompressor[3][ZSTD_STRATEGY_MAX+1] = { { ZSTD_compressBlock_fast /* default for 0 */, ZSTD_compressBlock_fast, ZSTD_compressBlock_doubleFast, ZSTD_compressBlock_greedy, ZSTD_compressBlock_lazy, ZSTD_compressBlock_lazy2, ZSTD_compressBlock_btlazy2, ZSTD_compressBlock_btopt, ZSTD_compressBlock_btultra, ZSTD_compressBlock_btultra2 }, { ZSTD_compressBlock_fast_extDict /* default for 0 */, ZSTD_compressBlock_fast_extDict, ZSTD_compressBlock_doubleFast_extDict, ZSTD_compressBlock_greedy_extDict, ZSTD_compressBlock_lazy_extDict, ZSTD_compressBlock_lazy2_extDict, ZSTD_compressBlock_btlazy2_extDict, ZSTD_compressBlock_btopt_extDict, ZSTD_compressBlock_btultra_extDict, ZSTD_compressBlock_btultra_extDict }, { ZSTD_compressBlock_fast_dictMatchState /* default for 0 */, ZSTD_compressBlock_fast_dictMatchState, ZSTD_compressBlock_doubleFast_dictMatchState, ZSTD_compressBlock_greedy_dictMatchState, ZSTD_compressBlock_lazy_dictMatchState, ZSTD_compressBlock_lazy2_dictMatchState, ZSTD_compressBlock_btlazy2_dictMatchState, ZSTD_compressBlock_btopt_dictMatchState, ZSTD_compressBlock_btultra_dictMatchState, ZSTD_compressBlock_btultra_dictMatchState } }; ZSTD_blockCompressor selectedCompressor; ZSTD_STATIC_ASSERT((unsigned)ZSTD_fast == 1); assert(ZSTD_cParam_withinBounds(ZSTD_c_strategy, strat)); selectedCompressor = blockCompressor[(int)dictMode][(int)strat]; assert(selectedCompressor != NULL); return selectedCompressor; } static void ZSTD_storeLastLiterals(seqStore_t* seqStorePtr, const BYTE* anchor, size_t lastLLSize) { memcpy(seqStorePtr->lit, anchor, lastLLSize); seqStorePtr->lit += lastLLSize; } void ZSTD_resetSeqStore(seqStore_t* ssPtr) { ssPtr->lit = ssPtr->litStart; ssPtr->sequences = ssPtr->sequencesStart; ssPtr->longLengthID = 0; } typedef enum { ZSTDbss_compress, ZSTDbss_noCompress } ZSTD_buildSeqStore_e; static size_t ZSTD_buildSeqStore(ZSTD_CCtx* zc, 
const void* src, size_t srcSize) { ZSTD_matchState_t* const ms = &zc->blockState.matchState; DEBUGLOG(5, "ZSTD_buildSeqStore (srcSize=%zu)", srcSize); assert(srcSize <= ZSTD_BLOCKSIZE_MAX); /* Assert that we have correctly flushed the ctx params into the ms's copy */ ZSTD_assertEqualCParams(zc->appliedParams.cParams, ms->cParams); if (srcSize < MIN_CBLOCK_SIZE+ZSTD_blockHeaderSize+1) { ZSTD_ldm_skipSequences(&zc->externSeqStore, srcSize, zc->appliedParams.cParams.minMatch); return ZSTDbss_noCompress; /* don't even attempt compression below a certain srcSize */ } ZSTD_resetSeqStore(&(zc->seqStore)); /* required for optimal parser to read stats from dictionary */ ms->opt.symbolCosts = &zc->blockState.prevCBlock->entropy; /* tell the optimal parser how we expect to compress literals */ ms->opt.literalCompressionMode = zc->appliedParams.literalCompressionMode; /* a gap between an attached dict and the current window is not safe, * they must remain adjacent, * and when that stops being the case, the dict must be unset */ assert(ms->dictMatchState == NULL || ms->loadedDictEnd == ms->window.dictLimit); /* limited update after a very long match */ { const BYTE* const base = ms->window.base; const BYTE* const istart = (const BYTE*)src; const U32 current = (U32)(istart-base); if (sizeof(ptrdiff_t)==8) assert(istart - base < (ptrdiff_t)(U32)(-1)); /* ensure no overflow */ if (current > ms->nextToUpdate + 384) ms->nextToUpdate = current - MIN(192, (U32)(current - ms->nextToUpdate - 384)); } /* select and store sequences */ { ZSTD_dictMode_e const dictMode = ZSTD_matchState_dictMode(ms); size_t lastLLSize; { int i; for (i = 0; i < ZSTD_REP_NUM; ++i) zc->blockState.nextCBlock->rep[i] = zc->blockState.prevCBlock->rep[i]; } if (zc->externSeqStore.pos < zc->externSeqStore.size) { assert(!zc->appliedParams.ldmParams.enableLdm); /* Updates ldmSeqStore.pos */ lastLLSize = ZSTD_ldm_blockCompress(&zc->externSeqStore, ms, &zc->seqStore, zc->blockState.nextCBlock->rep, src, srcSize); assert(zc->externSeqStore.pos <= zc->externSeqStore.size); } else if (zc->appliedParams.ldmParams.enableLdm) { rawSeqStore_t ldmSeqStore = {NULL, 0, 0, 0}; ldmSeqStore.seq = zc->ldmSequences; ldmSeqStore.capacity = zc->maxNbLdmSequences; /* Updates ldmSeqStore.size */ FORWARD_IF_ERROR(ZSTD_ldm_generateSequences(&zc->ldmState, &ldmSeqStore, &zc->appliedParams.ldmParams, src, srcSize), ""); /* Updates ldmSeqStore.pos */ lastLLSize = ZSTD_ldm_blockCompress(&ldmSeqStore, ms, &zc->seqStore, zc->blockState.nextCBlock->rep, src, srcSize); assert(ldmSeqStore.pos == ldmSeqStore.size); } else { /* not long range mode */ ZSTD_blockCompressor const blockCompressor = ZSTD_selectBlockCompressor(zc->appliedParams.cParams.strategy, dictMode); lastLLSize = blockCompressor(ms, &zc->seqStore, zc->blockState.nextCBlock->rep, src, srcSize); } { const BYTE* const lastLiterals = (const BYTE*)src + srcSize - lastLLSize; ZSTD_storeLastLiterals(&zc->seqStore, lastLiterals, lastLLSize); } } return ZSTDbss_compress; } static void ZSTD_copyBlockSequences(ZSTD_CCtx* zc) { const seqStore_t* seqStore = ZSTD_getSeqStore(zc); const seqDef* seqs = seqStore->sequencesStart; size_t seqsSize = seqStore->sequences - seqs; ZSTD_Sequence* outSeqs = &zc->seqCollector.seqStart[zc->seqCollector.seqIndex]; size_t i; size_t position; int repIdx; assert(zc->seqCollector.seqIndex + 1 < zc->seqCollector.maxSequences); for (i = 0, position = 0; i < seqsSize; ++i) { outSeqs[i].offset = seqs[i].offset; outSeqs[i].litLength = seqs[i].litLength; outSeqs[i].matchLength = 
seqs[i].matchLength + MINMATCH; if (i == seqStore->longLengthPos) { if (seqStore->longLengthID == 1) { outSeqs[i].litLength += 0x10000; } else if (seqStore->longLengthID == 2) { outSeqs[i].matchLength += 0x10000; } } if (outSeqs[i].offset <= ZSTD_REP_NUM) { outSeqs[i].rep = outSeqs[i].offset; repIdx = (unsigned int)i - outSeqs[i].offset; if (outSeqs[i].litLength == 0) { if (outSeqs[i].offset < 3) { --repIdx; } else { repIdx = (unsigned int)i - 1; } ++outSeqs[i].rep; } assert(repIdx >= -3); outSeqs[i].offset = repIdx >= 0 ? outSeqs[repIdx].offset : repStartValue[-repIdx - 1]; if (outSeqs[i].rep == 4) { --outSeqs[i].offset; } } else { outSeqs[i].offset -= ZSTD_REP_NUM; } position += outSeqs[i].litLength; outSeqs[i].matchPos = (unsigned int)position; position += outSeqs[i].matchLength; } zc->seqCollector.seqIndex += seqsSize; } size_t ZSTD_getSequences(ZSTD_CCtx* zc, ZSTD_Sequence* outSeqs, size_t outSeqsSize, const void* src, size_t srcSize) { const size_t dstCapacity = ZSTD_compressBound(srcSize); void* dst = ZSTD_malloc(dstCapacity, ZSTD_defaultCMem); SeqCollector seqCollector; RETURN_ERROR_IF(dst == NULL, memory_allocation, "NULL pointer!"); seqCollector.collectSequences = 1; seqCollector.seqStart = outSeqs; seqCollector.seqIndex = 0; seqCollector.maxSequences = outSeqsSize; zc->seqCollector = seqCollector; ZSTD_compress2(zc, dst, dstCapacity, src, srcSize); ZSTD_free(dst, ZSTD_defaultCMem); return zc->seqCollector.seqIndex; } /* Returns true if the given block is a RLE block */ static int ZSTD_isRLE(const BYTE *ip, size_t length) { size_t i; if (length < 2) return 1; for (i = 1; i < length; ++i) { if (ip[0] != ip[i]) return 0; } return 1; } /* Returns true if the given block may be RLE. * This is just a heuristic based on the compressibility. * It may return both false positives and false negatives. */ static int ZSTD_maybeRLE(seqStore_t const* seqStore) { size_t const nbSeqs = (size_t)(seqStore->sequences - seqStore->sequencesStart); size_t const nbLits = (size_t)(seqStore->lit - seqStore->litStart); return nbSeqs < 4 && nbLits < 10; } static void ZSTD_confirmRepcodesAndEntropyTables(ZSTD_CCtx* zc) { ZSTD_compressedBlockState_t* const tmp = zc->blockState.prevCBlock; zc->blockState.prevCBlock = zc->blockState.nextCBlock; zc->blockState.nextCBlock = tmp; } static size_t ZSTD_compressBlock_internal(ZSTD_CCtx* zc, void* dst, size_t dstCapacity, const void* src, size_t srcSize, U32 frame) { /* This the upper bound for the length of an rle block. * This isn't the actual upper bound. Finding the real threshold * needs further investigation. 
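     * In practice it only acts as a cheap pre-filter: the block is rewritten as
     * a 1-byte RLE block further below only when it is not the first block of a
     * frame, its normally-compressed size already came out under this threshold,
     * and ZSTD_isRLE() then confirms that every input byte is identical.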
*/ const U32 rleMaxLength = 25; size_t cSize; const BYTE* ip = (const BYTE*)src; BYTE* op = (BYTE*)dst; DEBUGLOG(5, "ZSTD_compressBlock_internal (dstCapacity=%u, dictLimit=%u, nextToUpdate=%u)", (unsigned)dstCapacity, (unsigned)zc->blockState.matchState.window.dictLimit, (unsigned)zc->blockState.matchState.nextToUpdate); { const size_t bss = ZSTD_buildSeqStore(zc, src, srcSize); FORWARD_IF_ERROR(bss, "ZSTD_buildSeqStore failed"); if (bss == ZSTDbss_noCompress) { cSize = 0; goto out; } } if (zc->seqCollector.collectSequences) { ZSTD_copyBlockSequences(zc); return 0; } /* encode sequences and literals */ cSize = ZSTD_compressSequences(&zc->seqStore, &zc->blockState.prevCBlock->entropy, &zc->blockState.nextCBlock->entropy, &zc->appliedParams, dst, dstCapacity, srcSize, zc->entropyWorkspace, HUF_WORKSPACE_SIZE /* statically allocated in resetCCtx */, zc->bmi2); if (frame && /* We don't want to emit our first block as a RLE even if it qualifies because * doing so will cause the decoder (cli only) to throw a "should consume all input error." * This is only an issue for zstd <= v1.4.3 */ !zc->isFirstBlock && cSize < rleMaxLength && ZSTD_isRLE(ip, srcSize)) { cSize = 1; op[0] = ip[0]; } out: if (!ZSTD_isError(cSize) && cSize > 1) { ZSTD_confirmRepcodesAndEntropyTables(zc); } /* We check that dictionaries have offset codes available for the first * block. After the first block, the offcode table might not have large * enough codes to represent the offsets in the data. */ if (zc->blockState.prevCBlock->entropy.fse.offcode_repeatMode == FSE_repeat_valid) zc->blockState.prevCBlock->entropy.fse.offcode_repeatMode = FSE_repeat_check; return cSize; } static size_t ZSTD_compressBlock_targetCBlockSize_body(ZSTD_CCtx* zc, void* dst, size_t dstCapacity, const void* src, size_t srcSize, const size_t bss, U32 lastBlock) { DEBUGLOG(6, "Attempting ZSTD_compressSuperBlock()"); if (bss == ZSTDbss_compress) { if (/* We don't want to emit our first block as a RLE even if it qualifies because * doing so will cause the decoder (cli only) to throw a "should consume all input error." * This is only an issue for zstd <= v1.4.3 */ !zc->isFirstBlock && ZSTD_maybeRLE(&zc->seqStore) && ZSTD_isRLE((BYTE const*)src, srcSize)) { return ZSTD_rleCompressBlock(dst, dstCapacity, *(BYTE const*)src, srcSize, lastBlock); } /* Attempt superblock compression. * * Note that compressed size of ZSTD_compressSuperBlock() is not bound by the * standard ZSTD_compressBound(). This is a problem, because even if we have * space now, taking an extra byte now could cause us to run out of space later * and violate ZSTD_compressBound(). * * Define blockBound(blockSize) = blockSize + ZSTD_blockHeaderSize. * * In order to respect ZSTD_compressBound() we must attempt to emit a raw * uncompressed block in these cases: * * cSize == 0: Return code for an uncompressed block. * * cSize == dstSize_tooSmall: We may have expanded beyond blockBound(srcSize). * ZSTD_noCompressBlock() will return dstSize_tooSmall if we are really out of * output space. * * cSize >= blockBound(srcSize): We have expanded the block too much so * emit an uncompressed block. 
*/ { size_t const cSize = ZSTD_compressSuperBlock(zc, dst, dstCapacity, src, srcSize, lastBlock); if (cSize != ERROR(dstSize_tooSmall)) { size_t const maxCSize = srcSize - ZSTD_minGain(srcSize, zc->appliedParams.cParams.strategy); FORWARD_IF_ERROR(cSize, "ZSTD_compressSuperBlock failed"); if (cSize != 0 && cSize < maxCSize + ZSTD_blockHeaderSize) { ZSTD_confirmRepcodesAndEntropyTables(zc); return cSize; } } } } DEBUGLOG(6, "Resorting to ZSTD_noCompressBlock()"); /* Superblock compression failed, attempt to emit a single no compress block. * The decoder will be able to stream this block since it is uncompressed. */ return ZSTD_noCompressBlock(dst, dstCapacity, src, srcSize, lastBlock); } static size_t ZSTD_compressBlock_targetCBlockSize(ZSTD_CCtx* zc, void* dst, size_t dstCapacity, const void* src, size_t srcSize, U32 lastBlock) { size_t cSize = 0; const size_t bss = ZSTD_buildSeqStore(zc, src, srcSize); DEBUGLOG(5, "ZSTD_compressBlock_targetCBlockSize (dstCapacity=%u, dictLimit=%u, nextToUpdate=%u, srcSize=%zu)", (unsigned)dstCapacity, (unsigned)zc->blockState.matchState.window.dictLimit, (unsigned)zc->blockState.matchState.nextToUpdate, srcSize); FORWARD_IF_ERROR(bss, "ZSTD_buildSeqStore failed"); cSize = ZSTD_compressBlock_targetCBlockSize_body(zc, dst, dstCapacity, src, srcSize, bss, lastBlock); FORWARD_IF_ERROR(cSize, "ZSTD_compressBlock_targetCBlockSize_body failed"); if (zc->blockState.prevCBlock->entropy.fse.offcode_repeatMode == FSE_repeat_valid) zc->blockState.prevCBlock->entropy.fse.offcode_repeatMode = FSE_repeat_check; return cSize; } static void ZSTD_overflowCorrectIfNeeded(ZSTD_matchState_t* ms, ZSTD_cwksp* ws, ZSTD_CCtx_params const* params, void const* ip, void const* iend) { if (ZSTD_window_needOverflowCorrection(ms->window, iend)) { U32 const maxDist = (U32)1 << params->cParams.windowLog; U32 const cycleLog = ZSTD_cycleLog(params->cParams.chainLog, params->cParams.strategy); U32 const correction = ZSTD_window_correctOverflow(&ms->window, cycleLog, maxDist, ip); ZSTD_STATIC_ASSERT(ZSTD_CHAINLOG_MAX <= 30); ZSTD_STATIC_ASSERT(ZSTD_WINDOWLOG_MAX_32 <= 30); ZSTD_STATIC_ASSERT(ZSTD_WINDOWLOG_MAX <= 31); ZSTD_cwksp_mark_tables_dirty(ws); ZSTD_reduceIndex(ms, params, correction); ZSTD_cwksp_mark_tables_clean(ws); if (ms->nextToUpdate < correction) ms->nextToUpdate = 0; else ms->nextToUpdate -= correction; /* invalidate dictionaries on overflow correction */ ms->loadedDictEnd = 0; ms->dictMatchState = NULL; } } /*! ZSTD_compress_frameChunk() : * Compress a chunk of data into one or multiple blocks. * All blocks will be terminated, all input will be consumed. * Function will issue an error if there is not enough `dstCapacity` to hold the compressed content. 
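*   For example, with the usual 128 KB maximum block size (ZSTD_BLOCKSIZE_MAX),
*   a 300 KB chunk is emitted as two 128 KB blocks followed by one 44 KB block;
*   that final block is flagged as the frame's last block only when
*   `lastFrameChunk` is set.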
* Frame is supposed already started (header already produced) * @return : compressed size, or an error code */ static size_t ZSTD_compress_frameChunk (ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize, U32 lastFrameChunk) { size_t blockSize = cctx->blockSize; size_t remaining = srcSize; const BYTE* ip = (const BYTE*)src; BYTE* const ostart = (BYTE*)dst; BYTE* op = ostart; U32 const maxDist = (U32)1 << cctx->appliedParams.cParams.windowLog; assert(cctx->appliedParams.cParams.windowLog <= ZSTD_WINDOWLOG_MAX); DEBUGLOG(5, "ZSTD_compress_frameChunk (blockSize=%u)", (unsigned)blockSize); if (cctx->appliedParams.fParams.checksumFlag && srcSize) XXH64_update(&cctx->xxhState, src, srcSize); while (remaining) { ZSTD_matchState_t* const ms = &cctx->blockState.matchState; U32 const lastBlock = lastFrameChunk & (blockSize >= remaining); RETURN_ERROR_IF(dstCapacity < ZSTD_blockHeaderSize + MIN_CBLOCK_SIZE, dstSize_tooSmall, "not enough space to store compressed block"); if (remaining < blockSize) blockSize = remaining; ZSTD_overflowCorrectIfNeeded( ms, &cctx->workspace, &cctx->appliedParams, ip, ip + blockSize); ZSTD_checkDictValidity(&ms->window, ip + blockSize, maxDist, &ms->loadedDictEnd, &ms->dictMatchState); /* Ensure hash/chain table insertion resumes no sooner than lowlimit */ if (ms->nextToUpdate < ms->window.lowLimit) ms->nextToUpdate = ms->window.lowLimit; { size_t cSize; if (ZSTD_useTargetCBlockSize(&cctx->appliedParams)) { cSize = ZSTD_compressBlock_targetCBlockSize(cctx, op, dstCapacity, ip, blockSize, lastBlock); FORWARD_IF_ERROR(cSize, "ZSTD_compressBlock_targetCBlockSize failed"); assert(cSize > 0); assert(cSize <= blockSize + ZSTD_blockHeaderSize); } else { cSize = ZSTD_compressBlock_internal(cctx, op+ZSTD_blockHeaderSize, dstCapacity-ZSTD_blockHeaderSize, ip, blockSize, 1 /* frame */); FORWARD_IF_ERROR(cSize, "ZSTD_compressBlock_internal failed"); if (cSize == 0) { /* block is not compressible */ cSize = ZSTD_noCompressBlock(op, dstCapacity, ip, blockSize, lastBlock); FORWARD_IF_ERROR(cSize, "ZSTD_noCompressBlock failed"); } else { U32 const cBlockHeader = cSize == 1 ? lastBlock + (((U32)bt_rle)<<1) + (U32)(blockSize << 3) : lastBlock + (((U32)bt_compressed)<<1) + (U32)(cSize << 3); MEM_writeLE24(op, cBlockHeader); cSize += ZSTD_blockHeaderSize; } } ip += blockSize; assert(remaining >= blockSize); remaining -= blockSize; op += cSize; assert(dstCapacity >= cSize); dstCapacity -= cSize; cctx->isFirstBlock = 0; DEBUGLOG(5, "ZSTD_compress_frameChunk: adding a block of size %u", (unsigned)cSize); } } if (lastFrameChunk && (op>ostart)) cctx->stage = ZSTDcs_ending; return (size_t)(op-ostart); } static size_t ZSTD_writeFrameHeader(void* dst, size_t dstCapacity, const ZSTD_CCtx_params* params, U64 pledgedSrcSize, U32 dictID) { BYTE* const op = (BYTE*)dst; U32 const dictIDSizeCodeLength = (dictID>0) + (dictID>=256) + (dictID>=65536); /* 0-3 */ U32 const dictIDSizeCode = params->fParams.noDictIDFlag ? 0 : dictIDSizeCodeLength; /* 0-3 */ U32 const checksumFlag = params->fParams.checksumFlag>0; U32 const windowSize = (U32)1 << params->cParams.windowLog; U32 const singleSegment = params->fParams.contentSizeFlag && (windowSize >= pledgedSrcSize); BYTE const windowLogByte = (BYTE)((params->cParams.windowLog - ZSTD_WINDOWLOG_ABSOLUTEMIN) << 3); U32 const fcsCode = params->fParams.contentSizeFlag ? 
                            (pledgedSrcSize>=256) + (pledgedSrcSize>=65536+256) + (pledgedSrcSize>=0xFFFFFFFFU) : 0;  /* 0-3 */
    BYTE const frameHeaderDescriptionByte = (BYTE)(dictIDSizeCode + (checksumFlag<<2) + (singleSegment<<5) + (fcsCode<<6) );
    size_t pos=0;

    assert(!(params->fParams.contentSizeFlag && pledgedSrcSize == ZSTD_CONTENTSIZE_UNKNOWN));
    RETURN_ERROR_IF(dstCapacity < ZSTD_FRAMEHEADERSIZE_MAX, dstSize_tooSmall,
                    "dst buf is too small to fit worst-case frame header size.");
    DEBUGLOG(4, "ZSTD_writeFrameHeader : dictIDFlag : %u ; dictID : %u ; dictIDSizeCode : %u",
                !params->fParams.noDictIDFlag, (unsigned)dictID, (unsigned)dictIDSizeCode);

    if (params->format == ZSTD_f_zstd1) {
        MEM_writeLE32(dst, ZSTD_MAGICNUMBER);
        pos = 4;
    }
    op[pos++] = frameHeaderDescriptionByte;
    if (!singleSegment) op[pos++] = windowLogByte;
    switch(dictIDSizeCode)
    {
        default:  assert(0); /* impossible */
        case 0 : break;
        case 1 : op[pos] = (BYTE)(dictID); pos++; break;
        case 2 : MEM_writeLE16(op+pos, (U16)dictID); pos+=2; break;
        case 3 : MEM_writeLE32(op+pos, dictID); pos+=4; break;
    }
    switch(fcsCode)
    {
        default:  assert(0); /* impossible */
        case 0 : if (singleSegment) op[pos++] = (BYTE)(pledgedSrcSize); break;
        case 1 : MEM_writeLE16(op+pos, (U16)(pledgedSrcSize-256)); pos+=2; break;
        case 2 : MEM_writeLE32(op+pos, (U32)(pledgedSrcSize)); pos+=4; break;
        case 3 : MEM_writeLE64(op+pos, (U64)(pledgedSrcSize)); pos+=8; break;
    }
    return pos;
}

/* ZSTD_writeLastEmptyBlock() :
 * output an empty Block with end-of-frame mark to complete a frame
 * @return : size of data written into `dst` (== ZSTD_blockHeaderSize (defined in zstd_internal.h))
 *           or an error code if `dstCapacity` is too small (<ZSTD_blockHeaderSize)
 */
size_t ZSTD_writeLastEmptyBlock(void* dst, size_t dstCapacity)
{
    RETURN_ERROR_IF(dstCapacity < ZSTD_blockHeaderSize, dstSize_tooSmall,
                    "dst buf is too small to write frame trailer empty block.");
    {   U32 const cBlockHeader24 = 1 /*lastBlock*/ + (((U32)bt_raw)<<1);  /* 0 size */
        MEM_writeLE24(dst, cBlockHeader24);
        return ZSTD_blockHeaderSize;
    }
}

size_t ZSTD_referenceExternalSequences(ZSTD_CCtx* cctx, rawSeq* seq, size_t nbSeq)
{
    RETURN_ERROR_IF(cctx->stage != ZSTDcs_init, stage_wrong,
                    "wrong cctx stage");
    RETURN_ERROR_IF(cctx->appliedParams.ldmParams.enableLdm,
                    parameter_unsupported,
                    "incompatible with ldm");
    cctx->externSeqStore.seq = seq;
    cctx->externSeqStore.size = nbSeq;
    cctx->externSeqStore.capacity = nbSeq;
    cctx->externSeqStore.pos = 0;
    return 0;
}

static size_t ZSTD_compressContinue_internal (ZSTD_CCtx* cctx,
                              void* dst, size_t dstCapacity,
                        const void* src, size_t srcSize,
                               U32 frame, U32 lastFrameChunk)
{
    ZSTD_matchState_t* const ms = &cctx->blockState.matchState;
    size_t fhSize = 0;

    DEBUGLOG(5, "ZSTD_compressContinue_internal, stage: %u, srcSize: %u",
                cctx->stage, (unsigned)srcSize);
    RETURN_ERROR_IF(cctx->stage==ZSTDcs_created, stage_wrong,
                    "missing init (ZSTD_compressBegin)");

    if (frame && (cctx->stage==ZSTDcs_init)) {
        fhSize = ZSTD_writeFrameHeader(dst, dstCapacity, &cctx->appliedParams,
                                       cctx->pledgedSrcSizePlusOne-1, cctx->dictID);
        FORWARD_IF_ERROR(fhSize, "ZSTD_writeFrameHeader failed");
        assert(fhSize <= dstCapacity);
        dstCapacity -= fhSize;
        dst = (char*)dst + fhSize;
        cctx->stage = ZSTDcs_ongoing;
    }

    if (!srcSize) return fhSize;  /* do not generate an empty block if no input */

    if (!ZSTD_window_update(&ms->window, src, srcSize)) {
        ms->nextToUpdate = ms->window.dictLimit;
    }
    if (cctx->appliedParams.ldmParams.enableLdm) {
        ZSTD_window_update(&cctx->ldmState.window, src, srcSize);
    }

    if (!frame) {
        /* overflow check and correction for block mode */
        ZSTD_overflowCorrectIfNeeded(
            ms, &cctx->workspace, &cctx->appliedParams,
            src, (BYTE const*)src + srcSize);
    }

    DEBUGLOG(5, "ZSTD_compressContinue_internal (blockSize=%u)", (unsigned)cctx->blockSize);
    {   size_t const cSize = frame ?
                             ZSTD_compress_frameChunk (cctx, dst, dstCapacity, src, srcSize, lastFrameChunk) :
                             ZSTD_compressBlock_internal (cctx, dst, dstCapacity, src, srcSize, 0 /* frame */);
        FORWARD_IF_ERROR(cSize, "%s", frame ?
"ZSTD_compress_frameChunk failed" : "ZSTD_compressBlock_internal failed"); cctx->consumedSrcSize += srcSize; cctx->producedCSize += (cSize + fhSize); assert(!(cctx->appliedParams.fParams.contentSizeFlag && cctx->pledgedSrcSizePlusOne == 0)); if (cctx->pledgedSrcSizePlusOne != 0) { /* control src size */ ZSTD_STATIC_ASSERT(ZSTD_CONTENTSIZE_UNKNOWN == (unsigned long long)-1); RETURN_ERROR_IF( cctx->consumedSrcSize+1 > cctx->pledgedSrcSizePlusOne, srcSize_wrong, "error : pledgedSrcSize = %u, while realSrcSize >= %u", (unsigned)cctx->pledgedSrcSizePlusOne-1, (unsigned)cctx->consumedSrcSize); } return cSize + fhSize; } } size_t ZSTD_compressContinue (ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize) { DEBUGLOG(5, "ZSTD_compressContinue (srcSize=%u)", (unsigned)srcSize); return ZSTD_compressContinue_internal(cctx, dst, dstCapacity, src, srcSize, 1 /* frame mode */, 0 /* last chunk */); } size_t ZSTD_getBlockSize(const ZSTD_CCtx* cctx) { ZSTD_compressionParameters const cParams = cctx->appliedParams.cParams; assert(!ZSTD_checkCParams(cParams)); return MIN (ZSTD_BLOCKSIZE_MAX, (U32)1 << cParams.windowLog); } size_t ZSTD_compressBlock(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize) { DEBUGLOG(5, "ZSTD_compressBlock: srcSize = %u", (unsigned)srcSize); { size_t const blockSizeMax = ZSTD_getBlockSize(cctx); RETURN_ERROR_IF(srcSize > blockSizeMax, srcSize_wrong, "input is larger than a block"); } return ZSTD_compressContinue_internal(cctx, dst, dstCapacity, src, srcSize, 0 /* frame mode */, 0 /* last chunk */); } /*! ZSTD_loadDictionaryContent() : * @return : 0, or an error code */ static size_t ZSTD_loadDictionaryContent(ZSTD_matchState_t* ms, ldmState_t* ls, ZSTD_cwksp* ws, ZSTD_CCtx_params const* params, const void* src, size_t srcSize, ZSTD_dictTableLoadMethod_e dtlm) { const BYTE* ip = (const BYTE*) src; const BYTE* const iend = ip + srcSize; ZSTD_window_update(&ms->window, src, srcSize); ms->loadedDictEnd = params->forceWindow ? 0 : (U32)(iend - ms->window.base); if (params->ldmParams.enableLdm && ls != NULL) { ZSTD_window_update(&ls->window, src, srcSize); ls->loadedDictEnd = params->forceWindow ? 0 : (U32)(iend - ls->window.base); } /* Assert that we the ms params match the params we're being given */ ZSTD_assertEqualCParams(params->cParams, ms->cParams); if (srcSize <= HASH_READ_SIZE) return 0; while (iend - ip > HASH_READ_SIZE) { size_t const remaining = (size_t)(iend - ip); size_t const chunk = MIN(remaining, ZSTD_CHUNKSIZE_MAX); const BYTE* const ichunk = ip + chunk; ZSTD_overflowCorrectIfNeeded(ms, ws, params, ip, ichunk); if (params->ldmParams.enableLdm && ls != NULL) ZSTD_ldm_fillHashTable(ls, (const BYTE*)src, (const BYTE*)src + srcSize, ¶ms->ldmParams); switch(params->cParams.strategy) { case ZSTD_fast: ZSTD_fillHashTable(ms, ichunk, dtlm); break; case ZSTD_dfast: ZSTD_fillDoubleHashTable(ms, ichunk, dtlm); break; case ZSTD_greedy: case ZSTD_lazy: case ZSTD_lazy2: if (chunk >= HASH_READ_SIZE) ZSTD_insertAndFindFirstIndex(ms, ichunk-HASH_READ_SIZE); break; case ZSTD_btlazy2: /* we want the dictionary table fully sorted */ case ZSTD_btopt: case ZSTD_btultra: case ZSTD_btultra2: if (chunk >= HASH_READ_SIZE) ZSTD_updateTree(ms, ichunk-HASH_READ_SIZE, ichunk); break; default: assert(0); /* not possible : not a valid strategy id */ } ip = ichunk; } ms->nextToUpdate = (U32)(iend - ms->window.base); return 0; } /* Dictionaries that assign zero probability to symbols that show up causes problems when FSE encoding. 
Refuse dictionaries that assign zero probability to symbols that we may encounter during compression. NOTE: This behavior is not standard and could be improved in the future. */ static size_t ZSTD_checkDictNCount(short* normalizedCounter, unsigned dictMaxSymbolValue, unsigned maxSymbolValue) { U32 s; RETURN_ERROR_IF(dictMaxSymbolValue < maxSymbolValue, dictionary_corrupted, "dict fse tables don't have all symbols"); for (s = 0; s <= maxSymbolValue; ++s) { RETURN_ERROR_IF(normalizedCounter[s] == 0, dictionary_corrupted, "dict fse tables don't have all symbols"); } return 0; } size_t ZSTD_loadCEntropy(ZSTD_compressedBlockState_t* bs, void* workspace, short* offcodeNCount, unsigned* offcodeMaxValue, const void* const dict, size_t dictSize) { const BYTE* dictPtr = (const BYTE*)dict; /* skip magic num and dict ID */ const BYTE* const dictEnd = dictPtr + dictSize; dictPtr += 8; bs->entropy.huf.repeatMode = HUF_repeat_check; { unsigned maxSymbolValue = 255; unsigned hasZeroWeights = 1; size_t const hufHeaderSize = HUF_readCTable((HUF_CElt*)bs->entropy.huf.CTable, &maxSymbolValue, dictPtr, dictEnd-dictPtr, &hasZeroWeights); /* We only set the loaded table as valid if it contains all non-zero * weights. Otherwise, we set it to check */ if (!hasZeroWeights) bs->entropy.huf.repeatMode = HUF_repeat_valid; RETURN_ERROR_IF(HUF_isError(hufHeaderSize), dictionary_corrupted, ""); RETURN_ERROR_IF(maxSymbolValue < 255, dictionary_corrupted, ""); dictPtr += hufHeaderSize; } { unsigned offcodeLog; size_t const offcodeHeaderSize = FSE_readNCount(offcodeNCount, offcodeMaxValue, &offcodeLog, dictPtr, dictEnd-dictPtr); RETURN_ERROR_IF(FSE_isError(offcodeHeaderSize), dictionary_corrupted, ""); RETURN_ERROR_IF(offcodeLog > OffFSELog, dictionary_corrupted, ""); /* Defer checking offcodeMaxValue because we need to know the size of the dictionary content */ /* fill all offset symbols to avoid garbage at end of table */ RETURN_ERROR_IF(FSE_isError(FSE_buildCTable_wksp( bs->entropy.fse.offcodeCTable, offcodeNCount, MaxOff, offcodeLog, workspace, HUF_WORKSPACE_SIZE)), dictionary_corrupted, ""); dictPtr += offcodeHeaderSize; } { short matchlengthNCount[MaxML+1]; unsigned matchlengthMaxValue = MaxML, matchlengthLog; size_t const matchlengthHeaderSize = FSE_readNCount(matchlengthNCount, &matchlengthMaxValue, &matchlengthLog, dictPtr, dictEnd-dictPtr); RETURN_ERROR_IF(FSE_isError(matchlengthHeaderSize), dictionary_corrupted, ""); RETURN_ERROR_IF(matchlengthLog > MLFSELog, dictionary_corrupted, ""); /* Every match length code must have non-zero probability */ FORWARD_IF_ERROR( ZSTD_checkDictNCount(matchlengthNCount, matchlengthMaxValue, MaxML), ""); RETURN_ERROR_IF(FSE_isError(FSE_buildCTable_wksp( bs->entropy.fse.matchlengthCTable, matchlengthNCount, matchlengthMaxValue, matchlengthLog, workspace, HUF_WORKSPACE_SIZE)), dictionary_corrupted, ""); dictPtr += matchlengthHeaderSize; } { short litlengthNCount[MaxLL+1]; unsigned litlengthMaxValue = MaxLL, litlengthLog; size_t const litlengthHeaderSize = FSE_readNCount(litlengthNCount, &litlengthMaxValue, &litlengthLog, dictPtr, dictEnd-dictPtr); RETURN_ERROR_IF(FSE_isError(litlengthHeaderSize), dictionary_corrupted, ""); RETURN_ERROR_IF(litlengthLog > LLFSELog, dictionary_corrupted, ""); /* Every literal length code must have non-zero probability */ FORWARD_IF_ERROR( ZSTD_checkDictNCount(litlengthNCount, litlengthMaxValue, MaxLL), ""); RETURN_ERROR_IF(FSE_isError(FSE_buildCTable_wksp( bs->entropy.fse.litlengthCTable, litlengthNCount, litlengthMaxValue, litlengthLog, workspace, 
                                        HUF_WORKSPACE_SIZE)),
                        dictionary_corrupted, "");
        dictPtr += litlengthHeaderSize;
    }

    RETURN_ERROR_IF(dictPtr+12 > dictEnd, dictionary_corrupted, "");
    bs->rep[0] = MEM_readLE32(dictPtr+0);
    bs->rep[1] = MEM_readLE32(dictPtr+4);
    bs->rep[2] = MEM_readLE32(dictPtr+8);
    dictPtr += 12;

    return dictPtr - (const BYTE*)dict;
}

/* Dictionary format :
 * See :
 * https://github.com/facebook/zstd/blob/master/doc/zstd_compression_format.md#dictionary-format
 */
/*! ZSTD_loadZstdDictionary() :
 * @return : dictID, or an error code
 *  assumptions : magic number supposed already checked
 *                dictSize supposed >= 8
 */
static size_t ZSTD_loadZstdDictionary(ZSTD_compressedBlockState_t* bs,
                                      ZSTD_matchState_t* ms,
                                      ZSTD_cwksp* ws,
                                      ZSTD_CCtx_params const* params,
                                      const void* dict, size_t dictSize,
                                      ZSTD_dictTableLoadMethod_e dtlm,
                                      void* workspace)
{
    const BYTE* dictPtr = (const BYTE*)dict;
    const BYTE* const dictEnd = dictPtr + dictSize;
    short offcodeNCount[MaxOff+1];
    unsigned offcodeMaxValue = MaxOff;
    size_t dictID;
    size_t eSize;

    ZSTD_STATIC_ASSERT(HUF_WORKSPACE_SIZE >= (1<<MAX(MLFSELog,LLFSELog)));
    assert(dictSize >= 8);
    assert(MEM_readLE32(dictPtr) == ZSTD_MAGIC_DICTIONARY);

    dictID = params->fParams.noDictIDFlag ? 0 : MEM_readLE32(dictPtr + 4 /* skip magic number */ );
    eSize = ZSTD_loadCEntropy(bs, workspace, offcodeNCount, &offcodeMaxValue, dict, dictSize);
    FORWARD_IF_ERROR(eSize, "ZSTD_loadCEntropy failed");
    dictPtr += eSize;

    {   size_t const dictContentSize = (size_t)(dictEnd - dictPtr);
        U32 offcodeMax = MaxOff;
        if (dictContentSize <= ((U32)-1) - 128 KB) {
            U32 const maxOffset = (U32)dictContentSize + 128 KB; /* The maximum offset that must be supported */
            offcodeMax = ZSTD_highbit32(maxOffset); /* Calculate minimum offset code required to represent maxOffset */
        }
        /* All offset values <= dictContentSize + 128 KB must be representable */
        FORWARD_IF_ERROR(ZSTD_checkDictNCount(offcodeNCount, offcodeMaxValue, MIN(offcodeMax, MaxOff)), "");
        /* All repCodes must be <= dictContentSize and != 0*/
        {   U32 u;
            for (u=0; u<3; u++) {
                RETURN_ERROR_IF(bs->rep[u] == 0, dictionary_corrupted, "");
                RETURN_ERROR_IF(bs->rep[u] > dictContentSize, dictionary_corrupted, "");
        }   }

        bs->entropy.fse.offcode_repeatMode = FSE_repeat_valid;
        bs->entropy.fse.matchlength_repeatMode = FSE_repeat_valid;
        bs->entropy.fse.litlength_repeatMode = FSE_repeat_valid;
        FORWARD_IF_ERROR(ZSTD_loadDictionaryContent(
            ms, NULL, ws, params, dictPtr, dictContentSize, dtlm), "");
        return dictID;
    }
}

/** ZSTD_compress_insertDictionary() :
*   @return : dictID, or an error code */
static size_t
ZSTD_compress_insertDictionary(ZSTD_compressedBlockState_t* bs,
                               ZSTD_matchState_t* ms,
                               ldmState_t* ls,
                               ZSTD_cwksp* ws,
                         const ZSTD_CCtx_params* params,
                         const void* dict, size_t dictSize,
                               ZSTD_dictContentType_e dictContentType,
                               ZSTD_dictTableLoadMethod_e dtlm,
                               void* workspace)
{
    DEBUGLOG(4, "ZSTD_compress_insertDictionary (dictSize=%u)", (U32)dictSize);
    if ((dict==NULL) || (dictSize<8)) {
        RETURN_ERROR_IF(dictContentType == ZSTD_dct_fullDict, dictionary_wrong, "");
        return 0;
    }

    ZSTD_reset_compressedBlockState(bs);

    /* dict restricted modes */
    if (dictContentType == ZSTD_dct_rawContent)
        return ZSTD_loadDictionaryContent(ms, ls, ws, params, dict, dictSize, dtlm);

    if (MEM_readLE32(dict) != ZSTD_MAGIC_DICTIONARY) {
        if (dictContentType == ZSTD_dct_auto) {
            DEBUGLOG(4, "raw content dictionary detected");
            return ZSTD_loadDictionaryContent(
                ms, ls, ws, params, dict, dictSize, dtlm);
        }
        RETURN_ERROR_IF(dictContentType == ZSTD_dct_fullDict, dictionary_wrong, "");
        assert(0);   /* impossible */
    }

    /* dict as full zstd dictionary */
    return
ZSTD_loadZstdDictionary( bs, ms, ws, params, dict, dictSize, dtlm, workspace); } #define ZSTD_USE_CDICT_PARAMS_SRCSIZE_CUTOFF (128 KB) #define ZSTD_USE_CDICT_PARAMS_DICTSIZE_MULTIPLIER (6) /*! ZSTD_compressBegin_internal() : * @return : 0, or an error code */ static size_t ZSTD_compressBegin_internal(ZSTD_CCtx* cctx, const void* dict, size_t dictSize, ZSTD_dictContentType_e dictContentType, ZSTD_dictTableLoadMethod_e dtlm, const ZSTD_CDict* cdict, const ZSTD_CCtx_params* params, U64 pledgedSrcSize, ZSTD_buffered_policy_e zbuff) { DEBUGLOG(4, "ZSTD_compressBegin_internal: wlog=%u", params->cParams.windowLog); /* params are supposed to be fully validated at this point */ assert(!ZSTD_isError(ZSTD_checkCParams(params->cParams))); assert(!((dict) && (cdict))); /* either dict or cdict, not both */ if ( (cdict) && (cdict->dictContentSize > 0) && ( pledgedSrcSize < ZSTD_USE_CDICT_PARAMS_SRCSIZE_CUTOFF || pledgedSrcSize < cdict->dictContentSize * ZSTD_USE_CDICT_PARAMS_DICTSIZE_MULTIPLIER || pledgedSrcSize == ZSTD_CONTENTSIZE_UNKNOWN || cdict->compressionLevel == 0) && (params->attachDictPref != ZSTD_dictForceLoad) ) { return ZSTD_resetCCtx_usingCDict(cctx, cdict, params, pledgedSrcSize, zbuff); } FORWARD_IF_ERROR( ZSTD_resetCCtx_internal(cctx, *params, pledgedSrcSize, ZSTDcrp_makeClean, zbuff) , ""); { size_t const dictID = cdict ? ZSTD_compress_insertDictionary( cctx->blockState.prevCBlock, &cctx->blockState.matchState, &cctx->ldmState, &cctx->workspace, &cctx->appliedParams, cdict->dictContent, cdict->dictContentSize, dictContentType, dtlm, cctx->entropyWorkspace) : ZSTD_compress_insertDictionary( cctx->blockState.prevCBlock, &cctx->blockState.matchState, &cctx->ldmState, &cctx->workspace, &cctx->appliedParams, dict, dictSize, dictContentType, dtlm, cctx->entropyWorkspace); FORWARD_IF_ERROR(dictID, "ZSTD_compress_insertDictionary failed"); assert(dictID <= UINT_MAX); cctx->dictID = (U32)dictID; } return 0; } size_t ZSTD_compressBegin_advanced_internal(ZSTD_CCtx* cctx, const void* dict, size_t dictSize, ZSTD_dictContentType_e dictContentType, ZSTD_dictTableLoadMethod_e dtlm, const ZSTD_CDict* cdict, const ZSTD_CCtx_params* params, unsigned long long pledgedSrcSize) { DEBUGLOG(4, "ZSTD_compressBegin_advanced_internal: wlog=%u", params->cParams.windowLog); /* compression parameters verification and optimization */ FORWARD_IF_ERROR( ZSTD_checkCParams(params->cParams) , ""); return ZSTD_compressBegin_internal(cctx, dict, dictSize, dictContentType, dtlm, cdict, params, pledgedSrcSize, ZSTDb_not_buffered); } /*! 
ZSTD_compressBegin_advanced() : * @return : 0, or an error code */ size_t ZSTD_compressBegin_advanced(ZSTD_CCtx* cctx, const void* dict, size_t dictSize, ZSTD_parameters params, unsigned long long pledgedSrcSize) { ZSTD_CCtx_params const cctxParams = ZSTD_assignParamsToCCtxParams(&cctx->requestedParams, ¶ms); return ZSTD_compressBegin_advanced_internal(cctx, dict, dictSize, ZSTD_dct_auto, ZSTD_dtlm_fast, NULL /*cdict*/, &cctxParams, pledgedSrcSize); } size_t ZSTD_compressBegin_usingDict(ZSTD_CCtx* cctx, const void* dict, size_t dictSize, int compressionLevel) { ZSTD_parameters const params = ZSTD_getParams_internal(compressionLevel, ZSTD_CONTENTSIZE_UNKNOWN, dictSize); ZSTD_CCtx_params const cctxParams = ZSTD_assignParamsToCCtxParams(&cctx->requestedParams, ¶ms); DEBUGLOG(4, "ZSTD_compressBegin_usingDict (dictSize=%u)", (unsigned)dictSize); return ZSTD_compressBegin_internal(cctx, dict, dictSize, ZSTD_dct_auto, ZSTD_dtlm_fast, NULL, &cctxParams, ZSTD_CONTENTSIZE_UNKNOWN, ZSTDb_not_buffered); } size_t ZSTD_compressBegin(ZSTD_CCtx* cctx, int compressionLevel) { return ZSTD_compressBegin_usingDict(cctx, NULL, 0, compressionLevel); } /*! ZSTD_writeEpilogue() : * Ends a frame. * @return : nb of bytes written into dst (or an error code) */ static size_t ZSTD_writeEpilogue(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity) { BYTE* const ostart = (BYTE*)dst; BYTE* op = ostart; size_t fhSize = 0; DEBUGLOG(4, "ZSTD_writeEpilogue"); RETURN_ERROR_IF(cctx->stage == ZSTDcs_created, stage_wrong, "init missing"); /* special case : empty frame */ if (cctx->stage == ZSTDcs_init) { fhSize = ZSTD_writeFrameHeader(dst, dstCapacity, &cctx->appliedParams, 0, 0); FORWARD_IF_ERROR(fhSize, "ZSTD_writeFrameHeader failed"); dstCapacity -= fhSize; op += fhSize; cctx->stage = ZSTDcs_ongoing; } if (cctx->stage != ZSTDcs_ending) { /* write one last empty block, make it the "last" block */ U32 const cBlockHeader24 = 1 /* last block */ + (((U32)bt_raw)<<1) + 0; RETURN_ERROR_IF(dstCapacity<4, dstSize_tooSmall, "no room for epilogue"); MEM_writeLE32(op, cBlockHeader24); op += ZSTD_blockHeaderSize; dstCapacity -= ZSTD_blockHeaderSize; } if (cctx->appliedParams.fParams.checksumFlag) { U32 const checksum = (U32) XXH64_digest(&cctx->xxhState); RETURN_ERROR_IF(dstCapacity<4, dstSize_tooSmall, "no room for checksum"); DEBUGLOG(4, "ZSTD_writeEpilogue: write checksum : %08X", (unsigned)checksum); MEM_writeLE32(op, checksum); op += 4; } cctx->stage = ZSTDcs_created; /* return to "created but no init" status */ return op-ostart; } size_t ZSTD_compressEnd (ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize) { size_t endResult; size_t const cSize = ZSTD_compressContinue_internal(cctx, dst, dstCapacity, src, srcSize, 1 /* frame mode */, 1 /* last chunk */); FORWARD_IF_ERROR(cSize, "ZSTD_compressContinue_internal failed"); endResult = ZSTD_writeEpilogue(cctx, (char*)dst + cSize, dstCapacity-cSize); FORWARD_IF_ERROR(endResult, "ZSTD_writeEpilogue failed"); assert(!(cctx->appliedParams.fParams.contentSizeFlag && cctx->pledgedSrcSizePlusOne == 0)); if (cctx->pledgedSrcSizePlusOne != 0) { /* control src size */ ZSTD_STATIC_ASSERT(ZSTD_CONTENTSIZE_UNKNOWN == (unsigned long long)-1); DEBUGLOG(4, "end of frame : controlling src size"); RETURN_ERROR_IF( cctx->pledgedSrcSizePlusOne != cctx->consumedSrcSize+1, srcSize_wrong, "error : pledgedSrcSize = %u, while realSrcSize = %u", (unsigned)cctx->pledgedSrcSizePlusOne-1, (unsigned)cctx->consumedSrcSize); } return cSize + endResult; } static size_t ZSTD_compress_internal 
(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize, const void* dict,size_t dictSize, const ZSTD_parameters* params) { ZSTD_CCtx_params const cctxParams = ZSTD_assignParamsToCCtxParams(&cctx->requestedParams, params); DEBUGLOG(4, "ZSTD_compress_internal"); return ZSTD_compress_advanced_internal(cctx, dst, dstCapacity, src, srcSize, dict, dictSize, &cctxParams); } size_t ZSTD_compress_advanced (ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize, const void* dict,size_t dictSize, ZSTD_parameters params) { DEBUGLOG(4, "ZSTD_compress_advanced"); FORWARD_IF_ERROR(ZSTD_checkCParams(params.cParams), ""); return ZSTD_compress_internal(cctx, dst, dstCapacity, src, srcSize, dict, dictSize, ¶ms); } /* Internal */ size_t ZSTD_compress_advanced_internal( ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize, const void* dict,size_t dictSize, const ZSTD_CCtx_params* params) { DEBUGLOG(4, "ZSTD_compress_advanced_internal (srcSize:%u)", (unsigned)srcSize); FORWARD_IF_ERROR( ZSTD_compressBegin_internal(cctx, dict, dictSize, ZSTD_dct_auto, ZSTD_dtlm_fast, NULL, params, srcSize, ZSTDb_not_buffered) , ""); return ZSTD_compressEnd(cctx, dst, dstCapacity, src, srcSize); } size_t ZSTD_compress_usingDict(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize, const void* dict, size_t dictSize, int compressionLevel) { ZSTD_parameters const params = ZSTD_getParams_internal(compressionLevel, srcSize, dict ? dictSize : 0); ZSTD_CCtx_params cctxParams = ZSTD_assignParamsToCCtxParams(&cctx->requestedParams, ¶ms); DEBUGLOG(4, "ZSTD_compress_usingDict (srcSize=%u)", (unsigned)srcSize); assert(params.fParams.contentSizeFlag == 1); return ZSTD_compress_advanced_internal(cctx, dst, dstCapacity, src, srcSize, dict, dictSize, &cctxParams); } size_t ZSTD_compressCCtx(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize, int compressionLevel) { DEBUGLOG(4, "ZSTD_compressCCtx (srcSize=%u)", (unsigned)srcSize); assert(cctx != NULL); return ZSTD_compress_usingDict(cctx, dst, dstCapacity, src, srcSize, NULL, 0, compressionLevel); } size_t ZSTD_compress(void* dst, size_t dstCapacity, const void* src, size_t srcSize, int compressionLevel) { size_t result; ZSTD_CCtx ctxBody; ZSTD_initCCtx(&ctxBody, ZSTD_defaultCMem); result = ZSTD_compressCCtx(&ctxBody, dst, dstCapacity, src, srcSize, compressionLevel); ZSTD_freeCCtxContent(&ctxBody); /* can't free ctxBody itself, as it's on stack; free only heap content */ return result; } /* ===== Dictionary API ===== */ /*! ZSTD_estimateCDictSize_advanced() : * Estimate amount of memory that will be needed to create a dictionary with following arguments */ size_t ZSTD_estimateCDictSize_advanced( size_t dictSize, ZSTD_compressionParameters cParams, ZSTD_dictLoadMethod_e dictLoadMethod) { DEBUGLOG(5, "sizeof(ZSTD_CDict) : %u", (unsigned)sizeof(ZSTD_CDict)); return ZSTD_cwksp_alloc_size(sizeof(ZSTD_CDict)) + ZSTD_cwksp_alloc_size(HUF_WORKSPACE_SIZE) + ZSTD_sizeof_matchState(&cParams, /* forCCtx */ 0) + (dictLoadMethod == ZSTD_dlm_byRef ? 
0 : ZSTD_cwksp_alloc_size(ZSTD_cwksp_align(dictSize, sizeof(void *)))); } size_t ZSTD_estimateCDictSize(size_t dictSize, int compressionLevel) { ZSTD_compressionParameters const cParams = ZSTD_getCParams_internal(compressionLevel, ZSTD_CONTENTSIZE_UNKNOWN, dictSize); return ZSTD_estimateCDictSize_advanced(dictSize, cParams, ZSTD_dlm_byCopy); } size_t ZSTD_sizeof_CDict(const ZSTD_CDict* cdict) { if (cdict==NULL) return 0; /* support sizeof on NULL */ DEBUGLOG(5, "sizeof(*cdict) : %u", (unsigned)sizeof(*cdict)); /* cdict may be in the workspace */ return (cdict->workspace.workspace == cdict ? 0 : sizeof(*cdict)) + ZSTD_cwksp_sizeof(&cdict->workspace); } static size_t ZSTD_initCDict_internal( ZSTD_CDict* cdict, const void* dictBuffer, size_t dictSize, ZSTD_dictLoadMethod_e dictLoadMethod, ZSTD_dictContentType_e dictContentType, ZSTD_compressionParameters cParams) { DEBUGLOG(3, "ZSTD_initCDict_internal (dictContentType:%u)", (unsigned)dictContentType); assert(!ZSTD_checkCParams(cParams)); cdict->matchState.cParams = cParams; if ((dictLoadMethod == ZSTD_dlm_byRef) || (!dictBuffer) || (!dictSize)) { cdict->dictContent = dictBuffer; } else { void *internalBuffer = ZSTD_cwksp_reserve_object(&cdict->workspace, ZSTD_cwksp_align(dictSize, sizeof(void*))); RETURN_ERROR_IF(!internalBuffer, memory_allocation, "NULL pointer!"); cdict->dictContent = internalBuffer; memcpy(internalBuffer, dictBuffer, dictSize); } cdict->dictContentSize = dictSize; cdict->entropyWorkspace = (U32*)ZSTD_cwksp_reserve_object(&cdict->workspace, HUF_WORKSPACE_SIZE); /* Reset the state to no dictionary */ ZSTD_reset_compressedBlockState(&cdict->cBlockState); FORWARD_IF_ERROR(ZSTD_reset_matchState( &cdict->matchState, &cdict->workspace, &cParams, ZSTDcrp_makeClean, ZSTDirp_reset, ZSTD_resetTarget_CDict), ""); /* (Maybe) load the dictionary * Skips loading the dictionary if it is < 8 bytes. */ { ZSTD_CCtx_params params; memset(¶ms, 0, sizeof(params)); params.compressionLevel = ZSTD_CLEVEL_DEFAULT; params.fParams.contentSizeFlag = 1; params.cParams = cParams; { size_t const dictID = ZSTD_compress_insertDictionary( &cdict->cBlockState, &cdict->matchState, NULL, &cdict->workspace, ¶ms, cdict->dictContent, cdict->dictContentSize, dictContentType, ZSTD_dtlm_full, cdict->entropyWorkspace); FORWARD_IF_ERROR(dictID, "ZSTD_compress_insertDictionary failed"); assert(dictID <= (size_t)(U32)-1); cdict->dictID = (U32)dictID; } } return 0; } ZSTD_CDict* ZSTD_createCDict_advanced(const void* dictBuffer, size_t dictSize, ZSTD_dictLoadMethod_e dictLoadMethod, ZSTD_dictContentType_e dictContentType, ZSTD_compressionParameters cParams, ZSTD_customMem customMem) { DEBUGLOG(3, "ZSTD_createCDict_advanced, mode %u", (unsigned)dictContentType); if (!customMem.customAlloc ^ !customMem.customFree) return NULL; { size_t const workspaceSize = ZSTD_cwksp_alloc_size(sizeof(ZSTD_CDict)) + ZSTD_cwksp_alloc_size(HUF_WORKSPACE_SIZE) + ZSTD_sizeof_matchState(&cParams, /* forCCtx */ 0) + (dictLoadMethod == ZSTD_dlm_byRef ? 
0 : ZSTD_cwksp_alloc_size(ZSTD_cwksp_align(dictSize, sizeof(void*)))); void* const workspace = ZSTD_malloc(workspaceSize, customMem); ZSTD_cwksp ws; ZSTD_CDict* cdict; if (!workspace) { ZSTD_free(workspace, customMem); return NULL; } ZSTD_cwksp_init(&ws, workspace, workspaceSize); cdict = (ZSTD_CDict*)ZSTD_cwksp_reserve_object(&ws, sizeof(ZSTD_CDict)); assert(cdict != NULL); ZSTD_cwksp_move(&cdict->workspace, &ws); cdict->customMem = customMem; cdict->compressionLevel = 0; /* signals advanced API usage */ if (ZSTD_isError( ZSTD_initCDict_internal(cdict, dictBuffer, dictSize, dictLoadMethod, dictContentType, cParams) )) { ZSTD_freeCDict(cdict); return NULL; } return cdict; } } ZSTD_CDict* ZSTD_createCDict(const void* dict, size_t dictSize, int compressionLevel) { ZSTD_compressionParameters cParams = ZSTD_getCParams_internal(compressionLevel, ZSTD_CONTENTSIZE_UNKNOWN, dictSize); ZSTD_CDict* cdict = ZSTD_createCDict_advanced(dict, dictSize, ZSTD_dlm_byCopy, ZSTD_dct_auto, cParams, ZSTD_defaultCMem); if (cdict) cdict->compressionLevel = compressionLevel == 0 ? ZSTD_CLEVEL_DEFAULT : compressionLevel; return cdict; } ZSTD_CDict* ZSTD_createCDict_byReference(const void* dict, size_t dictSize, int compressionLevel) { ZSTD_compressionParameters cParams = ZSTD_getCParams_internal(compressionLevel, ZSTD_CONTENTSIZE_UNKNOWN, dictSize); return ZSTD_createCDict_advanced(dict, dictSize, ZSTD_dlm_byRef, ZSTD_dct_auto, cParams, ZSTD_defaultCMem); } size_t ZSTD_freeCDict(ZSTD_CDict* cdict) { if (cdict==NULL) return 0; /* support free on NULL */ { ZSTD_customMem const cMem = cdict->customMem; int cdictInWorkspace = ZSTD_cwksp_owns_buffer(&cdict->workspace, cdict); ZSTD_cwksp_free(&cdict->workspace, cMem); if (!cdictInWorkspace) { ZSTD_free(cdict, cMem); } return 0; } } /*! ZSTD_initStaticCDict_advanced() : * Generate a digested dictionary in provided memory area. * workspace: The memory area to emplace the dictionary into. * Provided pointer must 8-bytes aligned. * It must outlive dictionary usage. * workspaceSize: Use ZSTD_estimateCDictSize() * to determine how large workspace must be. * cParams : use ZSTD_getCParams() to transform a compression level * into its relevants cParams. * @return : pointer to ZSTD_CDict*, or NULL if error (size too small) * Note : there is no corresponding "free" function. * Since workspace was allocated externally, it must be freed externally. */ const ZSTD_CDict* ZSTD_initStaticCDict( void* workspace, size_t workspaceSize, const void* dict, size_t dictSize, ZSTD_dictLoadMethod_e dictLoadMethod, ZSTD_dictContentType_e dictContentType, ZSTD_compressionParameters cParams) { size_t const matchStateSize = ZSTD_sizeof_matchState(&cParams, /* forCCtx */ 0); size_t const neededSize = ZSTD_cwksp_alloc_size(sizeof(ZSTD_CDict)) + (dictLoadMethod == ZSTD_dlm_byRef ? 
0 : ZSTD_cwksp_alloc_size(ZSTD_cwksp_align(dictSize, sizeof(void*)))) + ZSTD_cwksp_alloc_size(HUF_WORKSPACE_SIZE) + matchStateSize; ZSTD_CDict* cdict; if ((size_t)workspace & 7) return NULL; /* 8-aligned */ { ZSTD_cwksp ws; ZSTD_cwksp_init(&ws, workspace, workspaceSize); cdict = (ZSTD_CDict*)ZSTD_cwksp_reserve_object(&ws, sizeof(ZSTD_CDict)); if (cdict == NULL) return NULL; ZSTD_cwksp_move(&cdict->workspace, &ws); } DEBUGLOG(4, "(workspaceSize < neededSize) : (%u < %u) => %u", (unsigned)workspaceSize, (unsigned)neededSize, (unsigned)(workspaceSize < neededSize)); if (workspaceSize < neededSize) return NULL; if (ZSTD_isError( ZSTD_initCDict_internal(cdict, dict, dictSize, dictLoadMethod, dictContentType, cParams) )) return NULL; return cdict; } ZSTD_compressionParameters ZSTD_getCParamsFromCDict(const ZSTD_CDict* cdict) { assert(cdict != NULL); return cdict->matchState.cParams; } /* ZSTD_compressBegin_usingCDict_advanced() : * cdict must be != NULL */ size_t ZSTD_compressBegin_usingCDict_advanced( ZSTD_CCtx* const cctx, const ZSTD_CDict* const cdict, ZSTD_frameParameters const fParams, unsigned long long const pledgedSrcSize) { DEBUGLOG(4, "ZSTD_compressBegin_usingCDict_advanced"); RETURN_ERROR_IF(cdict==NULL, dictionary_wrong, "NULL pointer!"); { ZSTD_CCtx_params params = cctx->requestedParams; params.cParams = ( pledgedSrcSize < ZSTD_USE_CDICT_PARAMS_SRCSIZE_CUTOFF || pledgedSrcSize < cdict->dictContentSize * ZSTD_USE_CDICT_PARAMS_DICTSIZE_MULTIPLIER || pledgedSrcSize == ZSTD_CONTENTSIZE_UNKNOWN || cdict->compressionLevel == 0 ) && (params.attachDictPref != ZSTD_dictForceLoad) ? ZSTD_getCParamsFromCDict(cdict) : ZSTD_getCParams(cdict->compressionLevel, pledgedSrcSize, cdict->dictContentSize); /* Increase window log to fit the entire dictionary and source if the * source size is known. Limit the increase to 19, which is the * window log for compression level 1 with the largest source size. */ if (pledgedSrcSize != ZSTD_CONTENTSIZE_UNKNOWN) { U32 const limitedSrcSize = (U32)MIN(pledgedSrcSize, 1U << 19); U32 const limitedSrcLog = limitedSrcSize > 1 ? ZSTD_highbit32(limitedSrcSize - 1) + 1 : 1; params.cParams.windowLog = MAX(params.cParams.windowLog, limitedSrcLog); } params.fParams = fParams; return ZSTD_compressBegin_internal(cctx, NULL, 0, ZSTD_dct_auto, ZSTD_dtlm_fast, cdict, ¶ms, pledgedSrcSize, ZSTDb_not_buffered); } } /* ZSTD_compressBegin_usingCDict() : * pledgedSrcSize=0 means "unknown" * if pledgedSrcSize>0, it will enable contentSizeFlag */ size_t ZSTD_compressBegin_usingCDict(ZSTD_CCtx* cctx, const ZSTD_CDict* cdict) { ZSTD_frameParameters const fParams = { 0 /*content*/, 0 /*checksum*/, 0 /*noDictID*/ }; DEBUGLOG(4, "ZSTD_compressBegin_usingCDict : dictIDFlag == %u", !fParams.noDictIDFlag); return ZSTD_compressBegin_usingCDict_advanced(cctx, cdict, fParams, ZSTD_CONTENTSIZE_UNKNOWN); } size_t ZSTD_compress_usingCDict_advanced(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize, const ZSTD_CDict* cdict, ZSTD_frameParameters fParams) { FORWARD_IF_ERROR(ZSTD_compressBegin_usingCDict_advanced(cctx, cdict, fParams, srcSize), ""); /* will check if cdict != NULL */ return ZSTD_compressEnd(cctx, dst, dstCapacity, src, srcSize); } /*! ZSTD_compress_usingCDict() : * Compression using a digested Dictionary. * Faster startup than ZSTD_compress_usingDict(), recommended when same dictionary is used multiple times. 
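 *  A rough usage sketch (buffer names are placeholders; error handling omitted) :
 *      ZSTD_CDict* const cdict = ZSTD_createCDict(dictBuffer, dictSize, level);
 *      then, for each document : ZSTD_compress_usingCDict(cctx, dst, dstCapacity, src, srcSize, cdict);
 *      ZSTD_freeCDict(cdict);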
* Note that compression parameters are decided at CDict creation time * while frame parameters are hardcoded */ size_t ZSTD_compress_usingCDict(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize, const ZSTD_CDict* cdict) { ZSTD_frameParameters const fParams = { 1 /*content*/, 0 /*checksum*/, 0 /*noDictID*/ }; return ZSTD_compress_usingCDict_advanced(cctx, dst, dstCapacity, src, srcSize, cdict, fParams); } /* ****************************************************************** * Streaming ********************************************************************/ ZSTD_CStream* ZSTD_createCStream(void) { DEBUGLOG(3, "ZSTD_createCStream"); return ZSTD_createCStream_advanced(ZSTD_defaultCMem); } ZSTD_CStream* ZSTD_initStaticCStream(void *workspace, size_t workspaceSize) { return ZSTD_initStaticCCtx(workspace, workspaceSize); } ZSTD_CStream* ZSTD_createCStream_advanced(ZSTD_customMem customMem) { /* CStream and CCtx are now same object */ return ZSTD_createCCtx_advanced(customMem); } size_t ZSTD_freeCStream(ZSTD_CStream* zcs) { return ZSTD_freeCCtx(zcs); /* same object */ } /*====== Initialization ======*/ size_t ZSTD_CStreamInSize(void) { return ZSTD_BLOCKSIZE_MAX; } size_t ZSTD_CStreamOutSize(void) { return ZSTD_compressBound(ZSTD_BLOCKSIZE_MAX) + ZSTD_blockHeaderSize + 4 /* 32-bits hash */ ; } static size_t ZSTD_resetCStream_internal(ZSTD_CStream* cctx, const void* const dict, size_t const dictSize, ZSTD_dictContentType_e const dictContentType, const ZSTD_CDict* const cdict, ZSTD_CCtx_params params, unsigned long long const pledgedSrcSize) { DEBUGLOG(4, "ZSTD_resetCStream_internal"); /* Finalize the compression parameters */ params.cParams = ZSTD_getCParamsFromCCtxParams(¶ms, pledgedSrcSize, dictSize); /* params are supposed to be fully validated at this point */ assert(!ZSTD_isError(ZSTD_checkCParams(params.cParams))); assert(!((dict) && (cdict))); /* either dict or cdict, not both */ FORWARD_IF_ERROR( ZSTD_compressBegin_internal(cctx, dict, dictSize, dictContentType, ZSTD_dtlm_fast, cdict, ¶ms, pledgedSrcSize, ZSTDb_buffered) , ""); cctx->inToCompress = 0; cctx->inBuffPos = 0; cctx->inBuffTarget = cctx->blockSize + (cctx->blockSize == pledgedSrcSize); /* for small input: avoid automatic flush on reaching end of block, since it would require to add a 3-bytes null block to end frame */ cctx->outBuffContentSize = cctx->outBuffFlushedSize = 0; cctx->streamStage = zcss_load; cctx->frameEnded = 0; return 0; /* ready to go */ } /* ZSTD_resetCStream(): * pledgedSrcSize == 0 means "unknown" */ size_t ZSTD_resetCStream(ZSTD_CStream* zcs, unsigned long long pss) { /* temporary : 0 interpreted as "unknown" during transition period. * Users willing to specify "unknown" **must** use ZSTD_CONTENTSIZE_UNKNOWN. * 0 will be interpreted as "empty" in the future. */ U64 const pledgedSrcSize = (pss==0) ? ZSTD_CONTENTSIZE_UNKNOWN : pss; DEBUGLOG(4, "ZSTD_resetCStream: pledgedSrcSize = %u", (unsigned)pledgedSrcSize); FORWARD_IF_ERROR( ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only) , ""); FORWARD_IF_ERROR( ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize) , ""); return 0; } /*! ZSTD_initCStream_internal() : * Note : for lib/compress only. Used by zstdmt_compress.c. 
* Assumption 1 : params are valid * Assumption 2 : either dict, or cdict, is defined, not both */ size_t ZSTD_initCStream_internal(ZSTD_CStream* zcs, const void* dict, size_t dictSize, const ZSTD_CDict* cdict, const ZSTD_CCtx_params* params, unsigned long long pledgedSrcSize) { DEBUGLOG(4, "ZSTD_initCStream_internal"); FORWARD_IF_ERROR( ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only) , ""); FORWARD_IF_ERROR( ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize) , ""); assert(!ZSTD_isError(ZSTD_checkCParams(params->cParams))); zcs->requestedParams = *params; assert(!((dict) && (cdict))); /* either dict or cdict, not both */ if (dict) { FORWARD_IF_ERROR( ZSTD_CCtx_loadDictionary(zcs, dict, dictSize) , ""); } else { /* Dictionary is cleared if !cdict */ FORWARD_IF_ERROR( ZSTD_CCtx_refCDict(zcs, cdict) , ""); } return 0; } /* ZSTD_initCStream_usingCDict_advanced() : * same as ZSTD_initCStream_usingCDict(), with control over frame parameters */ size_t ZSTD_initCStream_usingCDict_advanced(ZSTD_CStream* zcs, const ZSTD_CDict* cdict, ZSTD_frameParameters fParams, unsigned long long pledgedSrcSize) { DEBUGLOG(4, "ZSTD_initCStream_usingCDict_advanced"); FORWARD_IF_ERROR( ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only) , ""); FORWARD_IF_ERROR( ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize) , ""); zcs->requestedParams.fParams = fParams; FORWARD_IF_ERROR( ZSTD_CCtx_refCDict(zcs, cdict) , ""); return 0; } /* note : cdict must outlive compression session */ size_t ZSTD_initCStream_usingCDict(ZSTD_CStream* zcs, const ZSTD_CDict* cdict) { DEBUGLOG(4, "ZSTD_initCStream_usingCDict"); FORWARD_IF_ERROR( ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only) , ""); FORWARD_IF_ERROR( ZSTD_CCtx_refCDict(zcs, cdict) , ""); return 0; } /* ZSTD_initCStream_advanced() : * pledgedSrcSize must be exact. * if srcSize is not known at init time, use value ZSTD_CONTENTSIZE_UNKNOWN. * dict is loaded with default parameters ZSTD_dct_auto and ZSTD_dlm_byCopy. */ size_t ZSTD_initCStream_advanced(ZSTD_CStream* zcs, const void* dict, size_t dictSize, ZSTD_parameters params, unsigned long long pss) { /* for compatibility with older programs relying on this behavior. * Users should now specify ZSTD_CONTENTSIZE_UNKNOWN. * This line will be removed in the future. */ U64 const pledgedSrcSize = (pss==0 && params.fParams.contentSizeFlag==0) ? ZSTD_CONTENTSIZE_UNKNOWN : pss; DEBUGLOG(4, "ZSTD_initCStream_advanced"); FORWARD_IF_ERROR( ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only) , ""); FORWARD_IF_ERROR( ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize) , ""); FORWARD_IF_ERROR( ZSTD_checkCParams(params.cParams) , ""); zcs->requestedParams = ZSTD_assignParamsToCCtxParams(&zcs->requestedParams, ¶ms); FORWARD_IF_ERROR( ZSTD_CCtx_loadDictionary(zcs, dict, dictSize) , ""); return 0; } size_t ZSTD_initCStream_usingDict(ZSTD_CStream* zcs, const void* dict, size_t dictSize, int compressionLevel) { DEBUGLOG(4, "ZSTD_initCStream_usingDict"); FORWARD_IF_ERROR( ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only) , ""); FORWARD_IF_ERROR( ZSTD_CCtx_setParameter(zcs, ZSTD_c_compressionLevel, compressionLevel) , ""); FORWARD_IF_ERROR( ZSTD_CCtx_loadDictionary(zcs, dict, dictSize) , ""); return 0; } size_t ZSTD_initCStream_srcSize(ZSTD_CStream* zcs, int compressionLevel, unsigned long long pss) { /* temporary : 0 interpreted as "unknown" during transition period. * Users willing to specify "unknown" **must** use ZSTD_CONTENTSIZE_UNKNOWN. * 0 will be interpreted as "empty" in the future. */ U64 const pledgedSrcSize = (pss==0) ? 
ZSTD_CONTENTSIZE_UNKNOWN : pss; DEBUGLOG(4, "ZSTD_initCStream_srcSize"); FORWARD_IF_ERROR( ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only) , ""); FORWARD_IF_ERROR( ZSTD_CCtx_refCDict(zcs, NULL) , ""); FORWARD_IF_ERROR( ZSTD_CCtx_setParameter(zcs, ZSTD_c_compressionLevel, compressionLevel) , ""); FORWARD_IF_ERROR( ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize) , ""); return 0; } size_t ZSTD_initCStream(ZSTD_CStream* zcs, int compressionLevel) { DEBUGLOG(4, "ZSTD_initCStream"); FORWARD_IF_ERROR( ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only) , ""); FORWARD_IF_ERROR( ZSTD_CCtx_refCDict(zcs, NULL) , ""); FORWARD_IF_ERROR( ZSTD_CCtx_setParameter(zcs, ZSTD_c_compressionLevel, compressionLevel) , ""); return 0; } /*====== Compression ======*/ static size_t ZSTD_nextInputSizeHint(const ZSTD_CCtx* cctx) { size_t hintInSize = cctx->inBuffTarget - cctx->inBuffPos; if (hintInSize==0) hintInSize = cctx->blockSize; return hintInSize; } /** ZSTD_compressStream_generic(): * internal function for all *compressStream*() variants * non-static, because can be called from zstdmt_compress.c * @return : hint size for next input */ static size_t ZSTD_compressStream_generic(ZSTD_CStream* zcs, ZSTD_outBuffer* output, ZSTD_inBuffer* input, ZSTD_EndDirective const flushMode) { const char* const istart = (const char*)input->src; const char* const iend = input->size != 0 ? istart + input->size : istart; const char* ip = input->pos != 0 ? istart + input->pos : istart; char* const ostart = (char*)output->dst; char* const oend = output->size != 0 ? ostart + output->size : ostart; char* op = output->pos != 0 ? ostart + output->pos : ostart; U32 someMoreWork = 1; /* check expectations */ DEBUGLOG(5, "ZSTD_compressStream_generic, flush=%u", (unsigned)flushMode); assert(zcs->inBuff != NULL); assert(zcs->inBuffSize > 0); assert(zcs->outBuff != NULL); assert(zcs->outBuffSize > 0); assert(output->pos <= output->size); assert(input->pos <= input->size); while (someMoreWork) { switch(zcs->streamStage) { case zcss_init: RETURN_ERROR(init_missing, "call ZSTD_initCStream() first!"); case zcss_load: if ( (flushMode == ZSTD_e_end) && ((size_t)(oend-op) >= ZSTD_compressBound(iend-ip)) /* enough dstCapacity */ && (zcs->inBuffPos == 0) ) { /* shortcut to compression pass directly into output buffer */ size_t const cSize = ZSTD_compressEnd(zcs, op, oend-op, ip, iend-ip); DEBUGLOG(4, "ZSTD_compressEnd : cSize=%u", (unsigned)cSize); FORWARD_IF_ERROR(cSize, "ZSTD_compressEnd failed"); ip = iend; op += cSize; zcs->frameEnded = 1; ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only); someMoreWork = 0; break; } /* complete loading into inBuffer */ { size_t const toLoad = zcs->inBuffTarget - zcs->inBuffPos; size_t const loaded = ZSTD_limitCopy( zcs->inBuff + zcs->inBuffPos, toLoad, ip, iend-ip); zcs->inBuffPos += loaded; if (loaded != 0) ip += loaded; if ( (flushMode == ZSTD_e_continue) && (zcs->inBuffPos < zcs->inBuffTarget) ) { /* not enough input to fill full block : stop here */ someMoreWork = 0; break; } if ( (flushMode == ZSTD_e_flush) && (zcs->inBuffPos == zcs->inToCompress) ) { /* empty */ someMoreWork = 0; break; } } /* compress current block (note : this stage cannot be stopped in the middle) */ DEBUGLOG(5, "stream compression stage (flushMode==%u)", flushMode); { void* cDst; size_t cSize; size_t const iSize = zcs->inBuffPos - zcs->inToCompress; size_t oSize = oend-op; unsigned const lastBlock = (flushMode == ZSTD_e_end) && (ip==iend); if (oSize >= ZSTD_compressBound(iSize)) cDst = op; /* compress into output buffer, to skip flush stage */ else 
cDst = zcs->outBuff, oSize = zcs->outBuffSize; cSize = lastBlock ? ZSTD_compressEnd(zcs, cDst, oSize, zcs->inBuff + zcs->inToCompress, iSize) : ZSTD_compressContinue(zcs, cDst, oSize, zcs->inBuff + zcs->inToCompress, iSize); FORWARD_IF_ERROR(cSize, "%s", lastBlock ? "ZSTD_compressEnd failed" : "ZSTD_compressContinue failed"); zcs->frameEnded = lastBlock; /* prepare next block */ zcs->inBuffTarget = zcs->inBuffPos + zcs->blockSize; if (zcs->inBuffTarget > zcs->inBuffSize) zcs->inBuffPos = 0, zcs->inBuffTarget = zcs->blockSize; DEBUGLOG(5, "inBuffTarget:%u / inBuffSize:%u", (unsigned)zcs->inBuffTarget, (unsigned)zcs->inBuffSize); if (!lastBlock) assert(zcs->inBuffTarget <= zcs->inBuffSize); zcs->inToCompress = zcs->inBuffPos; if (cDst == op) { /* no need to flush */ op += cSize; if (zcs->frameEnded) { DEBUGLOG(5, "Frame completed directly in outBuffer"); someMoreWork = 0; ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only); } break; } zcs->outBuffContentSize = cSize; zcs->outBuffFlushedSize = 0; zcs->streamStage = zcss_flush; /* pass-through to flush stage */ } /* fall-through */ case zcss_flush: DEBUGLOG(5, "flush stage"); { size_t const toFlush = zcs->outBuffContentSize - zcs->outBuffFlushedSize; size_t const flushed = ZSTD_limitCopy(op, (size_t)(oend-op), zcs->outBuff + zcs->outBuffFlushedSize, toFlush); DEBUGLOG(5, "toFlush: %u into %u ==> flushed: %u", (unsigned)toFlush, (unsigned)(oend-op), (unsigned)flushed); if (flushed) op += flushed; zcs->outBuffFlushedSize += flushed; if (toFlush!=flushed) { /* flush not fully completed, presumably because dst is too small */ assert(op==oend); someMoreWork = 0; break; } zcs->outBuffContentSize = zcs->outBuffFlushedSize = 0; if (zcs->frameEnded) { DEBUGLOG(5, "Frame completed on flush"); someMoreWork = 0; ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only); break; } zcs->streamStage = zcss_load; break; } default: /* impossible */ assert(0); } } input->pos = ip - istart; output->pos = op - ostart; if (zcs->frameEnded) return 0; return ZSTD_nextInputSizeHint(zcs); } static size_t ZSTD_nextInputSizeHint_MTorST(const ZSTD_CCtx* cctx) { #ifdef ZSTD_MULTITHREAD if (cctx->appliedParams.nbWorkers >= 1) { assert(cctx->mtctx != NULL); return ZSTDMT_nextInputSizeHint(cctx->mtctx); } #endif return ZSTD_nextInputSizeHint(cctx); } size_t ZSTD_compressStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output, ZSTD_inBuffer* input) { FORWARD_IF_ERROR( ZSTD_compressStream2(zcs, output, input, ZSTD_e_continue) , ""); return ZSTD_nextInputSizeHint_MTorST(zcs); } size_t ZSTD_compressStream2( ZSTD_CCtx* cctx, ZSTD_outBuffer* output, ZSTD_inBuffer* input, ZSTD_EndDirective endOp) { DEBUGLOG(5, "ZSTD_compressStream2, endOp=%u ", (unsigned)endOp); /* check conditions */ RETURN_ERROR_IF(output->pos > output->size, GENERIC, "invalid buffer"); RETURN_ERROR_IF(input->pos > input->size, GENERIC, "invalid buffer"); assert(cctx!=NULL); /* transparent initialization stage */ if (cctx->streamStage == zcss_init) { ZSTD_CCtx_params params = cctx->requestedParams; ZSTD_prefixDict const prefixDict = cctx->prefixDict; FORWARD_IF_ERROR( ZSTD_initLocalDict(cctx) , ""); /* Init the local dict if present. 
*/ memset(&cctx->prefixDict, 0, sizeof(cctx->prefixDict)); /* single usage */ assert(prefixDict.dict==NULL || cctx->cdict==NULL); /* only one can be set */ DEBUGLOG(4, "ZSTD_compressStream2 : transparent init stage"); if (endOp == ZSTD_e_end) cctx->pledgedSrcSizePlusOne = input->size + 1; /* auto-fix pledgedSrcSize */ params.cParams = ZSTD_getCParamsFromCCtxParams( &cctx->requestedParams, cctx->pledgedSrcSizePlusOne-1, 0 /*dictSize*/); #ifdef ZSTD_MULTITHREAD if ((cctx->pledgedSrcSizePlusOne-1) <= ZSTDMT_JOBSIZE_MIN) { params.nbWorkers = 0; /* do not invoke multi-threading when src size is too small */ } if (params.nbWorkers > 0) { /* mt context creation */ if (cctx->mtctx == NULL) { DEBUGLOG(4, "ZSTD_compressStream2: creating new mtctx for nbWorkers=%u", params.nbWorkers); cctx->mtctx = ZSTDMT_createCCtx_advanced((U32)params.nbWorkers, cctx->customMem); RETURN_ERROR_IF(cctx->mtctx == NULL, memory_allocation, "NULL pointer!"); } /* mt compression */ DEBUGLOG(4, "call ZSTDMT_initCStream_internal as nbWorkers=%u", params.nbWorkers); FORWARD_IF_ERROR( ZSTDMT_initCStream_internal( cctx->mtctx, prefixDict.dict, prefixDict.dictSize, prefixDict.dictContentType, cctx->cdict, params, cctx->pledgedSrcSizePlusOne-1) , ""); cctx->streamStage = zcss_load; cctx->appliedParams.nbWorkers = params.nbWorkers; } else #endif { FORWARD_IF_ERROR( ZSTD_resetCStream_internal(cctx, prefixDict.dict, prefixDict.dictSize, prefixDict.dictContentType, cctx->cdict, params, cctx->pledgedSrcSizePlusOne-1) , ""); assert(cctx->streamStage == zcss_load); assert(cctx->appliedParams.nbWorkers == 0); } } /* end of transparent initialization stage */ /* compression stage */ #ifdef ZSTD_MULTITHREAD if (cctx->appliedParams.nbWorkers > 0) { int const forceMaxProgress = (endOp == ZSTD_e_flush || endOp == ZSTD_e_end); size_t flushMin; assert(forceMaxProgress || endOp == ZSTD_e_continue /* Protection for a new flush type */); if (cctx->cParamsChanged) { ZSTDMT_updateCParams_whileCompressing(cctx->mtctx, &cctx->requestedParams); cctx->cParamsChanged = 0; } do { flushMin = ZSTDMT_compressStream_generic(cctx->mtctx, output, input, endOp); if ( ZSTD_isError(flushMin) || (endOp == ZSTD_e_end && flushMin == 0) ) { /* compression completed */ ZSTD_CCtx_reset(cctx, ZSTD_reset_session_only); } FORWARD_IF_ERROR(flushMin, "ZSTDMT_compressStream_generic failed"); } while (forceMaxProgress && flushMin != 0 && output->pos < output->size); DEBUGLOG(5, "completed ZSTD_compressStream2 delegating to ZSTDMT_compressStream_generic"); /* Either we don't require maximum forward progress, we've finished the * flush, or we are out of output space. 
*/ assert(!forceMaxProgress || flushMin == 0 || output->pos == output->size); return flushMin; } #endif FORWARD_IF_ERROR( ZSTD_compressStream_generic(cctx, output, input, endOp) , ""); DEBUGLOG(5, "completed ZSTD_compressStream2"); return cctx->outBuffContentSize - cctx->outBuffFlushedSize; /* remaining to flush */ } size_t ZSTD_compressStream2_simpleArgs ( ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, size_t* dstPos, const void* src, size_t srcSize, size_t* srcPos, ZSTD_EndDirective endOp) { ZSTD_outBuffer output = { dst, dstCapacity, *dstPos }; ZSTD_inBuffer input = { src, srcSize, *srcPos }; /* ZSTD_compressStream2() will check validity of dstPos and srcPos */ size_t const cErr = ZSTD_compressStream2(cctx, &output, &input, endOp); *dstPos = output.pos; *srcPos = input.pos; return cErr; } size_t ZSTD_compress2(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize) { DEBUGLOG(4, "ZSTD_compress2 (srcSize=%u)", (unsigned)srcSize); ZSTD_CCtx_reset(cctx, ZSTD_reset_session_only); { size_t oPos = 0; size_t iPos = 0; size_t const result = ZSTD_compressStream2_simpleArgs(cctx, dst, dstCapacity, &oPos, src, srcSize, &iPos, ZSTD_e_end); FORWARD_IF_ERROR(result, "ZSTD_compressStream2_simpleArgs failed"); if (result != 0) { /* compression not completed, due to lack of output space */ assert(oPos == dstCapacity); RETURN_ERROR(dstSize_tooSmall, ""); } assert(iPos == srcSize); /* all input is expected consumed */ return oPos; } } /*====== Finalize ======*/ /*! ZSTD_flushStream() : * @return : amount of data remaining to flush */ size_t ZSTD_flushStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output) { ZSTD_inBuffer input = { NULL, 0, 0 }; return ZSTD_compressStream2(zcs, output, &input, ZSTD_e_flush); } size_t ZSTD_endStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output) { ZSTD_inBuffer input = { NULL, 0, 0 }; size_t const remainingToFlush = ZSTD_compressStream2(zcs, output, &input, ZSTD_e_end); FORWARD_IF_ERROR( remainingToFlush , "ZSTD_compressStream2 failed"); if (zcs->appliedParams.nbWorkers > 0) return remainingToFlush; /* minimal estimation */ /* single thread mode : attempt to calculate remaining to flush more precisely */ { size_t const lastBlockSize = zcs->frameEnded ? 0 : ZSTD_BLOCKHEADERSIZE; size_t const checksumSize = (size_t)(zcs->frameEnded ? 
0 : zcs->appliedParams.fParams.checksumFlag * 4); size_t const toFlush = remainingToFlush + lastBlockSize + checksumSize; DEBUGLOG(4, "ZSTD_endStream : remaining to flush : %u", (unsigned)toFlush); return toFlush; } } /*-===== Pre-defined compression levels =====-*/ #define ZSTD_MAX_CLEVEL 22 int ZSTD_maxCLevel(void) { return ZSTD_MAX_CLEVEL; } int ZSTD_minCLevel(void) { return (int)-ZSTD_TARGETLENGTH_MAX; } static const ZSTD_compressionParameters ZSTD_defaultCParameters[4][ZSTD_MAX_CLEVEL+1] = { { /* "default" - for any srcSize > 256 KB */ /* W, C, H, S, L, TL, strat */ { 19, 12, 13, 1, 6, 1, ZSTD_fast }, /* base for negative levels */ { 19, 13, 14, 1, 7, 0, ZSTD_fast }, /* level 1 */ { 20, 15, 16, 1, 6, 0, ZSTD_fast }, /* level 2 */ { 21, 16, 17, 1, 5, 0, ZSTD_dfast }, /* level 3 */ { 21, 18, 18, 1, 5, 0, ZSTD_dfast }, /* level 4 */ { 21, 18, 19, 2, 5, 2, ZSTD_greedy }, /* level 5 */ { 21, 19, 19, 3, 5, 4, ZSTD_greedy }, /* level 6 */ { 21, 19, 19, 3, 5, 8, ZSTD_lazy }, /* level 7 */ { 21, 19, 19, 3, 5, 16, ZSTD_lazy2 }, /* level 8 */ { 21, 19, 20, 4, 5, 16, ZSTD_lazy2 }, /* level 9 */ { 22, 20, 21, 4, 5, 16, ZSTD_lazy2 }, /* level 10 */ { 22, 21, 22, 4, 5, 16, ZSTD_lazy2 }, /* level 11 */ { 22, 21, 22, 5, 5, 16, ZSTD_lazy2 }, /* level 12 */ { 22, 21, 22, 5, 5, 32, ZSTD_btlazy2 }, /* level 13 */ { 22, 22, 23, 5, 5, 32, ZSTD_btlazy2 }, /* level 14 */ { 22, 23, 23, 6, 5, 32, ZSTD_btlazy2 }, /* level 15 */ { 22, 22, 22, 5, 5, 48, ZSTD_btopt }, /* level 16 */ { 23, 23, 22, 5, 4, 64, ZSTD_btopt }, /* level 17 */ { 23, 23, 22, 6, 3, 64, ZSTD_btultra }, /* level 18 */ { 23, 24, 22, 7, 3,256, ZSTD_btultra2}, /* level 19 */ { 25, 25, 23, 7, 3,256, ZSTD_btultra2}, /* level 20 */ { 26, 26, 24, 7, 3,512, ZSTD_btultra2}, /* level 21 */ { 27, 27, 25, 9, 3,999, ZSTD_btultra2}, /* level 22 */ }, { /* for srcSize <= 256 KB */ /* W, C, H, S, L, T, strat */ { 18, 12, 13, 1, 5, 1, ZSTD_fast }, /* base for negative levels */ { 18, 13, 14, 1, 6, 0, ZSTD_fast }, /* level 1 */ { 18, 14, 14, 1, 5, 0, ZSTD_dfast }, /* level 2 */ { 18, 16, 16, 1, 4, 0, ZSTD_dfast }, /* level 3 */ { 18, 16, 17, 2, 5, 2, ZSTD_greedy }, /* level 4.*/ { 18, 18, 18, 3, 5, 2, ZSTD_greedy }, /* level 5.*/ { 18, 18, 19, 3, 5, 4, ZSTD_lazy }, /* level 6.*/ { 18, 18, 19, 4, 4, 4, ZSTD_lazy }, /* level 7 */ { 18, 18, 19, 4, 4, 8, ZSTD_lazy2 }, /* level 8 */ { 18, 18, 19, 5, 4, 8, ZSTD_lazy2 }, /* level 9 */ { 18, 18, 19, 6, 4, 8, ZSTD_lazy2 }, /* level 10 */ { 18, 18, 19, 5, 4, 12, ZSTD_btlazy2 }, /* level 11.*/ { 18, 19, 19, 7, 4, 12, ZSTD_btlazy2 }, /* level 12.*/ { 18, 18, 19, 4, 4, 16, ZSTD_btopt }, /* level 13 */ { 18, 18, 19, 4, 3, 32, ZSTD_btopt }, /* level 14.*/ { 18, 18, 19, 6, 3,128, ZSTD_btopt }, /* level 15.*/ { 18, 19, 19, 6, 3,128, ZSTD_btultra }, /* level 16.*/ { 18, 19, 19, 8, 3,256, ZSTD_btultra }, /* level 17.*/ { 18, 19, 19, 6, 3,128, ZSTD_btultra2}, /* level 18.*/ { 18, 19, 19, 8, 3,256, ZSTD_btultra2}, /* level 19.*/ { 18, 19, 19, 10, 3,512, ZSTD_btultra2}, /* level 20.*/ { 18, 19, 19, 12, 3,512, ZSTD_btultra2}, /* level 21.*/ { 18, 19, 19, 13, 3,999, ZSTD_btultra2}, /* level 22.*/ }, { /* for srcSize <= 128 KB */ /* W, C, H, S, L, T, strat */ { 17, 12, 12, 1, 5, 1, ZSTD_fast }, /* base for negative levels */ { 17, 12, 13, 1, 6, 0, ZSTD_fast }, /* level 1 */ { 17, 13, 15, 1, 5, 0, ZSTD_fast }, /* level 2 */ { 17, 15, 16, 2, 5, 0, ZSTD_dfast }, /* level 3 */ { 17, 17, 17, 2, 4, 0, ZSTD_dfast }, /* level 4 */ { 17, 16, 17, 3, 4, 2, ZSTD_greedy }, /* level 5 */ { 17, 17, 17, 3, 4, 4, ZSTD_lazy }, /* level 6 */ { 17, 17, 17, 
3, 4, 8, ZSTD_lazy2 }, /* level 7 */ { 17, 17, 17, 4, 4, 8, ZSTD_lazy2 }, /* level 8 */ { 17, 17, 17, 5, 4, 8, ZSTD_lazy2 }, /* level 9 */ { 17, 17, 17, 6, 4, 8, ZSTD_lazy2 }, /* level 10 */ { 17, 17, 17, 5, 4, 8, ZSTD_btlazy2 }, /* level 11 */ { 17, 18, 17, 7, 4, 12, ZSTD_btlazy2 }, /* level 12 */ { 17, 18, 17, 3, 4, 12, ZSTD_btopt }, /* level 13.*/ { 17, 18, 17, 4, 3, 32, ZSTD_btopt }, /* level 14.*/ { 17, 18, 17, 6, 3,256, ZSTD_btopt }, /* level 15.*/ { 17, 18, 17, 6, 3,128, ZSTD_btultra }, /* level 16.*/ { 17, 18, 17, 8, 3,256, ZSTD_btultra }, /* level 17.*/ { 17, 18, 17, 10, 3,512, ZSTD_btultra }, /* level 18.*/ { 17, 18, 17, 5, 3,256, ZSTD_btultra2}, /* level 19.*/ { 17, 18, 17, 7, 3,512, ZSTD_btultra2}, /* level 20.*/ { 17, 18, 17, 9, 3,512, ZSTD_btultra2}, /* level 21.*/ { 17, 18, 17, 11, 3,999, ZSTD_btultra2}, /* level 22.*/ }, { /* for srcSize <= 16 KB */ /* W, C, H, S, L, T, strat */ { 14, 12, 13, 1, 5, 1, ZSTD_fast }, /* base for negative levels */ { 14, 14, 15, 1, 5, 0, ZSTD_fast }, /* level 1 */ { 14, 14, 15, 1, 4, 0, ZSTD_fast }, /* level 2 */ { 14, 14, 15, 2, 4, 0, ZSTD_dfast }, /* level 3 */ { 14, 14, 14, 4, 4, 2, ZSTD_greedy }, /* level 4 */ { 14, 14, 14, 3, 4, 4, ZSTD_lazy }, /* level 5.*/ { 14, 14, 14, 4, 4, 8, ZSTD_lazy2 }, /* level 6 */ { 14, 14, 14, 6, 4, 8, ZSTD_lazy2 }, /* level 7 */ { 14, 14, 14, 8, 4, 8, ZSTD_lazy2 }, /* level 8.*/ { 14, 15, 14, 5, 4, 8, ZSTD_btlazy2 }, /* level 9.*/ { 14, 15, 14, 9, 4, 8, ZSTD_btlazy2 }, /* level 10.*/ { 14, 15, 14, 3, 4, 12, ZSTD_btopt }, /* level 11.*/ { 14, 15, 14, 4, 3, 24, ZSTD_btopt }, /* level 12.*/ { 14, 15, 14, 5, 3, 32, ZSTD_btultra }, /* level 13.*/ { 14, 15, 15, 6, 3, 64, ZSTD_btultra }, /* level 14.*/ { 14, 15, 15, 7, 3,256, ZSTD_btultra }, /* level 15.*/ { 14, 15, 15, 5, 3, 48, ZSTD_btultra2}, /* level 16.*/ { 14, 15, 15, 6, 3,128, ZSTD_btultra2}, /* level 17.*/ { 14, 15, 15, 7, 3,256, ZSTD_btultra2}, /* level 18.*/ { 14, 15, 15, 8, 3,256, ZSTD_btultra2}, /* level 19.*/ { 14, 15, 15, 8, 3,512, ZSTD_btultra2}, /* level 20.*/ { 14, 15, 15, 9, 3,512, ZSTD_btultra2}, /* level 21.*/ { 14, 15, 15, 10, 3,999, ZSTD_btultra2}, /* level 22.*/ }, }; /*! ZSTD_getCParams_internal() : * @return ZSTD_compressionParameters structure for a selected compression level, srcSize and dictSize. * Note: srcSizeHint 0 means 0, use ZSTD_CONTENTSIZE_UNKNOWN for unknown. * Use dictSize == 0 for unknown or unused. */ static ZSTD_compressionParameters ZSTD_getCParams_internal(int compressionLevel, unsigned long long srcSizeHint, size_t dictSize) { int const unknown = srcSizeHint == ZSTD_CONTENTSIZE_UNKNOWN; size_t const addedSize = unknown && dictSize > 0 ? 500 : 0; U64 const rSize = unknown && dictSize == 0 ? ZSTD_CONTENTSIZE_UNKNOWN : srcSizeHint+dictSize+addedSize; U32 const tableID = (rSize <= 256 KB) + (rSize <= 128 KB) + (rSize <= 16 KB); int row = compressionLevel; DEBUGLOG(5, "ZSTD_getCParams_internal (cLevel=%i)", compressionLevel); if (compressionLevel == 0) row = ZSTD_CLEVEL_DEFAULT; /* 0 == default */ if (compressionLevel < 0) row = 0; /* entry 0 is baseline for fast mode */ if (compressionLevel > ZSTD_MAX_CLEVEL) row = ZSTD_MAX_CLEVEL; { ZSTD_compressionParameters cp = ZSTD_defaultCParameters[tableID][row]; if (compressionLevel < 0) cp.targetLength = (unsigned)(-compressionLevel); /* acceleration factor */ /* refine parameters based on srcSize & dictSize */ return ZSTD_adjustCParams_internal(cp, srcSizeHint, dictSize); } } /*! 
ZSTD_getCParams() : * @return ZSTD_compressionParameters structure for a selected compression level, srcSize and dictSize. * Size values are optional, provide 0 if not known or unused */ ZSTD_compressionParameters ZSTD_getCParams(int compressionLevel, unsigned long long srcSizeHint, size_t dictSize) { if (srcSizeHint == 0) srcSizeHint = ZSTD_CONTENTSIZE_UNKNOWN; return ZSTD_getCParams_internal(compressionLevel, srcSizeHint, dictSize); } /*! ZSTD_getParams() : * same idea as ZSTD_getCParams() * @return a `ZSTD_parameters` structure (instead of `ZSTD_compressionParameters`). * Fields of `ZSTD_frameParameters` are set to default values */ static ZSTD_parameters ZSTD_getParams_internal(int compressionLevel, unsigned long long srcSizeHint, size_t dictSize) { ZSTD_parameters params; ZSTD_compressionParameters const cParams = ZSTD_getCParams_internal(compressionLevel, srcSizeHint, dictSize); DEBUGLOG(5, "ZSTD_getParams (cLevel=%i)", compressionLevel); memset(¶ms, 0, sizeof(params)); params.cParams = cParams; params.fParams.contentSizeFlag = 1; return params; } /*! ZSTD_getParams() : * same idea as ZSTD_getCParams() * @return a `ZSTD_parameters` structure (instead of `ZSTD_compressionParameters`). * Fields of `ZSTD_frameParameters` are set to default values */ ZSTD_parameters ZSTD_getParams(int compressionLevel, unsigned long long srcSizeHint, size_t dictSize) { if (srcSizeHint == 0) srcSizeHint = ZSTD_CONTENTSIZE_UNKNOWN; return ZSTD_getParams_internal(compressionLevel, srcSizeHint, dictSize); } borgbackup-1.1.15/src/borg/algorithms/zstd/lib/compress/hist.h0000644000175000017500000000655513771325506024316 0ustar useruser00000000000000/* ****************************************************************** * hist : Histogram functions * part of Finite State Entropy project * Copyright (c) 2013-2020, Yann Collet, Facebook, Inc. * * You can contact the author at : * - FSE source repository : https://github.com/Cyan4973/FiniteStateEntropy * - Public forum : https://groups.google.com/forum/#!forum/lz4c * * This source code is licensed under both the BSD-style license (found in the * LICENSE file in the root directory of this source tree) and the GPLv2 (found * in the COPYING file in the root directory of this source tree). * You may select, at your option, one of the above-listed licenses. ****************************************************************** */ /* --- dependencies --- */ #include /* size_t */ /* --- simple histogram functions --- */ /*! HIST_count(): * Provides the precise count of each byte within a table 'count'. * 'count' is a table of unsigned int, of minimum size (*maxSymbolValuePtr+1). * Updates *maxSymbolValuePtr with actual largest symbol value detected. * @return : count of the most frequent symbol (which isn't identified). * or an error code, which can be tested using HIST_isError(). * note : if return == srcSize, there is only one symbol. */ size_t HIST_count(unsigned* count, unsigned* maxSymbolValuePtr, const void* src, size_t srcSize); unsigned HIST_isError(size_t code); /**< tells if a return value is an error code */ /* --- advanced histogram functions --- */ #define HIST_WKSP_SIZE_U32 1024 #define HIST_WKSP_SIZE (HIST_WKSP_SIZE_U32 * sizeof(unsigned)) /** HIST_count_wksp() : * Same as HIST_count(), but using an externally provided scratch buffer. * Benefit is this function will use very little stack space. 
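 * Illustrative sketch (src/srcSize are hypothetical; the scratch buffer can
 * live anywhere, e.g. inside a long-lived context, as long as it meets the
 * alignment and size requirements stated below):
 *
 *     unsigned count[256];
 *     unsigned maxSymbolValue = 255;
 *     unsigned wksp[HIST_WKSP_SIZE_U32];
 *     size_t const largest = HIST_count_wksp(count, &maxSymbolValue,
 *                                            src, srcSize,
 *                                            wksp, sizeof(wksp));
 *     if (!HIST_isError(largest))
 *         ... count[s] now holds the exact number of occurrences of byte s,
 *             for every s <= maxSymbolValue ...
 *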
* `workSpace` is a writable buffer which must be 4-bytes aligned, * `workSpaceSize` must be >= HIST_WKSP_SIZE */ size_t HIST_count_wksp(unsigned* count, unsigned* maxSymbolValuePtr, const void* src, size_t srcSize, void* workSpace, size_t workSpaceSize); /** HIST_countFast() : * same as HIST_count(), but blindly trusts that all byte values within src are <= *maxSymbolValuePtr. * This function is unsafe, and will segfault if any value within `src` is `> *maxSymbolValuePtr` */ size_t HIST_countFast(unsigned* count, unsigned* maxSymbolValuePtr, const void* src, size_t srcSize); /** HIST_countFast_wksp() : * Same as HIST_countFast(), but using an externally provided scratch buffer. * `workSpace` is a writable buffer which must be 4-bytes aligned, * `workSpaceSize` must be >= HIST_WKSP_SIZE */ size_t HIST_countFast_wksp(unsigned* count, unsigned* maxSymbolValuePtr, const void* src, size_t srcSize, void* workSpace, size_t workSpaceSize); /*! HIST_count_simple() : * Same as HIST_countFast(), this function is unsafe, * and will segfault if any value within `src` is `> *maxSymbolValuePtr`. * It is also a bit slower for large inputs. * However, it does not need any additional memory (not even on stack). * @return : count of the most frequent symbol. * Note this function doesn't produce any error (i.e. it must succeed). */ unsigned HIST_count_simple(unsigned* count, unsigned* maxSymbolValuePtr, const void* src, size_t srcSize); borgbackup-1.1.15/src/borg/algorithms/zstd/lib/compress/zstdmt_compress.h0000644000175000017500000002163013771325506026576 0ustar useruser00000000000000/* * Copyright (c) 2016-2020, Yann Collet, Facebook, Inc. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the * LICENSE file in the root directory of this source tree) and the GPLv2 (found * in the COPYING file in the root directory of this source tree). * You may select, at your option, one of the above-listed licenses. */ #ifndef ZSTDMT_COMPRESS_H #define ZSTDMT_COMPRESS_H #if defined (__cplusplus) extern "C" { #endif /* Note : This is an internal API. * These APIs used to be exposed with ZSTDLIB_API, * because it used to be the only way to invoke MT compression. * Now, it's recommended to use ZSTD_compress2 and ZSTD_compressStream2() * instead. * * If you depend on these APIs and can't switch, then define * ZSTD_LEGACY_MULTITHREADED_API when making the dynamic library. * However, we may completely remove these functions in a future * release, so please switch soon. * * This API requires ZSTD_MULTITHREAD to be defined during compilation, * otherwise ZSTDMT_createCCtx*() will fail. */ #ifdef ZSTD_LEGACY_MULTITHREADED_API # define ZSTDMT_API ZSTDLIB_API #else # define ZSTDMT_API #endif /* === Dependencies === */ #include /* size_t */ #define ZSTD_STATIC_LINKING_ONLY /* ZSTD_parameters */ #include "../zstd.h" /* ZSTD_inBuffer, ZSTD_outBuffer, ZSTDLIB_API */ /* === Constants === */ #ifndef ZSTDMT_NBWORKERS_MAX # define ZSTDMT_NBWORKERS_MAX 200 #endif #ifndef ZSTDMT_JOBSIZE_MIN # define ZSTDMT_JOBSIZE_MIN (1 MB) #endif #define ZSTDMT_JOBLOG_MAX (MEM_32bits() ? 29 : 30) #define ZSTDMT_JOBSIZE_MAX (MEM_32bits() ? (512 MB) : (1024 MB)) /* === Memory management === */ typedef struct ZSTDMT_CCtx_s ZSTDMT_CCtx; /* Requires ZSTD_MULTITHREAD to be defined during compilation, otherwise it will return NULL. */ ZSTDMT_API ZSTDMT_CCtx* ZSTDMT_createCCtx(unsigned nbWorkers); /* Requires ZSTD_MULTITHREAD to be defined during compilation, otherwise it will return NULL. 
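 * For new code, multi-threaded compression is normally reached through the
 * regular public API rather than ZSTDMT_* (illustrative sketch, buffers
 * hypothetical; also requires a ZSTD_MULTITHREAD build):
 *
 *     ZSTD_CCtx* const cctx = ZSTD_createCCtx();
 *     ZSTD_CCtx_setParameter(cctx, ZSTD_c_nbWorkers, 4);
 *     size_t const cSize = ZSTD_compress2(cctx, dst, dstCapacity, src, srcSize);
 *     ZSTD_freeCCtx(cctx);
 *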
*/ ZSTDMT_API ZSTDMT_CCtx* ZSTDMT_createCCtx_advanced(unsigned nbWorkers, ZSTD_customMem cMem); ZSTDMT_API size_t ZSTDMT_freeCCtx(ZSTDMT_CCtx* mtctx); ZSTDMT_API size_t ZSTDMT_sizeof_CCtx(ZSTDMT_CCtx* mtctx); /* === Simple one-pass compression function === */ ZSTDMT_API size_t ZSTDMT_compressCCtx(ZSTDMT_CCtx* mtctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize, int compressionLevel); /* === Streaming functions === */ ZSTDMT_API size_t ZSTDMT_initCStream(ZSTDMT_CCtx* mtctx, int compressionLevel); ZSTDMT_API size_t ZSTDMT_resetCStream(ZSTDMT_CCtx* mtctx, unsigned long long pledgedSrcSize); /**< if srcSize is not known at reset time, use ZSTD_CONTENTSIZE_UNKNOWN. Note: for compatibility with older programs, 0 means the same as ZSTD_CONTENTSIZE_UNKNOWN, but it will change in the future to mean "empty" */ ZSTDMT_API size_t ZSTDMT_nextInputSizeHint(const ZSTDMT_CCtx* mtctx); ZSTDMT_API size_t ZSTDMT_compressStream(ZSTDMT_CCtx* mtctx, ZSTD_outBuffer* output, ZSTD_inBuffer* input); ZSTDMT_API size_t ZSTDMT_flushStream(ZSTDMT_CCtx* mtctx, ZSTD_outBuffer* output); /**< @return : 0 == all flushed; >0 : still some data to be flushed; or an error code (ZSTD_isError()) */ ZSTDMT_API size_t ZSTDMT_endStream(ZSTDMT_CCtx* mtctx, ZSTD_outBuffer* output); /**< @return : 0 == all flushed; >0 : still some data to be flushed; or an error code (ZSTD_isError()) */ /* === Advanced functions and parameters === */ ZSTDMT_API size_t ZSTDMT_compress_advanced(ZSTDMT_CCtx* mtctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize, const ZSTD_CDict* cdict, ZSTD_parameters params, int overlapLog); ZSTDMT_API size_t ZSTDMT_initCStream_advanced(ZSTDMT_CCtx* mtctx, const void* dict, size_t dictSize, /* dict can be released after init, a local copy is preserved within zcs */ ZSTD_parameters params, unsigned long long pledgedSrcSize); /* pledgedSrcSize is optional and can be zero == unknown */ ZSTDMT_API size_t ZSTDMT_initCStream_usingCDict(ZSTDMT_CCtx* mtctx, const ZSTD_CDict* cdict, ZSTD_frameParameters fparams, unsigned long long pledgedSrcSize); /* note : zero means empty */ /* ZSTDMT_parameter : * List of parameters that can be set using ZSTDMT_setMTCtxParameter() */ typedef enum { ZSTDMT_p_jobSize, /* Each job is compressed in parallel. By default, this value is dynamically determined depending on compression parameters. Can be set explicitly here. */ ZSTDMT_p_overlapLog, /* Each job may reload a part of previous job to enhance compression ratio; 0 == no overlap, 6(default) == use 1/8th of window, >=9 == use full window. This is a "sticky" parameter : its value will be re-used on next compression job */ ZSTDMT_p_rsyncable /* Enables rsyncable mode. */ } ZSTDMT_parameter; /* ZSTDMT_setMTCtxParameter() : * allow setting individual parameters, one at a time, among a list of enums defined in ZSTDMT_parameter. * The function must be called typically after ZSTD_createCCtx() but __before ZSTDMT_init*() !__ * Parameters not explicitly reset by ZSTDMT_init*() remain the same in consecutive compression sessions. * @return : 0, or an error code (which can be tested using ZSTD_isError()) */ ZSTDMT_API size_t ZSTDMT_setMTCtxParameter(ZSTDMT_CCtx* mtctx, ZSTDMT_parameter parameter, int value); /* ZSTDMT_getMTCtxParameter() : * Query the ZSTDMT_CCtx for a parameter value. * @return : 0, or an error code (which can be tested using ZSTD_isError()) */ ZSTDMT_API size_t ZSTDMT_getMTCtxParameter(ZSTDMT_CCtx* mtctx, ZSTDMT_parameter parameter, int* value); /*! 
ZSTDMT_compressStream_generic() : * Combines ZSTDMT_compressStream() with optional ZSTDMT_flushStream() or ZSTDMT_endStream() * depending on flush directive. * @return : minimum amount of data still to be flushed * 0 if fully flushed * or an error code * note : needs to be init using any ZSTD_initCStream*() variant */ ZSTDMT_API size_t ZSTDMT_compressStream_generic(ZSTDMT_CCtx* mtctx, ZSTD_outBuffer* output, ZSTD_inBuffer* input, ZSTD_EndDirective endOp); /* ======================================================== * === Private interface, for use by ZSTD_compress.c === * === Not exposed in libzstd. Never invoke directly === * ======================================================== */ /*! ZSTDMT_toFlushNow() * Tell how many bytes are ready to be flushed immediately. * Probe the oldest active job (not yet entirely flushed) and check its output buffer. * If return 0, it means there is no active job, * or, it means oldest job is still active, but everything produced has been flushed so far, * therefore flushing is limited by speed of oldest job. */ size_t ZSTDMT_toFlushNow(ZSTDMT_CCtx* mtctx); /*! ZSTDMT_CCtxParam_setMTCtxParameter() * like ZSTDMT_setMTCtxParameter(), but into a ZSTD_CCtx_Params */ size_t ZSTDMT_CCtxParam_setMTCtxParameter(ZSTD_CCtx_params* params, ZSTDMT_parameter parameter, int value); /*! ZSTDMT_CCtxParam_setNbWorkers() * Set nbWorkers, and clamp it. * Also reset jobSize and overlapLog */ size_t ZSTDMT_CCtxParam_setNbWorkers(ZSTD_CCtx_params* params, unsigned nbWorkers); /*! ZSTDMT_updateCParams_whileCompressing() : * Updates only a selected set of compression parameters, to remain compatible with current frame. * New parameters will be applied to next compression job. */ void ZSTDMT_updateCParams_whileCompressing(ZSTDMT_CCtx* mtctx, const ZSTD_CCtx_params* cctxParams); /*! ZSTDMT_getFrameProgression(): * tells how much data has been consumed (input) and produced (output) for current frame. * able to count progression inside worker threads. */ ZSTD_frameProgression ZSTDMT_getFrameProgression(ZSTDMT_CCtx* mtctx); /*! ZSTDMT_initCStream_internal() : * Private use only. Init streaming operation. * expects params to be valid. * must receive dict, or cdict, or none, but not both. * @return : 0, or an error code */ size_t ZSTDMT_initCStream_internal(ZSTDMT_CCtx* zcs, const void* dict, size_t dictSize, ZSTD_dictContentType_e dictContentType, const ZSTD_CDict* cdict, ZSTD_CCtx_params params, unsigned long long pledgedSrcSize); #if defined (__cplusplus) } #endif #endif /* ZSTDMT_COMPRESS_H */ borgbackup-1.1.15/src/borg/algorithms/zstd/lib/compress/zstd_cwksp.h0000644000175000017500000004450113771325506025533 0ustar useruser00000000000000/* * Copyright (c) 2016-2020, Yann Collet, Facebook, Inc. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the * LICENSE file in the root directory of this source tree) and the GPLv2 (found * in the COPYING file in the root directory of this source tree). * You may select, at your option, one of the above-listed licenses. 
*/ #ifndef ZSTD_CWKSP_H #define ZSTD_CWKSP_H /*-************************************* * Dependencies ***************************************/ #include "../common/zstd_internal.h" #if defined (__cplusplus) extern "C" { #endif /*-************************************* * Constants ***************************************/ /* Since the workspace is effectively its own little malloc implementation / * arena, when we run under ASAN, we should similarly insert redzones between * each internal element of the workspace, so ASAN will catch overruns that * reach outside an object but that stay inside the workspace. * * This defines the size of that redzone. */ #ifndef ZSTD_CWKSP_ASAN_REDZONE_SIZE #define ZSTD_CWKSP_ASAN_REDZONE_SIZE 128 #endif /*-************************************* * Structures ***************************************/ typedef enum { ZSTD_cwksp_alloc_objects, ZSTD_cwksp_alloc_buffers, ZSTD_cwksp_alloc_aligned } ZSTD_cwksp_alloc_phase_e; /** * Zstd fits all its internal datastructures into a single continuous buffer, * so that it only needs to perform a single OS allocation (or so that a buffer * can be provided to it and it can perform no allocations at all). This buffer * is called the workspace. * * Several optimizations complicate that process of allocating memory ranges * from this workspace for each internal datastructure: * * - These different internal datastructures have different setup requirements: * * - The static objects need to be cleared once and can then be trivially * reused for each compression. * * - Various buffers don't need to be initialized at all--they are always * written into before they're read. * * - The matchstate tables have a unique requirement that they don't need * their memory to be totally cleared, but they do need the memory to have * some bound, i.e., a guarantee that all values in the memory they've been * allocated is less than some maximum value (which is the starting value * for the indices that they will then use for compression). When this * guarantee is provided to them, they can use the memory without any setup * work. When it can't, they have to clear the area. * * - These buffers also have different alignment requirements. * * - We would like to reuse the objects in the workspace for multiple * compressions without having to perform any expensive reallocation or * reinitialization work. * * - We would like to be able to efficiently reuse the workspace across * multiple compressions **even when the compression parameters change** and * we need to resize some of the objects (where possible). * * To attempt to manage this buffer, given these constraints, the ZSTD_cwksp * abstraction was created. It works as follows: * * Workspace Layout: * * [ ... workspace ... ] * [objects][tables ... ->] free space [<- ... aligned][<- ... buffers] * * The various objects that live in the workspace are divided into the * following categories, and are allocated separately: * * - Static objects: this is optionally the enclosing ZSTD_CCtx or ZSTD_CDict, * so that literally everything fits in a single buffer. Note: if present, * this must be the first object in the workspace, since ZSTD_free{CCtx, * CDict}() rely on a pointer comparison to see whether one or two frees are * required. * * - Fixed size objects: these are fixed-size, fixed-count objects that are * nonetheless "dynamically" allocated in the workspace so that we can * control how they're initialized separately from the broader ZSTD_CCtx. 
* Examples: * - Entropy Workspace * - 2 x ZSTD_compressedBlockState_t * - CDict dictionary contents * * - Tables: these are any of several different datastructures (hash tables, * chain tables, binary trees) that all respect a common format: they are * uint32_t arrays, all of whose values are between 0 and (nextSrc - base). * Their sizes depend on the cparams. * * - Aligned: these buffers are used for various purposes that require 4 byte * alignment, but don't require any initialization before they're used. * * - Buffers: these buffers are used for various purposes that don't require * any alignment or initialization before they're used. This means they can * be moved around at no cost for a new compression. * * Allocating Memory: * * The various types of objects must be allocated in order, so they can be * correctly packed into the workspace buffer. That order is: * * 1. Objects * 2. Buffers * 3. Aligned * 4. Tables * * Attempts to reserve objects of different types out of order will fail. */ typedef struct { void* workspace; void* workspaceEnd; void* objectEnd; void* tableEnd; void* tableValidEnd; void* allocStart; int allocFailed; int workspaceOversizedDuration; ZSTD_cwksp_alloc_phase_e phase; } ZSTD_cwksp; /*-************************************* * Functions ***************************************/ MEM_STATIC size_t ZSTD_cwksp_available_space(ZSTD_cwksp* ws); MEM_STATIC void ZSTD_cwksp_assert_internal_consistency(ZSTD_cwksp* ws) { (void)ws; assert(ws->workspace <= ws->objectEnd); assert(ws->objectEnd <= ws->tableEnd); assert(ws->objectEnd <= ws->tableValidEnd); assert(ws->tableEnd <= ws->allocStart); assert(ws->tableValidEnd <= ws->allocStart); assert(ws->allocStart <= ws->workspaceEnd); } /** * Align must be a power of 2. */ MEM_STATIC size_t ZSTD_cwksp_align(size_t size, size_t const align) { size_t const mask = align - 1; assert((align & mask) == 0); return (size + mask) & ~mask; } /** * Use this to determine how much space in the workspace we will consume to * allocate this object. (Normally it should be exactly the size of the object, * but under special conditions, like ASAN, where we pad each object, it might * be larger.) * * Since tables aren't currently redzoned, you don't need to call through this * to figure out how much space you need for the matchState tables. Everything * else is though. */ MEM_STATIC size_t ZSTD_cwksp_alloc_size(size_t size) { #if defined (ADDRESS_SANITIZER) && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE) return size + 2 * ZSTD_CWKSP_ASAN_REDZONE_SIZE; #else return size; #endif } MEM_STATIC void ZSTD_cwksp_internal_advance_phase( ZSTD_cwksp* ws, ZSTD_cwksp_alloc_phase_e phase) { assert(phase >= ws->phase); if (phase > ws->phase) { if (ws->phase < ZSTD_cwksp_alloc_buffers && phase >= ZSTD_cwksp_alloc_buffers) { ws->tableValidEnd = ws->objectEnd; } if (ws->phase < ZSTD_cwksp_alloc_aligned && phase >= ZSTD_cwksp_alloc_aligned) { /* If unaligned allocations down from a too-large top have left us * unaligned, we need to realign our alloc ptr. Technically, this * can consume space that is unaccounted for in the neededSpace * calculation. However, I believe this can only happen when the * workspace is too large, and specifically when it is too large * by a larger margin than the space that will be consumed. */ /* TODO: cleaner, compiler warning friendly way to do this??? 
*/ ws->allocStart = (BYTE*)ws->allocStart - ((size_t)ws->allocStart & (sizeof(U32)-1)); if (ws->allocStart < ws->tableValidEnd) { ws->tableValidEnd = ws->allocStart; } } ws->phase = phase; } } /** * Returns whether this object/buffer/etc was allocated in this workspace. */ MEM_STATIC int ZSTD_cwksp_owns_buffer(const ZSTD_cwksp* ws, const void* ptr) { return (ptr != NULL) && (ws->workspace <= ptr) && (ptr <= ws->workspaceEnd); } /** * Internal function. Do not use directly. */ MEM_STATIC void* ZSTD_cwksp_reserve_internal( ZSTD_cwksp* ws, size_t bytes, ZSTD_cwksp_alloc_phase_e phase) { void* alloc; void* bottom = ws->tableEnd; ZSTD_cwksp_internal_advance_phase(ws, phase); alloc = (BYTE *)ws->allocStart - bytes; #if defined (ADDRESS_SANITIZER) && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE) /* over-reserve space */ alloc = (BYTE *)alloc - 2 * ZSTD_CWKSP_ASAN_REDZONE_SIZE; #endif DEBUGLOG(5, "cwksp: reserving %p %zd bytes, %zd bytes remaining", alloc, bytes, ZSTD_cwksp_available_space(ws) - bytes); ZSTD_cwksp_assert_internal_consistency(ws); assert(alloc >= bottom); if (alloc < bottom) { DEBUGLOG(4, "cwksp: alloc failed!"); ws->allocFailed = 1; return NULL; } if (alloc < ws->tableValidEnd) { ws->tableValidEnd = alloc; } ws->allocStart = alloc; #if defined (ADDRESS_SANITIZER) && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE) /* Move alloc so there's ZSTD_CWKSP_ASAN_REDZONE_SIZE unused space on * either size. */ alloc = (BYTE *)alloc + ZSTD_CWKSP_ASAN_REDZONE_SIZE; __asan_unpoison_memory_region(alloc, bytes); #endif return alloc; } /** * Reserves and returns unaligned memory. */ MEM_STATIC BYTE* ZSTD_cwksp_reserve_buffer(ZSTD_cwksp* ws, size_t bytes) { return (BYTE*)ZSTD_cwksp_reserve_internal(ws, bytes, ZSTD_cwksp_alloc_buffers); } /** * Reserves and returns memory sized on and aligned on sizeof(unsigned). */ MEM_STATIC void* ZSTD_cwksp_reserve_aligned(ZSTD_cwksp* ws, size_t bytes) { assert((bytes & (sizeof(U32)-1)) == 0); return ZSTD_cwksp_reserve_internal(ws, ZSTD_cwksp_align(bytes, sizeof(U32)), ZSTD_cwksp_alloc_aligned); } /** * Aligned on sizeof(unsigned). These buffers have the special property that * their values remain constrained, allowing us to re-use them without * memset()-ing them. */ MEM_STATIC void* ZSTD_cwksp_reserve_table(ZSTD_cwksp* ws, size_t bytes) { const ZSTD_cwksp_alloc_phase_e phase = ZSTD_cwksp_alloc_aligned; void* alloc = ws->tableEnd; void* end = (BYTE *)alloc + bytes; void* top = ws->allocStart; DEBUGLOG(5, "cwksp: reserving %p table %zd bytes, %zd bytes remaining", alloc, bytes, ZSTD_cwksp_available_space(ws) - bytes); assert((bytes & (sizeof(U32)-1)) == 0); ZSTD_cwksp_internal_advance_phase(ws, phase); ZSTD_cwksp_assert_internal_consistency(ws); assert(end <= top); if (end > top) { DEBUGLOG(4, "cwksp: table alloc failed!"); ws->allocFailed = 1; return NULL; } ws->tableEnd = end; #if defined (ADDRESS_SANITIZER) && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE) __asan_unpoison_memory_region(alloc, bytes); #endif return alloc; } /** * Aligned on sizeof(void*). 
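 * Illustrative sketch of the reservation discipline described at the top of
 * this file (sizes and the SomeCtx type are hypothetical; reservations must
 * follow the objects -> buffers -> aligned -> tables order):
 *
 *     ZSTD_cwksp ws;
 *     ZSTD_cwksp_init(&ws, mem, memSize);
 *     SomeCtx* const ctx = (SomeCtx*)ZSTD_cwksp_reserve_object(&ws, sizeof(SomeCtx));
 *     BYTE* const buf    = ZSTD_cwksp_reserve_buffer(&ws, bufSize);
 *     U32* const scratch = (U32*)ZSTD_cwksp_reserve_aligned(&ws, nbVals * sizeof(U32));
 *     U32* const hashTbl = (U32*)ZSTD_cwksp_reserve_table(&ws, hashSize * sizeof(U32));
 *     if (ZSTD_cwksp_reserve_failed(&ws))
 *         ... the arena was too small, some of the pointers above are NULL ...
 *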
*/ MEM_STATIC void* ZSTD_cwksp_reserve_object(ZSTD_cwksp* ws, size_t bytes) { size_t roundedBytes = ZSTD_cwksp_align(bytes, sizeof(void*)); void* alloc = ws->objectEnd; void* end = (BYTE*)alloc + roundedBytes; #if defined (ADDRESS_SANITIZER) && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE) /* over-reserve space */ end = (BYTE *)end + 2 * ZSTD_CWKSP_ASAN_REDZONE_SIZE; #endif DEBUGLOG(5, "cwksp: reserving %p object %zd bytes (rounded to %zd), %zd bytes remaining", alloc, bytes, roundedBytes, ZSTD_cwksp_available_space(ws) - roundedBytes); assert(((size_t)alloc & (sizeof(void*)-1)) == 0); assert((bytes & (sizeof(void*)-1)) == 0); ZSTD_cwksp_assert_internal_consistency(ws); /* we must be in the first phase, no advance is possible */ if (ws->phase != ZSTD_cwksp_alloc_objects || end > ws->workspaceEnd) { DEBUGLOG(4, "cwksp: object alloc failed!"); ws->allocFailed = 1; return NULL; } ws->objectEnd = end; ws->tableEnd = end; ws->tableValidEnd = end; #if defined (ADDRESS_SANITIZER) && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE) /* Move alloc so there's ZSTD_CWKSP_ASAN_REDZONE_SIZE unused space on * either size. */ alloc = (BYTE *)alloc + ZSTD_CWKSP_ASAN_REDZONE_SIZE; __asan_unpoison_memory_region(alloc, bytes); #endif return alloc; } MEM_STATIC void ZSTD_cwksp_mark_tables_dirty(ZSTD_cwksp* ws) { DEBUGLOG(4, "cwksp: ZSTD_cwksp_mark_tables_dirty"); #if defined (MEMORY_SANITIZER) && !defined (ZSTD_MSAN_DONT_POISON_WORKSPACE) /* To validate that the table re-use logic is sound, and that we don't * access table space that we haven't cleaned, we re-"poison" the table * space every time we mark it dirty. */ { size_t size = (BYTE*)ws->tableValidEnd - (BYTE*)ws->objectEnd; assert(__msan_test_shadow(ws->objectEnd, size) == -1); __msan_poison(ws->objectEnd, size); } #endif assert(ws->tableValidEnd >= ws->objectEnd); assert(ws->tableValidEnd <= ws->allocStart); ws->tableValidEnd = ws->objectEnd; ZSTD_cwksp_assert_internal_consistency(ws); } MEM_STATIC void ZSTD_cwksp_mark_tables_clean(ZSTD_cwksp* ws) { DEBUGLOG(4, "cwksp: ZSTD_cwksp_mark_tables_clean"); assert(ws->tableValidEnd >= ws->objectEnd); assert(ws->tableValidEnd <= ws->allocStart); if (ws->tableValidEnd < ws->tableEnd) { ws->tableValidEnd = ws->tableEnd; } ZSTD_cwksp_assert_internal_consistency(ws); } /** * Zero the part of the allocated tables not already marked clean. */ MEM_STATIC void ZSTD_cwksp_clean_tables(ZSTD_cwksp* ws) { DEBUGLOG(4, "cwksp: ZSTD_cwksp_clean_tables"); assert(ws->tableValidEnd >= ws->objectEnd); assert(ws->tableValidEnd <= ws->allocStart); if (ws->tableValidEnd < ws->tableEnd) { memset(ws->tableValidEnd, 0, (BYTE*)ws->tableEnd - (BYTE*)ws->tableValidEnd); } ZSTD_cwksp_mark_tables_clean(ws); } /** * Invalidates table allocations. * All other allocations remain valid. */ MEM_STATIC void ZSTD_cwksp_clear_tables(ZSTD_cwksp* ws) { DEBUGLOG(4, "cwksp: clearing tables!"); #if defined (ADDRESS_SANITIZER) && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE) { size_t size = (BYTE*)ws->tableValidEnd - (BYTE*)ws->objectEnd; __asan_poison_memory_region(ws->objectEnd, size); } #endif ws->tableEnd = ws->objectEnd; ZSTD_cwksp_assert_internal_consistency(ws); } /** * Invalidates all buffer, aligned, and table allocations. * Object allocations remain valid. 
*/ MEM_STATIC void ZSTD_cwksp_clear(ZSTD_cwksp* ws) { DEBUGLOG(4, "cwksp: clearing!"); #if defined (MEMORY_SANITIZER) && !defined (ZSTD_MSAN_DONT_POISON_WORKSPACE) /* To validate that the context re-use logic is sound, and that we don't * access stuff that this compression hasn't initialized, we re-"poison" * the workspace (or at least the non-static, non-table parts of it) * every time we start a new compression. */ { size_t size = (BYTE*)ws->workspaceEnd - (BYTE*)ws->tableValidEnd; __msan_poison(ws->tableValidEnd, size); } #endif #if defined (ADDRESS_SANITIZER) && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE) { size_t size = (BYTE*)ws->workspaceEnd - (BYTE*)ws->objectEnd; __asan_poison_memory_region(ws->objectEnd, size); } #endif ws->tableEnd = ws->objectEnd; ws->allocStart = ws->workspaceEnd; ws->allocFailed = 0; if (ws->phase > ZSTD_cwksp_alloc_buffers) { ws->phase = ZSTD_cwksp_alloc_buffers; } ZSTD_cwksp_assert_internal_consistency(ws); } /** * The provided workspace takes ownership of the buffer [start, start+size). * Any existing values in the workspace are ignored (the previously managed * buffer, if present, must be separately freed). */ MEM_STATIC void ZSTD_cwksp_init(ZSTD_cwksp* ws, void* start, size_t size) { DEBUGLOG(4, "cwksp: init'ing workspace with %zd bytes", size); assert(((size_t)start & (sizeof(void*)-1)) == 0); /* ensure correct alignment */ ws->workspace = start; ws->workspaceEnd = (BYTE*)start + size; ws->objectEnd = ws->workspace; ws->tableValidEnd = ws->objectEnd; ws->phase = ZSTD_cwksp_alloc_objects; ZSTD_cwksp_clear(ws); ws->workspaceOversizedDuration = 0; ZSTD_cwksp_assert_internal_consistency(ws); } MEM_STATIC size_t ZSTD_cwksp_create(ZSTD_cwksp* ws, size_t size, ZSTD_customMem customMem) { void* workspace = ZSTD_malloc(size, customMem); DEBUGLOG(4, "cwksp: creating new workspace with %zd bytes", size); RETURN_ERROR_IF(workspace == NULL, memory_allocation, "NULL pointer!"); ZSTD_cwksp_init(ws, workspace, size); return 0; } MEM_STATIC void ZSTD_cwksp_free(ZSTD_cwksp* ws, ZSTD_customMem customMem) { void *ptr = ws->workspace; DEBUGLOG(4, "cwksp: freeing workspace"); memset(ws, 0, sizeof(ZSTD_cwksp)); ZSTD_free(ptr, customMem); } /** * Moves the management of a workspace from one cwksp to another. The src cwksp * is left in an invalid state (src must be re-init()'ed before its used again). 
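 * Typical pattern, mirroring how ZSTD_createCDict_advanced() in zstd_compress.c
 * uses it (sketch; MyObj is a hypothetical struct carrying its own ZSTD_cwksp
 * member):
 *
 *     ZSTD_cwksp ws;
 *     ZSTD_cwksp_init(&ws, buffer, bufferSize);
 *     MyObj* const obj = (MyObj*)ZSTD_cwksp_reserve_object(&ws, sizeof(MyObj));
 *     ZSTD_cwksp_move(&obj->workspace, &ws);
 *     ... obj->workspace now owns `buffer`; `ws` is invalid until re-init()'ed ...
 *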
*/ MEM_STATIC void ZSTD_cwksp_move(ZSTD_cwksp* dst, ZSTD_cwksp* src) { *dst = *src; memset(src, 0, sizeof(ZSTD_cwksp)); } MEM_STATIC size_t ZSTD_cwksp_sizeof(const ZSTD_cwksp* ws) { return (size_t)((BYTE*)ws->workspaceEnd - (BYTE*)ws->workspace); } MEM_STATIC int ZSTD_cwksp_reserve_failed(const ZSTD_cwksp* ws) { return ws->allocFailed; } /*-************************************* * Functions Checking Free Space ***************************************/ MEM_STATIC size_t ZSTD_cwksp_available_space(ZSTD_cwksp* ws) { return (size_t)((BYTE*)ws->allocStart - (BYTE*)ws->tableEnd); } MEM_STATIC int ZSTD_cwksp_check_available(ZSTD_cwksp* ws, size_t additionalNeededSpace) { return ZSTD_cwksp_available_space(ws) >= additionalNeededSpace; } MEM_STATIC int ZSTD_cwksp_check_too_large(ZSTD_cwksp* ws, size_t additionalNeededSpace) { return ZSTD_cwksp_check_available( ws, additionalNeededSpace * ZSTD_WORKSPACETOOLARGE_FACTOR); } MEM_STATIC int ZSTD_cwksp_check_wasteful(ZSTD_cwksp* ws, size_t additionalNeededSpace) { return ZSTD_cwksp_check_too_large(ws, additionalNeededSpace) && ws->workspaceOversizedDuration > ZSTD_WORKSPACETOOLARGE_MAXDURATION; } MEM_STATIC void ZSTD_cwksp_bump_oversized_duration( ZSTD_cwksp* ws, size_t additionalNeededSpace) { if (ZSTD_cwksp_check_too_large(ws, additionalNeededSpace)) { ws->workspaceOversizedDuration++; } else { ws->workspaceOversizedDuration = 0; } } #if defined (__cplusplus) } #endif #endif /* ZSTD_CWKSP_H */ borgbackup-1.1.15/src/borg/algorithms/zstd/lib/compress/zstd_compress_superblock.h0000644000175000017500000000222213771325506030462 0ustar useruser00000000000000/* * Copyright (c) 2016-2020, Yann Collet, Facebook, Inc. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the * LICENSE file in the root directory of this source tree) and the GPLv2 (found * in the COPYING file in the root directory of this source tree). * You may select, at your option, one of the above-listed licenses. */ #ifndef ZSTD_COMPRESS_ADVANCED_H #define ZSTD_COMPRESS_ADVANCED_H /*-************************************* * Dependencies ***************************************/ #include "../zstd.h" /* ZSTD_CCtx */ /*-************************************* * Target Compressed Block Size ***************************************/ /* ZSTD_compressSuperBlock() : * Used to compress a super block when targetCBlockSize is being used. * The given block will be compressed into multiple sub blocks that are around targetCBlockSize. */ size_t ZSTD_compressSuperBlock(ZSTD_CCtx* zc, void* dst, size_t dstCapacity, void const* src, size_t srcSize, unsigned lastBlock); #endif /* ZSTD_COMPRESS_ADVANCED_H */ borgbackup-1.1.15/src/borg/algorithms/zstd/lib/deprecated/0000755000175000017500000000000013771325773023436 5ustar useruser00000000000000borgbackup-1.1.15/src/borg/algorithms/zstd/lib/deprecated/zbuff_common.c0000644000175000017500000000176213771325506026266 0ustar useruser00000000000000/* * Copyright (c) 2016-2020, Yann Collet, Facebook, Inc. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the * LICENSE file in the root directory of this source tree) and the GPLv2 (found * in the COPYING file in the root directory of this source tree). * You may select, at your option, one of the above-listed licenses. 
*/ /*-************************************* * Dependencies ***************************************/ #include "../common/error_private.h" #include "zbuff.h" /*-**************************************** * ZBUFF Error Management (deprecated) ******************************************/ /*! ZBUFF_isError() : * tells if a return value is an error code */ unsigned ZBUFF_isError(size_t errorCode) { return ERR_isError(errorCode); } /*! ZBUFF_getErrorName() : * provides error code string from function result (useful for debugging) */ const char* ZBUFF_getErrorName(size_t errorCode) { return ERR_getErrorName(errorCode); } borgbackup-1.1.15/src/borg/algorithms/zstd/lib/deprecated/zbuff.h0000644000175000017500000002635713771325506024732 0ustar useruser00000000000000/* * Copyright (c) 2016-2020, Yann Collet, Facebook, Inc. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the * LICENSE file in the root directory of this source tree) and the GPLv2 (found * in the COPYING file in the root directory of this source tree). * You may select, at your option, one of the above-listed licenses. */ /* *************************************************************** * NOTES/WARNINGS ******************************************************************/ /* The streaming API defined here is deprecated. * Consider migrating towards ZSTD_compressStream() API in `zstd.h` * See 'lib/README.md'. *****************************************************************/ #if defined (__cplusplus) extern "C" { #endif #ifndef ZSTD_BUFFERED_H_23987 #define ZSTD_BUFFERED_H_23987 /* ************************************* * Dependencies ***************************************/ #include /* size_t */ #include "../zstd.h" /* ZSTD_CStream, ZSTD_DStream, ZSTDLIB_API */ /* *************************************************************** * Compiler specifics *****************************************************************/ /* Deprecation warnings */ /* Should these warnings be a problem, * it is generally possible to disable them, * typically with -Wno-deprecated-declarations for gcc * or _CRT_SECURE_NO_WARNINGS in Visual. * Otherwise, it's also possible to define ZBUFF_DISABLE_DEPRECATE_WARNINGS */ #ifdef ZBUFF_DISABLE_DEPRECATE_WARNINGS # define ZBUFF_DEPRECATED(message) ZSTDLIB_API /* disable deprecation warnings */ #else # if defined (__cplusplus) && (__cplusplus >= 201402) /* C++14 or greater */ # define ZBUFF_DEPRECATED(message) [[deprecated(message)]] ZSTDLIB_API # elif (defined(GNUC) && (GNUC > 4 || (GNUC == 4 && GNUC_MINOR >= 5))) || defined(__clang__) # define ZBUFF_DEPRECATED(message) ZSTDLIB_API __attribute__((deprecated(message))) # elif defined(__GNUC__) && (__GNUC__ >= 3) # define ZBUFF_DEPRECATED(message) ZSTDLIB_API __attribute__((deprecated)) # elif defined(_MSC_VER) # define ZBUFF_DEPRECATED(message) ZSTDLIB_API __declspec(deprecated(message)) # else # pragma message("WARNING: You need to implement ZBUFF_DEPRECATED for this compiler") # define ZBUFF_DEPRECATED(message) ZSTDLIB_API # endif #endif /* ZBUFF_DISABLE_DEPRECATE_WARNINGS */ /* ************************************* * Streaming functions ***************************************/ /* This is the easier "buffered" streaming API, * using an internal buffer to lift all restrictions on user-provided buffers * which can be any size, any place, for both input and output. 
* ZBUFF and ZSTD are 100% interoperable, * frames created by one can be decoded by the other one */ typedef ZSTD_CStream ZBUFF_CCtx; ZBUFF_DEPRECATED("use ZSTD_createCStream") ZBUFF_CCtx* ZBUFF_createCCtx(void); ZBUFF_DEPRECATED("use ZSTD_freeCStream") size_t ZBUFF_freeCCtx(ZBUFF_CCtx* cctx); ZBUFF_DEPRECATED("use ZSTD_initCStream") size_t ZBUFF_compressInit(ZBUFF_CCtx* cctx, int compressionLevel); ZBUFF_DEPRECATED("use ZSTD_initCStream_usingDict") size_t ZBUFF_compressInitDictionary(ZBUFF_CCtx* cctx, const void* dict, size_t dictSize, int compressionLevel); ZBUFF_DEPRECATED("use ZSTD_compressStream") size_t ZBUFF_compressContinue(ZBUFF_CCtx* cctx, void* dst, size_t* dstCapacityPtr, const void* src, size_t* srcSizePtr); ZBUFF_DEPRECATED("use ZSTD_flushStream") size_t ZBUFF_compressFlush(ZBUFF_CCtx* cctx, void* dst, size_t* dstCapacityPtr); ZBUFF_DEPRECATED("use ZSTD_endStream") size_t ZBUFF_compressEnd(ZBUFF_CCtx* cctx, void* dst, size_t* dstCapacityPtr); /*-************************************************* * Streaming compression - howto * * A ZBUFF_CCtx object is required to track streaming operation. * Use ZBUFF_createCCtx() and ZBUFF_freeCCtx() to create/release resources. * ZBUFF_CCtx objects can be reused multiple times. * * Start by initializing ZBUF_CCtx. * Use ZBUFF_compressInit() to start a new compression operation. * Use ZBUFF_compressInitDictionary() for a compression which requires a dictionary. * * Use ZBUFF_compressContinue() repetitively to consume input stream. * *srcSizePtr and *dstCapacityPtr can be any size. * The function will report how many bytes were read or written within *srcSizePtr and *dstCapacityPtr. * Note that it may not consume the entire input, in which case it's up to the caller to present again remaining data. * The content of `dst` will be overwritten (up to *dstCapacityPtr) at each call, so save its content if it matters or change @dst . * @return : a hint to preferred nb of bytes to use as input for next function call (it's just a hint, to improve latency) * or an error code, which can be tested using ZBUFF_isError(). * * At any moment, it's possible to flush whatever data remains within buffer, using ZBUFF_compressFlush(). * The nb of bytes written into `dst` will be reported into *dstCapacityPtr. * Note that the function cannot output more than *dstCapacityPtr, * therefore, some content might still be left into internal buffer if *dstCapacityPtr is too small. * @return : nb of bytes still present into internal buffer (0 if it's empty) * or an error code, which can be tested using ZBUFF_isError(). * * ZBUFF_compressEnd() instructs to finish a frame. * It will perform a flush and write frame epilogue. * The epilogue is required for decoders to consider a frame completed. * Similar to ZBUFF_compressFlush(), it may not be able to output the entire internal buffer content if *dstCapacityPtr is too small. * In which case, call again ZBUFF_compressFlush() to complete the flush. * @return : nb of bytes still present into internal buffer (0 if it's empty) * or an error code, which can be tested using ZBUFF_isError(). * * Hint : _recommended buffer_ sizes (not compulsory) : ZBUFF_recommendedCInSize() / ZBUFF_recommendedCOutSize() * input : ZBUFF_recommendedCInSize==128 KB block size is the internal unit, use this value to reduce intermediate stages (better latency) * output : ZBUFF_recommendedCOutSize==ZSTD_compressBound(128 KB) + 3 + 3 : ensures it's always possible to write/flush/end a full block. Skip some buffering. 
* By using both, it ensures that input will be entirely consumed, and output will always contain the result, reducing intermediate buffering. * **************************************************/ typedef ZSTD_DStream ZBUFF_DCtx; ZBUFF_DEPRECATED("use ZSTD_createDStream") ZBUFF_DCtx* ZBUFF_createDCtx(void); ZBUFF_DEPRECATED("use ZSTD_freeDStream") size_t ZBUFF_freeDCtx(ZBUFF_DCtx* dctx); ZBUFF_DEPRECATED("use ZSTD_initDStream") size_t ZBUFF_decompressInit(ZBUFF_DCtx* dctx); ZBUFF_DEPRECATED("use ZSTD_initDStream_usingDict") size_t ZBUFF_decompressInitDictionary(ZBUFF_DCtx* dctx, const void* dict, size_t dictSize); ZBUFF_DEPRECATED("use ZSTD_decompressStream") size_t ZBUFF_decompressContinue(ZBUFF_DCtx* dctx, void* dst, size_t* dstCapacityPtr, const void* src, size_t* srcSizePtr); /*-*************************************************************************** * Streaming decompression howto * * A ZBUFF_DCtx object is required to track streaming operations. * Use ZBUFF_createDCtx() and ZBUFF_freeDCtx() to create/release resources. * Use ZBUFF_decompressInit() to start a new decompression operation, * or ZBUFF_decompressInitDictionary() if decompression requires a dictionary. * Note that ZBUFF_DCtx objects can be re-init multiple times. * * Use ZBUFF_decompressContinue() repetitively to consume your input. * *srcSizePtr and *dstCapacityPtr can be any size. * The function will report how many bytes were read or written by modifying *srcSizePtr and *dstCapacityPtr. * Note that it may not consume the entire input, in which case it's up to the caller to present remaining input again. * The content of `dst` will be overwritten (up to *dstCapacityPtr) at each function call, so save its content if it matters, or change `dst`. * @return : 0 when a frame is completely decoded and fully flushed, * 1 when there is still some data left within internal buffer to flush, * >1 when more data is expected, with value being a suggested next input size (it's just a hint, which helps latency), * or an error code, which can be tested using ZBUFF_isError(). * * Hint : recommended buffer sizes (not compulsory) : ZBUFF_recommendedDInSize() and ZBUFF_recommendedDOutSize() * output : ZBUFF_recommendedDOutSize== 128 KB block size is the internal unit, it ensures it's always possible to write a full block when decoded. * input : ZBUFF_recommendedDInSize == 128KB + 3; * just follow indications from ZBUFF_decompressContinue() to minimize latency. It should always be <= 128 KB + 3 . * *******************************************************************************/ /* ************************************* * Tool functions ***************************************/ ZBUFF_DEPRECATED("use ZSTD_isError") unsigned ZBUFF_isError(size_t errorCode); ZBUFF_DEPRECATED("use ZSTD_getErrorName") const char* ZBUFF_getErrorName(size_t errorCode); /** Functions below provide recommended buffer sizes for Compression or Decompression operations. 
* These sizes are just hints, they tend to offer better latency */ ZBUFF_DEPRECATED("use ZSTD_CStreamInSize") size_t ZBUFF_recommendedCInSize(void); ZBUFF_DEPRECATED("use ZSTD_CStreamOutSize") size_t ZBUFF_recommendedCOutSize(void); ZBUFF_DEPRECATED("use ZSTD_DStreamInSize") size_t ZBUFF_recommendedDInSize(void); ZBUFF_DEPRECATED("use ZSTD_DStreamOutSize") size_t ZBUFF_recommendedDOutSize(void); #endif /* ZSTD_BUFFERED_H_23987 */ #ifdef ZBUFF_STATIC_LINKING_ONLY #ifndef ZBUFF_STATIC_H_30298098432 #define ZBUFF_STATIC_H_30298098432 /* ==================================================================================== * The definitions in this section are considered experimental. * They should never be used in association with a dynamic library, as they may change in the future. * They are provided for advanced usages. * Use them only in association with static linking. * ==================================================================================== */ /*--- Dependency ---*/ #define ZSTD_STATIC_LINKING_ONLY /* ZSTD_parameters, ZSTD_customMem */ #include "../zstd.h" /*--- Custom memory allocator ---*/ /*! ZBUFF_createCCtx_advanced() : * Create a ZBUFF compression context using external alloc and free functions */ ZBUFF_DEPRECATED("use ZSTD_createCStream_advanced") ZBUFF_CCtx* ZBUFF_createCCtx_advanced(ZSTD_customMem customMem); /*! ZBUFF_createDCtx_advanced() : * Create a ZBUFF decompression context using external alloc and free functions */ ZBUFF_DEPRECATED("use ZSTD_createDStream_advanced") ZBUFF_DCtx* ZBUFF_createDCtx_advanced(ZSTD_customMem customMem); /*--- Advanced Streaming Initialization ---*/ ZBUFF_DEPRECATED("use ZSTD_initDStream_usingDict") size_t ZBUFF_compressInit_advanced(ZBUFF_CCtx* zbc, const void* dict, size_t dictSize, ZSTD_parameters params, unsigned long long pledgedSrcSize); #endif /* ZBUFF_STATIC_H_30298098432 */ #endif /* ZBUFF_STATIC_LINKING_ONLY */ #if defined (__cplusplus) } #endif borgbackup-1.1.15/src/borg/algorithms/zstd/lib/deprecated/zbuff_decompress.c0000644000175000017500000000357413771325506027145 0ustar useruser00000000000000/* * Copyright (c) 2016-2020, Yann Collet, Facebook, Inc. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the * LICENSE file in the root directory of this source tree) and the GPLv2 (found * in the COPYING file in the root directory of this source tree). * You may select, at your option, one of the above-listed licenses. 
*/ /* ************************************* * Dependencies ***************************************/ #define ZBUFF_STATIC_LINKING_ONLY #include "zbuff.h" ZBUFF_DCtx* ZBUFF_createDCtx(void) { return ZSTD_createDStream(); } ZBUFF_DCtx* ZBUFF_createDCtx_advanced(ZSTD_customMem customMem) { return ZSTD_createDStream_advanced(customMem); } size_t ZBUFF_freeDCtx(ZBUFF_DCtx* zbd) { return ZSTD_freeDStream(zbd); } /* *** Initialization *** */ size_t ZBUFF_decompressInitDictionary(ZBUFF_DCtx* zbd, const void* dict, size_t dictSize) { return ZSTD_initDStream_usingDict(zbd, dict, dictSize); } size_t ZBUFF_decompressInit(ZBUFF_DCtx* zbd) { return ZSTD_initDStream(zbd); } /* *** Decompression *** */ size_t ZBUFF_decompressContinue(ZBUFF_DCtx* zbd, void* dst, size_t* dstCapacityPtr, const void* src, size_t* srcSizePtr) { ZSTD_outBuffer outBuff; ZSTD_inBuffer inBuff; size_t result; outBuff.dst = dst; outBuff.pos = 0; outBuff.size = *dstCapacityPtr; inBuff.src = src; inBuff.pos = 0; inBuff.size = *srcSizePtr; result = ZSTD_decompressStream(zbd, &outBuff, &inBuff); *dstCapacityPtr = outBuff.pos; *srcSizePtr = inBuff.pos; return result; } /* ************************************* * Tool functions ***************************************/ size_t ZBUFF_recommendedDInSize(void) { return ZSTD_DStreamInSize(); } size_t ZBUFF_recommendedDOutSize(void) { return ZSTD_DStreamOutSize(); } borgbackup-1.1.15/src/borg/algorithms/zstd/lib/deprecated/zbuff_compress.c0000644000175000017500000001232313771325506026624 0ustar useruser00000000000000/* * Copyright (c) 2016-2020, Yann Collet, Facebook, Inc. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the * LICENSE file in the root directory of this source tree) and the GPLv2 (found * in the COPYING file in the root directory of this source tree). * You may select, at your option, one of the above-listed licenses. */ /* ************************************* * Dependencies ***************************************/ #define ZBUFF_STATIC_LINKING_ONLY #include "zbuff.h" /*-*********************************************************** * Streaming compression * * A ZBUFF_CCtx object is required to track streaming operation. * Use ZBUFF_createCCtx() and ZBUFF_freeCCtx() to create/release resources. * Use ZBUFF_compressInit() to start a new compression operation. * ZBUFF_CCtx objects can be reused multiple times. * * Use ZBUFF_compressContinue() repetitively to consume your input. * *srcSizePtr and *dstCapacityPtr can be any size. * The function will report how many bytes were read or written by modifying *srcSizePtr and *dstCapacityPtr. * Note that it may not consume the entire input, in which case it's up to the caller to call again the function with remaining input. * The content of dst will be overwritten (up to *dstCapacityPtr) at each function call, so save its content if it matters or change dst . * @return : a hint to preferred nb of bytes to use as input for next function call (it's only a hint, to improve latency) * or an error code, which can be tested using ZBUFF_isError(). * * ZBUFF_compressFlush() can be used to instruct ZBUFF to compress and output whatever remains within its buffer. * Note that it will not output more than *dstCapacityPtr. * Therefore, some content might still be left into its internal buffer if dst buffer is too small. * @return : nb of bytes still present into internal buffer (0 if it's empty) * or an error code, which can be tested using ZBUFF_isError(). 
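 *
 * Illustrative sketch (not part of the original comment), assuming a
 * caller-owned `dst` buffer of `dstCapacity` bytes:
 *     size_t written = dstCapacity;
 *     size_t const left = ZBUFF_compressFlush(zbc, dst, &written);
 *     (on success, `written` holds the nb of bytes produced into `dst`,
 *      and `left` tells how much data is still buffered internally)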
* * ZBUFF_compressEnd() instructs to finish a frame. * It will perform a flush and write frame epilogue. * Similar to ZBUFF_compressFlush(), it may not be able to output the entire internal buffer content if *dstCapacityPtr is too small. * @return : nb of bytes still present into internal buffer (0 if it's empty) * or an error code, which can be tested using ZBUFF_isError(). * * Hint : recommended buffer sizes (not compulsory) * input : ZSTD_BLOCKSIZE_MAX (128 KB), internal unit size, it improves latency to use this value. * output : ZSTD_compressBound(ZSTD_BLOCKSIZE_MAX) + ZSTD_blockHeaderSize + ZBUFF_endFrameSize : ensures it's always possible to write/flush/end a full block at best speed. * ***********************************************************/ ZBUFF_CCtx* ZBUFF_createCCtx(void) { return ZSTD_createCStream(); } ZBUFF_CCtx* ZBUFF_createCCtx_advanced(ZSTD_customMem customMem) { return ZSTD_createCStream_advanced(customMem); } size_t ZBUFF_freeCCtx(ZBUFF_CCtx* zbc) { return ZSTD_freeCStream(zbc); } /* ====== Initialization ====== */ size_t ZBUFF_compressInit_advanced(ZBUFF_CCtx* zbc, const void* dict, size_t dictSize, ZSTD_parameters params, unsigned long long pledgedSrcSize) { if (pledgedSrcSize==0) pledgedSrcSize = ZSTD_CONTENTSIZE_UNKNOWN; /* preserve "0 == unknown" behavior */ return ZSTD_initCStream_advanced(zbc, dict, dictSize, params, pledgedSrcSize); } size_t ZBUFF_compressInitDictionary(ZBUFF_CCtx* zbc, const void* dict, size_t dictSize, int compressionLevel) { return ZSTD_initCStream_usingDict(zbc, dict, dictSize, compressionLevel); } size_t ZBUFF_compressInit(ZBUFF_CCtx* zbc, int compressionLevel) { return ZSTD_initCStream(zbc, compressionLevel); } /* ====== Compression ====== */ size_t ZBUFF_compressContinue(ZBUFF_CCtx* zbc, void* dst, size_t* dstCapacityPtr, const void* src, size_t* srcSizePtr) { size_t result; ZSTD_outBuffer outBuff; ZSTD_inBuffer inBuff; outBuff.dst = dst; outBuff.pos = 0; outBuff.size = *dstCapacityPtr; inBuff.src = src; inBuff.pos = 0; inBuff.size = *srcSizePtr; result = ZSTD_compressStream(zbc, &outBuff, &inBuff); *dstCapacityPtr = outBuff.pos; *srcSizePtr = inBuff.pos; return result; } /* ====== Finalize ====== */ size_t ZBUFF_compressFlush(ZBUFF_CCtx* zbc, void* dst, size_t* dstCapacityPtr) { size_t result; ZSTD_outBuffer outBuff; outBuff.dst = dst; outBuff.pos = 0; outBuff.size = *dstCapacityPtr; result = ZSTD_flushStream(zbc, &outBuff); *dstCapacityPtr = outBuff.pos; return result; } size_t ZBUFF_compressEnd(ZBUFF_CCtx* zbc, void* dst, size_t* dstCapacityPtr) { size_t result; ZSTD_outBuffer outBuff; outBuff.dst = dst; outBuff.pos = 0; outBuff.size = *dstCapacityPtr; result = ZSTD_endStream(zbc, &outBuff); *dstCapacityPtr = outBuff.pos; return result; } /* ************************************* * Tool functions ***************************************/ size_t ZBUFF_recommendedCInSize(void) { return ZSTD_CStreamInSize(); } size_t ZBUFF_recommendedCOutSize(void) { return ZSTD_CStreamOutSize(); } borgbackup-1.1.15/src/borg/algorithms/zstd/lib/zstd.h0000644000175000017500000035674513771325506022511 0ustar useruser00000000000000/* * Copyright (c) 2016-2020, Yann Collet, Facebook, Inc. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the * LICENSE file in the root directory of this source tree) and the GPLv2 (found * in the COPYING file in the root directory of this source tree). * You may select, at your option, one of the above-listed licenses. 
*/ #if defined (__cplusplus) extern "C" { #endif #ifndef ZSTD_H_235446 #define ZSTD_H_235446 /* ====== Dependency ======*/ #include /* INT_MAX */ #include /* size_t */ /* ===== ZSTDLIB_API : control library symbols visibility ===== */ #ifndef ZSTDLIB_VISIBILITY # if defined(__GNUC__) && (__GNUC__ >= 4) # define ZSTDLIB_VISIBILITY __attribute__ ((visibility ("default"))) # else # define ZSTDLIB_VISIBILITY # endif #endif #if defined(ZSTD_DLL_EXPORT) && (ZSTD_DLL_EXPORT==1) # define ZSTDLIB_API __declspec(dllexport) ZSTDLIB_VISIBILITY #elif defined(ZSTD_DLL_IMPORT) && (ZSTD_DLL_IMPORT==1) # define ZSTDLIB_API __declspec(dllimport) ZSTDLIB_VISIBILITY /* It isn't required but allows to generate better code, saving a function pointer load from the IAT and an indirect jump.*/ #else # define ZSTDLIB_API ZSTDLIB_VISIBILITY #endif /******************************************************************************* Introduction zstd, short for Zstandard, is a fast lossless compression algorithm, targeting real-time compression scenarios at zlib-level and better compression ratios. The zstd compression library provides in-memory compression and decompression functions. The library supports regular compression levels from 1 up to ZSTD_maxCLevel(), which is currently 22. Levels >= 20, labeled `--ultra`, should be used with caution, as they require more memory. The library also offers negative compression levels, which extend the range of speed vs. ratio preferences. The lower the level, the faster the speed (at the cost of compression). Compression can be done in: - a single step (described as Simple API) - a single step, reusing a context (described as Explicit context) - unbounded multiple steps (described as Streaming compression) The compression ratio achievable on small data can be highly improved using a dictionary. Dictionary compression can be performed in: - a single step (described as Simple dictionary API) - a single step, reusing a dictionary (described as Bulk-processing dictionary API) Advanced experimental functions can be accessed using `#define ZSTD_STATIC_LINKING_ONLY` before including zstd.h. Advanced experimental APIs should never be used with a dynamically-linked library. They are not "stable"; their definitions or signatures may change in the future. Only static linking is allowed. 
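
  Illustrative sketch (added here for clarity, not part of the upstream header),
  showing a "Simple API" single-step round trip, assuming the caller owns
  `src`/`srcSize`; error handling and the UNKNOWN/ERROR content-size cases
  are elided:

      size_t const bound = ZSTD_compressBound(srcSize);
      void* const cBuf = malloc(bound);
      size_t const cSize = ZSTD_compress(cBuf, bound, src, srcSize, ZSTD_CLEVEL_DEFAULT);
      if (!ZSTD_isError(cSize)) {
          unsigned long long const rSize = ZSTD_getFrameContentSize(cBuf, cSize);
          void* const rBuf = malloc((size_t)rSize);
          size_t const dSize = ZSTD_decompress(rBuf, (size_t)rSize, cBuf, cSize);
          free(rBuf);
      }
      free(cBuf);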
*******************************************************************************/ /*------ Version ------*/ #define ZSTD_VERSION_MAJOR 1 #define ZSTD_VERSION_MINOR 4 #define ZSTD_VERSION_RELEASE 5 #define ZSTD_VERSION_NUMBER (ZSTD_VERSION_MAJOR *100*100 + ZSTD_VERSION_MINOR *100 + ZSTD_VERSION_RELEASE) ZSTDLIB_API unsigned ZSTD_versionNumber(void); /**< to check runtime library version */ #define ZSTD_LIB_VERSION ZSTD_VERSION_MAJOR.ZSTD_VERSION_MINOR.ZSTD_VERSION_RELEASE #define ZSTD_QUOTE(str) #str #define ZSTD_EXPAND_AND_QUOTE(str) ZSTD_QUOTE(str) #define ZSTD_VERSION_STRING ZSTD_EXPAND_AND_QUOTE(ZSTD_LIB_VERSION) ZSTDLIB_API const char* ZSTD_versionString(void); /* requires v1.3.0+ */ /* ************************************* * Default constant ***************************************/ #ifndef ZSTD_CLEVEL_DEFAULT # define ZSTD_CLEVEL_DEFAULT 3 #endif /* ************************************* * Constants ***************************************/ /* All magic numbers are supposed read/written to/from files/memory using little-endian convention */ #define ZSTD_MAGICNUMBER 0xFD2FB528 /* valid since v0.8.0 */ #define ZSTD_MAGIC_DICTIONARY 0xEC30A437 /* valid since v0.7.0 */ #define ZSTD_MAGIC_SKIPPABLE_START 0x184D2A50 /* all 16 values, from 0x184D2A50 to 0x184D2A5F, signal the beginning of a skippable frame */ #define ZSTD_MAGIC_SKIPPABLE_MASK 0xFFFFFFF0 #define ZSTD_BLOCKSIZELOG_MAX 17 #define ZSTD_BLOCKSIZE_MAX (1<= `ZSTD_compressBound(srcSize)`. * @return : compressed size written into `dst` (<= `dstCapacity), * or an error code if it fails (which can be tested using ZSTD_isError()). */ ZSTDLIB_API size_t ZSTD_compress( void* dst, size_t dstCapacity, const void* src, size_t srcSize, int compressionLevel); /*! ZSTD_decompress() : * `compressedSize` : must be the _exact_ size of some number of compressed and/or skippable frames. * `dstCapacity` is an upper bound of originalSize to regenerate. * If user cannot imply a maximum upper bound, it's better to use streaming mode to decompress data. * @return : the number of bytes decompressed into `dst` (<= `dstCapacity`), * or an errorCode if it fails (which can be tested using ZSTD_isError()). */ ZSTDLIB_API size_t ZSTD_decompress( void* dst, size_t dstCapacity, const void* src, size_t compressedSize); /*! ZSTD_getFrameContentSize() : requires v1.3.0+ * `src` should point to the start of a ZSTD encoded frame. * `srcSize` must be at least as large as the frame header. * hint : any size >= `ZSTD_frameHeaderSize_max` is large enough. * @return : - decompressed size of `src` frame content, if known * - ZSTD_CONTENTSIZE_UNKNOWN if the size cannot be determined * - ZSTD_CONTENTSIZE_ERROR if an error occurred (e.g. invalid magic number, srcSize too small) * note 1 : a 0 return value means the frame is valid but "empty". * note 2 : decompressed size is an optional field, it may not be present, typically in streaming mode. * When `return==ZSTD_CONTENTSIZE_UNKNOWN`, data to decompress could be any size. * In which case, it's necessary to use streaming mode to decompress data. * Optionally, application can rely on some implicit limit, * as ZSTD_decompress() only needs an upper bound of decompressed size. * (For example, data could be necessarily cut into blocks <= 16 KB). * note 3 : decompressed size is always present when compression is completed using single-pass functions, * such as ZSTD_compress(), ZSTD_compressCCtx() ZSTD_compress_usingDict() or ZSTD_compress_usingCDict(). 
* note 4 : decompressed size can be very large (64-bits value), * potentially larger than what local system can handle as a single memory segment. * In which case, it's necessary to use streaming mode to decompress data. * note 5 : If source is untrusted, decompressed size could be wrong or intentionally modified. * Always ensure return value fits within application's authorized limits. * Each application can set its own limits. * note 6 : This function replaces ZSTD_getDecompressedSize() */ #define ZSTD_CONTENTSIZE_UNKNOWN (0ULL - 1) #define ZSTD_CONTENTSIZE_ERROR (0ULL - 2) ZSTDLIB_API unsigned long long ZSTD_getFrameContentSize(const void *src, size_t srcSize); /*! ZSTD_getDecompressedSize() : * NOTE: This function is now obsolete, in favor of ZSTD_getFrameContentSize(). * Both functions work the same way, but ZSTD_getDecompressedSize() blends * "empty", "unknown" and "error" results to the same return value (0), * while ZSTD_getFrameContentSize() gives them separate return values. * @return : decompressed size of `src` frame content _if known and not empty_, 0 otherwise. */ ZSTDLIB_API unsigned long long ZSTD_getDecompressedSize(const void* src, size_t srcSize); /*! ZSTD_findFrameCompressedSize() : * `src` should point to the start of a ZSTD frame or skippable frame. * `srcSize` must be >= first frame size * @return : the compressed size of the first frame starting at `src`, * suitable to pass as `srcSize` to `ZSTD_decompress` or similar, * or an error code if input is invalid */ ZSTDLIB_API size_t ZSTD_findFrameCompressedSize(const void* src, size_t srcSize); /*====== Helper functions ======*/ #define ZSTD_COMPRESSBOUND(srcSize) ((srcSize) + ((srcSize)>>8) + (((srcSize) < (128<<10)) ? (((128<<10) - (srcSize)) >> 11) /* margin, from 64 to 0 */ : 0)) /* this formula ensures that bound(A) + bound(B) <= bound(A+B) as long as A and B >= 128 KB */ ZSTDLIB_API size_t ZSTD_compressBound(size_t srcSize); /*!< maximum compressed size in worst case single-pass scenario */ ZSTDLIB_API unsigned ZSTD_isError(size_t code); /*!< tells if a `size_t` function result is an error code */ ZSTDLIB_API const char* ZSTD_getErrorName(size_t code); /*!< provides readable string from an error code */ ZSTDLIB_API int ZSTD_minCLevel(void); /*!< minimum negative compression level allowed */ ZSTDLIB_API int ZSTD_maxCLevel(void); /*!< maximum compression level available */ /*************************************** * Explicit context ***************************************/ /*= Compression context * When compressing many times, * it is recommended to allocate a context just once, * and re-use it for each successive compression operation. * This will make workload friendlier for system's memory. * Note : re-using context is just a speed / resource optimization. * It doesn't change the compression ratio, which remains identical. * Note 2 : In multi-threaded environments, * use one different context per thread for parallel execution. */ typedef struct ZSTD_CCtx_s ZSTD_CCtx; ZSTDLIB_API ZSTD_CCtx* ZSTD_createCCtx(void); ZSTDLIB_API size_t ZSTD_freeCCtx(ZSTD_CCtx* cctx); /*! ZSTD_compressCCtx() : * Same as ZSTD_compress(), using an explicit ZSTD_CCtx. * Important : in order to behave similarly to `ZSTD_compress()`, * this function compresses at requested compression level, * __ignoring any other parameter__ . * If any advanced parameter was set using the advanced API, * they will all be reset. Only `compressionLevel` remains. 
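 *
 * Illustrative sketch (not part of the upstream documentation): reusing one
 * context for several independent buffers, assuming caller-owned `bufs[i]`,
 * `sizes[i]`, `dst` and `dstCapacity`, with error handling elided:
 *     ZSTD_CCtx* const cctx = ZSTD_createCCtx();
 *     for (size_t i = 0; i < nbBufs; i++) {
 *         size_t const cSize = ZSTD_compressCCtx(cctx, dst, dstCapacity,
 *                                                bufs[i], sizes[i], 3);
 *         if (ZSTD_isError(cSize)) break;
 *     }
 *     ZSTD_freeCCtx(cctx);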
*/ ZSTDLIB_API size_t ZSTD_compressCCtx(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize, int compressionLevel); /*= Decompression context * When decompressing many times, * it is recommended to allocate a context only once, * and re-use it for each successive compression operation. * This will make workload friendlier for system's memory. * Use one context per thread for parallel execution. */ typedef struct ZSTD_DCtx_s ZSTD_DCtx; ZSTDLIB_API ZSTD_DCtx* ZSTD_createDCtx(void); ZSTDLIB_API size_t ZSTD_freeDCtx(ZSTD_DCtx* dctx); /*! ZSTD_decompressDCtx() : * Same as ZSTD_decompress(), * requires an allocated ZSTD_DCtx. * Compatible with sticky parameters. */ ZSTDLIB_API size_t ZSTD_decompressDCtx(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize); /*************************************** * Advanced compression API ***************************************/ /* API design : * Parameters are pushed one by one into an existing context, * using ZSTD_CCtx_set*() functions. * Pushed parameters are sticky : they are valid for next compressed frame, and any subsequent frame. * "sticky" parameters are applicable to `ZSTD_compress2()` and `ZSTD_compressStream*()` ! * __They do not apply to "simple" one-shot variants such as ZSTD_compressCCtx()__ . * * It's possible to reset all parameters to "default" using ZSTD_CCtx_reset(). * * This API supercedes all other "advanced" API entry points in the experimental section. * In the future, we expect to remove from experimental API entry points which are redundant with this API. */ /* Compression strategies, listed from fastest to strongest */ typedef enum { ZSTD_fast=1, ZSTD_dfast=2, ZSTD_greedy=3, ZSTD_lazy=4, ZSTD_lazy2=5, ZSTD_btlazy2=6, ZSTD_btopt=7, ZSTD_btultra=8, ZSTD_btultra2=9 /* note : new strategies _might_ be added in the future. Only the order (from fast to strong) is guaranteed */ } ZSTD_strategy; typedef enum { /* compression parameters * Note: When compressing with a ZSTD_CDict these parameters are superseded * by the parameters used to construct the ZSTD_CDict. * See ZSTD_CCtx_refCDict() for more info (superseded-by-cdict). */ ZSTD_c_compressionLevel=100, /* Set compression parameters according to pre-defined cLevel table. * Note that exact compression parameters are dynamically determined, * depending on both compression level and srcSize (when known). * Default level is ZSTD_CLEVEL_DEFAULT==3. * Special: value 0 means default, which is controlled by ZSTD_CLEVEL_DEFAULT. * Note 1 : it's possible to pass a negative compression level. * Note 2 : setting a level does not automatically set all other compression parameters * to default. Setting this will however eventually dynamically impact the compression * parameters which have not been manually set. The manually set * ones will 'stick'. */ /* Advanced compression parameters : * It's possible to pin down compression parameters to some specific values. * In which case, these values are no longer dynamically selected by the compressor */ ZSTD_c_windowLog=101, /* Maximum allowed back-reference distance, expressed as power of 2. * This will set a memory budget for streaming decompression, * with larger values requiring more memory * and typically compressing more. * Must be clamped between ZSTD_WINDOWLOG_MIN and ZSTD_WINDOWLOG_MAX. * Special: value 0 means "use default windowLog". * Note: Using a windowLog greater than ZSTD_WINDOWLOG_LIMIT_DEFAULT * requires explicitly allowing such size at streaming decompression stage. 
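 * Illustrative example (not in the upstream text): a decoder that must
 * accept frames produced with, say, windowLog 27 could opt in via
 *     ZSTD_DCtx_setParameter(dctx, ZSTD_d_windowLogMax, 27);
 * (ZSTD_d_windowLogMax is documented in the advanced decompression API below).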
*/ ZSTD_c_hashLog=102, /* Size of the initial probe table, as a power of 2. * Resulting memory usage is (1 << (hashLog+2)). * Must be clamped between ZSTD_HASHLOG_MIN and ZSTD_HASHLOG_MAX. * Larger tables improve compression ratio of strategies <= dFast, * and improve speed of strategies > dFast. * Special: value 0 means "use default hashLog". */ ZSTD_c_chainLog=103, /* Size of the multi-probe search table, as a power of 2. * Resulting memory usage is (1 << (chainLog+2)). * Must be clamped between ZSTD_CHAINLOG_MIN and ZSTD_CHAINLOG_MAX. * Larger tables result in better and slower compression. * This parameter is useless for "fast" strategy. * It's still useful when using "dfast" strategy, * in which case it defines a secondary probe table. * Special: value 0 means "use default chainLog". */ ZSTD_c_searchLog=104, /* Number of search attempts, as a power of 2. * More attempts result in better and slower compression. * This parameter is useless for "fast" and "dFast" strategies. * Special: value 0 means "use default searchLog". */ ZSTD_c_minMatch=105, /* Minimum size of searched matches. * Note that Zstandard can still find matches of smaller size, * it just tweaks its search algorithm to look for this size and larger. * Larger values increase compression and decompression speed, but decrease ratio. * Must be clamped between ZSTD_MINMATCH_MIN and ZSTD_MINMATCH_MAX. * Note that currently, for all strategies < btopt, effective minimum is 4. * , for all strategies > fast, effective maximum is 6. * Special: value 0 means "use default minMatchLength". */ ZSTD_c_targetLength=106, /* Impact of this field depends on strategy. * For strategies btopt, btultra & btultra2: * Length of Match considered "good enough" to stop search. * Larger values make compression stronger, and slower. * For strategy fast: * Distance between match sampling. * Larger values make compression faster, and weaker. * Special: value 0 means "use default targetLength". */ ZSTD_c_strategy=107, /* See ZSTD_strategy enum definition. * The higher the value of selected strategy, the more complex it is, * resulting in stronger and slower compression. * Special: value 0 means "use default strategy". */ /* LDM mode parameters */ ZSTD_c_enableLongDistanceMatching=160, /* Enable long distance matching. * This parameter is designed to improve compression ratio * for large inputs, by finding large matches at long distance. * It increases memory usage and window size. * Note: enabling this parameter increases default ZSTD_c_windowLog to 128 MB * except when expressly set to a different value. */ ZSTD_c_ldmHashLog=161, /* Size of the table for long distance matching, as a power of 2. * Larger values increase memory usage and compression ratio, * but decrease compression speed. * Must be clamped between ZSTD_HASHLOG_MIN and ZSTD_HASHLOG_MAX * default: windowlog - 7. * Special: value 0 means "automatically determine hashlog". */ ZSTD_c_ldmMinMatch=162, /* Minimum match size for long distance matcher. * Larger/too small values usually decrease compression ratio. * Must be clamped between ZSTD_LDM_MINMATCH_MIN and ZSTD_LDM_MINMATCH_MAX. * Special: value 0 means "use default value" (default: 64). */ ZSTD_c_ldmBucketSizeLog=163, /* Log size of each bucket in the LDM hash table for collision resolution. * Larger values improve collision resolution but decrease compression speed. * The maximum value is ZSTD_LDM_BUCKETSIZELOG_MAX. * Special: value 0 means "use default value" (default: 3). 
*/ ZSTD_c_ldmHashRateLog=164, /* Frequency of inserting/looking up entries into the LDM hash table. * Must be clamped between 0 and (ZSTD_WINDOWLOG_MAX - ZSTD_HASHLOG_MIN). * Default is MAX(0, (windowLog - ldmHashLog)), optimizing hash table usage. * Larger values improve compression speed. * Deviating far from default value will likely result in a compression ratio decrease. * Special: value 0 means "automatically determine hashRateLog". */ /* frame parameters */ ZSTD_c_contentSizeFlag=200, /* Content size will be written into frame header _whenever known_ (default:1) * Content size must be known at the beginning of compression. * This is automatically the case when using ZSTD_compress2(), * For streaming scenarios, content size must be provided with ZSTD_CCtx_setPledgedSrcSize() */ ZSTD_c_checksumFlag=201, /* A 32-bits checksum of content is written at end of frame (default:0) */ ZSTD_c_dictIDFlag=202, /* When applicable, dictionary's ID is written into frame header (default:1) */ /* multi-threading parameters */ /* These parameters are only useful if multi-threading is enabled (compiled with build macro ZSTD_MULTITHREAD). * They return an error otherwise. */ ZSTD_c_nbWorkers=400, /* Select how many threads will be spawned to compress in parallel. * When nbWorkers >= 1, triggers asynchronous mode when used with ZSTD_compressStream*() : * ZSTD_compressStream*() consumes input and flush output if possible, but immediately gives back control to caller, * while compression work is performed in parallel, within worker threads. * (note : a strong exception to this rule is when first invocation of ZSTD_compressStream2() sets ZSTD_e_end : * in which case, ZSTD_compressStream2() delegates to ZSTD_compress2(), which is always a blocking call). * More workers improve speed, but also increase memory usage. * Default value is `0`, aka "single-threaded mode" : no worker is spawned, compression is performed inside Caller's thread, all invocations are blocking */ ZSTD_c_jobSize=401, /* Size of a compression job. This value is enforced only when nbWorkers >= 1. * Each compression job is completed in parallel, so this value can indirectly impact the nb of active threads. * 0 means default, which is dynamically determined based on compression parameters. * Job size must be a minimum of overlap size, or 1 MB, whichever is largest. * The minimum size is automatically and transparently enforced. */ ZSTD_c_overlapLog=402, /* Control the overlap size, as a fraction of window size. * The overlap size is an amount of data reloaded from previous job at the beginning of a new job. * It helps preserve compression ratio, while each job is compressed in parallel. * This value is enforced only when nbWorkers >= 1. * Larger values increase compression ratio, but decrease speed. * Possible values range from 0 to 9 : * - 0 means "default" : value will be determined by the library, depending on strategy * - 1 means "no overlap" * - 9 means "full overlap", using a full window size. * Each intermediate rank increases/decreases load size by a factor 2 : * 9: full window; 8: w/2; 7: w/4; 6: w/8; 5:w/16; 4: w/32; 3:w/64; 2:w/128; 1:no overlap; 0:default * default value varies between 6 and 9, depending on strategy */ /* note : additional experimental parameters are also available * within the experimental section of the API. 
* At the time of this writing, they include : * ZSTD_c_rsyncable * ZSTD_c_format * ZSTD_c_forceMaxWindow * ZSTD_c_forceAttachDict * ZSTD_c_literalCompressionMode * ZSTD_c_targetCBlockSize * ZSTD_c_srcSizeHint * Because they are not stable, it's necessary to define ZSTD_STATIC_LINKING_ONLY to access them. * note : never ever use experimentalParam? names directly; * also, the enums values themselves are unstable and can still change. */ ZSTD_c_experimentalParam1=500, ZSTD_c_experimentalParam2=10, ZSTD_c_experimentalParam3=1000, ZSTD_c_experimentalParam4=1001, ZSTD_c_experimentalParam5=1002, ZSTD_c_experimentalParam6=1003, ZSTD_c_experimentalParam7=1004 } ZSTD_cParameter; typedef struct { size_t error; int lowerBound; int upperBound; } ZSTD_bounds; /*! ZSTD_cParam_getBounds() : * All parameters must belong to an interval with lower and upper bounds, * otherwise they will either trigger an error or be automatically clamped. * @return : a structure, ZSTD_bounds, which contains * - an error status field, which must be tested using ZSTD_isError() * - lower and upper bounds, both inclusive */ ZSTDLIB_API ZSTD_bounds ZSTD_cParam_getBounds(ZSTD_cParameter cParam); /*! ZSTD_CCtx_setParameter() : * Set one compression parameter, selected by enum ZSTD_cParameter. * All parameters have valid bounds. Bounds can be queried using ZSTD_cParam_getBounds(). * Providing a value beyond bound will either clamp it, or trigger an error (depending on parameter). * Setting a parameter is generally only possible during frame initialization (before starting compression). * Exception : when using multi-threading mode (nbWorkers >= 1), * the following parameters can be updated _during_ compression (within same frame): * => compressionLevel, hashLog, chainLog, searchLog, minMatch, targetLength and strategy. * new parameters will be active for next job only (after a flush()). * @return : an error code (which can be tested using ZSTD_isError()). */ ZSTDLIB_API size_t ZSTD_CCtx_setParameter(ZSTD_CCtx* cctx, ZSTD_cParameter param, int value); /*! ZSTD_CCtx_setPledgedSrcSize() : * Total input data size to be compressed as a single frame. * Value will be written in frame header, unless if explicitly forbidden using ZSTD_c_contentSizeFlag. * This value will also be controlled at end of frame, and trigger an error if not respected. * @result : 0, or an error code (which can be tested with ZSTD_isError()). * Note 1 : pledgedSrcSize==0 actually means zero, aka an empty frame. * In order to mean "unknown content size", pass constant ZSTD_CONTENTSIZE_UNKNOWN. * ZSTD_CONTENTSIZE_UNKNOWN is default value for any new frame. * Note 2 : pledgedSrcSize is only valid once, for the next frame. * It's discarded at the end of the frame, and replaced by ZSTD_CONTENTSIZE_UNKNOWN. * Note 3 : Whenever all input data is provided and consumed in a single round, * for example with ZSTD_compress2(), * or invoking immediately ZSTD_compressStream2(,,,ZSTD_e_end), * this value is automatically overridden by srcSize instead. */ ZSTDLIB_API size_t ZSTD_CCtx_setPledgedSrcSize(ZSTD_CCtx* cctx, unsigned long long pledgedSrcSize); typedef enum { ZSTD_reset_session_only = 1, ZSTD_reset_parameters = 2, ZSTD_reset_session_and_parameters = 3 } ZSTD_ResetDirective; /*! ZSTD_CCtx_reset() : * There are 2 different things that can be reset, independently or jointly : * - The session : will stop compressing current frame, and make CCtx ready to start a new one. * Useful after an error, or to interrupt any ongoing compression. 
* Any internal data not yet flushed is cancelled. * Compression parameters and dictionary remain unchanged. * They will be used to compress next frame. * Resetting session never fails. * - The parameters : changes all parameters back to "default". * This removes any reference to any dictionary too. * Parameters can only be changed between 2 sessions (i.e. no compression is currently ongoing) * otherwise the reset fails, and function returns an error value (which can be tested using ZSTD_isError()) * - Both : similar to resetting the session, followed by resetting parameters. */ ZSTDLIB_API size_t ZSTD_CCtx_reset(ZSTD_CCtx* cctx, ZSTD_ResetDirective reset); /*! ZSTD_compress2() : * Behave the same as ZSTD_compressCCtx(), but compression parameters are set using the advanced API. * ZSTD_compress2() always starts a new frame. * Should cctx hold data from a previously unfinished frame, everything about it is forgotten. * - Compression parameters are pushed into CCtx before starting compression, using ZSTD_CCtx_set*() * - The function is always blocking, returns when compression is completed. * Hint : compression runs faster if `dstCapacity` >= `ZSTD_compressBound(srcSize)`. * @return : compressed size written into `dst` (<= `dstCapacity), * or an error code if it fails (which can be tested using ZSTD_isError()). */ ZSTDLIB_API size_t ZSTD_compress2( ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize); /*************************************** * Advanced decompression API ***************************************/ /* The advanced API pushes parameters one by one into an existing DCtx context. * Parameters are sticky, and remain valid for all following frames * using the same DCtx context. * It's possible to reset parameters to default values using ZSTD_DCtx_reset(). * Note : This API is compatible with existing ZSTD_decompressDCtx() and ZSTD_decompressStream(). * Therefore, no new decompression function is necessary. */ typedef enum { ZSTD_d_windowLogMax=100, /* Select a size limit (in power of 2) beyond which * the streaming API will refuse to allocate memory buffer * in order to protect the host from unreasonable memory requirements. * This parameter is only useful in streaming mode, since no internal buffer is allocated in single-pass mode. * By default, a decompression context accepts window sizes <= (1 << ZSTD_WINDOWLOG_LIMIT_DEFAULT). * Special: value 0 means "use default maximum windowLog". */ /* note : additional experimental parameters are also available * within the experimental section of the API. * At the time of this writing, they include : * ZSTD_d_format * ZSTD_d_stableOutBuffer * Because they are not stable, it's necessary to define ZSTD_STATIC_LINKING_ONLY to access them. * note : never ever use experimentalParam? names directly */ ZSTD_d_experimentalParam1=1000, ZSTD_d_experimentalParam2=1001 } ZSTD_dParameter; /*! ZSTD_dParam_getBounds() : * All parameters must belong to an interval with lower and upper bounds, * otherwise they will either trigger an error or be automatically clamped. * @return : a structure, ZSTD_bounds, which contains * - an error status field, which must be tested using ZSTD_isError() * - both lower and upper bounds, inclusive */ ZSTDLIB_API ZSTD_bounds ZSTD_dParam_getBounds(ZSTD_dParameter dParam); /*! ZSTD_DCtx_setParameter() : * Set one compression parameter, selected by enum ZSTD_dParameter. * All parameters have valid bounds. Bounds can be queried using ZSTD_dParam_getBounds(). 
* Providing a value beyond bound will either clamp it, or trigger an error (depending on parameter). * Setting a parameter is only possible during frame initialization (before starting decompression). * @return : 0, or an error code (which can be tested using ZSTD_isError()). */ ZSTDLIB_API size_t ZSTD_DCtx_setParameter(ZSTD_DCtx* dctx, ZSTD_dParameter param, int value); /*! ZSTD_DCtx_reset() : * Return a DCtx to clean state. * Session and parameters can be reset jointly or separately. * Parameters can only be reset when no active frame is being decompressed. * @return : 0, or an error code, which can be tested with ZSTD_isError() */ ZSTDLIB_API size_t ZSTD_DCtx_reset(ZSTD_DCtx* dctx, ZSTD_ResetDirective reset); /**************************** * Streaming ****************************/ typedef struct ZSTD_inBuffer_s { const void* src; /**< start of input buffer */ size_t size; /**< size of input buffer */ size_t pos; /**< position where reading stopped. Will be updated. Necessarily 0 <= pos <= size */ } ZSTD_inBuffer; typedef struct ZSTD_outBuffer_s { void* dst; /**< start of output buffer */ size_t size; /**< size of output buffer */ size_t pos; /**< position where writing stopped. Will be updated. Necessarily 0 <= pos <= size */ } ZSTD_outBuffer; /*-*********************************************************************** * Streaming compression - HowTo * * A ZSTD_CStream object is required to track streaming operation. * Use ZSTD_createCStream() and ZSTD_freeCStream() to create/release resources. * ZSTD_CStream objects can be reused multiple times on consecutive compression operations. * It is recommended to re-use ZSTD_CStream since it will play nicer with system's memory, by re-using already allocated memory. * * For parallel execution, use one separate ZSTD_CStream per thread. * * note : since v1.3.0, ZSTD_CStream and ZSTD_CCtx are the same thing. * * Parameters are sticky : when starting a new compression on the same context, * it will re-use the same sticky parameters as previous compression session. * When in doubt, it's recommended to fully initialize the context before usage. * Use ZSTD_CCtx_reset() to reset the context and ZSTD_CCtx_setParameter(), * ZSTD_CCtx_setPledgedSrcSize(), or ZSTD_CCtx_loadDictionary() and friends to * set more specific parameters, the pledged source size, or load a dictionary. * * Use ZSTD_compressStream2() with ZSTD_e_continue as many times as necessary to * consume input stream. The function will automatically update both `pos` * fields within `input` and `output`. * Note that the function may not consume the entire input, for example, because * the output buffer is already full, in which case `input.pos < input.size`. * The caller must check if input has been entirely consumed. * If not, the caller must make some room to receive more compressed data, * and then present again remaining input data. * note: ZSTD_e_continue is guaranteed to make some forward progress when called, * but doesn't guarantee maximal forward progress. This is especially relevant * when compressing with multiple threads. The call won't block if it can * consume some input, but if it can't it will wait for some, but not all, * output to be flushed. * @return : provides a minimum amount of data remaining to be flushed from internal buffers * or an error code, which can be tested using ZSTD_isError(). * * At any moment, it's possible to flush whatever data might remain stuck within internal buffer, * using ZSTD_compressStream2() with ZSTD_e_flush. `output->pos` will be updated. 
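 *
 * Illustrative sketch (not part of the upstream comment), assuming caller-owned
 * `outBuf`/`outCapacity`:
 *     ZSTD_inBuffer noInput = { NULL, 0, 0 };
 *     ZSTD_outBuffer output = { outBuf, outCapacity, 0 };
 *     size_t const remaining = ZSTD_compressStream2(cctx, &output, &noInput, ZSTD_e_flush);
 *     (output.pos bytes are now ready in outBuf; remaining > 0 means more flushes are needed)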
* Note that, if `output->size` is too small, a single invocation with ZSTD_e_flush might not be enough (return code > 0). * In which case, make some room to receive more compressed data, and call again ZSTD_compressStream2() with ZSTD_e_flush. * You must continue calling ZSTD_compressStream2() with ZSTD_e_flush until it returns 0, at which point you can change the * operation. * note: ZSTD_e_flush will flush as much output as possible, meaning when compressing with multiple threads, it will * block until the flush is complete or the output buffer is full. * @return : 0 if internal buffers are entirely flushed, * >0 if some data still present within internal buffer (the value is minimal estimation of remaining size), * or an error code, which can be tested using ZSTD_isError(). * * Calling ZSTD_compressStream2() with ZSTD_e_end instructs to finish a frame. * It will perform a flush and write frame epilogue. * The epilogue is required for decoders to consider a frame completed. * flush operation is the same, and follows same rules as calling ZSTD_compressStream2() with ZSTD_e_flush. * You must continue calling ZSTD_compressStream2() with ZSTD_e_end until it returns 0, at which point you are free to * start a new frame. * note: ZSTD_e_end will flush as much output as possible, meaning when compressing with multiple threads, it will * block until the flush is complete or the output buffer is full. * @return : 0 if frame fully completed and fully flushed, * >0 if some data still present within internal buffer (the value is minimal estimation of remaining size), * or an error code, which can be tested using ZSTD_isError(). * * *******************************************************************/ typedef ZSTD_CCtx ZSTD_CStream; /**< CCtx and CStream are now effectively same object (>= v1.3.0) */ /* Continue to distinguish them for compatibility with older versions <= v1.2.0 */ /*===== ZSTD_CStream management functions =====*/ ZSTDLIB_API ZSTD_CStream* ZSTD_createCStream(void); ZSTDLIB_API size_t ZSTD_freeCStream(ZSTD_CStream* zcs); /*===== Streaming compression functions =====*/ typedef enum { ZSTD_e_continue=0, /* collect more data, encoder decides when to output compressed result, for optimal compression ratio */ ZSTD_e_flush=1, /* flush any data provided so far, * it creates (at least) one new block, that can be decoded immediately on reception; * frame will continue: any future data can still reference previously compressed data, improving compression. * note : multithreaded compression will block to flush as much output as possible. */ ZSTD_e_end=2 /* flush any remaining data _and_ close current frame. * note that frame is only closed after compressed data is fully flushed (return value == 0). * After that point, any additional data starts a new frame. * note : each frame is independent (does not reference any content from previous frame). : note : multithreaded compression will block to flush as much output as possible. */ } ZSTD_EndDirective; /*! ZSTD_compressStream2() : * Behaves about the same as ZSTD_compressStream, with additional control on end directive. * - Compression parameters are pushed into CCtx before starting compression, using ZSTD_CCtx_set*() * - Compression parameters cannot be changed once compression is started (save a list of exceptions in multi-threading mode) * - output->pos must be <= dstCapacity, input->pos must be <= srcSize * - output->pos and input->pos will be updated. They are guaranteed to remain below their respective limit. 
* - When nbWorkers==0 (default), function is blocking : it completes its job before returning to caller. * - When nbWorkers>=1, function is non-blocking : it just acquires a copy of input, and distributes jobs to internal worker threads, flush whatever is available, * and then immediately returns, just indicating that there is some data remaining to be flushed. * The function nonetheless guarantees forward progress : it will return only after it reads or write at least 1+ byte. * - Exception : if the first call requests a ZSTD_e_end directive and provides enough dstCapacity, the function delegates to ZSTD_compress2() which is always blocking. * - @return provides a minimum amount of data remaining to be flushed from internal buffers * or an error code, which can be tested using ZSTD_isError(). * if @return != 0, flush is not fully completed, there is still some data left within internal buffers. * This is useful for ZSTD_e_flush, since in this case more flushes are necessary to empty all buffers. * For ZSTD_e_end, @return == 0 when internal buffers are fully flushed and frame is completed. * - after a ZSTD_e_end directive, if internal buffer is not fully flushed (@return != 0), * only ZSTD_e_end or ZSTD_e_flush operations are allowed. * Before starting a new compression job, or changing compression parameters, * it is required to fully flush internal buffers. */ ZSTDLIB_API size_t ZSTD_compressStream2( ZSTD_CCtx* cctx, ZSTD_outBuffer* output, ZSTD_inBuffer* input, ZSTD_EndDirective endOp); /* These buffer sizes are softly recommended. * They are not required : ZSTD_compressStream*() happily accepts any buffer size, for both input and output. * Respecting the recommended size just makes it a bit easier for ZSTD_compressStream*(), * reducing the amount of memory shuffling and buffering, resulting in minor performance savings. * * However, note that these recommendations are from the perspective of a C caller program. * If the streaming interface is invoked from some other language, * especially managed ones such as Java or Go, through a foreign function interface such as jni or cgo, * a major performance rule is to reduce crossing such interface to an absolute minimum. * It's not rare that performance ends being spent more into the interface, rather than compression itself. * In which cases, prefer using large buffers, as large as practical, * for both input and output, to reduce the nb of roundtrips. */ ZSTDLIB_API size_t ZSTD_CStreamInSize(void); /**< recommended size for input buffer */ ZSTDLIB_API size_t ZSTD_CStreamOutSize(void); /**< recommended size for output buffer. Guarantee to successfully flush at least one complete compressed block. */ /* ***************************************************************************** * This following is a legacy streaming API. * It can be replaced by ZSTD_CCtx_reset() and ZSTD_compressStream2(). * It is redundant, but remains fully supported. * Advanced parameters and dictionary compression can only be used through the * new API. ******************************************************************************/ /*! * Equivalent to: * * ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only); * ZSTD_CCtx_refCDict(zcs, NULL); // clear the dictionary (if any) * ZSTD_CCtx_setParameter(zcs, ZSTD_c_compressionLevel, compressionLevel); */ ZSTDLIB_API size_t ZSTD_initCStream(ZSTD_CStream* zcs, int compressionLevel); /*! * Alternative for ZSTD_compressStream2(zcs, output, input, ZSTD_e_continue). * NOTE: The return value is different. 
ZSTD_compressStream() returns a hint for * the next read size (if non-zero and not an error). ZSTD_compressStream2() * returns the minimum nb of bytes left to flush (if non-zero and not an error). */ ZSTDLIB_API size_t ZSTD_compressStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output, ZSTD_inBuffer* input); /*! Equivalent to ZSTD_compressStream2(zcs, output, &emptyInput, ZSTD_e_flush). */ ZSTDLIB_API size_t ZSTD_flushStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output); /*! Equivalent to ZSTD_compressStream2(zcs, output, &emptyInput, ZSTD_e_end). */ ZSTDLIB_API size_t ZSTD_endStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output); /*-*************************************************************************** * Streaming decompression - HowTo * * A ZSTD_DStream object is required to track streaming operations. * Use ZSTD_createDStream() and ZSTD_freeDStream() to create/release resources. * ZSTD_DStream objects can be re-used multiple times. * * Use ZSTD_initDStream() to start a new decompression operation. * @return : recommended first input size * Alternatively, use advanced API to set specific properties. * * Use ZSTD_decompressStream() repetitively to consume your input. * The function will update both `pos` fields. * If `input.pos < input.size`, some input has not been consumed. * It's up to the caller to present again remaining data. * The function tries to flush all data decoded immediately, respecting output buffer size. * If `output.pos < output.size`, decoder has flushed everything it could. * But if `output.pos == output.size`, there might be some data left within internal buffers., * In which case, call ZSTD_decompressStream() again to flush whatever remains in the buffer. * Note : with no additional input provided, amount of data flushed is necessarily <= ZSTD_BLOCKSIZE_MAX. * @return : 0 when a frame is completely decoded and fully flushed, * or an error code, which can be tested using ZSTD_isError(), * or any other value > 0, which means there is still some decoding or flushing to do to complete current frame : * the return value is a suggested next input size (just a hint for better latency) * that will never request more than the remaining frame size. * *******************************************************************************/ typedef ZSTD_DCtx ZSTD_DStream; /**< DCtx and DStream are now effectively same object (>= v1.3.0) */ /* For compatibility with versions <= v1.2.0, prefer differentiating them. */ /*===== ZSTD_DStream management functions =====*/ ZSTDLIB_API ZSTD_DStream* ZSTD_createDStream(void); ZSTDLIB_API size_t ZSTD_freeDStream(ZSTD_DStream* zds); /*===== Streaming decompression functions =====*/ /* This function is redundant with the advanced API and equivalent to: * * ZSTD_DCtx_reset(zds, ZSTD_reset_session_only); * ZSTD_DCtx_refDDict(zds, NULL); */ ZSTDLIB_API size_t ZSTD_initDStream(ZSTD_DStream* zds); ZSTDLIB_API size_t ZSTD_decompressStream(ZSTD_DStream* zds, ZSTD_outBuffer* output, ZSTD_inBuffer* input); ZSTDLIB_API size_t ZSTD_DStreamInSize(void); /*!< recommended size for input buffer */ ZSTDLIB_API size_t ZSTD_DStreamOutSize(void); /*!< recommended size for output buffer. Guarantee to successfully flush at least one complete block in all circumstances. */ /************************** * Simple dictionary API ***************************/ /*! ZSTD_compress_usingDict() : * Compression at an explicit compression level using a Dictionary. 
* A dictionary can be any arbitrary data segment (also called a prefix), * or a buffer with specified information (see dictBuilder/zdict.h). * Note : This function loads the dictionary, resulting in significant startup delay. * It's intended for a dictionary used only once. * Note 2 : When `dict == NULL || dictSize < 8` no dictionary is used. */ ZSTDLIB_API size_t ZSTD_compress_usingDict(ZSTD_CCtx* ctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize, const void* dict,size_t dictSize, int compressionLevel); /*! ZSTD_decompress_usingDict() : * Decompression using a known Dictionary. * Dictionary must be identical to the one used during compression. * Note : This function loads the dictionary, resulting in significant startup delay. * It's intended for a dictionary used only once. * Note : When `dict == NULL || dictSize < 8` no dictionary is used. */ ZSTDLIB_API size_t ZSTD_decompress_usingDict(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize, const void* dict,size_t dictSize); /*********************************** * Bulk processing dictionary API **********************************/ typedef struct ZSTD_CDict_s ZSTD_CDict; /*! ZSTD_createCDict() : * When compressing multiple messages or blocks using the same dictionary, * it's recommended to digest the dictionary only once, since it's a costly operation. * ZSTD_createCDict() will create a state from digesting a dictionary. * The resulting state can be used for future compression operations with very limited startup cost. * ZSTD_CDict can be created once and shared by multiple threads concurrently, since its usage is read-only. * @dictBuffer can be released after ZSTD_CDict creation, because its content is copied within CDict. * Note 1 : Consider experimental function `ZSTD_createCDict_byReference()` if you prefer to not duplicate @dictBuffer content. * Note 2 : A ZSTD_CDict can be created from an empty @dictBuffer, * in which case the only thing that it transports is the @compressionLevel. * This can be useful in a pipeline featuring ZSTD_compress_usingCDict() exclusively, * expecting a ZSTD_CDict parameter with any data, including those without a known dictionary. */ ZSTDLIB_API ZSTD_CDict* ZSTD_createCDict(const void* dictBuffer, size_t dictSize, int compressionLevel); /*! ZSTD_freeCDict() : * Function frees memory allocated by ZSTD_createCDict(). */ ZSTDLIB_API size_t ZSTD_freeCDict(ZSTD_CDict* CDict); /*! ZSTD_compress_usingCDict() : * Compression using a digested Dictionary. * Recommended when same dictionary is used multiple times. * Note : compression level is _decided at dictionary creation time_, * and frame parameters are hardcoded (dictID=yes, contentSize=yes, checksum=no) */ ZSTDLIB_API size_t ZSTD_compress_usingCDict(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize, const ZSTD_CDict* cdict); typedef struct ZSTD_DDict_s ZSTD_DDict; /*! ZSTD_createDDict() : * Create a digested dictionary, ready to start decompression operation without startup delay. * dictBuffer can be released after DDict creation, as its content is copied inside DDict. */ ZSTDLIB_API ZSTD_DDict* ZSTD_createDDict(const void* dictBuffer, size_t dictSize); /*! ZSTD_freeDDict() : * Function frees memory allocated with ZSTD_createDDict() */ ZSTDLIB_API size_t ZSTD_freeDDict(ZSTD_DDict* ddict); /*! ZSTD_decompress_usingDDict() : * Decompression using a digested Dictionary. * Recommended when same dictionary is used multiple times. 
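 * For illustration, a minimal sketch of the bulk-processing workflow
 * (error handling omitted; cctx, dctx, dictBuf, src, dst, out and their sizes
 *  are hypothetical caller-provided objects and buffers, level 3 is arbitrary) :
 *     ZSTD_CDict* const cdict = ZSTD_createCDict(dictBuf, dictSize, 3);
 *     size_t const cSize = ZSTD_compress_usingCDict(cctx, dst, dstCapacity, src, srcSize, cdict);
 *     ZSTD_DDict* const ddict = ZSTD_createDDict(dictBuf, dictSize);
 *     size_t const rSize = ZSTD_decompress_usingDDict(dctx, out, outCapacity, dst, cSize, ddict);
 *     ZSTD_freeCDict(cdict); ZSTD_freeDDict(ddict);
 * Both digested dictionaries can be re-used for many frames before being freed.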
*/ ZSTDLIB_API size_t ZSTD_decompress_usingDDict(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize, const ZSTD_DDict* ddict); /******************************** * Dictionary helper functions *******************************/ /*! ZSTD_getDictID_fromDict() : * Provides the dictID stored within dictionary. * If @return == 0, the dictionary is not conformant with Zstandard specification. * It can still be loaded, but as a content-only dictionary. */ ZSTDLIB_API unsigned ZSTD_getDictID_fromDict(const void* dict, size_t dictSize); /*! ZSTD_getDictID_fromDDict() : * Provides the dictID of the dictionary loaded into `ddict`. * If @return == 0, the dictionary is not conformant to Zstandard specification, or empty. * Non-conformant dictionaries can still be loaded, but as content-only dictionaries. */ ZSTDLIB_API unsigned ZSTD_getDictID_fromDDict(const ZSTD_DDict* ddict); /*! ZSTD_getDictID_fromFrame() : * Provides the dictID required to decompress the frame stored within `src`. * If @return == 0, the dictID could not be decoded. * This could be for one of the following reasons : * - The frame does not require a dictionary to be decoded (most common case). * - The frame was built with dictID intentionally removed. Whatever dictionary is necessary is hidden information. * Note : this use case also happens when using a non-conformant dictionary. * - `srcSize` is too small, and as a result, the frame header could not be decoded (only possible if `srcSize < ZSTD_FRAMEHEADERSIZE_MAX`). * - This is not a Zstandard frame. * When identifying the exact failure cause, it's possible to use ZSTD_getFrameHeader(), which will provide a more precise error code. */ ZSTDLIB_API unsigned ZSTD_getDictID_fromFrame(const void* src, size_t srcSize); /******************************************************************************* * Advanced dictionary and prefix API * * This API allows dictionaries to be used with ZSTD_compress2(), * ZSTD_compressStream2(), and ZSTD_decompress(). Dictionaries are sticky, and * only reset when the context is reset with ZSTD_reset_parameters or * ZSTD_reset_session_and_parameters. Prefixes are single-use. ******************************************************************************/ /*! ZSTD_CCtx_loadDictionary() : * Create an internal CDict from `dict` buffer. * Decompression will have to use same dictionary. * @result : 0, or an error code (which can be tested with ZSTD_isError()). * Special: Loading a NULL (or 0-size) dictionary invalidates previous dictionary, * meaning "return to no-dictionary mode". * Note 1 : Dictionary is sticky, it will be used for all future compressed frames. * To return to "no-dictionary" situation, load a NULL dictionary (or reset parameters). * Note 2 : Loading a dictionary involves building tables. * It's also a CPU consuming operation, with non-negligible impact on latency. * Tables are dependent on compression parameters, and for this reason, * compression parameters can no longer be changed after loading a dictionary. * Note 3 : `dict` content will be copied internally. * Use experimental ZSTD_CCtx_loadDictionary_byReference() to reference content instead. * In such a case, dictionary buffer must outlive its users. * Note 4 : Use ZSTD_CCtx_loadDictionary_advanced() * to precisely select how dictionary content must be interpreted. */ ZSTDLIB_API size_t ZSTD_CCtx_loadDictionary(ZSTD_CCtx* cctx, const void* dict, size_t dictSize); /*!
ZSTD_CCtx_refCDict() : * Reference a prepared dictionary, to be used for all next compressed frames. * Note that compression parameters are enforced from within CDict, * and supersede any compression parameter previously set within CCtx. * The parameters ignored are labled as "superseded-by-cdict" in the ZSTD_cParameter enum docs. * The ignored parameters will be used again if the CCtx is returned to no-dictionary mode. * The dictionary will remain valid for future compressed frames using same CCtx. * @result : 0, or an error code (which can be tested with ZSTD_isError()). * Special : Referencing a NULL CDict means "return to no-dictionary mode". * Note 1 : Currently, only one dictionary can be managed. * Referencing a new dictionary effectively "discards" any previous one. * Note 2 : CDict is just referenced, its lifetime must outlive its usage within CCtx. */ ZSTDLIB_API size_t ZSTD_CCtx_refCDict(ZSTD_CCtx* cctx, const ZSTD_CDict* cdict); /*! ZSTD_CCtx_refPrefix() : * Reference a prefix (single-usage dictionary) for next compressed frame. * A prefix is **only used once**. Tables are discarded at end of frame (ZSTD_e_end). * Decompression will need same prefix to properly regenerate data. * Compressing with a prefix is similar in outcome as performing a diff and compressing it, * but performs much faster, especially during decompression (compression speed is tunable with compression level). * @result : 0, or an error code (which can be tested with ZSTD_isError()). * Special: Adding any prefix (including NULL) invalidates any previous prefix or dictionary * Note 1 : Prefix buffer is referenced. It **must** outlive compression. * Its content must remain unmodified during compression. * Note 2 : If the intention is to diff some large src data blob with some prior version of itself, * ensure that the window size is large enough to contain the entire source. * See ZSTD_c_windowLog. * Note 3 : Referencing a prefix involves building tables, which are dependent on compression parameters. * It's a CPU consuming operation, with non-negligible impact on latency. * If there is a need to use the same prefix multiple times, consider loadDictionary instead. * Note 4 : By default, the prefix is interpreted as raw content (ZSTD_dct_rawContent). * Use experimental ZSTD_CCtx_refPrefix_advanced() to alter dictionary interpretation. */ ZSTDLIB_API size_t ZSTD_CCtx_refPrefix(ZSTD_CCtx* cctx, const void* prefix, size_t prefixSize); /*! ZSTD_DCtx_loadDictionary() : * Create an internal DDict from dict buffer, * to be used to decompress next frames. * The dictionary remains valid for all future frames, until explicitly invalidated. * @result : 0, or an error code (which can be tested with ZSTD_isError()). * Special : Adding a NULL (or 0-size) dictionary invalidates any previous dictionary, * meaning "return to no-dictionary mode". * Note 1 : Loading a dictionary involves building tables, * which has a non-negligible impact on CPU usage and latency. * It's recommended to "load once, use many times", to amortize the cost * Note 2 :`dict` content will be copied internally, so `dict` can be released after loading. * Use ZSTD_DCtx_loadDictionary_byReference() to reference dictionary content instead. * Note 3 : Use ZSTD_DCtx_loadDictionary_advanced() to take control of * how dictionary content is loaded and interpreted. */ ZSTDLIB_API size_t ZSTD_DCtx_loadDictionary(ZSTD_DCtx* dctx, const void* dict, size_t dictSize); /*! ZSTD_DCtx_refDDict() : * Reference a prepared dictionary, to be used to decompress next frames. 
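 * For example, a minimal sketch (assuming `ddict` was created with ZSTD_createDDict(),
 *  and that dst / src buffers are provided by the caller; error handling omitted) :
 *     ZSTD_DCtx_refDDict(dctx, ddict);
 *     size_t const rSize = ZSTD_decompressDCtx(dctx, dst, dstCapacity, src, srcSize);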
* The dictionary remains active for decompression of future frames using same DCtx. * @result : 0, or an error code (which can be tested with ZSTD_isError()). * Note 1 : Currently, only one dictionary can be managed. * Referencing a new dictionary effectively "discards" any previous one. * Special: referencing a NULL DDict means "return to no-dictionary mode". * Note 2 : DDict is just referenced, its lifetime must outlive its usage from DCtx. */ ZSTDLIB_API size_t ZSTD_DCtx_refDDict(ZSTD_DCtx* dctx, const ZSTD_DDict* ddict); /*! ZSTD_DCtx_refPrefix() : * Reference a prefix (single-usage dictionary) to decompress next frame. * This is the reverse operation of ZSTD_CCtx_refPrefix(), * and must use the same prefix as the one used during compression. * Prefix is **only used once**. Reference is discarded at end of frame. * End of frame is reached when ZSTD_decompressStream() returns 0. * @result : 0, or an error code (which can be tested with ZSTD_isError()). * Note 1 : Adding any prefix (including NULL) invalidates any previously set prefix or dictionary * Note 2 : Prefix buffer is referenced. It **must** outlive decompression. * Prefix buffer must remain unmodified up to the end of frame, * reached when ZSTD_decompressStream() returns 0. * Note 3 : By default, the prefix is treated as raw content (ZSTD_dct_rawContent). * Use ZSTD_CCtx_refPrefix_advanced() to alter dictMode (Experimental section) * Note 4 : Referencing a raw content prefix has almost no cpu nor memory cost. * A full dictionary is more costly, as it requires building tables. */ ZSTDLIB_API size_t ZSTD_DCtx_refPrefix(ZSTD_DCtx* dctx, const void* prefix, size_t prefixSize); /* === Memory management === */ /*! ZSTD_sizeof_*() : * These functions give the _current_ memory usage of selected object. * Note that object memory usage can evolve (increase or decrease) over time. */ ZSTDLIB_API size_t ZSTD_sizeof_CCtx(const ZSTD_CCtx* cctx); ZSTDLIB_API size_t ZSTD_sizeof_DCtx(const ZSTD_DCtx* dctx); ZSTDLIB_API size_t ZSTD_sizeof_CStream(const ZSTD_CStream* zcs); ZSTDLIB_API size_t ZSTD_sizeof_DStream(const ZSTD_DStream* zds); ZSTDLIB_API size_t ZSTD_sizeof_CDict(const ZSTD_CDict* cdict); ZSTDLIB_API size_t ZSTD_sizeof_DDict(const ZSTD_DDict* ddict); #endif /* ZSTD_H_235446 */ /* ************************************************************************************** * ADVANCED AND EXPERIMENTAL FUNCTIONS **************************************************************************************** * The definitions in the following section are considered experimental. * They are provided for advanced scenarios. * They should never be used with a dynamic library, as prototypes may change in the future. * Use them only in association with static linking. * ***************************************************************************************/ #if defined(ZSTD_STATIC_LINKING_ONLY) && !defined(ZSTD_H_ZSTD_STATIC_LINKING_ONLY) #define ZSTD_H_ZSTD_STATIC_LINKING_ONLY /**************************************************************************************** * experimental API (static linking only) **************************************************************************************** * The following symbols and constants * are not planned to join "stable API" status in the near future. * They can still change in future versions. * Some of them are planned to remain in the static_only section indefinitely. 
* Some of them might be removed in the future (especially when redundant with existing stable functions) * ***************************************************************************************/ #define ZSTD_FRAMEHEADERSIZE_PREFIX(format) ((format) == ZSTD_f_zstd1 ? 5 : 1) /* minimum input size required to query frame header size */ #define ZSTD_FRAMEHEADERSIZE_MIN(format) ((format) == ZSTD_f_zstd1 ? 6 : 2) #define ZSTD_FRAMEHEADERSIZE_MAX 18 /* can be useful for static allocation */ #define ZSTD_SKIPPABLEHEADERSIZE 8 /* compression parameter bounds */ #define ZSTD_WINDOWLOG_MAX_32 30 #define ZSTD_WINDOWLOG_MAX_64 31 #define ZSTD_WINDOWLOG_MAX ((int)(sizeof(size_t) == 4 ? ZSTD_WINDOWLOG_MAX_32 : ZSTD_WINDOWLOG_MAX_64)) #define ZSTD_WINDOWLOG_MIN 10 #define ZSTD_HASHLOG_MAX ((ZSTD_WINDOWLOG_MAX < 30) ? ZSTD_WINDOWLOG_MAX : 30) #define ZSTD_HASHLOG_MIN 6 #define ZSTD_CHAINLOG_MAX_32 29 #define ZSTD_CHAINLOG_MAX_64 30 #define ZSTD_CHAINLOG_MAX ((int)(sizeof(size_t) == 4 ? ZSTD_CHAINLOG_MAX_32 : ZSTD_CHAINLOG_MAX_64)) #define ZSTD_CHAINLOG_MIN ZSTD_HASHLOG_MIN #define ZSTD_SEARCHLOG_MAX (ZSTD_WINDOWLOG_MAX-1) #define ZSTD_SEARCHLOG_MIN 1 #define ZSTD_MINMATCH_MAX 7 /* only for ZSTD_fast, other strategies are limited to 6 */ #define ZSTD_MINMATCH_MIN 3 /* only for ZSTD_btopt+, faster strategies are limited to 4 */ #define ZSTD_TARGETLENGTH_MAX ZSTD_BLOCKSIZE_MAX #define ZSTD_TARGETLENGTH_MIN 0 /* note : comparing this constant to an unsigned results in a tautological test */ #define ZSTD_STRATEGY_MIN ZSTD_fast #define ZSTD_STRATEGY_MAX ZSTD_btultra2 #define ZSTD_OVERLAPLOG_MIN 0 #define ZSTD_OVERLAPLOG_MAX 9 #define ZSTD_WINDOWLOG_LIMIT_DEFAULT 27 /* by default, the streaming decoder will refuse any frame * requiring larger than (1<<ZSTD_WINDOWLOG_LIMIT_DEFAULT) window size, * to preserve host's memory from unreasonable requirements. * This limit can be overridden using ZSTD_DCtx_setParameter(,ZSTD_d_windowLogMax,). * The limit does not apply for one-pass decoders (such as ZSTD_decompress()), since no additional memory is allocated */ /* LDM parameter bounds */ #define ZSTD_LDM_HASHLOG_MIN ZSTD_HASHLOG_MIN #define ZSTD_LDM_HASHLOG_MAX ZSTD_HASHLOG_MAX #define ZSTD_LDM_MINMATCH_MIN 4 #define ZSTD_LDM_MINMATCH_MAX 4096 #define ZSTD_LDM_BUCKETSIZELOG_MIN 1 #define ZSTD_LDM_BUCKETSIZELOG_MAX 8 #define ZSTD_LDM_HASHRATELOG_MIN 0 #define ZSTD_LDM_HASHRATELOG_MAX (ZSTD_WINDOWLOG_MAX - ZSTD_HASHLOG_MIN) /* Advanced parameter bounds */ #define ZSTD_TARGETCBLOCKSIZE_MIN 64 #define ZSTD_TARGETCBLOCKSIZE_MAX ZSTD_BLOCKSIZE_MAX #define ZSTD_SRCSIZEHINT_MIN 0 #define ZSTD_SRCSIZEHINT_MAX INT_MAX /* internal */ #define ZSTD_HASHLOG3_MAX 17 /* --- Advanced types --- */ typedef struct ZSTD_CCtx_params_s ZSTD_CCtx_params; typedef struct { unsigned int matchPos; /* Match pos in dst */ /* If seqDef.offset > 3, then this is seqDef.offset - 3 * If seqDef.offset < 3, then this is the corresponding repeat offset * But if seqDef.offset < 3 and litLength == 0, this is the * repeat offset before the corresponding repeat offset * And if seqDef.offset == 3 and litLength == 0, this is the * most recent repeat offset - 1 */ unsigned int offset; unsigned int litLength; /* Literal length */ unsigned int matchLength; /* Match length */ /* 0 when seq not rep and seqDef.offset otherwise * when litLength == 0 this will be <= 4, otherwise <= 3 like normal */ unsigned int rep; } ZSTD_Sequence; typedef struct { unsigned windowLog; /**< largest match distance : larger == more compression, more memory needed during decompression */ unsigned chainLog; /**< fully searched segment : larger == more compression, slower, more memory (useless for fast) */ unsigned hashLog; /**< dispatch table : larger == faster, more memory */ unsigned searchLog; /**< nb of searches : larger == more compression, slower */ unsigned minMatch; /**< match length searched : larger == faster decompression, sometimes less compression */ unsigned targetLength; /**< acceptable match size for optimal parser (only) : larger == more compression, slower */ ZSTD_strategy strategy; /**< see ZSTD_strategy definition above */ } ZSTD_compressionParameters; typedef struct { int contentSizeFlag; /**< 1: content size will be in frame header (when known) */ int checksumFlag; /**< 1: generate a 32-bits checksum using XXH64 algorithm at end of frame, for error detection */ int noDictIDFlag; /**< 1: no dictID will be saved into frame header (dictID is only useful for dictionary compression) */ } ZSTD_frameParameters; typedef struct { ZSTD_compressionParameters cParams; ZSTD_frameParameters fParams; } ZSTD_parameters; typedef enum {
ZSTD_dct_auto = 0, /* dictionary is "full" when starting with ZSTD_MAGIC_DICTIONARY, otherwise it is "rawContent" */ ZSTD_dct_rawContent = 1, /* ensures dictionary is always loaded as rawContent, even if it starts with ZSTD_MAGIC_DICTIONARY */ ZSTD_dct_fullDict = 2 /* refuses to load a dictionary if it does not respect Zstandard's specification, starting with ZSTD_MAGIC_DICTIONARY */ } ZSTD_dictContentType_e; typedef enum { ZSTD_dlm_byCopy = 0, /**< Copy dictionary content internally */ ZSTD_dlm_byRef = 1 /**< Reference dictionary content -- the dictionary buffer must outlive its users. */ } ZSTD_dictLoadMethod_e; typedef enum { ZSTD_f_zstd1 = 0, /* zstd frame format, specified in zstd_compression_format.md (default) */ ZSTD_f_zstd1_magicless = 1 /* Variant of zstd frame format, without initial 4-bytes magic number. * Useful to save 4 bytes per generated frame. * Decoder cannot recognise automatically this format, requiring this instruction. */ } ZSTD_format_e; typedef enum { /* Note: this enum and the behavior it controls are effectively internal * implementation details of the compressor. They are expected to continue * to evolve and should be considered only in the context of extremely * advanced performance tuning. * * Zstd currently supports the use of a CDict in three ways: * * - The contents of the CDict can be copied into the working context. This * means that the compression can search both the dictionary and input * while operating on a single set of internal tables. This makes * the compression faster per-byte of input. However, the initial copy of * the CDict's tables incurs a fixed cost at the beginning of the * compression. For small compressions (< 8 KB), that copy can dominate * the cost of the compression. * * - The CDict's tables can be used in-place. In this model, compression is * slower per input byte, because the compressor has to search two sets of * tables. However, this model incurs no start-up cost (as long as the * working context's tables can be reused). For small inputs, this can be * faster than copying the CDict's tables. * * - The CDict's tables are not used at all, and instead we use the working * context alone to reload the dictionary and use params based on the source * size. See ZSTD_compress_insertDictionary() and ZSTD_compress_usingDict(). * This method is effective when the dictionary sizes are very small relative * to the input size, and the input size is fairly large to begin with. * * Zstd has a simple internal heuristic that selects which strategy to use * at the beginning of a compression. However, if experimentation shows that * Zstd is making poor choices, it is possible to override that choice with * this enum. */ ZSTD_dictDefaultAttach = 0, /* Use the default heuristic. */ ZSTD_dictForceAttach = 1, /* Never copy the dictionary. */ ZSTD_dictForceCopy = 2, /* Always copy the dictionary. */ ZSTD_dictForceLoad = 3 /* Always reload the dictionary */ } ZSTD_dictAttachPref_e; typedef enum { ZSTD_lcm_auto = 0, /**< Automatically determine the compression mode based on the compression level. * Negative compression levels will be uncompressed, and positive compression * levels will be compressed. */ ZSTD_lcm_huffman = 1, /**< Always attempt Huffman compression. Uncompressed literals will still be * emitted if Huffman compression is not profitable. */ ZSTD_lcm_uncompressed = 2 /**< Always emit uncompressed literals. 
*/ } ZSTD_literalCompressionMode_e; /*************************************** * Frame size functions ***************************************/ /*! ZSTD_findDecompressedSize() : * `src` should point to the start of a series of ZSTD encoded and/or skippable frames * `srcSize` must be the _exact_ size of this series * (i.e. there should be a frame boundary at `src + srcSize`) * @return : - decompressed size of all data in all successive frames * - if the decompressed size cannot be determined: ZSTD_CONTENTSIZE_UNKNOWN * - if an error occurred: ZSTD_CONTENTSIZE_ERROR * * note 1 : decompressed size is an optional field, that may not be present, especially in streaming mode. * When `return==ZSTD_CONTENTSIZE_UNKNOWN`, data to decompress could be any size. * In which case, it's necessary to use streaming mode to decompress data. * note 2 : decompressed size is always present when compression is done with ZSTD_compress() * note 3 : decompressed size can be very large (64-bits value), * potentially larger than what local system can handle as a single memory segment. * In which case, it's necessary to use streaming mode to decompress data. * note 4 : If source is untrusted, decompressed size could be wrong or intentionally modified. * Always ensure result fits within application's authorized limits. * Each application can set its own limits. * note 5 : ZSTD_findDecompressedSize handles multiple frames, and so it must traverse the input to * read each contained frame header. This is fast as most of the data is skipped, * however it does mean that all frame data must be present and valid. */ ZSTDLIB_API unsigned long long ZSTD_findDecompressedSize(const void* src, size_t srcSize); /*! ZSTD_decompressBound() : * `src` should point to the start of a series of ZSTD encoded and/or skippable frames * `srcSize` must be the _exact_ size of this series * (i.e. there should be a frame boundary at `src + srcSize`) * @return : - upper-bound for the decompressed size of all data in all successive frames * - if an error occured: ZSTD_CONTENTSIZE_ERROR * * note 1 : an error can occur if `src` contains an invalid or incorrectly formatted frame. * note 2 : the upper-bound is exact when the decompressed size field is available in every ZSTD encoded frame of `src`. * in this case, `ZSTD_findDecompressedSize` and `ZSTD_decompressBound` return the same value. * note 3 : when the decompressed size field isn't available, the upper-bound for that frame is calculated by: * upper-bound = # blocks * min(128 KB, Window_Size) */ ZSTDLIB_API unsigned long long ZSTD_decompressBound(const void* src, size_t srcSize); /*! ZSTD_frameHeaderSize() : * srcSize must be >= ZSTD_FRAMEHEADERSIZE_PREFIX. * @return : size of the Frame Header, * or an error code (if srcSize is too small) */ ZSTDLIB_API size_t ZSTD_frameHeaderSize(const void* src, size_t srcSize); /*! ZSTD_getSequences() : * Extract sequences from the sequence store * zc can be used to insert custom compression params. * This function invokes ZSTD_compress2 * @return : number of sequences extracted */ ZSTDLIB_API size_t ZSTD_getSequences(ZSTD_CCtx* zc, ZSTD_Sequence* outSeqs, size_t outSeqsSize, const void* src, size_t srcSize); /*************************************** * Memory management ***************************************/ /*! ZSTD_estimate*() : * These functions make it possible to estimate memory usage * of a future {D,C}Ctx, before its creation. * * ZSTD_estimateCCtxSize() will provide a memory budget large enough * for any compression level up to selected one. 
* Note : Unlike ZSTD_estimateCStreamSize*(), this estimate * does not include space for a window buffer. * Therefore, the estimation is only guaranteed for single-shot compressions, not streaming. * The estimate will assume the input may be arbitrarily large, * which is the worst case. * * When srcSize can be bound by a known and rather "small" value, * this fact can be used to provide a tighter estimation * because the CCtx compression context will need less memory. * This tighter estimation can be provided by more advanced functions * ZSTD_estimateCCtxSize_usingCParams(), which can be used in tandem with ZSTD_getCParams(), * and ZSTD_estimateCCtxSize_usingCCtxParams(), which can be used in tandem with ZSTD_CCtxParams_setParameter(). * Both can be used to estimate memory using custom compression parameters and arbitrary srcSize limits. * * Note 2 : only single-threaded compression is supported. * ZSTD_estimateCCtxSize_usingCCtxParams() will return an error code if ZSTD_c_nbWorkers is >= 1. */ ZSTDLIB_API size_t ZSTD_estimateCCtxSize(int compressionLevel); ZSTDLIB_API size_t ZSTD_estimateCCtxSize_usingCParams(ZSTD_compressionParameters cParams); ZSTDLIB_API size_t ZSTD_estimateCCtxSize_usingCCtxParams(const ZSTD_CCtx_params* params); ZSTDLIB_API size_t ZSTD_estimateDCtxSize(void); /*! ZSTD_estimateCStreamSize() : * ZSTD_estimateCStreamSize() will provide a budget large enough for any compression level up to selected one. * It will also consider src size to be arbitrarily "large", which is worst case. * If srcSize is known to always be small, ZSTD_estimateCStreamSize_usingCParams() can provide a tighter estimation. * ZSTD_estimateCStreamSize_usingCParams() can be used in tandem with ZSTD_getCParams() to create cParams from compressionLevel. * ZSTD_estimateCStreamSize_usingCCtxParams() can be used in tandem with ZSTD_CCtxParams_setParameter(). Only single-threaded compression is supported. This function will return an error code if ZSTD_c_nbWorkers is >= 1. * Note : CStream size estimation is only correct for single-threaded compression. * ZSTD_DStream memory budget depends on window Size. * This information can be passed manually, using ZSTD_estimateDStreamSize, * or deducted from a valid frame Header, using ZSTD_estimateDStreamSize_fromFrame(); * Note : if streaming is init with function ZSTD_init?Stream_usingDict(), * an internal ?Dict will be created, which additional size is not estimated here. * In this case, get total size by adding ZSTD_estimate?DictSize */ ZSTDLIB_API size_t ZSTD_estimateCStreamSize(int compressionLevel); ZSTDLIB_API size_t ZSTD_estimateCStreamSize_usingCParams(ZSTD_compressionParameters cParams); ZSTDLIB_API size_t ZSTD_estimateCStreamSize_usingCCtxParams(const ZSTD_CCtx_params* params); ZSTDLIB_API size_t ZSTD_estimateDStreamSize(size_t windowSize); ZSTDLIB_API size_t ZSTD_estimateDStreamSize_fromFrame(const void* src, size_t srcSize); /*! ZSTD_estimate?DictSize() : * ZSTD_estimateCDictSize() will bet that src size is relatively "small", and content is copied, like ZSTD_createCDict(). * ZSTD_estimateCDictSize_advanced() makes it possible to control compression parameters precisely, like ZSTD_createCDict_advanced(). * Note : dictionaries created by reference (`ZSTD_dlm_byRef`) are logically smaller. 
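 * For illustration, a rough sketch combining these estimators with a static context
 * (a minimal sketch; `level` and `dictSize` are hypothetical inputs, malloc() stands in
 *  for any 8-bytes aligned allocation, error handling omitted; see ZSTD_initStatic*() below) :
 *     size_t const cctxBudget  = ZSTD_estimateCCtxSize(level);
 *     size_t const cdictBudget = ZSTD_estimateCDictSize(dictSize, level);
 *     void* const workspace = malloc(cctxBudget);
 *     ZSTD_CCtx* const cctx = ZSTD_initStaticCCtx(workspace, cctxBudget);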
*/ ZSTDLIB_API size_t ZSTD_estimateCDictSize(size_t dictSize, int compressionLevel); ZSTDLIB_API size_t ZSTD_estimateCDictSize_advanced(size_t dictSize, ZSTD_compressionParameters cParams, ZSTD_dictLoadMethod_e dictLoadMethod); ZSTDLIB_API size_t ZSTD_estimateDDictSize(size_t dictSize, ZSTD_dictLoadMethod_e dictLoadMethod); /*! ZSTD_initStatic*() : * Initialize an object using a pre-allocated fixed-size buffer. * workspace: The memory area to emplace the object into. * Provided pointer *must be 8-bytes aligned*. * Buffer must outlive object. * workspaceSize: Use ZSTD_estimate*Size() to determine * how large workspace must be to support target scenario. * @return : pointer to object (same address as workspace, just different type), * or NULL if error (size too small, incorrect alignment, etc.) * Note : zstd will never resize nor malloc() when using a static buffer. * If the object requires more memory than available, * zstd will just error out (typically ZSTD_error_memory_allocation). * Note 2 : there is no corresponding "free" function. * Since workspace is allocated externally, it must be freed externally too. * Note 3 : cParams : use ZSTD_getCParams() to convert a compression level * into its associated cParams. * Limitation 1 : currently not compatible with internal dictionary creation, triggered by * ZSTD_CCtx_loadDictionary(), ZSTD_initCStream_usingDict() or ZSTD_initDStream_usingDict(). * Limitation 2 : static cctx currently not compatible with multi-threading. * Limitation 3 : static dctx is incompatible with legacy support. */ ZSTDLIB_API ZSTD_CCtx* ZSTD_initStaticCCtx(void* workspace, size_t workspaceSize); ZSTDLIB_API ZSTD_CStream* ZSTD_initStaticCStream(void* workspace, size_t workspaceSize); /**< same as ZSTD_initStaticCCtx() */ ZSTDLIB_API ZSTD_DCtx* ZSTD_initStaticDCtx(void* workspace, size_t workspaceSize); ZSTDLIB_API ZSTD_DStream* ZSTD_initStaticDStream(void* workspace, size_t workspaceSize); /**< same as ZSTD_initStaticDCtx() */ ZSTDLIB_API const ZSTD_CDict* ZSTD_initStaticCDict( void* workspace, size_t workspaceSize, const void* dict, size_t dictSize, ZSTD_dictLoadMethod_e dictLoadMethod, ZSTD_dictContentType_e dictContentType, ZSTD_compressionParameters cParams); ZSTDLIB_API const ZSTD_DDict* ZSTD_initStaticDDict( void* workspace, size_t workspaceSize, const void* dict, size_t dictSize, ZSTD_dictLoadMethod_e dictLoadMethod, ZSTD_dictContentType_e dictContentType); /*! Custom memory allocation : * These prototypes make it possible to pass your own allocation/free functions. * ZSTD_customMem is provided at creation time, using ZSTD_create*_advanced() variants listed below. * All allocation/free operations will be completed using these custom variants instead of regular ones. 
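 * For illustration, a minimal sketch plugging in custom allocators
 * (myAlloc / myFree are hypothetical user functions matching the prototypes below;
 *  here they simply forward to malloc()/free()) :
 *     static void* myAlloc(void* opaque, size_t size)   { (void)opaque; return malloc(size); }
 *     static void  myFree (void* opaque, void* address) { (void)opaque; free(address); }
 *     ZSTD_customMem const myMem = { myAlloc, myFree, NULL };
 *     ZSTD_CCtx* const cctx = ZSTD_createCCtx_advanced(myMem);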
*/ typedef void* (*ZSTD_allocFunction) (void* opaque, size_t size); typedef void (*ZSTD_freeFunction) (void* opaque, void* address); typedef struct { ZSTD_allocFunction customAlloc; ZSTD_freeFunction customFree; void* opaque; } ZSTD_customMem; static ZSTD_customMem const ZSTD_defaultCMem = { NULL, NULL, NULL }; /**< this constant defers to stdlib's functions */ ZSTDLIB_API ZSTD_CCtx* ZSTD_createCCtx_advanced(ZSTD_customMem customMem); ZSTDLIB_API ZSTD_CStream* ZSTD_createCStream_advanced(ZSTD_customMem customMem); ZSTDLIB_API ZSTD_DCtx* ZSTD_createDCtx_advanced(ZSTD_customMem customMem); ZSTDLIB_API ZSTD_DStream* ZSTD_createDStream_advanced(ZSTD_customMem customMem); ZSTDLIB_API ZSTD_CDict* ZSTD_createCDict_advanced(const void* dict, size_t dictSize, ZSTD_dictLoadMethod_e dictLoadMethod, ZSTD_dictContentType_e dictContentType, ZSTD_compressionParameters cParams, ZSTD_customMem customMem); ZSTDLIB_API ZSTD_DDict* ZSTD_createDDict_advanced(const void* dict, size_t dictSize, ZSTD_dictLoadMethod_e dictLoadMethod, ZSTD_dictContentType_e dictContentType, ZSTD_customMem customMem); /*************************************** * Advanced compression functions ***************************************/ /*! ZSTD_createCDict_byReference() : * Create a digested dictionary for compression * Dictionary content is just referenced, not duplicated. * As a consequence, `dictBuffer` **must** outlive CDict, * and its content must remain unmodified throughout the lifetime of CDict. * note: equivalent to ZSTD_createCDict_advanced(), with dictLoadMethod==ZSTD_dlm_byRef */ ZSTDLIB_API ZSTD_CDict* ZSTD_createCDict_byReference(const void* dictBuffer, size_t dictSize, int compressionLevel); /*! ZSTD_getCParams() : * @return ZSTD_compressionParameters structure for a selected compression level and estimated srcSize. * `estimatedSrcSize` value is optional, select 0 if not known */ ZSTDLIB_API ZSTD_compressionParameters ZSTD_getCParams(int compressionLevel, unsigned long long estimatedSrcSize, size_t dictSize); /*! ZSTD_getParams() : * same as ZSTD_getCParams(), but @return a full `ZSTD_parameters` object instead of sub-component `ZSTD_compressionParameters`. * All fields of `ZSTD_frameParameters` are set to default : contentSize=1, checksum=0, noDictID=0 */ ZSTDLIB_API ZSTD_parameters ZSTD_getParams(int compressionLevel, unsigned long long estimatedSrcSize, size_t dictSize); /*! ZSTD_checkCParams() : * Ensure param values remain within authorized range. * @return 0 on success, or an error code (can be checked with ZSTD_isError()) */ ZSTDLIB_API size_t ZSTD_checkCParams(ZSTD_compressionParameters params); /*! ZSTD_adjustCParams() : * optimize params for a given `srcSize` and `dictSize`. * `srcSize` can be unknown, in which case use ZSTD_CONTENTSIZE_UNKNOWN. * `dictSize` must be `0` when there is no dictionary. * cPar can be invalid : all parameters will be clamped within valid range in the @return struct. * This function never fails (wide contract) */ ZSTDLIB_API ZSTD_compressionParameters ZSTD_adjustCParams(ZSTD_compressionParameters cPar, unsigned long long srcSize, size_t dictSize); /*! ZSTD_compress_advanced() : * Note : this function is now DEPRECATED. * It can be replaced by ZSTD_compress2(), in combination with ZSTD_CCtx_setParameter() and other parameter setters. 
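 * For example, one possible equivalent sketch (the parameters and dictionary shown are arbitrary,
 *  error handling omitted) :
 *     ZSTD_CCtx_reset(cctx, ZSTD_reset_session_and_parameters);
 *     ZSTD_CCtx_setParameter(cctx, ZSTD_c_compressionLevel, 19);
 *     ZSTD_CCtx_setParameter(cctx, ZSTD_c_checksumFlag, 1);
 *     ZSTD_CCtx_loadDictionary(cctx, dict, dictSize);
 *     size_t const cSize = ZSTD_compress2(cctx, dst, dstCapacity, src, srcSize);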
* This prototype will be marked as deprecated and generate compilation warning on reaching v1.5.x */ ZSTDLIB_API size_t ZSTD_compress_advanced(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize, const void* dict,size_t dictSize, ZSTD_parameters params); /*! ZSTD_compress_usingCDict_advanced() : * Note : this function is now REDUNDANT. * It can be replaced by ZSTD_compress2(), in combination with ZSTD_CCtx_loadDictionary() and other parameter setters. * This prototype will be marked as deprecated and generate compilation warning in some future version */ ZSTDLIB_API size_t ZSTD_compress_usingCDict_advanced(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize, const ZSTD_CDict* cdict, ZSTD_frameParameters fParams); /*! ZSTD_CCtx_loadDictionary_byReference() : * Same as ZSTD_CCtx_loadDictionary(), but dictionary content is referenced, instead of being copied into CCtx. * It saves some memory, but also requires that `dict` outlives its usage within `cctx` */ ZSTDLIB_API size_t ZSTD_CCtx_loadDictionary_byReference(ZSTD_CCtx* cctx, const void* dict, size_t dictSize); /*! ZSTD_CCtx_loadDictionary_advanced() : * Same as ZSTD_CCtx_loadDictionary(), but gives finer control over * how to load the dictionary (by copy ? by reference ?) * and how to interpret it (automatic ? force raw mode ? full mode only ?) */ ZSTDLIB_API size_t ZSTD_CCtx_loadDictionary_advanced(ZSTD_CCtx* cctx, const void* dict, size_t dictSize, ZSTD_dictLoadMethod_e dictLoadMethod, ZSTD_dictContentType_e dictContentType); /*! ZSTD_CCtx_refPrefix_advanced() : * Same as ZSTD_CCtx_refPrefix(), but gives finer control over * how to interpret prefix content (automatic ? force raw mode (default) ? full mode only ?) */ ZSTDLIB_API size_t ZSTD_CCtx_refPrefix_advanced(ZSTD_CCtx* cctx, const void* prefix, size_t prefixSize, ZSTD_dictContentType_e dictContentType); /* === experimental parameters === */ /* these parameters can be used with ZSTD_setParameter() * they are not guaranteed to remain supported in the future */ /* Enables rsyncable mode, * which makes compressed files more rsync friendly * by adding periodic synchronization points to the compressed data. * The target average block size is ZSTD_c_jobSize / 2. * It's possible to modify the job size to increase or decrease * the granularity of the synchronization point. * Once the jobSize is smaller than the window size, * it will result in compression ratio degradation. * NOTE 1: rsyncable mode only works when multithreading is enabled. * NOTE 2: rsyncable performs poorly in combination with long range mode, * since it will decrease the effectiveness of synchronization points, * though mileage may vary. * NOTE 3: Rsyncable mode limits maximum compression speed to ~400 MB/s. * If the selected compression level is already running significantly slower, * the overall speed won't be significantly impacted. */ #define ZSTD_c_rsyncable ZSTD_c_experimentalParam1 /* Select a compression format. * The value must be of type ZSTD_format_e. * See ZSTD_format_e enum definition for details */ #define ZSTD_c_format ZSTD_c_experimentalParam2 /* Force back-reference distances to remain < windowSize, * even when referencing into Dictionary content (default:0) */ #define ZSTD_c_forceMaxWindow ZSTD_c_experimentalParam3 /* Controls whether the contents of a CDict * are used in place, or copied into the working context. * Accepts values from the ZSTD_dictAttachPref_e enum. * See the comments on that enum for an explanation of the feature. 
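 * Like other experimental parameters, it is set through the regular setter, e.g.
 * (a minimal sketch) : ZSTD_CCtx_setParameter(cctx, ZSTD_c_forceAttachDict, ZSTD_dictForceCopy);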
*/ #define ZSTD_c_forceAttachDict ZSTD_c_experimentalParam4 /* Controls how the literals are compressed (default is auto). * The value must be of type ZSTD_literalCompressionMode_e. * See ZSTD_literalCompressionMode_t enum definition for details. */ #define ZSTD_c_literalCompressionMode ZSTD_c_experimentalParam5 /* Tries to fit compressed block size to be around targetCBlockSize. * No target when targetCBlockSize == 0. * There is no guarantee on compressed block size (default:0) */ #define ZSTD_c_targetCBlockSize ZSTD_c_experimentalParam6 /* User's best guess of source size. * Hint is not valid when srcSizeHint == 0. * There is no guarantee that hint is close to actual source size, * but compression ratio may regress significantly if guess considerably underestimates */ #define ZSTD_c_srcSizeHint ZSTD_c_experimentalParam7 /*! ZSTD_CCtx_getParameter() : * Get the requested compression parameter value, selected by enum ZSTD_cParameter, * and store it into int* value. * @return : 0, or an error code (which can be tested with ZSTD_isError()). */ ZSTDLIB_API size_t ZSTD_CCtx_getParameter(ZSTD_CCtx* cctx, ZSTD_cParameter param, int* value); /*! ZSTD_CCtx_params : * Quick howto : * - ZSTD_createCCtxParams() : Create a ZSTD_CCtx_params structure * - ZSTD_CCtxParams_setParameter() : Push parameters one by one into * an existing ZSTD_CCtx_params structure. * This is similar to * ZSTD_CCtx_setParameter(). * - ZSTD_CCtx_setParametersUsingCCtxParams() : Apply parameters to * an existing CCtx. * These parameters will be applied to * all subsequent frames. * - ZSTD_compressStream2() : Do compression using the CCtx. * - ZSTD_freeCCtxParams() : Free the memory. * * This can be used with ZSTD_estimateCCtxSize_advanced_usingCCtxParams() * for static allocation of CCtx for single-threaded compression. */ ZSTDLIB_API ZSTD_CCtx_params* ZSTD_createCCtxParams(void); ZSTDLIB_API size_t ZSTD_freeCCtxParams(ZSTD_CCtx_params* params); /*! ZSTD_CCtxParams_reset() : * Reset params to default values. */ ZSTDLIB_API size_t ZSTD_CCtxParams_reset(ZSTD_CCtx_params* params); /*! ZSTD_CCtxParams_init() : * Initializes the compression parameters of cctxParams according to * compression level. All other parameters are reset to their default values. */ ZSTDLIB_API size_t ZSTD_CCtxParams_init(ZSTD_CCtx_params* cctxParams, int compressionLevel); /*! ZSTD_CCtxParams_init_advanced() : * Initializes the compression and frame parameters of cctxParams according to * params. All other parameters are reset to their default values. */ ZSTDLIB_API size_t ZSTD_CCtxParams_init_advanced(ZSTD_CCtx_params* cctxParams, ZSTD_parameters params); /*! ZSTD_CCtxParams_setParameter() : * Similar to ZSTD_CCtx_setParameter. * Set one compression parameter, selected by enum ZSTD_cParameter. * Parameters must be applied to a ZSTD_CCtx using ZSTD_CCtx_setParametersUsingCCtxParams(). * @result : 0, or an error code (which can be tested with ZSTD_isError()). */ ZSTDLIB_API size_t ZSTD_CCtxParams_setParameter(ZSTD_CCtx_params* params, ZSTD_cParameter param, int value); /*! ZSTD_CCtxParams_getParameter() : * Similar to ZSTD_CCtx_getParameter. * Get the requested value of one compression parameter, selected by enum ZSTD_cParameter. * @result : 0, or an error code (which can be tested with ZSTD_isError()). */ ZSTDLIB_API size_t ZSTD_CCtxParams_getParameter(ZSTD_CCtx_params* params, ZSTD_cParameter param, int* value); /*! ZSTD_CCtx_setParametersUsingCCtxParams() : * Apply a set of ZSTD_CCtx_params to the compression context. 
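 * For example, a minimal sketch of the workflow described above (error handling omitted;
 *  the compression itself then proceeds with ZSTD_compressStream2(), not shown) :
 *     ZSTD_CCtx_params* const params = ZSTD_createCCtxParams();
 *     ZSTD_CCtxParams_setParameter(params, ZSTD_c_compressionLevel, 3);
 *     ZSTD_CCtxParams_setParameter(params, ZSTD_c_checksumFlag, 1);
 *     ZSTD_CCtx_setParametersUsingCCtxParams(cctx, params);
 *     ZSTD_freeCCtxParams(params);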
* This can be done even after compression is started, * if nbWorkers==0, this will have no impact until a new compression is started. * if nbWorkers>=1, new parameters will be picked up at next job, * with a few restrictions (windowLog, pledgedSrcSize, nbWorkers, jobSize, and overlapLog are not updated). */ ZSTDLIB_API size_t ZSTD_CCtx_setParametersUsingCCtxParams( ZSTD_CCtx* cctx, const ZSTD_CCtx_params* params); /*! ZSTD_compressStream2_simpleArgs() : * Same as ZSTD_compressStream2(), * but using only integral types as arguments. * This variant might be helpful for binders from dynamic languages * which have troubles handling structures containing memory pointers. */ ZSTDLIB_API size_t ZSTD_compressStream2_simpleArgs ( ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, size_t* dstPos, const void* src, size_t srcSize, size_t* srcPos, ZSTD_EndDirective endOp); /*************************************** * Advanced decompression functions ***************************************/ /*! ZSTD_isFrame() : * Tells if the content of `buffer` starts with a valid Frame Identifier. * Note : Frame Identifier is 4 bytes. If `size < 4`, @return will always be 0. * Note 2 : Legacy Frame Identifiers are considered valid only if Legacy Support is enabled. * Note 3 : Skippable Frame Identifiers are considered valid. */ ZSTDLIB_API unsigned ZSTD_isFrame(const void* buffer, size_t size); /*! ZSTD_createDDict_byReference() : * Create a digested dictionary, ready to start decompression operation without startup delay. * Dictionary content is referenced, and therefore stays in dictBuffer. * It is important that dictBuffer outlives DDict, * it must remain read accessible throughout the lifetime of DDict */ ZSTDLIB_API ZSTD_DDict* ZSTD_createDDict_byReference(const void* dictBuffer, size_t dictSize); /*! ZSTD_DCtx_loadDictionary_byReference() : * Same as ZSTD_DCtx_loadDictionary(), * but references `dict` content instead of copying it into `dctx`. * This saves memory if `dict` remains around., * However, it's imperative that `dict` remains accessible (and unmodified) while being used, so it must outlive decompression. */ ZSTDLIB_API size_t ZSTD_DCtx_loadDictionary_byReference(ZSTD_DCtx* dctx, const void* dict, size_t dictSize); /*! ZSTD_DCtx_loadDictionary_advanced() : * Same as ZSTD_DCtx_loadDictionary(), * but gives direct control over * how to load the dictionary (by copy ? by reference ?) * and how to interpret it (automatic ? force raw mode ? full mode only ?). */ ZSTDLIB_API size_t ZSTD_DCtx_loadDictionary_advanced(ZSTD_DCtx* dctx, const void* dict, size_t dictSize, ZSTD_dictLoadMethod_e dictLoadMethod, ZSTD_dictContentType_e dictContentType); /*! ZSTD_DCtx_refPrefix_advanced() : * Same as ZSTD_DCtx_refPrefix(), but gives finer control over * how to interpret prefix content (automatic ? force raw mode (default) ? full mode only ?) */ ZSTDLIB_API size_t ZSTD_DCtx_refPrefix_advanced(ZSTD_DCtx* dctx, const void* prefix, size_t prefixSize, ZSTD_dictContentType_e dictContentType); /*! ZSTD_DCtx_setMaxWindowSize() : * Refuses allocating internal buffers for frames requiring a window size larger than provided limit. * This protects a decoder context from reserving too much memory for itself (potential attack scenario). * This parameter is only useful in streaming mode, since no internal buffer is allocated in single-pass mode. * By default, a decompression context accepts all window sizes <= (1 << ZSTD_WINDOWLOG_LIMIT_DEFAULT) * @return : 0, or an error code (which can be tested using ZSTD_isError()). 
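 * For example, a decoder facing untrusted input might cap its own memory usage with
 * (a minimal sketch; the 8 MB limit is an arbitrary application choice) :
 *     ZSTD_DCtx_setMaxWindowSize(dctx, (size_t)1 << 23);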
*/ ZSTDLIB_API size_t ZSTD_DCtx_setMaxWindowSize(ZSTD_DCtx* dctx, size_t maxWindowSize); /* ZSTD_d_format * experimental parameter, * allowing selection between ZSTD_format_e input compression formats */ #define ZSTD_d_format ZSTD_d_experimentalParam1 /* ZSTD_d_stableOutBuffer * Experimental parameter. * Default is 0 == disabled. Set to 1 to enable. * * Tells the decompressor that the ZSTD_outBuffer will ALWAYS be the same * between calls, except for the modifications that zstd makes to pos (the * caller must not modify pos). This is checked by the decompressor, and * decompression will fail if it ever changes. Therefore the ZSTD_outBuffer * MUST be large enough to fit the entire decompressed frame. This will be * checked when the frame content size is known. The data in the ZSTD_outBuffer * in the range [dst, dst + pos) MUST not be modified during decompression * or you will get data corruption. * * When this flags is enabled zstd won't allocate an output buffer, because * it can write directly to the ZSTD_outBuffer, but it will still allocate * an input buffer large enough to fit any compressed block. This will also * avoid the memcpy() from the internal output buffer to the ZSTD_outBuffer. * If you need to avoid the input buffer allocation use the buffer-less * streaming API. * * NOTE: So long as the ZSTD_outBuffer always points to valid memory, using * this flag is ALWAYS memory safe, and will never access out-of-bounds * memory. However, decompression WILL fail if you violate the preconditions. * * WARNING: The data in the ZSTD_outBuffer in the range [dst, dst + pos) MUST * not be modified during decompression or you will get data corruption. This * is because zstd needs to reference data in the ZSTD_outBuffer to regenerate * matches. Normally zstd maintains its own buffer for this purpose, but passing * this flag tells zstd to use the user provided buffer. */ #define ZSTD_d_stableOutBuffer ZSTD_d_experimentalParam2 /*! ZSTD_DCtx_setFormat() : * Instruct the decoder context about what kind of data to decode next. * This instruction is mandatory to decode data without a fully-formed header, * such ZSTD_f_zstd1_magicless for example. * @return : 0, or an error code (which can be tested using ZSTD_isError()). */ ZSTDLIB_API size_t ZSTD_DCtx_setFormat(ZSTD_DCtx* dctx, ZSTD_format_e format); /*! ZSTD_decompressStream_simpleArgs() : * Same as ZSTD_decompressStream(), * but using only integral types as arguments. * This can be helpful for binders from dynamic languages * which have troubles handling structures containing memory pointers. */ ZSTDLIB_API size_t ZSTD_decompressStream_simpleArgs ( ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, size_t* dstPos, const void* src, size_t srcSize, size_t* srcPos); /******************************************************************** * Advanced streaming functions * Warning : most of these functions are now redundant with the Advanced API. * Once Advanced API reaches "stable" status, * redundant functions will be deprecated, and then at some point removed. ********************************************************************/ /*===== Advanced Streaming compression functions =====*/ /**! ZSTD_initCStream_srcSize() : * This function is deprecated, and equivalent to: * ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only); * ZSTD_CCtx_refCDict(zcs, NULL); // clear the dictionary (if any) * ZSTD_CCtx_setParameter(zcs, ZSTD_c_compressionLevel, compressionLevel); * ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize); * * pledgedSrcSize must be correct. 
If it is not known at init time, use * ZSTD_CONTENTSIZE_UNKNOWN. Note that, for compatibility with older programs, * "0" also disables frame content size field. It may be enabled in the future. * Note : this prototype will be marked as deprecated and generate compilation warnings on reaching v1.5.x */ ZSTDLIB_API size_t ZSTD_initCStream_srcSize(ZSTD_CStream* zcs, int compressionLevel, unsigned long long pledgedSrcSize); /**! ZSTD_initCStream_usingDict() : * This function is deprecated, and is equivalent to: * ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only); * ZSTD_CCtx_setParameter(zcs, ZSTD_c_compressionLevel, compressionLevel); * ZSTD_CCtx_loadDictionary(zcs, dict, dictSize); * * Creates of an internal CDict (incompatible with static CCtx), except if * dict == NULL or dictSize < 8, in which case no dict is used. * Note: dict is loaded with ZSTD_dct_auto (treated as a full zstd dictionary if * it begins with ZSTD_MAGIC_DICTIONARY, else as raw content) and ZSTD_dlm_byCopy. * Note : this prototype will be marked as deprecated and generate compilation warnings on reaching v1.5.x */ ZSTDLIB_API size_t ZSTD_initCStream_usingDict(ZSTD_CStream* zcs, const void* dict, size_t dictSize, int compressionLevel); /**! ZSTD_initCStream_advanced() : * This function is deprecated, and is approximately equivalent to: * ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only); * // Pseudocode: Set each zstd parameter and leave the rest as-is. * for ((param, value) : params) { * ZSTD_CCtx_setParameter(zcs, param, value); * } * ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize); * ZSTD_CCtx_loadDictionary(zcs, dict, dictSize); * * dict is loaded with ZSTD_dct_auto and ZSTD_dlm_byCopy. * pledgedSrcSize must be correct. * If srcSize is not known at init time, use value ZSTD_CONTENTSIZE_UNKNOWN. * Note : this prototype will be marked as deprecated and generate compilation warnings on reaching v1.5.x */ ZSTDLIB_API size_t ZSTD_initCStream_advanced(ZSTD_CStream* zcs, const void* dict, size_t dictSize, ZSTD_parameters params, unsigned long long pledgedSrcSize); /**! ZSTD_initCStream_usingCDict() : * This function is deprecated, and equivalent to: * ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only); * ZSTD_CCtx_refCDict(zcs, cdict); * * note : cdict will just be referenced, and must outlive compression session * Note : this prototype will be marked as deprecated and generate compilation warnings on reaching v1.5.x */ ZSTDLIB_API size_t ZSTD_initCStream_usingCDict(ZSTD_CStream* zcs, const ZSTD_CDict* cdict); /**! ZSTD_initCStream_usingCDict_advanced() : * This function is DEPRECATED, and is approximately equivalent to: * ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only); * // Pseudocode: Set each zstd frame parameter and leave the rest as-is. * for ((fParam, value) : fParams) { * ZSTD_CCtx_setParameter(zcs, fParam, value); * } * ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize); * ZSTD_CCtx_refCDict(zcs, cdict); * * same as ZSTD_initCStream_usingCDict(), with control over frame parameters. * pledgedSrcSize must be correct. If srcSize is not known at init time, use * value ZSTD_CONTENTSIZE_UNKNOWN. * Note : this prototype will be marked as deprecated and generate compilation warnings on reaching v1.5.x */ ZSTDLIB_API size_t ZSTD_initCStream_usingCDict_advanced(ZSTD_CStream* zcs, const ZSTD_CDict* cdict, ZSTD_frameParameters fParams, unsigned long long pledgedSrcSize); /*! 
ZSTD_resetCStream() : * This function is deprecated, and is equivalent to: * ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only); * ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize); * * start a new frame, using same parameters from previous frame. * This is typically useful to skip dictionary loading stage, since it will re-use it in-place. * Note that zcs must be init at least once before using ZSTD_resetCStream(). * If pledgedSrcSize is not known at reset time, use macro ZSTD_CONTENTSIZE_UNKNOWN. * If pledgedSrcSize > 0, its value must be correct, as it will be written in header, and controlled at the end. * For the time being, pledgedSrcSize==0 is interpreted as "srcSize unknown" for compatibility with older programs, * but it will change to mean "empty" in future version, so use macro ZSTD_CONTENTSIZE_UNKNOWN instead. * @return : 0, or an error code (which can be tested using ZSTD_isError()) * Note : this prototype will be marked as deprecated and generate compilation warnings on reaching v1.5.x */ ZSTDLIB_API size_t ZSTD_resetCStream(ZSTD_CStream* zcs, unsigned long long pledgedSrcSize); typedef struct { unsigned long long ingested; /* nb input bytes read and buffered */ unsigned long long consumed; /* nb input bytes actually compressed */ unsigned long long produced; /* nb of compressed bytes generated and buffered */ unsigned long long flushed; /* nb of compressed bytes flushed : not provided; can be tracked from caller side */ unsigned currentJobID; /* MT only : latest started job nb */ unsigned nbActiveWorkers; /* MT only : nb of workers actively compressing at probe time */ } ZSTD_frameProgression; /* ZSTD_getFrameProgression() : * tells how much data has been ingested (read from input) * consumed (input actually compressed) and produced (output) for current frame. * Note : (ingested - consumed) is amount of input data buffered internally, not yet compressed. * Aggregates progression inside active worker threads. */ ZSTDLIB_API ZSTD_frameProgression ZSTD_getFrameProgression(const ZSTD_CCtx* cctx); /*! ZSTD_toFlushNow() : * Tell how many bytes are ready to be flushed immediately. * Useful for multithreading scenarios (nbWorkers >= 1). * Probe the oldest active job, defined as oldest job not yet entirely flushed, * and check its output buffer. * @return : amount of data stored in oldest job and ready to be flushed immediately. * if @return == 0, it means either : * + there is no active job (could be checked with ZSTD_frameProgression()), or * + oldest job is still actively compressing data, * but everything it has produced has also been flushed so far, * therefore flush speed is limited by production speed of oldest job * irrespective of the speed of concurrent (and newer) jobs. 
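 * For illustration, a minimal monitoring sketch, polled from the thread driving
 * ZSTD_compressStream2() (printf() is only illustrative) :
 *     ZSTD_frameProgression const fp = ZSTD_getFrameProgression(cctx);
 *     size_t const flushable = ZSTD_toFlushNow(cctx);
 *     printf("ingested=%llu consumed=%llu produced=%llu flushable=%zu\n",
 *            fp.ingested, fp.consumed, fp.produced, flushable);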
*/ ZSTDLIB_API size_t ZSTD_toFlushNow(ZSTD_CCtx* cctx); /*===== Advanced Streaming decompression functions =====*/ /** * This function is deprecated, and is equivalent to: * * ZSTD_DCtx_reset(zds, ZSTD_reset_session_only); * ZSTD_DCtx_loadDictionary(zds, dict, dictSize); * * note: no dictionary will be used if dict == NULL or dictSize < 8 * Note : this prototype will be marked as deprecated and generate compilation warnings on reaching v1.5.x */ ZSTDLIB_API size_t ZSTD_initDStream_usingDict(ZSTD_DStream* zds, const void* dict, size_t dictSize); /** * This function is deprecated, and is equivalent to: * * ZSTD_DCtx_reset(zds, ZSTD_reset_session_only); * ZSTD_DCtx_refDDict(zds, ddict); * * note : ddict is referenced, it must outlive decompression session * Note : this prototype will be marked as deprecated and generate compilation warnings on reaching v1.5.x */ ZSTDLIB_API size_t ZSTD_initDStream_usingDDict(ZSTD_DStream* zds, const ZSTD_DDict* ddict); /** * This function is deprecated, and is equivalent to: * * ZSTD_DCtx_reset(zds, ZSTD_reset_session_only); * * re-use decompression parameters from previous init; saves dictionary loading * Note : this prototype will be marked as deprecated and generate compilation warnings on reaching v1.5.x */ ZSTDLIB_API size_t ZSTD_resetDStream(ZSTD_DStream* zds); /********************************************************************* * Buffer-less and synchronous inner streaming functions * * This is an advanced API, giving full control over buffer management, for users which need direct control over memory. * But it's also a complex one, with several restrictions, documented below. * Prefer normal streaming API for an easier experience. ********************************************************************* */ /** Buffer-less streaming compression (synchronous mode) A ZSTD_CCtx object is required to track streaming operations. Use ZSTD_createCCtx() / ZSTD_freeCCtx() to manage resource. ZSTD_CCtx object can be re-used multiple times within successive compression operations. Start by initializing a context. Use ZSTD_compressBegin(), or ZSTD_compressBegin_usingDict() for dictionary compression, or ZSTD_compressBegin_advanced(), for finer parameter control. It's also possible to duplicate a reference context which has already been initialized, using ZSTD_copyCCtx() Then, consume your input using ZSTD_compressContinue(). There are some important considerations to keep in mind when using this advanced function : - ZSTD_compressContinue() has no internal buffer. It uses externally provided buffers only. - Interface is synchronous : input is consumed entirely and produces 1+ compressed blocks. - Caller must ensure there is enough space in `dst` to store compressed data under worst case scenario. Worst case evaluation is provided by ZSTD_compressBound(). ZSTD_compressContinue() doesn't guarantee recover after a failed compression. - ZSTD_compressContinue() presumes prior input ***is still accessible and unmodified*** (up to maximum distance size, see WindowLog). It remembers all previous contiguous blocks, plus one separated memory segment (which can itself consists of multiple contiguous blocks) - ZSTD_compressContinue() detects that prior input has been overwritten when `src` buffer overlaps. In which case, it will "discard" the relevant memory section from its history. Finish a frame with ZSTD_compressEnd(), which will write the last block(s) and optional checksum. It's possible to use srcSize==0, in which case, it will write a final empty block to end the frame. 
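  For illustration, a minimal single-shot sketch of this mode (error handling omitted;
  src/srcSize and dst/dstCapacity are hypothetical caller buffers, with dstCapacity >= ZSTD_compressBound(srcSize)) :
      ZSTD_compressBegin(cctx, 3);
      size_t const cSize = ZSTD_compressEnd(cctx, dst, dstCapacity, src, srcSize);
  Larger inputs would instead be fed chunk by chunk through ZSTD_compressContinue(), keeping prior input accessible as described above.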
Without last block mark, frames are considered unfinished (hence corrupted) by compliant decoders. `ZSTD_CCtx` object can be re-used (ZSTD_compressBegin()) to compress again. */ /*===== Buffer-less streaming compression functions =====*/ ZSTDLIB_API size_t ZSTD_compressBegin(ZSTD_CCtx* cctx, int compressionLevel); ZSTDLIB_API size_t ZSTD_compressBegin_usingDict(ZSTD_CCtx* cctx, const void* dict, size_t dictSize, int compressionLevel); ZSTDLIB_API size_t ZSTD_compressBegin_advanced(ZSTD_CCtx* cctx, const void* dict, size_t dictSize, ZSTD_parameters params, unsigned long long pledgedSrcSize); /**< pledgedSrcSize : If srcSize is not known at init time, use ZSTD_CONTENTSIZE_UNKNOWN */ ZSTDLIB_API size_t ZSTD_compressBegin_usingCDict(ZSTD_CCtx* cctx, const ZSTD_CDict* cdict); /**< note: fails if cdict==NULL */ ZSTDLIB_API size_t ZSTD_compressBegin_usingCDict_advanced(ZSTD_CCtx* const cctx, const ZSTD_CDict* const cdict, ZSTD_frameParameters const fParams, unsigned long long const pledgedSrcSize); /* compression parameters are already set within cdict. pledgedSrcSize must be correct. If srcSize is not known, use macro ZSTD_CONTENTSIZE_UNKNOWN */ ZSTDLIB_API size_t ZSTD_copyCCtx(ZSTD_CCtx* cctx, const ZSTD_CCtx* preparedCCtx, unsigned long long pledgedSrcSize); /**< note: if pledgedSrcSize is not known, use ZSTD_CONTENTSIZE_UNKNOWN */ ZSTDLIB_API size_t ZSTD_compressContinue(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize); ZSTDLIB_API size_t ZSTD_compressEnd(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize); /*- Buffer-less streaming decompression (synchronous mode) A ZSTD_DCtx object is required to track streaming operations. Use ZSTD_createDCtx() / ZSTD_freeDCtx() to manage it. A ZSTD_DCtx object can be re-used multiple times. First typical operation is to retrieve frame parameters, using ZSTD_getFrameHeader(). Frame header is extracted from the beginning of compressed frame, so providing only the frame's beginning is enough. Data fragment must be large enough to ensure successful decoding. `ZSTD_frameHeaderSize_max` bytes is guaranteed to always be large enough. @result : 0 : successful decoding, the `ZSTD_frameHeader` structure is correctly filled. >0 : `srcSize` is too small, please provide at least @result bytes on next attempt. errorCode, which can be tested using ZSTD_isError(). It fills a ZSTD_frameHeader structure with important information to correctly decode the frame, such as the dictionary ID, content size, or maximum back-reference distance (`windowSize`). Note that these values could be wrong, either because of data corruption, or because a 3rd party deliberately spoofs false information. As a consequence, check that values remain within valid application range. For example, do not allocate memory blindly, check that `windowSize` is within expectation. Each application can set its own limits, depending on local restrictions. For extended interoperability, it is recommended to support `windowSize` of at least 8 MB. ZSTD_decompressContinue() needs previous data blocks during decompression, up to `windowSize` bytes. ZSTD_decompressContinue() is very sensitive to contiguity, if 2 blocks don't follow each other, make sure that either the compressor breaks contiguity at the same place, or that previous contiguous segment is large enough to properly handle maximum back-reference distance. There are multiple ways to guarantee this condition. The most memory efficient way is to use a round buffer of sufficient size. 
Sufficient size is determined by invoking ZSTD_decodingBufferSize_min(), which can @return an error code if required value is too large for current system (in 32-bits mode). In a round buffer methodology, ZSTD_decompressContinue() decompresses each block next to previous one, up to the moment there is not enough room left in the buffer to guarantee decoding another full block, which maximum size is provided in `ZSTD_frameHeader` structure, field `blockSizeMax`. At which point, decoding can resume from the beginning of the buffer. Note that already decoded data stored in the buffer should be flushed before being overwritten. There are alternatives possible, for example using two or more buffers of size `windowSize` each, though they consume more memory. Finally, if you control the compression process, you can also ignore all buffer size rules, as long as the encoder and decoder progress in "lock-step", aka use exactly the same buffer sizes, break contiguity at the same place, etc. Once buffers are setup, start decompression, with ZSTD_decompressBegin(). If decompression requires a dictionary, use ZSTD_decompressBegin_usingDict() or ZSTD_decompressBegin_usingDDict(). Then use ZSTD_nextSrcSizeToDecompress() and ZSTD_decompressContinue() alternatively. ZSTD_nextSrcSizeToDecompress() tells how many bytes to provide as 'srcSize' to ZSTD_decompressContinue(). ZSTD_decompressContinue() requires this _exact_ amount of bytes, or it will fail. @result of ZSTD_decompressContinue() is the number of bytes regenerated within 'dst' (necessarily <= dstCapacity). It can be zero : it just means ZSTD_decompressContinue() has decoded some metadata item. It can also be an error code, which can be tested with ZSTD_isError(). A frame is fully decoded when ZSTD_nextSrcSizeToDecompress() returns zero. Context can then be reset to start a new decompression. Note : it's possible to know if next input to present is a header or a block, using ZSTD_nextInputType(). This information is not required to properly decode a frame. == Special case : skippable frames == Skippable frames allow integration of user-defined data into a flow of concatenated frames. Skippable frames will be ignored (skipped) by decompressor. The format of skippable frames is as follows : a) Skippable frame ID - 4 Bytes, Little endian format, any value from 0x184D2A50 to 0x184D2A5F b) Frame Size - 4 Bytes, Little endian format, unsigned 32-bits c) Frame Content - any content (User Data) of length equal to Frame Size For skippable frames ZSTD_getFrameHeader() returns zfhPtr->frameType==ZSTD_skippableFrame. For skippable frames ZSTD_decompressContinue() always returns 0 : it only skips the content. */ /*===== Buffer-less streaming decompression functions =====*/ typedef enum { ZSTD_frame, ZSTD_skippableFrame } ZSTD_frameType_e; typedef struct { unsigned long long frameContentSize; /* if == ZSTD_CONTENTSIZE_UNKNOWN, it means this field is not available. 0 means "empty" */ unsigned long long windowSize; /* can be very large, up to <= frameContentSize */ unsigned blockSizeMax; ZSTD_frameType_e frameType; /* if == ZSTD_skippableFrame, frameContentSize is the size of skippable content */ unsigned headerSize; unsigned dictID; unsigned checksumFlag; } ZSTD_frameHeader; /*! ZSTD_getFrameHeader() : * decode Frame Header, or requires larger `srcSize`. 
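 *  An illustrative decoding sketch (names are hypothetical, `dst` is assumed large enough to hold
 *  the entire decoded frame, and every return value should be checked with ZSTD_isError()) :
 *      ZSTD_frameHeader zfh;
 *      size_t need, srcPos = 0, dstPos = 0;
 *      ZSTD_getFrameHeader(&zfh, src, srcSize);
 *      ZSTD_decompressBegin(dctx);
 *      while ((need = ZSTD_nextSrcSizeToDecompress(dctx)) != 0) {
 *          size_t const got = ZSTD_decompressContinue(dctx, (char*)dst + dstPos, dstCapacity - dstPos,
 *                                                     (const char*)src + srcPos, need);
 *          srcPos += need;  dstPos += got;
 *      }
 *  `got` can legitimately be 0 when a metadata item (frame header, block header, checksum) was decoded.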
* @return : 0, `zfhPtr` is correctly filled, * >0, `srcSize` is too small, value is wanted `srcSize` amount, * or an error code, which can be tested using ZSTD_isError() */ ZSTDLIB_API size_t ZSTD_getFrameHeader(ZSTD_frameHeader* zfhPtr, const void* src, size_t srcSize); /**< doesn't consume input */ /*! ZSTD_getFrameHeader_advanced() : * same as ZSTD_getFrameHeader(), * with added capability to select a format (like ZSTD_f_zstd1_magicless) */ ZSTDLIB_API size_t ZSTD_getFrameHeader_advanced(ZSTD_frameHeader* zfhPtr, const void* src, size_t srcSize, ZSTD_format_e format); ZSTDLIB_API size_t ZSTD_decodingBufferSize_min(unsigned long long windowSize, unsigned long long frameContentSize); /**< when frame content size is not known, pass in frameContentSize == ZSTD_CONTENTSIZE_UNKNOWN */ ZSTDLIB_API size_t ZSTD_decompressBegin(ZSTD_DCtx* dctx); ZSTDLIB_API size_t ZSTD_decompressBegin_usingDict(ZSTD_DCtx* dctx, const void* dict, size_t dictSize); ZSTDLIB_API size_t ZSTD_decompressBegin_usingDDict(ZSTD_DCtx* dctx, const ZSTD_DDict* ddict); ZSTDLIB_API size_t ZSTD_nextSrcSizeToDecompress(ZSTD_DCtx* dctx); ZSTDLIB_API size_t ZSTD_decompressContinue(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize); /* misc */ ZSTDLIB_API void ZSTD_copyDCtx(ZSTD_DCtx* dctx, const ZSTD_DCtx* preparedDCtx); typedef enum { ZSTDnit_frameHeader, ZSTDnit_blockHeader, ZSTDnit_block, ZSTDnit_lastBlock, ZSTDnit_checksum, ZSTDnit_skippableFrame } ZSTD_nextInputType_e; ZSTDLIB_API ZSTD_nextInputType_e ZSTD_nextInputType(ZSTD_DCtx* dctx); /* ============================ */ /** Block level API */ /* ============================ */ /*! Block functions produce and decode raw zstd blocks, without frame metadata. Frame metadata cost is typically ~12 bytes, which can be non-negligible for very small blocks (< 100 bytes). But users will have to take in charge needed metadata to regenerate data, such as compressed and content sizes. A few rules to respect : - Compressing and decompressing require a context structure + Use ZSTD_createCCtx() and ZSTD_createDCtx() - It is necessary to init context before starting + compression : any ZSTD_compressBegin*() variant, including with dictionary + decompression : any ZSTD_decompressBegin*() variant, including with dictionary + copyCCtx() and copyDCtx() can be used too - Block size is limited, it must be <= ZSTD_getBlockSize() <= ZSTD_BLOCKSIZE_MAX == 128 KB + If input is larger than a block size, it's necessary to split input data into multiple blocks + For inputs larger than a single block, consider using regular ZSTD_compress() instead. Frame metadata is not that costly, and quickly becomes negligible as source size grows larger than a block. - When a block is considered not compressible enough, ZSTD_compressBlock() result will be 0 (zero) ! ===> In which case, nothing is produced into `dst` ! + User __must__ test for such outcome and deal directly with uncompressed data + A block cannot be declared incompressible if ZSTD_compressBlock() return value was != 0. Doing so would mess up with statistics history, leading to potential data corruption. + ZSTD_decompressBlock() _doesn't accept uncompressed data as input_ !! + In case of multiple successive blocks, should some of them be uncompressed, decoder must be informed of their existence in order to follow proper history. Use ZSTD_insertBlock() for such a case. 
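  An illustrative single-block round-trip (sketch only : names are hypothetical, srcSize is assumed
  <= ZSTD_getBlockSize(cctx), and error handling is omitted) :
      ZSTD_compressBegin(cctx, 3);
      ZSTD_decompressBegin(dctx);
      {   size_t const cSize = ZSTD_compressBlock(cctx, cBuf, cBufCapacity, src, srcSize);
          if (cSize == 0) {
              memcpy(dBuf, src, srcSize);
              ZSTD_insertBlock(dctx, dBuf, srcSize);
          } else {
              ZSTD_decompressBlock(dctx, dBuf, dBufCapacity, cBuf, cSize);
          }
      }
  The cSize == 0 branch is the "not compressible" case described above : the caller stores the raw
  data itself and informs the decoder via ZSTD_insertBlock() so its history stays consistent.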
*/ /*===== Raw zstd block functions =====*/ ZSTDLIB_API size_t ZSTD_getBlockSize (const ZSTD_CCtx* cctx); ZSTDLIB_API size_t ZSTD_compressBlock (ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize); ZSTDLIB_API size_t ZSTD_decompressBlock(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize); ZSTDLIB_API size_t ZSTD_insertBlock (ZSTD_DCtx* dctx, const void* blockStart, size_t blockSize); /**< insert uncompressed block into `dctx` history. Useful for multi-blocks decompression. */ #endif /* ZSTD_H_ZSTD_STATIC_LINKING_ONLY */ #if defined (__cplusplus) } #endif borgbackup-1.1.15/src/borg/algorithms/zstd/lib/dictBuilder/0000755000175000017500000000000013771325773023570 5ustar useruser00000000000000borgbackup-1.1.15/src/borg/algorithms/zstd/lib/dictBuilder/zdict.h0000644000175000017500000004402313771325506025053 0ustar useruser00000000000000/* * Copyright (c) 2016-2020, Yann Collet, Facebook, Inc. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the * LICENSE file in the root directory of this source tree) and the GPLv2 (found * in the COPYING file in the root directory of this source tree). * You may select, at your option, one of the above-listed licenses. */ #ifndef DICTBUILDER_H_001 #define DICTBUILDER_H_001 #if defined (__cplusplus) extern "C" { #endif /*====== Dependencies ======*/ #include /* size_t */ /* ===== ZDICTLIB_API : control library symbols visibility ===== */ #ifndef ZDICTLIB_VISIBILITY # if defined(__GNUC__) && (__GNUC__ >= 4) # define ZDICTLIB_VISIBILITY __attribute__ ((visibility ("default"))) # else # define ZDICTLIB_VISIBILITY # endif #endif #if defined(ZSTD_DLL_EXPORT) && (ZSTD_DLL_EXPORT==1) # define ZDICTLIB_API __declspec(dllexport) ZDICTLIB_VISIBILITY #elif defined(ZSTD_DLL_IMPORT) && (ZSTD_DLL_IMPORT==1) # define ZDICTLIB_API __declspec(dllimport) ZDICTLIB_VISIBILITY /* It isn't required but allows to generate better code, saving a function pointer load from the IAT and an indirect jump.*/ #else # define ZDICTLIB_API ZDICTLIB_VISIBILITY #endif /*! ZDICT_trainFromBuffer(): * Train a dictionary from an array of samples. * Redirect towards ZDICT_optimizeTrainFromBuffer_fastCover() single-threaded, with d=8, steps=4, * f=20, and accel=1. * Samples must be stored concatenated in a single flat buffer `samplesBuffer`, * supplied with an array of sizes `samplesSizes`, providing the size of each sample, in order. * The resulting dictionary will be saved into `dictBuffer`. * @return: size of dictionary stored into `dictBuffer` (<= `dictBufferCapacity`) * or an error code, which can be tested with ZDICT_isError(). * Note: Dictionary training will fail if there are not enough samples to construct a * dictionary, or if most of the samples are too small (< 8 bytes being the lower limit). * If dictionary training fails, you should use zstd without a dictionary, as the dictionary * would've been ineffective anyways. If you believe your samples would benefit from a dictionary * please open an issue with details, and we can look into it. * Note: ZDICT_trainFromBuffer()'s memory usage is about 6 MB. * Tips: In general, a reasonable dictionary has a size of ~ 100 KB. * It's possible to select smaller or larger size, just by specifying `dictBufferCapacity`. * In general, it's recommended to provide a few thousands samples, though this can vary a lot. * It's recommended that total size of all samples be about ~x100 times the target size of dictionary. 
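 *  A minimal training sketch (buffer names are hypothetical, error handling is abbreviated) :
 *      size_t const dictSize = ZDICT_trainFromBuffer(dictBuffer, dictBufferCapacity,
 *                                                    samplesBuffer, samplesSizes, nbSamples);
 *      if (ZDICT_isError(dictSize)) { fprintf(stderr, "%s\n", ZDICT_getErrorName(dictSize)); }
 *  The first `dictSize` bytes of `dictBuffer` can then be handed to the zstd compression side,
 *  e.g. ZSTD_compress_usingDict() or ZSTD_CCtx_loadDictionary().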
*/ ZDICTLIB_API size_t ZDICT_trainFromBuffer(void* dictBuffer, size_t dictBufferCapacity, const void* samplesBuffer, const size_t* samplesSizes, unsigned nbSamples); typedef struct { int compressionLevel; /*< optimize for a specific zstd compression level; 0 means default */ unsigned notificationLevel; /*< Write log to stderr; 0 = none (default); 1 = errors; 2 = progression; 3 = details; 4 = debug; */ unsigned dictID; /*< force dictID value; 0 means auto mode (32-bits random value) */ } ZDICT_params_t; /*! ZDICT_finalizeDictionary(): * Given a custom content as a basis for dictionary, and a set of samples, * finalize dictionary by adding headers and statistics according to the zstd * dictionary format. * * Samples must be stored concatenated in a flat buffer `samplesBuffer`, * supplied with an array of sizes `samplesSizes`, providing the size of each * sample in order. The samples are used to construct the statistics, so they * should be representative of what you will compress with this dictionary. * * The compression level can be set in `parameters`. You should pass the * compression level you expect to use in production. The statistics for each * compression level differ, so tuning the dictionary for the compression level * can help quite a bit. * * You can set an explicit dictionary ID in `parameters`, or allow us to pick * a random dictionary ID for you, but we can't guarantee no collisions. * * The dstDictBuffer and the dictContent may overlap, and the content will be * appended to the end of the header. If the header + the content doesn't fit in * maxDictSize the beginning of the content is truncated to make room, since it * is presumed that the most profitable content is at the end of the dictionary, * since that is the cheapest to reference. * * `dictContentSize` must be >= ZDICT_CONTENTSIZE_MIN bytes. * `maxDictSize` must be >= max(dictContentSize, ZSTD_DICTSIZE_MIN). * * @return: size of dictionary stored into `dstDictBuffer` (<= `maxDictSize`), * or an error code, which can be tested by ZDICT_isError(). * Note: ZDICT_finalizeDictionary() will push notifications into stderr if * instructed to, using notificationLevel>0. * NOTE: This function currently may fail in several edge cases including: * * Not enough samples * * Samples are uncompressible * * Samples are all exactly the same */ ZDICTLIB_API size_t ZDICT_finalizeDictionary(void* dstDictBuffer, size_t maxDictSize, const void* dictContent, size_t dictContentSize, const void* samplesBuffer, const size_t* samplesSizes, unsigned nbSamples, ZDICT_params_t parameters); /*====== Helper functions ======*/ ZDICTLIB_API unsigned ZDICT_getDictID(const void* dictBuffer, size_t dictSize); /**< extracts dictID; @return zero if error (not a valid dictionary) */ ZDICTLIB_API size_t ZDICT_getDictHeaderSize(const void* dictBuffer, size_t dictSize); /* returns dict header size; returns a ZSTD error code on failure */ ZDICTLIB_API unsigned ZDICT_isError(size_t errorCode); ZDICTLIB_API const char* ZDICT_getErrorName(size_t errorCode); #ifdef ZDICT_STATIC_LINKING_ONLY /* ==================================================================================== * The definitions in this section are considered experimental. * They should never be used with a dynamic library, as they may change in the future. * They are provided for advanced usages. * Use them only in association with static linking. 
* ==================================================================================== */ #define ZDICT_CONTENTSIZE_MIN 128 #define ZDICT_DICTSIZE_MIN 256 /*! ZDICT_cover_params_t: * k and d are the only required parameters. * For others, value 0 means default. */ typedef struct { unsigned k; /* Segment size : constraint: 0 < k : Reasonable range [16, 2048+] */ unsigned d; /* dmer size : constraint: 0 < d <= k : Reasonable range [6, 16] */ unsigned steps; /* Number of steps : Only used for optimization : 0 means default (40) : Higher means more parameters checked */ unsigned nbThreads; /* Number of threads : constraint: 0 < nbThreads : 1 means single-threaded : Only used for optimization : Ignored if ZSTD_MULTITHREAD is not defined */ double splitPoint; /* Percentage of samples used for training: Only used for optimization : the first nbSamples * splitPoint samples will be used to training, the last nbSamples * (1 - splitPoint) samples will be used for testing, 0 means default (1.0), 1.0 when all samples are used for both training and testing */ unsigned shrinkDict; /* Train dictionaries to shrink in size starting from the minimum size and selects the smallest dictionary that is shrinkDictMaxRegression% worse than the largest dictionary. 0 means no shrinking and 1 means shrinking */ unsigned shrinkDictMaxRegression; /* Sets shrinkDictMaxRegression so that a smaller dictionary can be at worse shrinkDictMaxRegression% worse than the max dict size dictionary. */ ZDICT_params_t zParams; } ZDICT_cover_params_t; typedef struct { unsigned k; /* Segment size : constraint: 0 < k : Reasonable range [16, 2048+] */ unsigned d; /* dmer size : constraint: 0 < d <= k : Reasonable range [6, 16] */ unsigned f; /* log of size of frequency array : constraint: 0 < f <= 31 : 1 means default(20)*/ unsigned steps; /* Number of steps : Only used for optimization : 0 means default (40) : Higher means more parameters checked */ unsigned nbThreads; /* Number of threads : constraint: 0 < nbThreads : 1 means single-threaded : Only used for optimization : Ignored if ZSTD_MULTITHREAD is not defined */ double splitPoint; /* Percentage of samples used for training: Only used for optimization : the first nbSamples * splitPoint samples will be used to training, the last nbSamples * (1 - splitPoint) samples will be used for testing, 0 means default (0.75), 1.0 when all samples are used for both training and testing */ unsigned accel; /* Acceleration level: constraint: 0 < accel <= 10, higher means faster and less accurate, 0 means default(1) */ unsigned shrinkDict; /* Train dictionaries to shrink in size starting from the minimum size and selects the smallest dictionary that is shrinkDictMaxRegression% worse than the largest dictionary. 0 means no shrinking and 1 means shrinking */ unsigned shrinkDictMaxRegression; /* Sets shrinkDictMaxRegression so that a smaller dictionary can be at worse shrinkDictMaxRegression% worse than the max dict size dictionary. */ ZDICT_params_t zParams; } ZDICT_fastCover_params_t; /*! ZDICT_trainFromBuffer_cover(): * Train a dictionary from an array of samples using the COVER algorithm. * Samples must be stored concatenated in a single flat buffer `samplesBuffer`, * supplied with an array of sizes `samplesSizes`, providing the size of each sample, in order. * The resulting dictionary will be saved into `dictBuffer`. * @return: size of dictionary stored into `dictBuffer` (<= `dictBufferCapacity`) * or an error code, which can be tested with ZDICT_isError(). 
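 *  As an illustration (hypothetical values, not tuning advice; k and d are the required fields,
 *  everything else may stay 0) :
 *      ZDICT_cover_params_t params;
 *      memset(&params, 0, sizeof(params));
 *      params.k = 1024;
 *      params.d = 8;
 *      size_t const dictSize = ZDICT_trainFromBuffer_cover(dictBuffer, dictBufferCapacity,
 *                                                          samplesBuffer, samplesSizes, nbSamples, params);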
* See ZDICT_trainFromBuffer() for details on failure modes. * Note: ZDICT_trainFromBuffer_cover() requires about 9 bytes of memory for each input byte. * Tips: In general, a reasonable dictionary has a size of ~ 100 KB. * It's possible to select smaller or larger size, just by specifying `dictBufferCapacity`. * In general, it's recommended to provide a few thousands samples, though this can vary a lot. * It's recommended that total size of all samples be about ~x100 times the target size of dictionary. */ ZDICTLIB_API size_t ZDICT_trainFromBuffer_cover( void *dictBuffer, size_t dictBufferCapacity, const void *samplesBuffer, const size_t *samplesSizes, unsigned nbSamples, ZDICT_cover_params_t parameters); /*! ZDICT_optimizeTrainFromBuffer_cover(): * The same requirements as above hold for all the parameters except `parameters`. * This function tries many parameter combinations and picks the best parameters. * `*parameters` is filled with the best parameters found, * dictionary constructed with those parameters is stored in `dictBuffer`. * * All of the parameters d, k, steps are optional. * If d is non-zero then we don't check multiple values of d, otherwise we check d = {6, 8}. * if steps is zero it defaults to its default value. * If k is non-zero then we don't check multiple values of k, otherwise we check steps values in [50, 2000]. * * @return: size of dictionary stored into `dictBuffer` (<= `dictBufferCapacity`) * or an error code, which can be tested with ZDICT_isError(). * On success `*parameters` contains the parameters selected. * See ZDICT_trainFromBuffer() for details on failure modes. * Note: ZDICT_optimizeTrainFromBuffer_cover() requires about 8 bytes of memory for each input byte and additionally another 5 bytes of memory for each byte of memory for each thread. */ ZDICTLIB_API size_t ZDICT_optimizeTrainFromBuffer_cover( void* dictBuffer, size_t dictBufferCapacity, const void* samplesBuffer, const size_t* samplesSizes, unsigned nbSamples, ZDICT_cover_params_t* parameters); /*! ZDICT_trainFromBuffer_fastCover(): * Train a dictionary from an array of samples using a modified version of COVER algorithm. * Samples must be stored concatenated in a single flat buffer `samplesBuffer`, * supplied with an array of sizes `samplesSizes`, providing the size of each sample, in order. * d and k are required. * All other parameters are optional, will use default values if not provided * The resulting dictionary will be saved into `dictBuffer`. * @return: size of dictionary stored into `dictBuffer` (<= `dictBufferCapacity`) * or an error code, which can be tested with ZDICT_isError(). * See ZDICT_trainFromBuffer() for details on failure modes. * Note: ZDICT_trainFromBuffer_fastCover() requires 6 * 2^f bytes of memory. * Tips: In general, a reasonable dictionary has a size of ~ 100 KB. * It's possible to select smaller or larger size, just by specifying `dictBufferCapacity`. * In general, it's recommended to provide a few thousands samples, though this can vary a lot. * It's recommended that total size of all samples be about ~x100 times the target size of dictionary. */ ZDICTLIB_API size_t ZDICT_trainFromBuffer_fastCover(void *dictBuffer, size_t dictBufferCapacity, const void *samplesBuffer, const size_t *samplesSizes, unsigned nbSamples, ZDICT_fastCover_params_t parameters); /*! ZDICT_optimizeTrainFromBuffer_fastCover(): * The same requirements as above hold for all the parameters except `parameters`. 
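 *  For example (sketch; buffer names hypothetical), a zeroed parameter block lets the function
 *  search k and d itself and report the selected values back :
 *      ZDICT_fastCover_params_t params;
 *      memset(&params, 0, sizeof(params));
 *      size_t const dictSize = ZDICT_optimizeTrainFromBuffer_fastCover(dictBuffer, dictBufferCapacity,
 *                                                                      samplesBuffer, samplesSizes, nbSamples, &params);
 *  On success, params.k and params.d contain the chosen parameters.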
* This function tries many parameter combinations (specifically, k and d combinations) * and picks the best parameters. `*parameters` is filled with the best parameters found, * dictionary constructed with those parameters is stored in `dictBuffer`. * All of the parameters d, k, steps, f, and accel are optional. * If d is non-zero then we don't check multiple values of d, otherwise we check d = {6, 8}. * if steps is zero it defaults to its default value. * If k is non-zero then we don't check multiple values of k, otherwise we check steps values in [50, 2000]. * If f is zero, default value of 20 is used. * If accel is zero, default value of 1 is used. * * @return: size of dictionary stored into `dictBuffer` (<= `dictBufferCapacity`) * or an error code, which can be tested with ZDICT_isError(). * On success `*parameters` contains the parameters selected. * See ZDICT_trainFromBuffer() for details on failure modes. * Note: ZDICT_optimizeTrainFromBuffer_fastCover() requires about 6 * 2^f bytes of memory for each thread. */ ZDICTLIB_API size_t ZDICT_optimizeTrainFromBuffer_fastCover(void* dictBuffer, size_t dictBufferCapacity, const void* samplesBuffer, const size_t* samplesSizes, unsigned nbSamples, ZDICT_fastCover_params_t* parameters); typedef struct { unsigned selectivityLevel; /* 0 means default; larger => select more => larger dictionary */ ZDICT_params_t zParams; } ZDICT_legacy_params_t; /*! ZDICT_trainFromBuffer_legacy(): * Train a dictionary from an array of samples. * Samples must be stored concatenated in a single flat buffer `samplesBuffer`, * supplied with an array of sizes `samplesSizes`, providing the size of each sample, in order. * The resulting dictionary will be saved into `dictBuffer`. * `parameters` is optional and can be provided with values set to 0 to mean "default". * @return: size of dictionary stored into `dictBuffer` (<= `dictBufferCapacity`) * or an error code, which can be tested with ZDICT_isError(). * See ZDICT_trainFromBuffer() for details on failure modes. * Tips: In general, a reasonable dictionary has a size of ~ 100 KB. * It's possible to select smaller or larger size, just by specifying `dictBufferCapacity`. * In general, it's recommended to provide a few thousands samples, though this can vary a lot. * It's recommended that total size of all samples be about ~x100 times the target size of dictionary. * Note: ZDICT_trainFromBuffer_legacy() will send notifications into stderr if instructed to, using notificationLevel>0. */ ZDICTLIB_API size_t ZDICT_trainFromBuffer_legacy( void *dictBuffer, size_t dictBufferCapacity, const void *samplesBuffer, const size_t *samplesSizes, unsigned nbSamples, ZDICT_legacy_params_t parameters); /* Deprecation warnings */ /* It is generally possible to disable deprecation warnings from compiler, for example with -Wno-deprecated-declarations for gcc or _CRT_SECURE_NO_WARNINGS in Visual. 
Otherwise, it's also possible to manually define ZDICT_DISABLE_DEPRECATE_WARNINGS */ #ifdef ZDICT_DISABLE_DEPRECATE_WARNINGS # define ZDICT_DEPRECATED(message) ZDICTLIB_API /* disable deprecation warnings */ #else # define ZDICT_GCC_VERSION (__GNUC__ * 100 + __GNUC_MINOR__) # if defined (__cplusplus) && (__cplusplus >= 201402) /* C++14 or greater */ # define ZDICT_DEPRECATED(message) [[deprecated(message)]] ZDICTLIB_API # elif (ZDICT_GCC_VERSION >= 405) || defined(__clang__) # define ZDICT_DEPRECATED(message) ZDICTLIB_API __attribute__((deprecated(message))) # elif (ZDICT_GCC_VERSION >= 301) # define ZDICT_DEPRECATED(message) ZDICTLIB_API __attribute__((deprecated)) # elif defined(_MSC_VER) # define ZDICT_DEPRECATED(message) ZDICTLIB_API __declspec(deprecated(message)) # else # pragma message("WARNING: You need to implement ZDICT_DEPRECATED for this compiler") # define ZDICT_DEPRECATED(message) ZDICTLIB_API # endif #endif /* ZDICT_DISABLE_DEPRECATE_WARNINGS */ ZDICT_DEPRECATED("use ZDICT_finalizeDictionary() instead") size_t ZDICT_addEntropyTablesFromBuffer(void* dictBuffer, size_t dictContentSize, size_t dictBufferCapacity, const void* samplesBuffer, const size_t* samplesSizes, unsigned nbSamples); #endif /* ZDICT_STATIC_LINKING_ONLY */ #if defined (__cplusplus) } #endif #endif /* DICTBUILDER_H_001 */ borgbackup-1.1.15/src/borg/algorithms/zstd/lib/dictBuilder/fastcover.c0000644000175000017500000006746013771325506025737 0ustar useruser00000000000000/* * Copyright (c) 2018-2020, Facebook, Inc. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the * LICENSE file in the root directory of this source tree) and the GPLv2 (found * in the COPYING file in the root directory of this source tree). * You may select, at your option, one of the above-listed licenses. */ /*-************************************* * Dependencies ***************************************/ #include /* fprintf */ #include /* malloc, free, qsort */ #include /* memset */ #include /* clock */ #include "../common/mem.h" /* read */ #include "../common/pool.h" #include "../common/threading.h" #include "cover.h" #include "../common/zstd_internal.h" /* includes zstd.h */ #ifndef ZDICT_STATIC_LINKING_ONLY #define ZDICT_STATIC_LINKING_ONLY #endif #include "zdict.h" /*-************************************* * Constants ***************************************/ #define FASTCOVER_MAX_SAMPLES_SIZE (sizeof(size_t) == 8 ? ((unsigned)-1) : ((unsigned)1 GB)) #define FASTCOVER_MAX_F 31 #define FASTCOVER_MAX_ACCEL 10 #define DEFAULT_SPLITPOINT 0.75 #define DEFAULT_F 20 #define DEFAULT_ACCEL 1 /*-************************************* * Console display ***************************************/ static int g_displayLevel = 2; #define DISPLAY(...) \ { \ fprintf(stderr, __VA_ARGS__); \ fflush(stderr); \ } #define LOCALDISPLAYLEVEL(displayLevel, l, ...) \ if (displayLevel >= l) { \ DISPLAY(__VA_ARGS__); \ } /* 0 : no display; 1: errors; 2: default; 3: details; 4: debug */ #define DISPLAYLEVEL(l, ...) LOCALDISPLAYLEVEL(g_displayLevel, l, __VA_ARGS__) #define LOCALDISPLAYUPDATE(displayLevel, l, ...) \ if (displayLevel >= l) { \ if ((clock() - g_time > refreshRate) || (displayLevel >= 4)) { \ g_time = clock(); \ DISPLAY(__VA_ARGS__); \ } \ } #define DISPLAYUPDATE(l, ...) 
LOCALDISPLAYUPDATE(g_displayLevel, l, __VA_ARGS__) static const clock_t refreshRate = CLOCKS_PER_SEC * 15 / 100; static clock_t g_time = 0; /*-************************************* * Hash Functions ***************************************/ static const U64 prime6bytes = 227718039650203ULL; static size_t ZSTD_hash6(U64 u, U32 h) { return (size_t)(((u << (64-48)) * prime6bytes) >> (64-h)) ; } static size_t ZSTD_hash6Ptr(const void* p, U32 h) { return ZSTD_hash6(MEM_readLE64(p), h); } static const U64 prime8bytes = 0xCF1BBCDCB7A56463ULL; static size_t ZSTD_hash8(U64 u, U32 h) { return (size_t)(((u) * prime8bytes) >> (64-h)) ; } static size_t ZSTD_hash8Ptr(const void* p, U32 h) { return ZSTD_hash8(MEM_readLE64(p), h); } /** * Hash the d-byte value pointed to by p and mod 2^f */ static size_t FASTCOVER_hashPtrToIndex(const void* p, U32 h, unsigned d) { if (d == 6) { return ZSTD_hash6Ptr(p, h) & ((1 << h) - 1); } return ZSTD_hash8Ptr(p, h) & ((1 << h) - 1); } /*-************************************* * Acceleration ***************************************/ typedef struct { unsigned finalize; /* Percentage of training samples used for ZDICT_finalizeDictionary */ unsigned skip; /* Number of dmer skipped between each dmer counted in computeFrequency */ } FASTCOVER_accel_t; static const FASTCOVER_accel_t FASTCOVER_defaultAccelParameters[FASTCOVER_MAX_ACCEL+1] = { { 100, 0 }, /* accel = 0, should not happen because accel = 0 defaults to accel = 1 */ { 100, 0 }, /* accel = 1 */ { 50, 1 }, /* accel = 2 */ { 34, 2 }, /* accel = 3 */ { 25, 3 }, /* accel = 4 */ { 20, 4 }, /* accel = 5 */ { 17, 5 }, /* accel = 6 */ { 14, 6 }, /* accel = 7 */ { 13, 7 }, /* accel = 8 */ { 11, 8 }, /* accel = 9 */ { 10, 9 }, /* accel = 10 */ }; /*-************************************* * Context ***************************************/ typedef struct { const BYTE *samples; size_t *offsets; const size_t *samplesSizes; size_t nbSamples; size_t nbTrainSamples; size_t nbTestSamples; size_t nbDmers; U32 *freqs; unsigned d; unsigned f; FASTCOVER_accel_t accelParams; } FASTCOVER_ctx_t; /*-************************************* * Helper functions ***************************************/ /** * Selects the best segment in an epoch. * Segments of are scored according to the function: * * Let F(d) be the frequency of all dmers with hash value d. * Let S_i be hash value of the dmer at position i of segment S which has length k. * * Score(S) = F(S_1) + F(S_2) + ... + F(S_{k-d+1}) * * Once the dmer with hash value d is in the dictionary we set F(d) = 0. */ static COVER_segment_t FASTCOVER_selectSegment(const FASTCOVER_ctx_t *ctx, U32 *freqs, U32 begin, U32 end, ZDICT_cover_params_t parameters, U16* segmentFreqs) { /* Constants */ const U32 k = parameters.k; const U32 d = parameters.d; const U32 f = ctx->f; const U32 dmersInK = k - d + 1; /* Try each segment (activeSegment) and save the best (bestSegment) */ COVER_segment_t bestSegment = {0, 0, 0}; COVER_segment_t activeSegment; /* Reset the activeDmers in the segment */ /* The activeSegment starts at the beginning of the epoch. */ activeSegment.begin = begin; activeSegment.end = begin; activeSegment.score = 0; /* Slide the activeSegment through the whole epoch. * Save the best segment in bestSegment. 
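   * Each step adds the frequency of the dmer entering the window (only on its first occurrence
   * within the segment), and subtracts it again when its last occurrence leaves the window, so
   * every candidate segment is scored incrementally instead of from scratch.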
*/ while (activeSegment.end < end) { /* Get hash value of current dmer */ const size_t idx = FASTCOVER_hashPtrToIndex(ctx->samples + activeSegment.end, f, d); /* Add frequency of this index to score if this is the first occurrence of index in active segment */ if (segmentFreqs[idx] == 0) { activeSegment.score += freqs[idx]; } /* Increment end of segment and segmentFreqs*/ activeSegment.end += 1; segmentFreqs[idx] += 1; /* If the window is now too large, drop the first position */ if (activeSegment.end - activeSegment.begin == dmersInK + 1) { /* Get hash value of the dmer to be eliminated from active segment */ const size_t delIndex = FASTCOVER_hashPtrToIndex(ctx->samples + activeSegment.begin, f, d); segmentFreqs[delIndex] -= 1; /* Subtract frequency of this index from score if this is the last occurrence of this index in active segment */ if (segmentFreqs[delIndex] == 0) { activeSegment.score -= freqs[delIndex]; } /* Increment start of segment */ activeSegment.begin += 1; } /* If this segment is the best so far save it */ if (activeSegment.score > bestSegment.score) { bestSegment = activeSegment; } } /* Zero out rest of segmentFreqs array */ while (activeSegment.begin < end) { const size_t delIndex = FASTCOVER_hashPtrToIndex(ctx->samples + activeSegment.begin, f, d); segmentFreqs[delIndex] -= 1; activeSegment.begin += 1; } { /* Zero the frequency of hash value of each dmer covered by the chosen segment. */ U32 pos; for (pos = bestSegment.begin; pos != bestSegment.end; ++pos) { const size_t i = FASTCOVER_hashPtrToIndex(ctx->samples + pos, f, d); freqs[i] = 0; } } return bestSegment; } static int FASTCOVER_checkParameters(ZDICT_cover_params_t parameters, size_t maxDictSize, unsigned f, unsigned accel) { /* k, d, and f are required parameters */ if (parameters.d == 0 || parameters.k == 0) { return 0; } /* d has to be 6 or 8 */ if (parameters.d != 6 && parameters.d != 8) { return 0; } /* k <= maxDictSize */ if (parameters.k > maxDictSize) { return 0; } /* d <= k */ if (parameters.d > parameters.k) { return 0; } /* 0 < f <= FASTCOVER_MAX_F*/ if (f > FASTCOVER_MAX_F || f == 0) { return 0; } /* 0 < splitPoint <= 1 */ if (parameters.splitPoint <= 0 || parameters.splitPoint > 1) { return 0; } /* 0 < accel <= 10 */ if (accel > 10 || accel == 0) { return 0; } return 1; } /** * Clean up a context initialized with `FASTCOVER_ctx_init()`. */ static void FASTCOVER_ctx_destroy(FASTCOVER_ctx_t* ctx) { if (!ctx) return; free(ctx->freqs); ctx->freqs = NULL; free(ctx->offsets); ctx->offsets = NULL; } /** * Calculate for frequency of hash value of each dmer in ctx->samples */ static void FASTCOVER_computeFrequency(U32* freqs, const FASTCOVER_ctx_t* ctx) { const unsigned f = ctx->f; const unsigned d = ctx->d; const unsigned skip = ctx->accelParams.skip; const unsigned readLength = MAX(d, 8); size_t i; assert(ctx->nbTrainSamples >= 5); assert(ctx->nbTrainSamples <= ctx->nbSamples); for (i = 0; i < ctx->nbTrainSamples; i++) { size_t start = ctx->offsets[i]; /* start of current dmer */ size_t const currSampleEnd = ctx->offsets[i+1]; while (start + readLength <= currSampleEnd) { const size_t dmerIndex = FASTCOVER_hashPtrToIndex(ctx->samples + start, f, d); freqs[dmerIndex]++; start = start + skip + 1; } } } /** * Prepare a context for dictionary building. * The context is only dependent on the parameter `d` and can used multiple * times. * Returns 0 on success or error code on error. * The context must be destroyed with `FASTCOVER_ctx_destroy()`. 
*/ static size_t FASTCOVER_ctx_init(FASTCOVER_ctx_t* ctx, const void* samplesBuffer, const size_t* samplesSizes, unsigned nbSamples, unsigned d, double splitPoint, unsigned f, FASTCOVER_accel_t accelParams) { const BYTE* const samples = (const BYTE*)samplesBuffer; const size_t totalSamplesSize = COVER_sum(samplesSizes, nbSamples); /* Split samples into testing and training sets */ const unsigned nbTrainSamples = splitPoint < 1.0 ? (unsigned)((double)nbSamples * splitPoint) : nbSamples; const unsigned nbTestSamples = splitPoint < 1.0 ? nbSamples - nbTrainSamples : nbSamples; const size_t trainingSamplesSize = splitPoint < 1.0 ? COVER_sum(samplesSizes, nbTrainSamples) : totalSamplesSize; const size_t testSamplesSize = splitPoint < 1.0 ? COVER_sum(samplesSizes + nbTrainSamples, nbTestSamples) : totalSamplesSize; /* Checks */ if (totalSamplesSize < MAX(d, sizeof(U64)) || totalSamplesSize >= (size_t)FASTCOVER_MAX_SAMPLES_SIZE) { DISPLAYLEVEL(1, "Total samples size is too large (%u MB), maximum size is %u MB\n", (unsigned)(totalSamplesSize >> 20), (FASTCOVER_MAX_SAMPLES_SIZE >> 20)); return ERROR(srcSize_wrong); } /* Check if there are at least 5 training samples */ if (nbTrainSamples < 5) { DISPLAYLEVEL(1, "Total number of training samples is %u and is invalid\n", nbTrainSamples); return ERROR(srcSize_wrong); } /* Check if there's testing sample */ if (nbTestSamples < 1) { DISPLAYLEVEL(1, "Total number of testing samples is %u and is invalid.\n", nbTestSamples); return ERROR(srcSize_wrong); } /* Zero the context */ memset(ctx, 0, sizeof(*ctx)); DISPLAYLEVEL(2, "Training on %u samples of total size %u\n", nbTrainSamples, (unsigned)trainingSamplesSize); DISPLAYLEVEL(2, "Testing on %u samples of total size %u\n", nbTestSamples, (unsigned)testSamplesSize); ctx->samples = samples; ctx->samplesSizes = samplesSizes; ctx->nbSamples = nbSamples; ctx->nbTrainSamples = nbTrainSamples; ctx->nbTestSamples = nbTestSamples; ctx->nbDmers = trainingSamplesSize - MAX(d, sizeof(U64)) + 1; ctx->d = d; ctx->f = f; ctx->accelParams = accelParams; /* The offsets of each file */ ctx->offsets = (size_t*)calloc((nbSamples + 1), sizeof(size_t)); if (ctx->offsets == NULL) { DISPLAYLEVEL(1, "Failed to allocate scratch buffers \n"); FASTCOVER_ctx_destroy(ctx); return ERROR(memory_allocation); } /* Fill offsets from the samplesSizes */ { U32 i; ctx->offsets[0] = 0; assert(nbSamples >= 5); for (i = 1; i <= nbSamples; ++i) { ctx->offsets[i] = ctx->offsets[i - 1] + samplesSizes[i - 1]; } } /* Initialize frequency array of size 2^f */ ctx->freqs = (U32*)calloc(((U64)1 << f), sizeof(U32)); if (ctx->freqs == NULL) { DISPLAYLEVEL(1, "Failed to allocate frequency table \n"); FASTCOVER_ctx_destroy(ctx); return ERROR(memory_allocation); } DISPLAYLEVEL(2, "Computing frequencies\n"); FASTCOVER_computeFrequency(ctx->freqs, ctx); return 0; } /** * Given the prepared context build the dictionary. */ static size_t FASTCOVER_buildDictionary(const FASTCOVER_ctx_t* ctx, U32* freqs, void* dictBuffer, size_t dictBufferCapacity, ZDICT_cover_params_t parameters, U16* segmentFreqs) { BYTE *const dict = (BYTE *)dictBuffer; size_t tail = dictBufferCapacity; /* Divide the data into epochs. We will select one segment from each epoch. 
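     * Epochs are then visited round-robin; each visit contributes at most one segment, copied to
     * the back of the dictionary buffer, until the buffer is full or segments stop scoring.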
*/ const COVER_epoch_info_t epochs = COVER_computeEpochs( (U32)dictBufferCapacity, (U32)ctx->nbDmers, parameters.k, 1); const size_t maxZeroScoreRun = 10; size_t zeroScoreRun = 0; size_t epoch; DISPLAYLEVEL(2, "Breaking content into %u epochs of size %u\n", (U32)epochs.num, (U32)epochs.size); /* Loop through the epochs until there are no more segments or the dictionary * is full. */ for (epoch = 0; tail > 0; epoch = (epoch + 1) % epochs.num) { const U32 epochBegin = (U32)(epoch * epochs.size); const U32 epochEnd = epochBegin + epochs.size; size_t segmentSize; /* Select a segment */ COVER_segment_t segment = FASTCOVER_selectSegment( ctx, freqs, epochBegin, epochEnd, parameters, segmentFreqs); /* If the segment covers no dmers, then we are out of content. * There may be new content in other epochs, for continue for some time. */ if (segment.score == 0) { if (++zeroScoreRun >= maxZeroScoreRun) { break; } continue; } zeroScoreRun = 0; /* Trim the segment if necessary and if it is too small then we are done */ segmentSize = MIN(segment.end - segment.begin + parameters.d - 1, tail); if (segmentSize < parameters.d) { break; } /* We fill the dictionary from the back to allow the best segments to be * referenced with the smallest offsets. */ tail -= segmentSize; memcpy(dict + tail, ctx->samples + segment.begin, segmentSize); DISPLAYUPDATE( 2, "\r%u%% ", (unsigned)(((dictBufferCapacity - tail) * 100) / dictBufferCapacity)); } DISPLAYLEVEL(2, "\r%79s\r", ""); return tail; } /** * Parameters for FASTCOVER_tryParameters(). */ typedef struct FASTCOVER_tryParameters_data_s { const FASTCOVER_ctx_t* ctx; COVER_best_t* best; size_t dictBufferCapacity; ZDICT_cover_params_t parameters; } FASTCOVER_tryParameters_data_t; /** * Tries a set of parameters and updates the COVER_best_t with the results. * This function is thread safe if zstd is compiled with multithreaded support. * It takes its parameters as an *OWNING* opaque pointer to support threading. 
*/ static void FASTCOVER_tryParameters(void *opaque) { /* Save parameters as local variables */ FASTCOVER_tryParameters_data_t *const data = (FASTCOVER_tryParameters_data_t *)opaque; const FASTCOVER_ctx_t *const ctx = data->ctx; const ZDICT_cover_params_t parameters = data->parameters; size_t dictBufferCapacity = data->dictBufferCapacity; size_t totalCompressedSize = ERROR(GENERIC); /* Initialize array to keep track of frequency of dmer within activeSegment */ U16* segmentFreqs = (U16 *)calloc(((U64)1 << ctx->f), sizeof(U16)); /* Allocate space for hash table, dict, and freqs */ BYTE *const dict = (BYTE * const)malloc(dictBufferCapacity); COVER_dictSelection_t selection = COVER_dictSelectionError(ERROR(GENERIC)); U32 *freqs = (U32*) malloc(((U64)1 << ctx->f) * sizeof(U32)); if (!segmentFreqs || !dict || !freqs) { DISPLAYLEVEL(1, "Failed to allocate buffers: out of memory\n"); goto _cleanup; } /* Copy the frequencies because we need to modify them */ memcpy(freqs, ctx->freqs, ((U64)1 << ctx->f) * sizeof(U32)); /* Build the dictionary */ { const size_t tail = FASTCOVER_buildDictionary(ctx, freqs, dict, dictBufferCapacity, parameters, segmentFreqs); const unsigned nbFinalizeSamples = (unsigned)(ctx->nbTrainSamples * ctx->accelParams.finalize / 100); selection = COVER_selectDict(dict + tail, dictBufferCapacity - tail, ctx->samples, ctx->samplesSizes, nbFinalizeSamples, ctx->nbTrainSamples, ctx->nbSamples, parameters, ctx->offsets, totalCompressedSize); if (COVER_dictSelectionIsError(selection)) { DISPLAYLEVEL(1, "Failed to select dictionary\n"); goto _cleanup; } } _cleanup: free(dict); COVER_best_finish(data->best, parameters, selection); free(data); free(segmentFreqs); COVER_dictSelectionFree(selection); free(freqs); } static void FASTCOVER_convertToCoverParams(ZDICT_fastCover_params_t fastCoverParams, ZDICT_cover_params_t* coverParams) { coverParams->k = fastCoverParams.k; coverParams->d = fastCoverParams.d; coverParams->steps = fastCoverParams.steps; coverParams->nbThreads = fastCoverParams.nbThreads; coverParams->splitPoint = fastCoverParams.splitPoint; coverParams->zParams = fastCoverParams.zParams; coverParams->shrinkDict = fastCoverParams.shrinkDict; } static void FASTCOVER_convertToFastCoverParams(ZDICT_cover_params_t coverParams, ZDICT_fastCover_params_t* fastCoverParams, unsigned f, unsigned accel) { fastCoverParams->k = coverParams.k; fastCoverParams->d = coverParams.d; fastCoverParams->steps = coverParams.steps; fastCoverParams->nbThreads = coverParams.nbThreads; fastCoverParams->splitPoint = coverParams.splitPoint; fastCoverParams->f = f; fastCoverParams->accel = accel; fastCoverParams->zParams = coverParams.zParams; fastCoverParams->shrinkDict = coverParams.shrinkDict; } ZDICTLIB_API size_t ZDICT_trainFromBuffer_fastCover(void* dictBuffer, size_t dictBufferCapacity, const void* samplesBuffer, const size_t* samplesSizes, unsigned nbSamples, ZDICT_fastCover_params_t parameters) { BYTE* const dict = (BYTE*)dictBuffer; FASTCOVER_ctx_t ctx; ZDICT_cover_params_t coverParams; FASTCOVER_accel_t accelParams; /* Initialize global data */ g_displayLevel = parameters.zParams.notificationLevel; /* Assign splitPoint and f if not provided */ parameters.splitPoint = 1.0; parameters.f = parameters.f == 0 ? DEFAULT_F : parameters.f; parameters.accel = parameters.accel == 0 ? 
DEFAULT_ACCEL : parameters.accel; /* Convert to cover parameter */ memset(&coverParams, 0 , sizeof(coverParams)); FASTCOVER_convertToCoverParams(parameters, &coverParams); /* Checks */ if (!FASTCOVER_checkParameters(coverParams, dictBufferCapacity, parameters.f, parameters.accel)) { DISPLAYLEVEL(1, "FASTCOVER parameters incorrect\n"); return ERROR(parameter_outOfBound); } if (nbSamples == 0) { DISPLAYLEVEL(1, "FASTCOVER must have at least one input file\n"); return ERROR(srcSize_wrong); } if (dictBufferCapacity < ZDICT_DICTSIZE_MIN) { DISPLAYLEVEL(1, "dictBufferCapacity must be at least %u\n", ZDICT_DICTSIZE_MIN); return ERROR(dstSize_tooSmall); } /* Assign corresponding FASTCOVER_accel_t to accelParams*/ accelParams = FASTCOVER_defaultAccelParameters[parameters.accel]; /* Initialize context */ { size_t const initVal = FASTCOVER_ctx_init(&ctx, samplesBuffer, samplesSizes, nbSamples, coverParams.d, parameters.splitPoint, parameters.f, accelParams); if (ZSTD_isError(initVal)) { DISPLAYLEVEL(1, "Failed to initialize context\n"); return initVal; } } COVER_warnOnSmallCorpus(dictBufferCapacity, ctx.nbDmers, g_displayLevel); /* Build the dictionary */ DISPLAYLEVEL(2, "Building dictionary\n"); { /* Initialize array to keep track of frequency of dmer within activeSegment */ U16* segmentFreqs = (U16 *)calloc(((U64)1 << parameters.f), sizeof(U16)); const size_t tail = FASTCOVER_buildDictionary(&ctx, ctx.freqs, dictBuffer, dictBufferCapacity, coverParams, segmentFreqs); const unsigned nbFinalizeSamples = (unsigned)(ctx.nbTrainSamples * ctx.accelParams.finalize / 100); const size_t dictionarySize = ZDICT_finalizeDictionary( dict, dictBufferCapacity, dict + tail, dictBufferCapacity - tail, samplesBuffer, samplesSizes, nbFinalizeSamples, coverParams.zParams); if (!ZSTD_isError(dictionarySize)) { DISPLAYLEVEL(2, "Constructed dictionary of size %u\n", (unsigned)dictionarySize); } FASTCOVER_ctx_destroy(&ctx); free(segmentFreqs); return dictionarySize; } } ZDICTLIB_API size_t ZDICT_optimizeTrainFromBuffer_fastCover( void* dictBuffer, size_t dictBufferCapacity, const void* samplesBuffer, const size_t* samplesSizes, unsigned nbSamples, ZDICT_fastCover_params_t* parameters) { ZDICT_cover_params_t coverParams; FASTCOVER_accel_t accelParams; /* constants */ const unsigned nbThreads = parameters->nbThreads; const double splitPoint = parameters->splitPoint <= 0.0 ? DEFAULT_SPLITPOINT : parameters->splitPoint; const unsigned kMinD = parameters->d == 0 ? 6 : parameters->d; const unsigned kMaxD = parameters->d == 0 ? 8 : parameters->d; const unsigned kMinK = parameters->k == 0 ? 50 : parameters->k; const unsigned kMaxK = parameters->k == 0 ? 2000 : parameters->k; const unsigned kSteps = parameters->steps == 0 ? 40 : parameters->steps; const unsigned kStepSize = MAX((kMaxK - kMinK) / kSteps, 1); const unsigned kIterations = (1 + (kMaxD - kMinD) / 2) * (1 + (kMaxK - kMinK) / kStepSize); const unsigned f = parameters->f == 0 ? DEFAULT_F : parameters->f; const unsigned accel = parameters->accel == 0 ? 
DEFAULT_ACCEL : parameters->accel; const unsigned shrinkDict = 0; /* Local variables */ const int displayLevel = parameters->zParams.notificationLevel; unsigned iteration = 1; unsigned d; unsigned k; COVER_best_t best; POOL_ctx *pool = NULL; int warned = 0; /* Checks */ if (splitPoint <= 0 || splitPoint > 1) { LOCALDISPLAYLEVEL(displayLevel, 1, "Incorrect splitPoint\n"); return ERROR(parameter_outOfBound); } if (accel == 0 || accel > FASTCOVER_MAX_ACCEL) { LOCALDISPLAYLEVEL(displayLevel, 1, "Incorrect accel\n"); return ERROR(parameter_outOfBound); } if (kMinK < kMaxD || kMaxK < kMinK) { LOCALDISPLAYLEVEL(displayLevel, 1, "Incorrect k\n"); return ERROR(parameter_outOfBound); } if (nbSamples == 0) { LOCALDISPLAYLEVEL(displayLevel, 1, "FASTCOVER must have at least one input file\n"); return ERROR(srcSize_wrong); } if (dictBufferCapacity < ZDICT_DICTSIZE_MIN) { LOCALDISPLAYLEVEL(displayLevel, 1, "dictBufferCapacity must be at least %u\n", ZDICT_DICTSIZE_MIN); return ERROR(dstSize_tooSmall); } if (nbThreads > 1) { pool = POOL_create(nbThreads, 1); if (!pool) { return ERROR(memory_allocation); } } /* Initialization */ COVER_best_init(&best); memset(&coverParams, 0 , sizeof(coverParams)); FASTCOVER_convertToCoverParams(*parameters, &coverParams); accelParams = FASTCOVER_defaultAccelParameters[accel]; /* Turn down global display level to clean up display at level 2 and below */ g_displayLevel = displayLevel == 0 ? 0 : displayLevel - 1; /* Loop through d first because each new value needs a new context */ LOCALDISPLAYLEVEL(displayLevel, 2, "Trying %u different sets of parameters\n", kIterations); for (d = kMinD; d <= kMaxD; d += 2) { /* Initialize the context for this value of d */ FASTCOVER_ctx_t ctx; LOCALDISPLAYLEVEL(displayLevel, 3, "d=%u\n", d); { size_t const initVal = FASTCOVER_ctx_init(&ctx, samplesBuffer, samplesSizes, nbSamples, d, splitPoint, f, accelParams); if (ZSTD_isError(initVal)) { LOCALDISPLAYLEVEL(displayLevel, 1, "Failed to initialize context\n"); COVER_best_destroy(&best); POOL_free(pool); return initVal; } } if (!warned) { COVER_warnOnSmallCorpus(dictBufferCapacity, ctx.nbDmers, displayLevel); warned = 1; } /* Loop through k reusing the same context */ for (k = kMinK; k <= kMaxK; k += kStepSize) { /* Prepare the arguments */ FASTCOVER_tryParameters_data_t *data = (FASTCOVER_tryParameters_data_t *)malloc( sizeof(FASTCOVER_tryParameters_data_t)); LOCALDISPLAYLEVEL(displayLevel, 3, "k=%u\n", k); if (!data) { LOCALDISPLAYLEVEL(displayLevel, 1, "Failed to allocate parameters\n"); COVER_best_destroy(&best); FASTCOVER_ctx_destroy(&ctx); POOL_free(pool); return ERROR(memory_allocation); } data->ctx = &ctx; data->best = &best; data->dictBufferCapacity = dictBufferCapacity; data->parameters = coverParams; data->parameters.k = k; data->parameters.d = d; data->parameters.splitPoint = splitPoint; data->parameters.steps = kSteps; data->parameters.shrinkDict = shrinkDict; data->parameters.zParams.notificationLevel = g_displayLevel; /* Check the parameters */ if (!FASTCOVER_checkParameters(data->parameters, dictBufferCapacity, data->ctx->f, accel)) { DISPLAYLEVEL(1, "FASTCOVER parameters incorrect\n"); free(data); continue; } /* Call the function and pass ownership of data to it */ COVER_best_start(&best); if (pool) { POOL_add(pool, &FASTCOVER_tryParameters, data); } else { FASTCOVER_tryParameters(data); } /* Print status */ LOCALDISPLAYUPDATE(displayLevel, 2, "\r%u%% ", (unsigned)((iteration * 100) / kIterations)); ++iteration; } COVER_best_wait(&best); FASTCOVER_ctx_destroy(&ctx); } 
LOCALDISPLAYLEVEL(displayLevel, 2, "\r%79s\r", ""); /* Fill the output buffer and parameters with output of the best parameters */ { const size_t dictSize = best.dictSize; if (ZSTD_isError(best.compressedSize)) { const size_t compressedSize = best.compressedSize; COVER_best_destroy(&best); POOL_free(pool); return compressedSize; } FASTCOVER_convertToFastCoverParams(best.parameters, parameters, f, accel); memcpy(dictBuffer, best.dict, dictSize); COVER_best_destroy(&best); POOL_free(pool); return dictSize; } } borgbackup-1.1.15/src/borg/algorithms/zstd/lib/dictBuilder/cover.c0000644000175000017500000012232013771325506025044 0ustar useruser00000000000000/* * Copyright (c) 2016-2020, Yann Collet, Facebook, Inc. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the * LICENSE file in the root directory of this source tree) and the GPLv2 (found * in the COPYING file in the root directory of this source tree). * You may select, at your option, one of the above-listed licenses. */ /* ***************************************************************************** * Constructs a dictionary using a heuristic based on the following paper: * * Liao, Petri, Moffat, Wirth * Effective Construction of Relative Lempel-Ziv Dictionaries * Published in WWW 2016. * * Adapted from code originally written by @ot (Giuseppe Ottaviano). ******************************************************************************/ /*-************************************* * Dependencies ***************************************/ #include /* fprintf */ #include /* malloc, free, qsort */ #include /* memset */ #include /* clock */ #include "../common/mem.h" /* read */ #include "../common/pool.h" #include "../common/threading.h" #include "cover.h" #include "../common/zstd_internal.h" /* includes zstd.h */ #ifndef ZDICT_STATIC_LINKING_ONLY #define ZDICT_STATIC_LINKING_ONLY #endif #include "zdict.h" /*-************************************* * Constants ***************************************/ #define COVER_MAX_SAMPLES_SIZE (sizeof(size_t) == 8 ? ((unsigned)-1) : ((unsigned)1 GB)) #define DEFAULT_SPLITPOINT 1.0 /*-************************************* * Console display ***************************************/ static int g_displayLevel = 2; #define DISPLAY(...) \ { \ fprintf(stderr, __VA_ARGS__); \ fflush(stderr); \ } #define LOCALDISPLAYLEVEL(displayLevel, l, ...) \ if (displayLevel >= l) { \ DISPLAY(__VA_ARGS__); \ } /* 0 : no display; 1: errors; 2: default; 3: details; 4: debug */ #define DISPLAYLEVEL(l, ...) LOCALDISPLAYLEVEL(g_displayLevel, l, __VA_ARGS__) #define LOCALDISPLAYUPDATE(displayLevel, l, ...) \ if (displayLevel >= l) { \ if ((clock() - g_time > refreshRate) || (displayLevel >= 4)) { \ g_time = clock(); \ DISPLAY(__VA_ARGS__); \ } \ } #define DISPLAYUPDATE(l, ...) LOCALDISPLAYUPDATE(g_displayLevel, l, __VA_ARGS__) static const clock_t refreshRate = CLOCKS_PER_SEC * 15 / 100; static clock_t g_time = 0; /*-************************************* * Hash table *************************************** * A small specialized hash map for storing activeDmers. * The map does not resize, so if it becomes full it will loop forever. * Thus, the map must be large enough to store every value. * The map implements linear probing and keeps its load less than 0.5. */ #define MAP_EMPTY_VALUE ((U32)-1) typedef struct COVER_map_pair_t_s { U32 key; U32 value; } COVER_map_pair_t; typedef struct COVER_map_s { COVER_map_pair_t *data; U32 sizeLog; U32 size; U32 sizeMask; } COVER_map_t; /** * Clear the map. 
*/ static void COVER_map_clear(COVER_map_t *map) { memset(map->data, MAP_EMPTY_VALUE, map->size * sizeof(COVER_map_pair_t)); } /** * Initializes a map of the given size. * Returns 1 on success and 0 on failure. * The map must be destroyed with COVER_map_destroy(). * The map is only guaranteed to be large enough to hold size elements. */ static int COVER_map_init(COVER_map_t *map, U32 size) { map->sizeLog = ZSTD_highbit32(size) + 2; map->size = (U32)1 << map->sizeLog; map->sizeMask = map->size - 1; map->data = (COVER_map_pair_t *)malloc(map->size * sizeof(COVER_map_pair_t)); if (!map->data) { map->sizeLog = 0; map->size = 0; return 0; } COVER_map_clear(map); return 1; } /** * Internal hash function */ static const U32 prime4bytes = 2654435761U; static U32 COVER_map_hash(COVER_map_t *map, U32 key) { return (key * prime4bytes) >> (32 - map->sizeLog); } /** * Helper function that returns the index that a key should be placed into. */ static U32 COVER_map_index(COVER_map_t *map, U32 key) { const U32 hash = COVER_map_hash(map, key); U32 i; for (i = hash;; i = (i + 1) & map->sizeMask) { COVER_map_pair_t *pos = &map->data[i]; if (pos->value == MAP_EMPTY_VALUE) { return i; } if (pos->key == key) { return i; } } } /** * Returns the pointer to the value for key. * If key is not in the map, it is inserted and the value is set to 0. * The map must not be full. */ static U32 *COVER_map_at(COVER_map_t *map, U32 key) { COVER_map_pair_t *pos = &map->data[COVER_map_index(map, key)]; if (pos->value == MAP_EMPTY_VALUE) { pos->key = key; pos->value = 0; } return &pos->value; } /** * Deletes key from the map if present. */ static void COVER_map_remove(COVER_map_t *map, U32 key) { U32 i = COVER_map_index(map, key); COVER_map_pair_t *del = &map->data[i]; U32 shift = 1; if (del->value == MAP_EMPTY_VALUE) { return; } for (i = (i + 1) & map->sizeMask;; i = (i + 1) & map->sizeMask) { COVER_map_pair_t *const pos = &map->data[i]; /* If the position is empty we are done */ if (pos->value == MAP_EMPTY_VALUE) { del->value = MAP_EMPTY_VALUE; return; } /* If pos can be moved to del do so */ if (((i - COVER_map_hash(map, pos->key)) & map->sizeMask) >= shift) { del->key = pos->key; del->value = pos->value; del = pos; shift = 1; } else { ++shift; } } } /** * Destroys a map that is inited with COVER_map_init(). */ static void COVER_map_destroy(COVER_map_t *map) { if (map->data) { free(map->data); } map->data = NULL; map->size = 0; } /*-************************************* * Context ***************************************/ typedef struct { const BYTE *samples; size_t *offsets; const size_t *samplesSizes; size_t nbSamples; size_t nbTrainSamples; size_t nbTestSamples; U32 *suffix; size_t suffixSize; U32 *freqs; U32 *dmerAt; unsigned d; } COVER_ctx_t; /* We need a global context for qsort... */ static COVER_ctx_t *g_ctx = NULL; /*-************************************* * Helper functions ***************************************/ /** * Returns the sum of the sample sizes. */ size_t COVER_sum(const size_t *samplesSizes, unsigned nbSamples) { size_t sum = 0; unsigned i; for (i = 0; i < nbSamples; ++i) { sum += samplesSizes[i]; } return sum; } /** * Returns -1 if the dmer at lp is less than the dmer at rp. * Return 0 if the dmers at lp and rp are equal. * Returns 1 if the dmer at lp is greater than the dmer at rp. 
*/ static int COVER_cmp(COVER_ctx_t *ctx, const void *lp, const void *rp) { U32 const lhs = *(U32 const *)lp; U32 const rhs = *(U32 const *)rp; return memcmp(ctx->samples + lhs, ctx->samples + rhs, ctx->d); } /** * Faster version for d <= 8. */ static int COVER_cmp8(COVER_ctx_t *ctx, const void *lp, const void *rp) { U64 const mask = (ctx->d == 8) ? (U64)-1 : (((U64)1 << (8 * ctx->d)) - 1); U64 const lhs = MEM_readLE64(ctx->samples + *(U32 const *)lp) & mask; U64 const rhs = MEM_readLE64(ctx->samples + *(U32 const *)rp) & mask; if (lhs < rhs) { return -1; } return (lhs > rhs); } /** * Same as COVER_cmp() except ties are broken by pointer value * NOTE: g_ctx must be set to call this function. A global is required because * qsort doesn't take an opaque pointer. */ static int COVER_strict_cmp(const void *lp, const void *rp) { int result = COVER_cmp(g_ctx, lp, rp); if (result == 0) { result = lp < rp ? -1 : 1; } return result; } /** * Faster version for d <= 8. */ static int COVER_strict_cmp8(const void *lp, const void *rp) { int result = COVER_cmp8(g_ctx, lp, rp); if (result == 0) { result = lp < rp ? -1 : 1; } return result; } /** * Returns the first pointer in [first, last) whose element does not compare * less than value. If no such element exists it returns last. */ static const size_t *COVER_lower_bound(const size_t *first, const size_t *last, size_t value) { size_t count = last - first; while (count != 0) { size_t step = count / 2; const size_t *ptr = first; ptr += step; if (*ptr < value) { first = ++ptr; count -= step + 1; } else { count = step; } } return first; } /** * Generic groupBy function. * Groups an array sorted by cmp into groups with equivalent values. * Calls grp for each group. */ static void COVER_groupBy(const void *data, size_t count, size_t size, COVER_ctx_t *ctx, int (*cmp)(COVER_ctx_t *, const void *, const void *), void (*grp)(COVER_ctx_t *, const void *, const void *)) { const BYTE *ptr = (const BYTE *)data; size_t num = 0; while (num < count) { const BYTE *grpEnd = ptr + size; ++num; while (num < count && cmp(ctx, ptr, grpEnd) == 0) { grpEnd += size; ++num; } grp(ctx, ptr, grpEnd); ptr = grpEnd; } } /*-************************************* * Cover functions ***************************************/ /** * Called on each group of positions with the same dmer. * Counts the frequency of each dmer and saves it in the suffix array. * Fills `ctx->dmerAt`. */ static void COVER_group(COVER_ctx_t *ctx, const void *group, const void *groupEnd) { /* The group consists of all the positions with the same first d bytes. */ const U32 *grpPtr = (const U32 *)group; const U32 *grpEnd = (const U32 *)groupEnd; /* The dmerId is how we will reference this dmer. * This allows us to map the whole dmer space to a much smaller space, the * size of the suffix array. */ const U32 dmerId = (U32)(grpPtr - ctx->suffix); /* Count the number of samples this dmer shows up in */ U32 freq = 0; /* Details */ const size_t *curOffsetPtr = ctx->offsets; const size_t *offsetsEnd = ctx->offsets + ctx->nbSamples; /* Once *grpPtr >= curSampleEnd this occurrence of the dmer is in a * different sample than the last. */ size_t curSampleEnd = ctx->offsets[0]; for (; grpPtr != grpEnd; ++grpPtr) { /* Save the dmerId for this position so we can get back to it. */ ctx->dmerAt[*grpPtr] = dmerId; /* Dictionaries only help for the first reference to the dmer. * After that zstd can reference the match from the previous reference. * So only count each dmer once for each sample it is in. 
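* For example, a dmer that occurs three times in one sample and once in a second sample gets a frequency of 2, not 4.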
*/ if (*grpPtr < curSampleEnd) { continue; } freq += 1; /* Binary search to find the end of the sample *grpPtr is in. * In the common case that grpPtr + 1 == grpEnd we can skip the binary * search because the loop is over. */ if (grpPtr + 1 != grpEnd) { const size_t *sampleEndPtr = COVER_lower_bound(curOffsetPtr, offsetsEnd, *grpPtr); curSampleEnd = *sampleEndPtr; curOffsetPtr = sampleEndPtr + 1; } } /* At this point we are never going to look at this segment of the suffix * array again. We take advantage of this fact to save memory. * We store the frequency of the dmer in the first position of the group, * which is dmerId. */ ctx->suffix[dmerId] = freq; } /** * Selects the best segment in an epoch. * Segments of are scored according to the function: * * Let F(d) be the frequency of dmer d. * Let S_i be the dmer at position i of segment S which has length k. * * Score(S) = F(S_1) + F(S_2) + ... + F(S_{k-d+1}) * * Once the dmer d is in the dictionary we set F(d) = 0. */ static COVER_segment_t COVER_selectSegment(const COVER_ctx_t *ctx, U32 *freqs, COVER_map_t *activeDmers, U32 begin, U32 end, ZDICT_cover_params_t parameters) { /* Constants */ const U32 k = parameters.k; const U32 d = parameters.d; const U32 dmersInK = k - d + 1; /* Try each segment (activeSegment) and save the best (bestSegment) */ COVER_segment_t bestSegment = {0, 0, 0}; COVER_segment_t activeSegment; /* Reset the activeDmers in the segment */ COVER_map_clear(activeDmers); /* The activeSegment starts at the beginning of the epoch. */ activeSegment.begin = begin; activeSegment.end = begin; activeSegment.score = 0; /* Slide the activeSegment through the whole epoch. * Save the best segment in bestSegment. */ while (activeSegment.end < end) { /* The dmerId for the dmer at the next position */ U32 newDmer = ctx->dmerAt[activeSegment.end]; /* The entry in activeDmers for this dmerId */ U32 *newDmerOcc = COVER_map_at(activeDmers, newDmer); /* If the dmer isn't already present in the segment add its score. */ if (*newDmerOcc == 0) { /* The paper suggest using the L-0.5 norm, but experiments show that it * doesn't help. */ activeSegment.score += freqs[newDmer]; } /* Add the dmer to the segment */ activeSegment.end += 1; *newDmerOcc += 1; /* If the window is now too large, drop the first position */ if (activeSegment.end - activeSegment.begin == dmersInK + 1) { U32 delDmer = ctx->dmerAt[activeSegment.begin]; U32 *delDmerOcc = COVER_map_at(activeDmers, delDmer); activeSegment.begin += 1; *delDmerOcc -= 1; /* If this is the last occurrence of the dmer, subtract its score */ if (*delDmerOcc == 0) { COVER_map_remove(activeDmers, delDmer); activeSegment.score -= freqs[delDmer]; } } /* If this segment is the best so far save it */ if (activeSegment.score > bestSegment.score) { bestSegment = activeSegment; } } { /* Trim off the zero frequency head and tail from the segment. */ U32 newBegin = bestSegment.end; U32 newEnd = bestSegment.begin; U32 pos; for (pos = bestSegment.begin; pos != bestSegment.end; ++pos) { U32 freq = freqs[ctx->dmerAt[pos]]; if (freq != 0) { newBegin = MIN(newBegin, pos); newEnd = pos + 1; } } bestSegment.begin = newBegin; bestSegment.end = newEnd; } { /* Zero out the frequency of each dmer covered by the chosen segment. */ U32 pos; for (pos = bestSegment.begin; pos != bestSegment.end; ++pos) { freqs[ctx->dmerAt[pos]] = 0; } } return bestSegment; } /** * Check the validity of the parameters. * Returns non-zero if the parameters are valid and 0 otherwise. 
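* Valid means: d and k are both non-zero, d <= k, k <= maxDictSize, and 0 < splitPoint <= 1.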
*/ static int COVER_checkParameters(ZDICT_cover_params_t parameters, size_t maxDictSize) { /* k and d are required parameters */ if (parameters.d == 0 || parameters.k == 0) { return 0; } /* k <= maxDictSize */ if (parameters.k > maxDictSize) { return 0; } /* d <= k */ if (parameters.d > parameters.k) { return 0; } /* 0 < splitPoint <= 1 */ if (parameters.splitPoint <= 0 || parameters.splitPoint > 1){ return 0; } return 1; } /** * Clean up a context initialized with `COVER_ctx_init()`. */ static void COVER_ctx_destroy(COVER_ctx_t *ctx) { if (!ctx) { return; } if (ctx->suffix) { free(ctx->suffix); ctx->suffix = NULL; } if (ctx->freqs) { free(ctx->freqs); ctx->freqs = NULL; } if (ctx->dmerAt) { free(ctx->dmerAt); ctx->dmerAt = NULL; } if (ctx->offsets) { free(ctx->offsets); ctx->offsets = NULL; } } /** * Prepare a context for dictionary building. * The context is only dependent on the parameter `d` and can used multiple * times. * Returns 0 on success or error code on error. * The context must be destroyed with `COVER_ctx_destroy()`. */ static size_t COVER_ctx_init(COVER_ctx_t *ctx, const void *samplesBuffer, const size_t *samplesSizes, unsigned nbSamples, unsigned d, double splitPoint) { const BYTE *const samples = (const BYTE *)samplesBuffer; const size_t totalSamplesSize = COVER_sum(samplesSizes, nbSamples); /* Split samples into testing and training sets */ const unsigned nbTrainSamples = splitPoint < 1.0 ? (unsigned)((double)nbSamples * splitPoint) : nbSamples; const unsigned nbTestSamples = splitPoint < 1.0 ? nbSamples - nbTrainSamples : nbSamples; const size_t trainingSamplesSize = splitPoint < 1.0 ? COVER_sum(samplesSizes, nbTrainSamples) : totalSamplesSize; const size_t testSamplesSize = splitPoint < 1.0 ? COVER_sum(samplesSizes + nbTrainSamples, nbTestSamples) : totalSamplesSize; /* Checks */ if (totalSamplesSize < MAX(d, sizeof(U64)) || totalSamplesSize >= (size_t)COVER_MAX_SAMPLES_SIZE) { DISPLAYLEVEL(1, "Total samples size is too large (%u MB), maximum size is %u MB\n", (unsigned)(totalSamplesSize>>20), (COVER_MAX_SAMPLES_SIZE >> 20)); return ERROR(srcSize_wrong); } /* Check if there are at least 5 training samples */ if (nbTrainSamples < 5) { DISPLAYLEVEL(1, "Total number of training samples is %u and is invalid.", nbTrainSamples); return ERROR(srcSize_wrong); } /* Check if there's testing sample */ if (nbTestSamples < 1) { DISPLAYLEVEL(1, "Total number of testing samples is %u and is invalid.", nbTestSamples); return ERROR(srcSize_wrong); } /* Zero the context */ memset(ctx, 0, sizeof(*ctx)); DISPLAYLEVEL(2, "Training on %u samples of total size %u\n", nbTrainSamples, (unsigned)trainingSamplesSize); DISPLAYLEVEL(2, "Testing on %u samples of total size %u\n", nbTestSamples, (unsigned)testSamplesSize); ctx->samples = samples; ctx->samplesSizes = samplesSizes; ctx->nbSamples = nbSamples; ctx->nbTrainSamples = nbTrainSamples; ctx->nbTestSamples = nbTestSamples; /* Partial suffix array */ ctx->suffixSize = trainingSamplesSize - MAX(d, sizeof(U64)) + 1; ctx->suffix = (U32 *)malloc(ctx->suffixSize * sizeof(U32)); /* Maps index to the dmerID */ ctx->dmerAt = (U32 *)malloc(ctx->suffixSize * sizeof(U32)); /* The offsets of each file */ ctx->offsets = (size_t *)malloc((nbSamples + 1) * sizeof(size_t)); if (!ctx->suffix || !ctx->dmerAt || !ctx->offsets) { DISPLAYLEVEL(1, "Failed to allocate scratch buffers\n"); COVER_ctx_destroy(ctx); return ERROR(memory_allocation); } ctx->freqs = NULL; ctx->d = d; /* Fill offsets from the samplesSizes */ { U32 i; ctx->offsets[0] = 0; for (i = 1; i <= 
nbSamples; ++i) { ctx->offsets[i] = ctx->offsets[i - 1] + samplesSizes[i - 1]; } } DISPLAYLEVEL(2, "Constructing partial suffix array\n"); { /* suffix is a partial suffix array. * It only sorts suffixes by their first parameters.d bytes. * The sort is stable, so each dmer group is sorted by position in input. */ U32 i; for (i = 0; i < ctx->suffixSize; ++i) { ctx->suffix[i] = i; } /* qsort doesn't take an opaque pointer, so pass as a global. * On OpenBSD qsort() is not guaranteed to be stable, their mergesort() is. */ g_ctx = ctx; #if defined(__OpenBSD__) mergesort(ctx->suffix, ctx->suffixSize, sizeof(U32), (ctx->d <= 8 ? &COVER_strict_cmp8 : &COVER_strict_cmp)); #else qsort(ctx->suffix, ctx->suffixSize, sizeof(U32), (ctx->d <= 8 ? &COVER_strict_cmp8 : &COVER_strict_cmp)); #endif } DISPLAYLEVEL(2, "Computing frequencies\n"); /* For each dmer group (group of positions with the same first d bytes): * 1. For each position we set dmerAt[position] = dmerID. The dmerID is * (groupBeginPtr - suffix). This allows us to go from position to * dmerID so we can look up values in freq. * 2. We calculate how many samples the dmer occurs in and save it in * freqs[dmerId]. */ COVER_groupBy(ctx->suffix, ctx->suffixSize, sizeof(U32), ctx, (ctx->d <= 8 ? &COVER_cmp8 : &COVER_cmp), &COVER_group); ctx->freqs = ctx->suffix; ctx->suffix = NULL; return 0; } void COVER_warnOnSmallCorpus(size_t maxDictSize, size_t nbDmers, int displayLevel) { const double ratio = (double)nbDmers / maxDictSize; if (ratio >= 10) { return; } LOCALDISPLAYLEVEL(displayLevel, 1, "WARNING: The maximum dictionary size %u is too large " "compared to the source size %u! " "size(source)/size(dictionary) = %f, but it should be >= " "10! This may lead to a subpar dictionary! We recommend " "training on sources at least 10x, and preferably 100x " "the size of the dictionary! \n", (U32)maxDictSize, (U32)nbDmers, ratio); } COVER_epoch_info_t COVER_computeEpochs(U32 maxDictSize, U32 nbDmers, U32 k, U32 passes) { const U32 minEpochSize = k * 10; COVER_epoch_info_t epochs; epochs.num = MAX(1, maxDictSize / k / passes); epochs.size = nbDmers / epochs.num; if (epochs.size >= minEpochSize) { assert(epochs.size * epochs.num <= nbDmers); return epochs; } epochs.size = MIN(minEpochSize, nbDmers); epochs.num = nbDmers / epochs.size; assert(epochs.size * epochs.num <= nbDmers); return epochs; } /** * Given the prepared context build the dictionary. */ static size_t COVER_buildDictionary(const COVER_ctx_t *ctx, U32 *freqs, COVER_map_t *activeDmers, void *dictBuffer, size_t dictBufferCapacity, ZDICT_cover_params_t parameters) { BYTE *const dict = (BYTE *)dictBuffer; size_t tail = dictBufferCapacity; /* Divide the data into epochs. We will select one segment from each epoch. */ const COVER_epoch_info_t epochs = COVER_computeEpochs( (U32)dictBufferCapacity, (U32)ctx->suffixSize, parameters.k, 4); const size_t maxZeroScoreRun = MAX(10, MIN(100, epochs.num >> 3)); size_t zeroScoreRun = 0; size_t epoch; DISPLAYLEVEL(2, "Breaking content into %u epochs of size %u\n", (U32)epochs.num, (U32)epochs.size); /* Loop through the epochs until there are no more segments or the dictionary * is full. */ for (epoch = 0; tail > 0; epoch = (epoch + 1) % epochs.num) { const U32 epochBegin = (U32)(epoch * epochs.size); const U32 epochEnd = epochBegin + epochs.size; size_t segmentSize; /* Select a segment */ COVER_segment_t segment = COVER_selectSegment( ctx, freqs, activeDmers, epochBegin, epochEnd, parameters); /* If the segment covers no dmers, then we are out of content. 
* There may be new content in other epochs, for continue for some time. */ if (segment.score == 0) { if (++zeroScoreRun >= maxZeroScoreRun) { break; } continue; } zeroScoreRun = 0; /* Trim the segment if necessary and if it is too small then we are done */ segmentSize = MIN(segment.end - segment.begin + parameters.d - 1, tail); if (segmentSize < parameters.d) { break; } /* We fill the dictionary from the back to allow the best segments to be * referenced with the smallest offsets. */ tail -= segmentSize; memcpy(dict + tail, ctx->samples + segment.begin, segmentSize); DISPLAYUPDATE( 2, "\r%u%% ", (unsigned)(((dictBufferCapacity - tail) * 100) / dictBufferCapacity)); } DISPLAYLEVEL(2, "\r%79s\r", ""); return tail; } ZDICTLIB_API size_t ZDICT_trainFromBuffer_cover( void *dictBuffer, size_t dictBufferCapacity, const void *samplesBuffer, const size_t *samplesSizes, unsigned nbSamples, ZDICT_cover_params_t parameters) { BYTE* const dict = (BYTE*)dictBuffer; COVER_ctx_t ctx; COVER_map_t activeDmers; parameters.splitPoint = 1.0; /* Initialize global data */ g_displayLevel = parameters.zParams.notificationLevel; /* Checks */ if (!COVER_checkParameters(parameters, dictBufferCapacity)) { DISPLAYLEVEL(1, "Cover parameters incorrect\n"); return ERROR(parameter_outOfBound); } if (nbSamples == 0) { DISPLAYLEVEL(1, "Cover must have at least one input file\n"); return ERROR(srcSize_wrong); } if (dictBufferCapacity < ZDICT_DICTSIZE_MIN) { DISPLAYLEVEL(1, "dictBufferCapacity must be at least %u\n", ZDICT_DICTSIZE_MIN); return ERROR(dstSize_tooSmall); } /* Initialize context and activeDmers */ { size_t const initVal = COVER_ctx_init(&ctx, samplesBuffer, samplesSizes, nbSamples, parameters.d, parameters.splitPoint); if (ZSTD_isError(initVal)) { return initVal; } } COVER_warnOnSmallCorpus(dictBufferCapacity, ctx.suffixSize, g_displayLevel); if (!COVER_map_init(&activeDmers, parameters.k - parameters.d + 1)) { DISPLAYLEVEL(1, "Failed to allocate dmer map: out of memory\n"); COVER_ctx_destroy(&ctx); return ERROR(memory_allocation); } DISPLAYLEVEL(2, "Building dictionary\n"); { const size_t tail = COVER_buildDictionary(&ctx, ctx.freqs, &activeDmers, dictBuffer, dictBufferCapacity, parameters); const size_t dictionarySize = ZDICT_finalizeDictionary( dict, dictBufferCapacity, dict + tail, dictBufferCapacity - tail, samplesBuffer, samplesSizes, nbSamples, parameters.zParams); if (!ZSTD_isError(dictionarySize)) { DISPLAYLEVEL(2, "Constructed dictionary of size %u\n", (unsigned)dictionarySize); } COVER_ctx_destroy(&ctx); COVER_map_destroy(&activeDmers); return dictionarySize; } } size_t COVER_checkTotalCompressedSize(const ZDICT_cover_params_t parameters, const size_t *samplesSizes, const BYTE *samples, size_t *offsets, size_t nbTrainSamples, size_t nbSamples, BYTE *const dict, size_t dictBufferCapacity) { size_t totalCompressedSize = ERROR(GENERIC); /* Pointers */ ZSTD_CCtx *cctx; ZSTD_CDict *cdict; void *dst; /* Local variables */ size_t dstCapacity; size_t i; /* Allocate dst with enough space to compress the maximum sized sample */ { size_t maxSampleSize = 0; i = parameters.splitPoint < 1.0 ? 
nbTrainSamples : 0; for (; i < nbSamples; ++i) { maxSampleSize = MAX(samplesSizes[i], maxSampleSize); } dstCapacity = ZSTD_compressBound(maxSampleSize); dst = malloc(dstCapacity); } /* Create the cctx and cdict */ cctx = ZSTD_createCCtx(); cdict = ZSTD_createCDict(dict, dictBufferCapacity, parameters.zParams.compressionLevel); if (!dst || !cctx || !cdict) { goto _compressCleanup; } /* Compress each sample and sum their sizes (or error) */ totalCompressedSize = dictBufferCapacity; i = parameters.splitPoint < 1.0 ? nbTrainSamples : 0; for (; i < nbSamples; ++i) { const size_t size = ZSTD_compress_usingCDict( cctx, dst, dstCapacity, samples + offsets[i], samplesSizes[i], cdict); if (ZSTD_isError(size)) { totalCompressedSize = size; goto _compressCleanup; } totalCompressedSize += size; } _compressCleanup: ZSTD_freeCCtx(cctx); ZSTD_freeCDict(cdict); if (dst) { free(dst); } return totalCompressedSize; } /** * Initialize the `COVER_best_t`. */ void COVER_best_init(COVER_best_t *best) { if (best==NULL) return; /* compatible with init on NULL */ (void)ZSTD_pthread_mutex_init(&best->mutex, NULL); (void)ZSTD_pthread_cond_init(&best->cond, NULL); best->liveJobs = 0; best->dict = NULL; best->dictSize = 0; best->compressedSize = (size_t)-1; memset(&best->parameters, 0, sizeof(best->parameters)); } /** * Wait until liveJobs == 0. */ void COVER_best_wait(COVER_best_t *best) { if (!best) { return; } ZSTD_pthread_mutex_lock(&best->mutex); while (best->liveJobs != 0) { ZSTD_pthread_cond_wait(&best->cond, &best->mutex); } ZSTD_pthread_mutex_unlock(&best->mutex); } /** * Call COVER_best_wait() and then destroy the COVER_best_t. */ void COVER_best_destroy(COVER_best_t *best) { if (!best) { return; } COVER_best_wait(best); if (best->dict) { free(best->dict); } ZSTD_pthread_mutex_destroy(&best->mutex); ZSTD_pthread_cond_destroy(&best->cond); } /** * Called when a thread is about to be launched. * Increments liveJobs. */ void COVER_best_start(COVER_best_t *best) { if (!best) { return; } ZSTD_pthread_mutex_lock(&best->mutex); ++best->liveJobs; ZSTD_pthread_mutex_unlock(&best->mutex); } /** * Called when a thread finishes executing, both on error or success. * Decrements liveJobs and signals any waiting threads if liveJobs == 0. * If this dictionary is the best so far save it and its parameters. 
*/ void COVER_best_finish(COVER_best_t *best, ZDICT_cover_params_t parameters, COVER_dictSelection_t selection) { void* dict = selection.dictContent; size_t compressedSize = selection.totalCompressedSize; size_t dictSize = selection.dictSize; if (!best) { return; } { size_t liveJobs; ZSTD_pthread_mutex_lock(&best->mutex); --best->liveJobs; liveJobs = best->liveJobs; /* If the new dictionary is better */ if (compressedSize < best->compressedSize) { /* Allocate space if necessary */ if (!best->dict || best->dictSize < dictSize) { if (best->dict) { free(best->dict); } best->dict = malloc(dictSize); if (!best->dict) { best->compressedSize = ERROR(GENERIC); best->dictSize = 0; ZSTD_pthread_cond_signal(&best->cond); ZSTD_pthread_mutex_unlock(&best->mutex); return; } } /* Save the dictionary, parameters, and size */ if (dict) { memcpy(best->dict, dict, dictSize); best->dictSize = dictSize; best->parameters = parameters; best->compressedSize = compressedSize; } } if (liveJobs == 0) { ZSTD_pthread_cond_broadcast(&best->cond); } ZSTD_pthread_mutex_unlock(&best->mutex); } } COVER_dictSelection_t COVER_dictSelectionError(size_t error) { COVER_dictSelection_t selection = { NULL, 0, error }; return selection; } unsigned COVER_dictSelectionIsError(COVER_dictSelection_t selection) { return (ZSTD_isError(selection.totalCompressedSize) || !selection.dictContent); } void COVER_dictSelectionFree(COVER_dictSelection_t selection){ free(selection.dictContent); } COVER_dictSelection_t COVER_selectDict(BYTE* customDictContent, size_t dictContentSize, const BYTE* samplesBuffer, const size_t* samplesSizes, unsigned nbFinalizeSamples, size_t nbCheckSamples, size_t nbSamples, ZDICT_cover_params_t params, size_t* offsets, size_t totalCompressedSize) { size_t largestDict = 0; size_t largestCompressed = 0; BYTE* customDictContentEnd = customDictContent + dictContentSize; BYTE * largestDictbuffer = (BYTE *)malloc(dictContentSize); BYTE * candidateDictBuffer = (BYTE *)malloc(dictContentSize); double regressionTolerance = ((double)params.shrinkDictMaxRegression / 100.0) + 1.00; if (!largestDictbuffer || !candidateDictBuffer) { free(largestDictbuffer); free(candidateDictBuffer); return COVER_dictSelectionError(dictContentSize); } /* Initial dictionary size and compressed size */ memcpy(largestDictbuffer, customDictContent, dictContentSize); dictContentSize = ZDICT_finalizeDictionary( largestDictbuffer, dictContentSize, customDictContent, dictContentSize, samplesBuffer, samplesSizes, nbFinalizeSamples, params.zParams); if (ZDICT_isError(dictContentSize)) { free(largestDictbuffer); free(candidateDictBuffer); return COVER_dictSelectionError(dictContentSize); } totalCompressedSize = COVER_checkTotalCompressedSize(params, samplesSizes, samplesBuffer, offsets, nbCheckSamples, nbSamples, largestDictbuffer, dictContentSize); if (ZSTD_isError(totalCompressedSize)) { free(largestDictbuffer); free(candidateDictBuffer); return COVER_dictSelectionError(totalCompressedSize); } if (params.shrinkDict == 0) { COVER_dictSelection_t selection = { largestDictbuffer, dictContentSize, totalCompressedSize }; free(candidateDictBuffer); return selection; } largestDict = dictContentSize; largestCompressed = totalCompressedSize; dictContentSize = ZDICT_DICTSIZE_MIN; /* Largest dict is initially at least ZDICT_DICTSIZE_MIN */ while (dictContentSize < largestDict) { memcpy(candidateDictBuffer, largestDictbuffer, largestDict); dictContentSize = ZDICT_finalizeDictionary( candidateDictBuffer, dictContentSize, customDictContentEnd - dictContentSize, 
dictContentSize, samplesBuffer, samplesSizes, nbFinalizeSamples, params.zParams); if (ZDICT_isError(dictContentSize)) { free(largestDictbuffer); free(candidateDictBuffer); return COVER_dictSelectionError(dictContentSize); } totalCompressedSize = COVER_checkTotalCompressedSize(params, samplesSizes, samplesBuffer, offsets, nbCheckSamples, nbSamples, candidateDictBuffer, dictContentSize); if (ZSTD_isError(totalCompressedSize)) { free(largestDictbuffer); free(candidateDictBuffer); return COVER_dictSelectionError(totalCompressedSize); } if (totalCompressedSize <= largestCompressed * regressionTolerance) { COVER_dictSelection_t selection = { candidateDictBuffer, dictContentSize, totalCompressedSize }; free(largestDictbuffer); return selection; } dictContentSize *= 2; } dictContentSize = largestDict; totalCompressedSize = largestCompressed; { COVER_dictSelection_t selection = { largestDictbuffer, dictContentSize, totalCompressedSize }; free(candidateDictBuffer); return selection; } } /** * Parameters for COVER_tryParameters(). */ typedef struct COVER_tryParameters_data_s { const COVER_ctx_t *ctx; COVER_best_t *best; size_t dictBufferCapacity; ZDICT_cover_params_t parameters; } COVER_tryParameters_data_t; /** * Tries a set of parameters and updates the COVER_best_t with the results. * This function is thread safe if zstd is compiled with multithreaded support. * It takes its parameters as an *OWNING* opaque pointer to support threading. */ static void COVER_tryParameters(void *opaque) { /* Save parameters as local variables */ COVER_tryParameters_data_t *const data = (COVER_tryParameters_data_t *)opaque; const COVER_ctx_t *const ctx = data->ctx; const ZDICT_cover_params_t parameters = data->parameters; size_t dictBufferCapacity = data->dictBufferCapacity; size_t totalCompressedSize = ERROR(GENERIC); /* Allocate space for hash table, dict, and freqs */ COVER_map_t activeDmers; BYTE *const dict = (BYTE * const)malloc(dictBufferCapacity); COVER_dictSelection_t selection = COVER_dictSelectionError(ERROR(GENERIC)); U32 *freqs = (U32 *)malloc(ctx->suffixSize * sizeof(U32)); if (!COVER_map_init(&activeDmers, parameters.k - parameters.d + 1)) { DISPLAYLEVEL(1, "Failed to allocate dmer map: out of memory\n"); goto _cleanup; } if (!dict || !freqs) { DISPLAYLEVEL(1, "Failed to allocate buffers: out of memory\n"); goto _cleanup; } /* Copy the frequencies because we need to modify them */ memcpy(freqs, ctx->freqs, ctx->suffixSize * sizeof(U32)); /* Build the dictionary */ { const size_t tail = COVER_buildDictionary(ctx, freqs, &activeDmers, dict, dictBufferCapacity, parameters); selection = COVER_selectDict(dict + tail, dictBufferCapacity - tail, ctx->samples, ctx->samplesSizes, (unsigned)ctx->nbTrainSamples, ctx->nbTrainSamples, ctx->nbSamples, parameters, ctx->offsets, totalCompressedSize); if (COVER_dictSelectionIsError(selection)) { DISPLAYLEVEL(1, "Failed to select dictionary\n"); goto _cleanup; } } _cleanup: free(dict); COVER_best_finish(data->best, parameters, selection); free(data); COVER_map_destroy(&activeDmers); COVER_dictSelectionFree(selection); if (freqs) { free(freqs); } } ZDICTLIB_API size_t ZDICT_optimizeTrainFromBuffer_cover( void *dictBuffer, size_t dictBufferCapacity, const void *samplesBuffer, const size_t *samplesSizes, unsigned nbSamples, ZDICT_cover_params_t *parameters) { /* constants */ const unsigned nbThreads = parameters->nbThreads; const double splitPoint = parameters->splitPoint <= 0.0 ? DEFAULT_SPLITPOINT : parameters->splitPoint; const unsigned kMinD = parameters->d == 0 ? 
6 : parameters->d; const unsigned kMaxD = parameters->d == 0 ? 8 : parameters->d; const unsigned kMinK = parameters->k == 0 ? 50 : parameters->k; const unsigned kMaxK = parameters->k == 0 ? 2000 : parameters->k; const unsigned kSteps = parameters->steps == 0 ? 40 : parameters->steps; const unsigned kStepSize = MAX((kMaxK - kMinK) / kSteps, 1); const unsigned kIterations = (1 + (kMaxD - kMinD) / 2) * (1 + (kMaxK - kMinK) / kStepSize); const unsigned shrinkDict = 0; /* Local variables */ const int displayLevel = parameters->zParams.notificationLevel; unsigned iteration = 1; unsigned d; unsigned k; COVER_best_t best; POOL_ctx *pool = NULL; int warned = 0; /* Checks */ if (splitPoint <= 0 || splitPoint > 1) { LOCALDISPLAYLEVEL(displayLevel, 1, "Incorrect parameters\n"); return ERROR(parameter_outOfBound); } if (kMinK < kMaxD || kMaxK < kMinK) { LOCALDISPLAYLEVEL(displayLevel, 1, "Incorrect parameters\n"); return ERROR(parameter_outOfBound); } if (nbSamples == 0) { DISPLAYLEVEL(1, "Cover must have at least one input file\n"); return ERROR(srcSize_wrong); } if (dictBufferCapacity < ZDICT_DICTSIZE_MIN) { DISPLAYLEVEL(1, "dictBufferCapacity must be at least %u\n", ZDICT_DICTSIZE_MIN); return ERROR(dstSize_tooSmall); } if (nbThreads > 1) { pool = POOL_create(nbThreads, 1); if (!pool) { return ERROR(memory_allocation); } } /* Initialization */ COVER_best_init(&best); /* Turn down global display level to clean up display at level 2 and below */ g_displayLevel = displayLevel == 0 ? 0 : displayLevel - 1; /* Loop through d first because each new value needs a new context */ LOCALDISPLAYLEVEL(displayLevel, 2, "Trying %u different sets of parameters\n", kIterations); for (d = kMinD; d <= kMaxD; d += 2) { /* Initialize the context for this value of d */ COVER_ctx_t ctx; LOCALDISPLAYLEVEL(displayLevel, 3, "d=%u\n", d); { const size_t initVal = COVER_ctx_init(&ctx, samplesBuffer, samplesSizes, nbSamples, d, splitPoint); if (ZSTD_isError(initVal)) { LOCALDISPLAYLEVEL(displayLevel, 1, "Failed to initialize context\n"); COVER_best_destroy(&best); POOL_free(pool); return initVal; } } if (!warned) { COVER_warnOnSmallCorpus(dictBufferCapacity, ctx.suffixSize, displayLevel); warned = 1; } /* Loop through k reusing the same context */ for (k = kMinK; k <= kMaxK; k += kStepSize) { /* Prepare the arguments */ COVER_tryParameters_data_t *data = (COVER_tryParameters_data_t *)malloc( sizeof(COVER_tryParameters_data_t)); LOCALDISPLAYLEVEL(displayLevel, 3, "k=%u\n", k); if (!data) { LOCALDISPLAYLEVEL(displayLevel, 1, "Failed to allocate parameters\n"); COVER_best_destroy(&best); COVER_ctx_destroy(&ctx); POOL_free(pool); return ERROR(memory_allocation); } data->ctx = &ctx; data->best = &best; data->dictBufferCapacity = dictBufferCapacity; data->parameters = *parameters; data->parameters.k = k; data->parameters.d = d; data->parameters.splitPoint = splitPoint; data->parameters.steps = kSteps; data->parameters.shrinkDict = shrinkDict; data->parameters.zParams.notificationLevel = g_displayLevel; /* Check the parameters */ if (!COVER_checkParameters(data->parameters, dictBufferCapacity)) { DISPLAYLEVEL(1, "Cover parameters incorrect\n"); free(data); continue; } /* Call the function and pass ownership of data to it */ COVER_best_start(&best); if (pool) { POOL_add(pool, &COVER_tryParameters, data); } else { COVER_tryParameters(data); } /* Print status */ LOCALDISPLAYUPDATE(displayLevel, 2, "\r%u%% ", (unsigned)((iteration * 100) / kIterations)); ++iteration; } COVER_best_wait(&best); COVER_ctx_destroy(&ctx); } 
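/* At this point the sweep over d and k above is finished and best holds the
 * winning dictionary and parameters. As an illustrative sketch only (not part
 * of upstream zstd; dictBuf, dictCapacity, samplesBuf, sampleSizes, nbSamples
 * and the chosen compression level are hypothetical caller-side choices), a
 * typical caller drives this optimizer roughly as follows:
 *
 *   ZDICT_cover_params_t params;
 *   memset(&params, 0, sizeof(params));   // k = d = steps = 0 => search the
 *                                         // default grid: d in {6,8}, k in [50,2000]
 *   params.zParams.compressionLevel = 3;
 *   {   size_t const dictSize = ZDICT_optimizeTrainFromBuffer_cover(
 *           dictBuf, dictCapacity, samplesBuf, sampleSizes, nbSamples, &params);
 *       if (!ZDICT_isError(dictSize)) {
 *           // dictBuf[0..dictSize) now holds the dictionary, and params holds
 *           // the best k and d found by the sweep.
 *       }
 *   }
 */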
LOCALDISPLAYLEVEL(displayLevel, 2, "\r%79s\r", ""); /* Fill the output buffer and parameters with output of the best parameters */ { const size_t dictSize = best.dictSize; if (ZSTD_isError(best.compressedSize)) { const size_t compressedSize = best.compressedSize; COVER_best_destroy(&best); POOL_free(pool); return compressedSize; } *parameters = best.parameters; memcpy(dictBuffer, best.dict, dictSize); COVER_best_destroy(&best); POOL_free(pool); return dictSize; } } borgbackup-1.1.15/src/borg/algorithms/zstd/lib/dictBuilder/divsufsort.c0000644000175000017500000015257113771325506026151 0ustar useruser00000000000000/* * divsufsort.c for libdivsufsort-lite * Copyright (c) 2003-2008 Yuta Mori All Rights Reserved. * * Permission is hereby granted, free of charge, to any person * obtaining a copy of this software and associated documentation * files (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, * copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following * conditions: * * The above copyright notice and this permission notice shall be * included in all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. */ /*- Compiler specifics -*/ #ifdef __clang__ #pragma clang diagnostic ignored "-Wshorten-64-to-32" #endif #if defined(_MSC_VER) # pragma warning(disable : 4244) # pragma warning(disable : 4127) /* C4127 : Condition expression is constant */ #endif /*- Dependencies -*/ #include #include #include #include "divsufsort.h" /*- Constants -*/ #if defined(INLINE) # undef INLINE #endif #if !defined(INLINE) # define INLINE __inline #endif #if defined(ALPHABET_SIZE) && (ALPHABET_SIZE < 1) # undef ALPHABET_SIZE #endif #if !defined(ALPHABET_SIZE) # define ALPHABET_SIZE (256) #endif #define BUCKET_A_SIZE (ALPHABET_SIZE) #define BUCKET_B_SIZE (ALPHABET_SIZE * ALPHABET_SIZE) #if defined(SS_INSERTIONSORT_THRESHOLD) # if SS_INSERTIONSORT_THRESHOLD < 1 # undef SS_INSERTIONSORT_THRESHOLD # define SS_INSERTIONSORT_THRESHOLD (1) # endif #else # define SS_INSERTIONSORT_THRESHOLD (8) #endif #if defined(SS_BLOCKSIZE) # if SS_BLOCKSIZE < 0 # undef SS_BLOCKSIZE # define SS_BLOCKSIZE (0) # elif 32768 <= SS_BLOCKSIZE # undef SS_BLOCKSIZE # define SS_BLOCKSIZE (32767) # endif #else # define SS_BLOCKSIZE (1024) #endif /* minstacksize = log(SS_BLOCKSIZE) / log(3) * 2 */ #if SS_BLOCKSIZE == 0 # define SS_MISORT_STACKSIZE (96) #elif SS_BLOCKSIZE <= 4096 # define SS_MISORT_STACKSIZE (16) #else # define SS_MISORT_STACKSIZE (24) #endif #define SS_SMERGE_STACKSIZE (32) #define TR_INSERTIONSORT_THRESHOLD (8) #define TR_STACKSIZE (64) /*- Macros -*/ #ifndef SWAP # define SWAP(_a, _b) do { t = (_a); (_a) = (_b); (_b) = t; } while(0) #endif /* SWAP */ #ifndef MIN # define MIN(_a, _b) (((_a) < (_b)) ? (_a) : (_b)) #endif /* MIN */ #ifndef MAX # define MAX(_a, _b) (((_a) > (_b)) ? 
(_a) : (_b)) #endif /* MAX */ #define STACK_PUSH(_a, _b, _c, _d)\ do {\ assert(ssize < STACK_SIZE);\ stack[ssize].a = (_a), stack[ssize].b = (_b),\ stack[ssize].c = (_c), stack[ssize++].d = (_d);\ } while(0) #define STACK_PUSH5(_a, _b, _c, _d, _e)\ do {\ assert(ssize < STACK_SIZE);\ stack[ssize].a = (_a), stack[ssize].b = (_b),\ stack[ssize].c = (_c), stack[ssize].d = (_d), stack[ssize++].e = (_e);\ } while(0) #define STACK_POP(_a, _b, _c, _d)\ do {\ assert(0 <= ssize);\ if(ssize == 0) { return; }\ (_a) = stack[--ssize].a, (_b) = stack[ssize].b,\ (_c) = stack[ssize].c, (_d) = stack[ssize].d;\ } while(0) #define STACK_POP5(_a, _b, _c, _d, _e)\ do {\ assert(0 <= ssize);\ if(ssize == 0) { return; }\ (_a) = stack[--ssize].a, (_b) = stack[ssize].b,\ (_c) = stack[ssize].c, (_d) = stack[ssize].d, (_e) = stack[ssize].e;\ } while(0) #define BUCKET_A(_c0) bucket_A[(_c0)] #if ALPHABET_SIZE == 256 #define BUCKET_B(_c0, _c1) (bucket_B[((_c1) << 8) | (_c0)]) #define BUCKET_BSTAR(_c0, _c1) (bucket_B[((_c0) << 8) | (_c1)]) #else #define BUCKET_B(_c0, _c1) (bucket_B[(_c1) * ALPHABET_SIZE + (_c0)]) #define BUCKET_BSTAR(_c0, _c1) (bucket_B[(_c0) * ALPHABET_SIZE + (_c1)]) #endif /*- Private Functions -*/ static const int lg_table[256]= { -1,0,1,1,2,2,2,2,3,3,3,3,3,3,3,3,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4, 5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5, 6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6, 6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6, 7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7, 7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7, 7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7, 7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7 }; #if (SS_BLOCKSIZE == 0) || (SS_INSERTIONSORT_THRESHOLD < SS_BLOCKSIZE) static INLINE int ss_ilg(int n) { #if SS_BLOCKSIZE == 0 return (n & 0xffff0000) ? ((n & 0xff000000) ? 24 + lg_table[(n >> 24) & 0xff] : 16 + lg_table[(n >> 16) & 0xff]) : ((n & 0x0000ff00) ? 8 + lg_table[(n >> 8) & 0xff] : 0 + lg_table[(n >> 0) & 0xff]); #elif SS_BLOCKSIZE < 256 return lg_table[n]; #else return (n & 0xff00) ? 
8 + lg_table[(n >> 8) & 0xff] : 0 + lg_table[(n >> 0) & 0xff]; #endif } #endif /* (SS_BLOCKSIZE == 0) || (SS_INSERTIONSORT_THRESHOLD < SS_BLOCKSIZE) */ #if SS_BLOCKSIZE != 0 static const int sqq_table[256] = { 0, 16, 22, 27, 32, 35, 39, 42, 45, 48, 50, 53, 55, 57, 59, 61, 64, 65, 67, 69, 71, 73, 75, 76, 78, 80, 81, 83, 84, 86, 87, 89, 90, 91, 93, 94, 96, 97, 98, 99, 101, 102, 103, 104, 106, 107, 108, 109, 110, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 128, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 144, 145, 146, 147, 148, 149, 150, 150, 151, 152, 153, 154, 155, 155, 156, 157, 158, 159, 160, 160, 161, 162, 163, 163, 164, 165, 166, 167, 167, 168, 169, 170, 170, 171, 172, 173, 173, 174, 175, 176, 176, 177, 178, 178, 179, 180, 181, 181, 182, 183, 183, 184, 185, 185, 186, 187, 187, 188, 189, 189, 190, 191, 192, 192, 193, 193, 194, 195, 195, 196, 197, 197, 198, 199, 199, 200, 201, 201, 202, 203, 203, 204, 204, 205, 206, 206, 207, 208, 208, 209, 209, 210, 211, 211, 212, 212, 213, 214, 214, 215, 215, 216, 217, 217, 218, 218, 219, 219, 220, 221, 221, 222, 222, 223, 224, 224, 225, 225, 226, 226, 227, 227, 228, 229, 229, 230, 230, 231, 231, 232, 232, 233, 234, 234, 235, 235, 236, 236, 237, 237, 238, 238, 239, 240, 240, 241, 241, 242, 242, 243, 243, 244, 244, 245, 245, 246, 246, 247, 247, 248, 248, 249, 249, 250, 250, 251, 251, 252, 252, 253, 253, 254, 254, 255 }; static INLINE int ss_isqrt(int x) { int y, e; if(x >= (SS_BLOCKSIZE * SS_BLOCKSIZE)) { return SS_BLOCKSIZE; } e = (x & 0xffff0000) ? ((x & 0xff000000) ? 24 + lg_table[(x >> 24) & 0xff] : 16 + lg_table[(x >> 16) & 0xff]) : ((x & 0x0000ff00) ? 8 + lg_table[(x >> 8) & 0xff] : 0 + lg_table[(x >> 0) & 0xff]); if(e >= 16) { y = sqq_table[x >> ((e - 6) - (e & 1))] << ((e >> 1) - 7); if(e >= 24) { y = (y + 1 + x / y) >> 1; } y = (y + 1 + x / y) >> 1; } else if(e >= 8) { y = (sqq_table[x >> ((e - 6) - (e & 1))] >> (7 - (e >> 1))) + 1; } else { return sqq_table[x] >> 4; } return (x < (y * y)) ? y - 1 : y; } #endif /* SS_BLOCKSIZE != 0 */ /*---------------------------------------------------------------------------*/ /* Compares two suffixes. */ static INLINE int ss_compare(const unsigned char *T, const int *p1, const int *p2, int depth) { const unsigned char *U1, *U2, *U1n, *U2n; for(U1 = T + depth + *p1, U2 = T + depth + *p2, U1n = T + *(p1 + 1) + 2, U2n = T + *(p2 + 1) + 2; (U1 < U1n) && (U2 < U2n) && (*U1 == *U2); ++U1, ++U2) { } return U1 < U1n ? (U2 < U2n ? *U1 - *U2 : 1) : (U2 < U2n ? 
-1 : 0); } /*---------------------------------------------------------------------------*/ #if (SS_BLOCKSIZE != 1) && (SS_INSERTIONSORT_THRESHOLD != 1) /* Insertionsort for small size groups */ static void ss_insertionsort(const unsigned char *T, const int *PA, int *first, int *last, int depth) { int *i, *j; int t; int r; for(i = last - 2; first <= i; --i) { for(t = *i, j = i + 1; 0 < (r = ss_compare(T, PA + t, PA + *j, depth));) { do { *(j - 1) = *j; } while((++j < last) && (*j < 0)); if(last <= j) { break; } } if(r == 0) { *j = ~*j; } *(j - 1) = t; } } #endif /* (SS_BLOCKSIZE != 1) && (SS_INSERTIONSORT_THRESHOLD != 1) */ /*---------------------------------------------------------------------------*/ #if (SS_BLOCKSIZE == 0) || (SS_INSERTIONSORT_THRESHOLD < SS_BLOCKSIZE) static INLINE void ss_fixdown(const unsigned char *Td, const int *PA, int *SA, int i, int size) { int j, k; int v; int c, d, e; for(v = SA[i], c = Td[PA[v]]; (j = 2 * i + 1) < size; SA[i] = SA[k], i = k) { d = Td[PA[SA[k = j++]]]; if(d < (e = Td[PA[SA[j]]])) { k = j; d = e; } if(d <= c) { break; } } SA[i] = v; } /* Simple top-down heapsort. */ static void ss_heapsort(const unsigned char *Td, const int *PA, int *SA, int size) { int i, m; int t; m = size; if((size % 2) == 0) { m--; if(Td[PA[SA[m / 2]]] < Td[PA[SA[m]]]) { SWAP(SA[m], SA[m / 2]); } } for(i = m / 2 - 1; 0 <= i; --i) { ss_fixdown(Td, PA, SA, i, m); } if((size % 2) == 0) { SWAP(SA[0], SA[m]); ss_fixdown(Td, PA, SA, 0, m); } for(i = m - 1; 0 < i; --i) { t = SA[0], SA[0] = SA[i]; ss_fixdown(Td, PA, SA, 0, i); SA[i] = t; } } /*---------------------------------------------------------------------------*/ /* Returns the median of three elements. */ static INLINE int * ss_median3(const unsigned char *Td, const int *PA, int *v1, int *v2, int *v3) { int *t; if(Td[PA[*v1]] > Td[PA[*v2]]) { SWAP(v1, v2); } if(Td[PA[*v2]] > Td[PA[*v3]]) { if(Td[PA[*v1]] > Td[PA[*v3]]) { return v1; } else { return v3; } } return v2; } /* Returns the median of five elements. */ static INLINE int * ss_median5(const unsigned char *Td, const int *PA, int *v1, int *v2, int *v3, int *v4, int *v5) { int *t; if(Td[PA[*v2]] > Td[PA[*v3]]) { SWAP(v2, v3); } if(Td[PA[*v4]] > Td[PA[*v5]]) { SWAP(v4, v5); } if(Td[PA[*v2]] > Td[PA[*v4]]) { SWAP(v2, v4); SWAP(v3, v5); } if(Td[PA[*v1]] > Td[PA[*v3]]) { SWAP(v1, v3); } if(Td[PA[*v1]] > Td[PA[*v4]]) { SWAP(v1, v4); SWAP(v3, v5); } if(Td[PA[*v3]] > Td[PA[*v4]]) { return v4; } return v3; } /* Returns the pivot element. */ static INLINE int * ss_pivot(const unsigned char *Td, const int *PA, int *first, int *last) { int *middle; int t; t = last - first; middle = first + t / 2; if(t <= 512) { if(t <= 32) { return ss_median3(Td, PA, first, middle, last - 1); } else { t >>= 2; return ss_median5(Td, PA, first, first + t, middle, last - 1 - t, last - 1); } } t >>= 3; first = ss_median3(Td, PA, first, first + t, first + (t << 1)); middle = ss_median3(Td, PA, middle - t, middle, middle + t); last = ss_median3(Td, PA, last - 1 - (t << 1), last - 1 - t, last - 1); return ss_median3(Td, PA, first, middle, last); } /*---------------------------------------------------------------------------*/ /* Binary partition for substrings. 
*/ static INLINE int * ss_partition(const int *PA, int *first, int *last, int depth) { int *a, *b; int t; for(a = first - 1, b = last;;) { for(; (++a < b) && ((PA[*a] + depth) >= (PA[*a + 1] + 1));) { *a = ~*a; } for(; (a < --b) && ((PA[*b] + depth) < (PA[*b + 1] + 1));) { } if(b <= a) { break; } t = ~*b; *b = *a; *a = t; } if(first < a) { *first = ~*first; } return a; } /* Multikey introsort for medium size groups. */ static void ss_mintrosort(const unsigned char *T, const int *PA, int *first, int *last, int depth) { #define STACK_SIZE SS_MISORT_STACKSIZE struct { int *a, *b, c; int d; } stack[STACK_SIZE]; const unsigned char *Td; int *a, *b, *c, *d, *e, *f; int s, t; int ssize; int limit; int v, x = 0; for(ssize = 0, limit = ss_ilg(last - first);;) { if((last - first) <= SS_INSERTIONSORT_THRESHOLD) { #if 1 < SS_INSERTIONSORT_THRESHOLD if(1 < (last - first)) { ss_insertionsort(T, PA, first, last, depth); } #endif STACK_POP(first, last, depth, limit); continue; } Td = T + depth; if(limit-- == 0) { ss_heapsort(Td, PA, first, last - first); } if(limit < 0) { for(a = first + 1, v = Td[PA[*first]]; a < last; ++a) { if((x = Td[PA[*a]]) != v) { if(1 < (a - first)) { break; } v = x; first = a; } } if(Td[PA[*first] - 1] < v) { first = ss_partition(PA, first, a, depth); } if((a - first) <= (last - a)) { if(1 < (a - first)) { STACK_PUSH(a, last, depth, -1); last = a, depth += 1, limit = ss_ilg(a - first); } else { first = a, limit = -1; } } else { if(1 < (last - a)) { STACK_PUSH(first, a, depth + 1, ss_ilg(a - first)); first = a, limit = -1; } else { last = a, depth += 1, limit = ss_ilg(a - first); } } continue; } /* choose pivot */ a = ss_pivot(Td, PA, first, last); v = Td[PA[*a]]; SWAP(*first, *a); /* partition */ for(b = first; (++b < last) && ((x = Td[PA[*b]]) == v);) { } if(((a = b) < last) && (x < v)) { for(; (++b < last) && ((x = Td[PA[*b]]) <= v);) { if(x == v) { SWAP(*b, *a); ++a; } } } for(c = last; (b < --c) && ((x = Td[PA[*c]]) == v);) { } if((b < (d = c)) && (x > v)) { for(; (b < --c) && ((x = Td[PA[*c]]) >= v);) { if(x == v) { SWAP(*c, *d); --d; } } } for(; b < c;) { SWAP(*b, *c); for(; (++b < c) && ((x = Td[PA[*b]]) <= v);) { if(x == v) { SWAP(*b, *a); ++a; } } for(; (b < --c) && ((x = Td[PA[*c]]) >= v);) { if(x == v) { SWAP(*c, *d); --d; } } } if(a <= d) { c = b - 1; if((s = a - first) > (t = b - a)) { s = t; } for(e = first, f = b - s; 0 < s; --s, ++e, ++f) { SWAP(*e, *f); } if((s = d - c) > (t = last - d - 1)) { s = t; } for(e = b, f = last - s; 0 < s; --s, ++e, ++f) { SWAP(*e, *f); } a = first + (b - a), c = last - (d - c); b = (v <= Td[PA[*a] - 1]) ? 
a : ss_partition(PA, a, c, depth); if((a - first) <= (last - c)) { if((last - c) <= (c - b)) { STACK_PUSH(b, c, depth + 1, ss_ilg(c - b)); STACK_PUSH(c, last, depth, limit); last = a; } else if((a - first) <= (c - b)) { STACK_PUSH(c, last, depth, limit); STACK_PUSH(b, c, depth + 1, ss_ilg(c - b)); last = a; } else { STACK_PUSH(c, last, depth, limit); STACK_PUSH(first, a, depth, limit); first = b, last = c, depth += 1, limit = ss_ilg(c - b); } } else { if((a - first) <= (c - b)) { STACK_PUSH(b, c, depth + 1, ss_ilg(c - b)); STACK_PUSH(first, a, depth, limit); first = c; } else if((last - c) <= (c - b)) { STACK_PUSH(first, a, depth, limit); STACK_PUSH(b, c, depth + 1, ss_ilg(c - b)); first = c; } else { STACK_PUSH(first, a, depth, limit); STACK_PUSH(c, last, depth, limit); first = b, last = c, depth += 1, limit = ss_ilg(c - b); } } } else { limit += 1; if(Td[PA[*first] - 1] < v) { first = ss_partition(PA, first, last, depth); limit = ss_ilg(last - first); } depth += 1; } } #undef STACK_SIZE } #endif /* (SS_BLOCKSIZE == 0) || (SS_INSERTIONSORT_THRESHOLD < SS_BLOCKSIZE) */ /*---------------------------------------------------------------------------*/ #if SS_BLOCKSIZE != 0 static INLINE void ss_blockswap(int *a, int *b, int n) { int t; for(; 0 < n; --n, ++a, ++b) { t = *a, *a = *b, *b = t; } } static INLINE void ss_rotate(int *first, int *middle, int *last) { int *a, *b, t; int l, r; l = middle - first, r = last - middle; for(; (0 < l) && (0 < r);) { if(l == r) { ss_blockswap(first, middle, l); break; } if(l < r) { a = last - 1, b = middle - 1; t = *a; do { *a-- = *b, *b-- = *a; if(b < first) { *a = t; last = a; if((r -= l + 1) <= l) { break; } a -= 1, b = middle - 1; t = *a; } } while(1); } else { a = first, b = middle; t = *a; do { *a++ = *b, *b++ = *a; if(last <= b) { *a = t; first = a + 1; if((l -= r + 1) <= r) { break; } a += 1, b = middle; t = *a; } } while(1); } } } /*---------------------------------------------------------------------------*/ static void ss_inplacemerge(const unsigned char *T, const int *PA, int *first, int *middle, int *last, int depth) { const int *p; int *a, *b; int len, half; int q, r; int x; for(;;) { if(*(last - 1) < 0) { x = 1; p = PA + ~*(last - 1); } else { x = 0; p = PA + *(last - 1); } for(a = first, len = middle - first, half = len >> 1, r = -1; 0 < len; len = half, half >>= 1) { b = a + half; q = ss_compare(T, PA + ((0 <= *b) ? *b : ~*b), p, depth); if(q < 0) { a = b + 1; half -= (len & 1) ^ 1; } else { r = q; } } if(a < middle) { if(r == 0) { *a = ~*a; } ss_rotate(a, middle, last); last -= middle - a; middle = a; if(first == middle) { break; } } --last; if(x != 0) { while(*--last < 0) { } } if(middle == last) { break; } } } /*---------------------------------------------------------------------------*/ /* Merge-forward with internal buffer. 
*/ static void ss_mergeforward(const unsigned char *T, const int *PA, int *first, int *middle, int *last, int *buf, int depth) { int *a, *b, *c, *bufend; int t; int r; bufend = buf + (middle - first) - 1; ss_blockswap(buf, first, middle - first); for(t = *(a = first), b = buf, c = middle;;) { r = ss_compare(T, PA + *b, PA + *c, depth); if(r < 0) { do { *a++ = *b; if(bufend <= b) { *bufend = t; return; } *b++ = *a; } while(*b < 0); } else if(r > 0) { do { *a++ = *c, *c++ = *a; if(last <= c) { while(b < bufend) { *a++ = *b, *b++ = *a; } *a = *b, *b = t; return; } } while(*c < 0); } else { *c = ~*c; do { *a++ = *b; if(bufend <= b) { *bufend = t; return; } *b++ = *a; } while(*b < 0); do { *a++ = *c, *c++ = *a; if(last <= c) { while(b < bufend) { *a++ = *b, *b++ = *a; } *a = *b, *b = t; return; } } while(*c < 0); } } } /* Merge-backward with internal buffer. */ static void ss_mergebackward(const unsigned char *T, const int *PA, int *first, int *middle, int *last, int *buf, int depth) { const int *p1, *p2; int *a, *b, *c, *bufend; int t; int r; int x; bufend = buf + (last - middle) - 1; ss_blockswap(buf, middle, last - middle); x = 0; if(*bufend < 0) { p1 = PA + ~*bufend; x |= 1; } else { p1 = PA + *bufend; } if(*(middle - 1) < 0) { p2 = PA + ~*(middle - 1); x |= 2; } else { p2 = PA + *(middle - 1); } for(t = *(a = last - 1), b = bufend, c = middle - 1;;) { r = ss_compare(T, p1, p2, depth); if(0 < r) { if(x & 1) { do { *a-- = *b, *b-- = *a; } while(*b < 0); x ^= 1; } *a-- = *b; if(b <= buf) { *buf = t; break; } *b-- = *a; if(*b < 0) { p1 = PA + ~*b; x |= 1; } else { p1 = PA + *b; } } else if(r < 0) { if(x & 2) { do { *a-- = *c, *c-- = *a; } while(*c < 0); x ^= 2; } *a-- = *c, *c-- = *a; if(c < first) { while(buf < b) { *a-- = *b, *b-- = *a; } *a = *b, *b = t; break; } if(*c < 0) { p2 = PA + ~*c; x |= 2; } else { p2 = PA + *c; } } else { if(x & 1) { do { *a-- = *b, *b-- = *a; } while(*b < 0); x ^= 1; } *a-- = ~*b; if(b <= buf) { *buf = t; break; } *b-- = *a; if(x & 2) { do { *a-- = *c, *c-- = *a; } while(*c < 0); x ^= 2; } *a-- = *c, *c-- = *a; if(c < first) { while(buf < b) { *a-- = *b, *b-- = *a; } *a = *b, *b = t; break; } if(*b < 0) { p1 = PA + ~*b; x |= 1; } else { p1 = PA + *b; } if(*c < 0) { p2 = PA + ~*c; x |= 2; } else { p2 = PA + *c; } } } } /* D&C based merge. */ static void ss_swapmerge(const unsigned char *T, const int *PA, int *first, int *middle, int *last, int *buf, int bufsize, int depth) { #define STACK_SIZE SS_SMERGE_STACKSIZE #define GETIDX(a) ((0 <= (a)) ? 
(a) : (~(a))) #define MERGE_CHECK(a, b, c)\ do {\ if(((c) & 1) ||\ (((c) & 2) && (ss_compare(T, PA + GETIDX(*((a) - 1)), PA + *(a), depth) == 0))) {\ *(a) = ~*(a);\ }\ if(((c) & 4) && ((ss_compare(T, PA + GETIDX(*((b) - 1)), PA + *(b), depth) == 0))) {\ *(b) = ~*(b);\ }\ } while(0) struct { int *a, *b, *c; int d; } stack[STACK_SIZE]; int *l, *r, *lm, *rm; int m, len, half; int ssize; int check, next; for(check = 0, ssize = 0;;) { if((last - middle) <= bufsize) { if((first < middle) && (middle < last)) { ss_mergebackward(T, PA, first, middle, last, buf, depth); } MERGE_CHECK(first, last, check); STACK_POP(first, middle, last, check); continue; } if((middle - first) <= bufsize) { if(first < middle) { ss_mergeforward(T, PA, first, middle, last, buf, depth); } MERGE_CHECK(first, last, check); STACK_POP(first, middle, last, check); continue; } for(m = 0, len = MIN(middle - first, last - middle), half = len >> 1; 0 < len; len = half, half >>= 1) { if(ss_compare(T, PA + GETIDX(*(middle + m + half)), PA + GETIDX(*(middle - m - half - 1)), depth) < 0) { m += half + 1; half -= (len & 1) ^ 1; } } if(0 < m) { lm = middle - m, rm = middle + m; ss_blockswap(lm, middle, m); l = r = middle, next = 0; if(rm < last) { if(*rm < 0) { *rm = ~*rm; if(first < lm) { for(; *--l < 0;) { } next |= 4; } next |= 1; } else if(first < lm) { for(; *r < 0; ++r) { } next |= 2; } } if((l - first) <= (last - r)) { STACK_PUSH(r, rm, last, (next & 3) | (check & 4)); middle = lm, last = l, check = (check & 3) | (next & 4); } else { if((next & 2) && (r == middle)) { next ^= 6; } STACK_PUSH(first, lm, l, (check & 3) | (next & 4)); first = r, middle = rm, check = (next & 3) | (check & 4); } } else { if(ss_compare(T, PA + GETIDX(*(middle - 1)), PA + *middle, depth) == 0) { *middle = ~*middle; } MERGE_CHECK(first, last, check); STACK_POP(first, middle, last, check); } } #undef STACK_SIZE } #endif /* SS_BLOCKSIZE != 0 */ /*---------------------------------------------------------------------------*/ /* Substring sort */ static void sssort(const unsigned char *T, const int *PA, int *first, int *last, int *buf, int bufsize, int depth, int n, int lastsuffix) { int *a; #if SS_BLOCKSIZE != 0 int *b, *middle, *curbuf; int j, k, curbufsize, limit; #endif int i; if(lastsuffix != 0) { ++first; } #if SS_BLOCKSIZE == 0 ss_mintrosort(T, PA, first, last, depth); #else if((bufsize < SS_BLOCKSIZE) && (bufsize < (last - first)) && (bufsize < (limit = ss_isqrt(last - first)))) { if(SS_BLOCKSIZE < limit) { limit = SS_BLOCKSIZE; } buf = middle = last - limit, bufsize = limit; } else { middle = last, limit = 0; } for(a = first, i = 0; SS_BLOCKSIZE < (middle - a); a += SS_BLOCKSIZE, ++i) { #if SS_INSERTIONSORT_THRESHOLD < SS_BLOCKSIZE ss_mintrosort(T, PA, a, a + SS_BLOCKSIZE, depth); #elif 1 < SS_BLOCKSIZE ss_insertionsort(T, PA, a, a + SS_BLOCKSIZE, depth); #endif curbufsize = last - (a + SS_BLOCKSIZE); curbuf = a + SS_BLOCKSIZE; if(curbufsize <= bufsize) { curbufsize = bufsize, curbuf = buf; } for(b = a, k = SS_BLOCKSIZE, j = i; j & 1; b -= k, k <<= 1, j >>= 1) { ss_swapmerge(T, PA, b - k, b, b + k, curbuf, curbufsize, depth); } } #if SS_INSERTIONSORT_THRESHOLD < SS_BLOCKSIZE ss_mintrosort(T, PA, a, middle, depth); #elif 1 < SS_BLOCKSIZE ss_insertionsort(T, PA, a, middle, depth); #endif for(k = SS_BLOCKSIZE; i != 0; k <<= 1, i >>= 1) { if(i & 1) { ss_swapmerge(T, PA, a - k, a, middle, buf, bufsize, depth); a -= k; } } if(limit != 0) { #if SS_INSERTIONSORT_THRESHOLD < SS_BLOCKSIZE ss_mintrosort(T, PA, middle, last, depth); #elif 1 < SS_BLOCKSIZE 
ss_insertionsort(T, PA, middle, last, depth); #endif ss_inplacemerge(T, PA, first, middle, last, depth); } #endif if(lastsuffix != 0) { /* Insert last type B* suffix. */ int PAi[2]; PAi[0] = PA[*(first - 1)], PAi[1] = n - 2; for(a = first, i = *(first - 1); (a < last) && ((*a < 0) || (0 < ss_compare(T, &(PAi[0]), PA + *a, depth))); ++a) { *(a - 1) = *a; } *(a - 1) = i; } } /*---------------------------------------------------------------------------*/ static INLINE int tr_ilg(int n) { return (n & 0xffff0000) ? ((n & 0xff000000) ? 24 + lg_table[(n >> 24) & 0xff] : 16 + lg_table[(n >> 16) & 0xff]) : ((n & 0x0000ff00) ? 8 + lg_table[(n >> 8) & 0xff] : 0 + lg_table[(n >> 0) & 0xff]); } /*---------------------------------------------------------------------------*/ /* Simple insertionsort for small size groups. */ static void tr_insertionsort(const int *ISAd, int *first, int *last) { int *a, *b; int t, r; for(a = first + 1; a < last; ++a) { for(t = *a, b = a - 1; 0 > (r = ISAd[t] - ISAd[*b]);) { do { *(b + 1) = *b; } while((first <= --b) && (*b < 0)); if(b < first) { break; } } if(r == 0) { *b = ~*b; } *(b + 1) = t; } } /*---------------------------------------------------------------------------*/ static INLINE void tr_fixdown(const int *ISAd, int *SA, int i, int size) { int j, k; int v; int c, d, e; for(v = SA[i], c = ISAd[v]; (j = 2 * i + 1) < size; SA[i] = SA[k], i = k) { d = ISAd[SA[k = j++]]; if(d < (e = ISAd[SA[j]])) { k = j; d = e; } if(d <= c) { break; } } SA[i] = v; } /* Simple top-down heapsort. */ static void tr_heapsort(const int *ISAd, int *SA, int size) { int i, m; int t; m = size; if((size % 2) == 0) { m--; if(ISAd[SA[m / 2]] < ISAd[SA[m]]) { SWAP(SA[m], SA[m / 2]); } } for(i = m / 2 - 1; 0 <= i; --i) { tr_fixdown(ISAd, SA, i, m); } if((size % 2) == 0) { SWAP(SA[0], SA[m]); tr_fixdown(ISAd, SA, 0, m); } for(i = m - 1; 0 < i; --i) { t = SA[0], SA[0] = SA[i]; tr_fixdown(ISAd, SA, 0, i); SA[i] = t; } } /*---------------------------------------------------------------------------*/ /* Returns the median of three elements. */ static INLINE int * tr_median3(const int *ISAd, int *v1, int *v2, int *v3) { int *t; if(ISAd[*v1] > ISAd[*v2]) { SWAP(v1, v2); } if(ISAd[*v2] > ISAd[*v3]) { if(ISAd[*v1] > ISAd[*v3]) { return v1; } else { return v3; } } return v2; } /* Returns the median of five elements. */ static INLINE int * tr_median5(const int *ISAd, int *v1, int *v2, int *v3, int *v4, int *v5) { int *t; if(ISAd[*v2] > ISAd[*v3]) { SWAP(v2, v3); } if(ISAd[*v4] > ISAd[*v5]) { SWAP(v4, v5); } if(ISAd[*v2] > ISAd[*v4]) { SWAP(v2, v4); SWAP(v3, v5); } if(ISAd[*v1] > ISAd[*v3]) { SWAP(v1, v3); } if(ISAd[*v1] > ISAd[*v4]) { SWAP(v1, v4); SWAP(v3, v5); } if(ISAd[*v3] > ISAd[*v4]) { return v4; } return v3; } /* Returns the pivot element. 
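* (median of three for ranges of at most 32 elements, median of five up to 512, and a pseudo-median of nine above that)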
*/ static INLINE int * tr_pivot(const int *ISAd, int *first, int *last) { int *middle; int t; t = last - first; middle = first + t / 2; if(t <= 512) { if(t <= 32) { return tr_median3(ISAd, first, middle, last - 1); } else { t >>= 2; return tr_median5(ISAd, first, first + t, middle, last - 1 - t, last - 1); } } t >>= 3; first = tr_median3(ISAd, first, first + t, first + (t << 1)); middle = tr_median3(ISAd, middle - t, middle, middle + t); last = tr_median3(ISAd, last - 1 - (t << 1), last - 1 - t, last - 1); return tr_median3(ISAd, first, middle, last); } /*---------------------------------------------------------------------------*/ typedef struct _trbudget_t trbudget_t; struct _trbudget_t { int chance; int remain; int incval; int count; }; static INLINE void trbudget_init(trbudget_t *budget, int chance, int incval) { budget->chance = chance; budget->remain = budget->incval = incval; } static INLINE int trbudget_check(trbudget_t *budget, int size) { if(size <= budget->remain) { budget->remain -= size; return 1; } if(budget->chance == 0) { budget->count += size; return 0; } budget->remain += budget->incval - size; budget->chance -= 1; return 1; } /*---------------------------------------------------------------------------*/ static INLINE void tr_partition(const int *ISAd, int *first, int *middle, int *last, int **pa, int **pb, int v) { int *a, *b, *c, *d, *e, *f; int t, s; int x = 0; for(b = middle - 1; (++b < last) && ((x = ISAd[*b]) == v);) { } if(((a = b) < last) && (x < v)) { for(; (++b < last) && ((x = ISAd[*b]) <= v);) { if(x == v) { SWAP(*b, *a); ++a; } } } for(c = last; (b < --c) && ((x = ISAd[*c]) == v);) { } if((b < (d = c)) && (x > v)) { for(; (b < --c) && ((x = ISAd[*c]) >= v);) { if(x == v) { SWAP(*c, *d); --d; } } } for(; b < c;) { SWAP(*b, *c); for(; (++b < c) && ((x = ISAd[*b]) <= v);) { if(x == v) { SWAP(*b, *a); ++a; } } for(; (b < --c) && ((x = ISAd[*c]) >= v);) { if(x == v) { SWAP(*c, *d); --d; } } } if(a <= d) { c = b - 1; if((s = a - first) > (t = b - a)) { s = t; } for(e = first, f = b - s; 0 < s; --s, ++e, ++f) { SWAP(*e, *f); } if((s = d - c) > (t = last - d - 1)) { s = t; } for(e = b, f = last - s; 0 < s; --s, ++e, ++f) { SWAP(*e, *f); } first += (b - a), last -= (d - c); } *pa = first, *pb = last; } static void tr_copy(int *ISA, const int *SA, int *first, int *a, int *b, int *last, int depth) { /* sort suffixes of middle partition by using sorted order of suffixes of left and right partition. 
*/ int *c, *d, *e; int s, v; v = b - SA - 1; for(c = first, d = a - 1; c <= d; ++c) { if((0 <= (s = *c - depth)) && (ISA[s] == v)) { *++d = s; ISA[s] = d - SA; } } for(c = last - 1, e = d + 1, d = b; e < d; --c) { if((0 <= (s = *c - depth)) && (ISA[s] == v)) { *--d = s; ISA[s] = d - SA; } } } static void tr_partialcopy(int *ISA, const int *SA, int *first, int *a, int *b, int *last, int depth) { int *c, *d, *e; int s, v; int rank, lastrank, newrank = -1; v = b - SA - 1; lastrank = -1; for(c = first, d = a - 1; c <= d; ++c) { if((0 <= (s = *c - depth)) && (ISA[s] == v)) { *++d = s; rank = ISA[s + depth]; if(lastrank != rank) { lastrank = rank; newrank = d - SA; } ISA[s] = newrank; } } lastrank = -1; for(e = d; first <= e; --e) { rank = ISA[*e]; if(lastrank != rank) { lastrank = rank; newrank = e - SA; } if(newrank != rank) { ISA[*e] = newrank; } } lastrank = -1; for(c = last - 1, e = d + 1, d = b; e < d; --c) { if((0 <= (s = *c - depth)) && (ISA[s] == v)) { *--d = s; rank = ISA[s + depth]; if(lastrank != rank) { lastrank = rank; newrank = d - SA; } ISA[s] = newrank; } } } static void tr_introsort(int *ISA, const int *ISAd, int *SA, int *first, int *last, trbudget_t *budget) { #define STACK_SIZE TR_STACKSIZE struct { const int *a; int *b, *c; int d, e; }stack[STACK_SIZE]; int *a, *b, *c; int t; int v, x = 0; int incr = ISAd - ISA; int limit, next; int ssize, trlink = -1; for(ssize = 0, limit = tr_ilg(last - first);;) { if(limit < 0) { if(limit == -1) { /* tandem repeat partition */ tr_partition(ISAd - incr, first, first, last, &a, &b, last - SA - 1); /* update ranks */ if(a < last) { for(c = first, v = a - SA - 1; c < a; ++c) { ISA[*c] = v; } } if(b < last) { for(c = a, v = b - SA - 1; c < b; ++c) { ISA[*c] = v; } } /* push */ if(1 < (b - a)) { STACK_PUSH5(NULL, a, b, 0, 0); STACK_PUSH5(ISAd - incr, first, last, -2, trlink); trlink = ssize - 2; } if((a - first) <= (last - b)) { if(1 < (a - first)) { STACK_PUSH5(ISAd, b, last, tr_ilg(last - b), trlink); last = a, limit = tr_ilg(a - first); } else if(1 < (last - b)) { first = b, limit = tr_ilg(last - b); } else { STACK_POP5(ISAd, first, last, limit, trlink); } } else { if(1 < (last - b)) { STACK_PUSH5(ISAd, first, a, tr_ilg(a - first), trlink); first = b, limit = tr_ilg(last - b); } else if(1 < (a - first)) { last = a, limit = tr_ilg(a - first); } else { STACK_POP5(ISAd, first, last, limit, trlink); } } } else if(limit == -2) { /* tandem repeat copy */ a = stack[--ssize].b, b = stack[ssize].c; if(stack[ssize].d == 0) { tr_copy(ISA, SA, first, a, b, last, ISAd - ISA); } else { if(0 <= trlink) { stack[trlink].d = -1; } tr_partialcopy(ISA, SA, first, a, b, last, ISAd - ISA); } STACK_POP5(ISAd, first, last, limit, trlink); } else { /* sorted partition */ if(0 <= *first) { a = first; do { ISA[*a] = a - SA; } while((++a < last) && (0 <= *a)); first = a; } if(first < last) { a = first; do { *a = ~*a; } while(*++a < 0); next = (ISA[*a] != ISAd[*a]) ? 
tr_ilg(a - first + 1) : -1; if(++a < last) { for(b = first, v = a - SA - 1; b < a; ++b) { ISA[*b] = v; } } /* push */ if(trbudget_check(budget, a - first)) { if((a - first) <= (last - a)) { STACK_PUSH5(ISAd, a, last, -3, trlink); ISAd += incr, last = a, limit = next; } else { if(1 < (last - a)) { STACK_PUSH5(ISAd + incr, first, a, next, trlink); first = a, limit = -3; } else { ISAd += incr, last = a, limit = next; } } } else { if(0 <= trlink) { stack[trlink].d = -1; } if(1 < (last - a)) { first = a, limit = -3; } else { STACK_POP5(ISAd, first, last, limit, trlink); } } } else { STACK_POP5(ISAd, first, last, limit, trlink); } } continue; } if((last - first) <= TR_INSERTIONSORT_THRESHOLD) { tr_insertionsort(ISAd, first, last); limit = -3; continue; } if(limit-- == 0) { tr_heapsort(ISAd, first, last - first); for(a = last - 1; first < a; a = b) { for(x = ISAd[*a], b = a - 1; (first <= b) && (ISAd[*b] == x); --b) { *b = ~*b; } } limit = -3; continue; } /* choose pivot */ a = tr_pivot(ISAd, first, last); SWAP(*first, *a); v = ISAd[*first]; /* partition */ tr_partition(ISAd, first, first + 1, last, &a, &b, v); if((last - first) != (b - a)) { next = (ISA[*a] != v) ? tr_ilg(b - a) : -1; /* update ranks */ for(c = first, v = a - SA - 1; c < a; ++c) { ISA[*c] = v; } if(b < last) { for(c = a, v = b - SA - 1; c < b; ++c) { ISA[*c] = v; } } /* push */ if((1 < (b - a)) && (trbudget_check(budget, b - a))) { if((a - first) <= (last - b)) { if((last - b) <= (b - a)) { if(1 < (a - first)) { STACK_PUSH5(ISAd + incr, a, b, next, trlink); STACK_PUSH5(ISAd, b, last, limit, trlink); last = a; } else if(1 < (last - b)) { STACK_PUSH5(ISAd + incr, a, b, next, trlink); first = b; } else { ISAd += incr, first = a, last = b, limit = next; } } else if((a - first) <= (b - a)) { if(1 < (a - first)) { STACK_PUSH5(ISAd, b, last, limit, trlink); STACK_PUSH5(ISAd + incr, a, b, next, trlink); last = a; } else { STACK_PUSH5(ISAd, b, last, limit, trlink); ISAd += incr, first = a, last = b, limit = next; } } else { STACK_PUSH5(ISAd, b, last, limit, trlink); STACK_PUSH5(ISAd, first, a, limit, trlink); ISAd += incr, first = a, last = b, limit = next; } } else { if((a - first) <= (b - a)) { if(1 < (last - b)) { STACK_PUSH5(ISAd + incr, a, b, next, trlink); STACK_PUSH5(ISAd, first, a, limit, trlink); first = b; } else if(1 < (a - first)) { STACK_PUSH5(ISAd + incr, a, b, next, trlink); last = a; } else { ISAd += incr, first = a, last = b, limit = next; } } else if((last - b) <= (b - a)) { if(1 < (last - b)) { STACK_PUSH5(ISAd, first, a, limit, trlink); STACK_PUSH5(ISAd + incr, a, b, next, trlink); first = b; } else { STACK_PUSH5(ISAd, first, a, limit, trlink); ISAd += incr, first = a, last = b, limit = next; } } else { STACK_PUSH5(ISAd, first, a, limit, trlink); STACK_PUSH5(ISAd, b, last, limit, trlink); ISAd += incr, first = a, last = b, limit = next; } } } else { if((1 < (b - a)) && (0 <= trlink)) { stack[trlink].d = -1; } if((a - first) <= (last - b)) { if(1 < (a - first)) { STACK_PUSH5(ISAd, b, last, limit, trlink); last = a; } else if(1 < (last - b)) { first = b; } else { STACK_POP5(ISAd, first, last, limit, trlink); } } else { if(1 < (last - b)) { STACK_PUSH5(ISAd, first, a, limit, trlink); first = b; } else if(1 < (a - first)) { last = a; } else { STACK_POP5(ISAd, first, last, limit, trlink); } } } } else { if(trbudget_check(budget, last - first)) { limit = tr_ilg(last - first), ISAd += incr; } else { if(0 <= trlink) { stack[trlink].d = -1; } STACK_POP5(ISAd, first, last, limit, trlink); } } } #undef STACK_SIZE } 
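/*
 * Caller-side sketch (illustrative only, not part of libdivsufsort-lite):
 * the public entry point divsufsort(), declared in divsufsort.h and defined
 * at the end of this file, expects the input text T, an int array SA with
 * room for n entries, and the text length n. The "abracadabra" input below
 * is just an example.
 *
 *   const unsigned char T[] = "abracadabra";
 *   int n = (int)(sizeof(T) - 1);             // 11 bytes, NUL excluded
 *   int *SA = (int *)malloc((size_t)n * sizeof(int));
 *   if (SA != NULL && divsufsort(T, SA, n, 0) == 0) {
 *       // SA[0..n-1] now holds the start offsets of the suffixes of T
 *       // in lexicographically ascending order.
 *   }
 *   free(SA);
 *
 * divbwt() is used the same way, but writes the Burrows-Wheeler transformed
 * string into U and returns the primary index (or -1/-2 on error).
 */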
/*---------------------------------------------------------------------------*/ /* Tandem repeat sort */ static void trsort(int *ISA, int *SA, int n, int depth) { int *ISAd; int *first, *last; trbudget_t budget; int t, skip, unsorted; trbudget_init(&budget, tr_ilg(n) * 2 / 3, n); /* trbudget_init(&budget, tr_ilg(n) * 3 / 4, n); */ for(ISAd = ISA + depth; -n < *SA; ISAd += ISAd - ISA) { first = SA; skip = 0; unsorted = 0; do { if((t = *first) < 0) { first -= t; skip += t; } else { if(skip != 0) { *(first + skip) = skip; skip = 0; } last = SA + ISA[t] + 1; if(1 < (last - first)) { budget.count = 0; tr_introsort(ISA, ISAd, SA, first, last, &budget); if(budget.count != 0) { unsorted += budget.count; } else { skip = first - last; } } else if((last - first) == 1) { skip = -1; } first = last; } } while(first < (SA + n)); if(skip != 0) { *(first + skip) = skip; } if(unsorted == 0) { break; } } } /*---------------------------------------------------------------------------*/ /* Sorts suffixes of type B*. */ static int sort_typeBstar(const unsigned char *T, int *SA, int *bucket_A, int *bucket_B, int n, int openMP) { int *PAb, *ISAb, *buf; #ifdef LIBBSC_OPENMP int *curbuf; int l; #endif int i, j, k, t, m, bufsize; int c0, c1; #ifdef LIBBSC_OPENMP int d0, d1; #endif (void)openMP; /* Initialize bucket arrays. */ for(i = 0; i < BUCKET_A_SIZE; ++i) { bucket_A[i] = 0; } for(i = 0; i < BUCKET_B_SIZE; ++i) { bucket_B[i] = 0; } /* Count the number of occurrences of the first one or two characters of each type A, B and B* suffix. Moreover, store the beginning position of all type B* suffixes into the array SA. */ for(i = n - 1, m = n, c0 = T[n - 1]; 0 <= i;) { /* type A suffix. */ do { ++BUCKET_A(c1 = c0); } while((0 <= --i) && ((c0 = T[i]) >= c1)); if(0 <= i) { /* type B* suffix. */ ++BUCKET_BSTAR(c0, c1); SA[--m] = i; /* type B suffix. */ for(--i, c1 = c0; (0 <= i) && ((c0 = T[i]) <= c1); --i, c1 = c0) { ++BUCKET_B(c0, c1); } } } m = n - m; /* note: A type B* suffix is lexicographically smaller than a type B suffix that begins with the same first two characters. */ /* Calculate the index of start/end point of each bucket. */ for(c0 = 0, i = 0, j = 0; c0 < ALPHABET_SIZE; ++c0) { t = i + BUCKET_A(c0); BUCKET_A(c0) = i + j; /* start point */ i = t + BUCKET_B(c0, c0); for(c1 = c0 + 1; c1 < ALPHABET_SIZE; ++c1) { j += BUCKET_BSTAR(c0, c1); BUCKET_BSTAR(c0, c1) = j; /* end point */ i += BUCKET_B(c0, c1); } } if(0 < m) { /* Sort the type B* suffixes by their first two characters. */ PAb = SA + n - m; ISAb = SA + m; for(i = m - 2; 0 <= i; --i) { t = PAb[i], c0 = T[t], c1 = T[t + 1]; SA[--BUCKET_BSTAR(c0, c1)] = i; } t = PAb[m - 1], c0 = T[t], c1 = T[t + 1]; SA[--BUCKET_BSTAR(c0, c1)] = m - 1; /* Sort the type B* substrings using sssort. 
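With LIBBSC_OPENMP enabled, each worker thread repeatedly claims the next unsorted (c0, c1) bucket under a critical section and sorts it into its own slice of the spare buffer; otherwise the buckets are sorted sequentially, from the last character pair downwards, using the otherwise unused area starting at SA + m as the temporary buffer.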
*/ #ifdef LIBBSC_OPENMP if (openMP) { buf = SA + m; c0 = ALPHABET_SIZE - 2, c1 = ALPHABET_SIZE - 1, j = m; #pragma omp parallel default(shared) private(bufsize, curbuf, k, l, d0, d1) { bufsize = (n - (2 * m)) / omp_get_num_threads(); curbuf = buf + omp_get_thread_num() * bufsize; k = 0; for(;;) { #pragma omp critical(sssort_lock) { if(0 < (l = j)) { d0 = c0, d1 = c1; do { k = BUCKET_BSTAR(d0, d1); if(--d1 <= d0) { d1 = ALPHABET_SIZE - 1; if(--d0 < 0) { break; } } } while(((l - k) <= 1) && (0 < (l = k))); c0 = d0, c1 = d1, j = k; } } if(l == 0) { break; } sssort(T, PAb, SA + k, SA + l, curbuf, bufsize, 2, n, *(SA + k) == (m - 1)); } } } else { buf = SA + m, bufsize = n - (2 * m); for(c0 = ALPHABET_SIZE - 2, j = m; 0 < j; --c0) { for(c1 = ALPHABET_SIZE - 1; c0 < c1; j = i, --c1) { i = BUCKET_BSTAR(c0, c1); if(1 < (j - i)) { sssort(T, PAb, SA + i, SA + j, buf, bufsize, 2, n, *(SA + i) == (m - 1)); } } } } #else buf = SA + m, bufsize = n - (2 * m); for(c0 = ALPHABET_SIZE - 2, j = m; 0 < j; --c0) { for(c1 = ALPHABET_SIZE - 1; c0 < c1; j = i, --c1) { i = BUCKET_BSTAR(c0, c1); if(1 < (j - i)) { sssort(T, PAb, SA + i, SA + j, buf, bufsize, 2, n, *(SA + i) == (m - 1)); } } } #endif /* Compute ranks of type B* substrings. */ for(i = m - 1; 0 <= i; --i) { if(0 <= SA[i]) { j = i; do { ISAb[SA[i]] = i; } while((0 <= --i) && (0 <= SA[i])); SA[i + 1] = i - j; if(i <= 0) { break; } } j = i; do { ISAb[SA[i] = ~SA[i]] = j; } while(SA[--i] < 0); ISAb[SA[i]] = j; } /* Construct the inverse suffix array of type B* suffixes using trsort. */ trsort(ISAb, SA, m, 1); /* Set the sorted order of tyoe B* suffixes. */ for(i = n - 1, j = m, c0 = T[n - 1]; 0 <= i;) { for(--i, c1 = c0; (0 <= i) && ((c0 = T[i]) >= c1); --i, c1 = c0) { } if(0 <= i) { t = i; for(--i, c1 = c0; (0 <= i) && ((c0 = T[i]) <= c1); --i, c1 = c0) { } SA[ISAb[--j]] = ((t == 0) || (1 < (t - i))) ? t : ~t; } } /* Calculate the index of start/end point of each bucket. */ BUCKET_B(ALPHABET_SIZE - 1, ALPHABET_SIZE - 1) = n; /* end point */ for(c0 = ALPHABET_SIZE - 2, k = m - 1; 0 <= c0; --c0) { i = BUCKET_A(c0 + 1) - 1; for(c1 = ALPHABET_SIZE - 1; c0 < c1; --c1) { t = i - BUCKET_B(c0, c1); BUCKET_B(c0, c1) = i; /* end point */ /* Move all type B* suffixes to the correct position. */ for(i = t, j = BUCKET_BSTAR(c0, c1); j <= k; --i, --k) { SA[i] = SA[k]; } } BUCKET_BSTAR(c0, c0 + 1) = i - BUCKET_B(c0, c0) + 1; /* start point */ BUCKET_B(c0, c0) = i; /* end point */ } } return m; } /* Constructs the suffix array by using the sorted order of type B* suffixes. */ static void construct_SA(const unsigned char *T, int *SA, int *bucket_A, int *bucket_B, int n, int m) { int *i, *j, *k; int s; int c0, c1, c2; if(0 < m) { /* Construct the sorted order of type B suffixes by using the sorted order of type B* suffixes. */ for(c1 = ALPHABET_SIZE - 2; 0 <= c1; --c1) { /* Scan the suffix array from right to left. */ for(i = SA + BUCKET_BSTAR(c1, c1 + 1), j = SA + BUCKET_A(c1 + 1) - 1, k = NULL, c2 = -1; i <= j; --j) { if(0 < (s = *j)) { assert(T[s] == c1); assert(((s + 1) < n) && (T[s] <= T[s + 1])); assert(T[s - 1] <= T[s]); *j = ~s; c0 = T[--s]; if((0 < s) && (T[s - 1] > c0)) { s = ~s; } if(c0 != c2) { if(0 <= c2) { BUCKET_B(c2, c1) = k - SA; } k = SA + BUCKET_B(c2 = c0, c1); } assert(k < j); assert(k != NULL); *k-- = s; } else { assert(((s == 0) && (T[s] == c1)) || (s < 0)); *j = ~s; } } } } /* Construct the suffix array by using the sorted order of type B suffixes. */ k = SA + BUCKET_A(c2 = T[n - 1]); *k++ = (T[n - 2] < c2) ? 
~(n - 1) : (n - 1); /* Scan the suffix array from left to right. */ for(i = SA, j = SA + n; i < j; ++i) { if(0 < (s = *i)) { assert(T[s - 1] >= T[s]); c0 = T[--s]; if((s == 0) || (T[s - 1] < c0)) { s = ~s; } if(c0 != c2) { BUCKET_A(c2) = k - SA; k = SA + BUCKET_A(c2 = c0); } assert(i < k); *k++ = s; } else { assert(s < 0); *i = ~s; } } } /* Constructs the burrows-wheeler transformed string directly by using the sorted order of type B* suffixes. */ static int construct_BWT(const unsigned char *T, int *SA, int *bucket_A, int *bucket_B, int n, int m) { int *i, *j, *k, *orig; int s; int c0, c1, c2; if(0 < m) { /* Construct the sorted order of type B suffixes by using the sorted order of type B* suffixes. */ for(c1 = ALPHABET_SIZE - 2; 0 <= c1; --c1) { /* Scan the suffix array from right to left. */ for(i = SA + BUCKET_BSTAR(c1, c1 + 1), j = SA + BUCKET_A(c1 + 1) - 1, k = NULL, c2 = -1; i <= j; --j) { if(0 < (s = *j)) { assert(T[s] == c1); assert(((s + 1) < n) && (T[s] <= T[s + 1])); assert(T[s - 1] <= T[s]); c0 = T[--s]; *j = ~((int)c0); if((0 < s) && (T[s - 1] > c0)) { s = ~s; } if(c0 != c2) { if(0 <= c2) { BUCKET_B(c2, c1) = k - SA; } k = SA + BUCKET_B(c2 = c0, c1); } assert(k < j); assert(k != NULL); *k-- = s; } else if(s != 0) { *j = ~s; #ifndef NDEBUG } else { assert(T[s] == c1); #endif } } } } /* Construct the BWTed string by using the sorted order of type B suffixes. */ k = SA + BUCKET_A(c2 = T[n - 1]); *k++ = (T[n - 2] < c2) ? ~((int)T[n - 2]) : (n - 1); /* Scan the suffix array from left to right. */ for(i = SA, j = SA + n, orig = SA; i < j; ++i) { if(0 < (s = *i)) { assert(T[s - 1] >= T[s]); c0 = T[--s]; *i = c0; if((0 < s) && (T[s - 1] < c0)) { s = ~((int)T[s - 1]); } if(c0 != c2) { BUCKET_A(c2) = k - SA; k = SA + BUCKET_A(c2 = c0); } assert(i < k); *k++ = s; } else if(s != 0) { *i = ~s; } else { orig = i; } } return orig - SA; } /* Constructs the burrows-wheeler transformed string directly by using the sorted order of type B* suffixes. */ static int construct_BWT_indexes(const unsigned char *T, int *SA, int *bucket_A, int *bucket_B, int n, int m, unsigned char * num_indexes, int * indexes) { int *i, *j, *k, *orig; int s; int c0, c1, c2; int mod = n / 8; { mod |= mod >> 1; mod |= mod >> 2; mod |= mod >> 4; mod |= mod >> 8; mod |= mod >> 16; mod >>= 1; *num_indexes = (unsigned char)((n - 1) / (mod + 1)); } if(0 < m) { /* Construct the sorted order of type B suffixes by using the sorted order of type B* suffixes. */ for(c1 = ALPHABET_SIZE - 2; 0 <= c1; --c1) { /* Scan the suffix array from right to left. */ for(i = SA + BUCKET_BSTAR(c1, c1 + 1), j = SA + BUCKET_A(c1 + 1) - 1, k = NULL, c2 = -1; i <= j; --j) { if(0 < (s = *j)) { assert(T[s] == c1); assert(((s + 1) < n) && (T[s] <= T[s + 1])); assert(T[s - 1] <= T[s]); if ((s & mod) == 0) indexes[s / (mod + 1) - 1] = j - SA; c0 = T[--s]; *j = ~((int)c0); if((0 < s) && (T[s - 1] > c0)) { s = ~s; } if(c0 != c2) { if(0 <= c2) { BUCKET_B(c2, c1) = k - SA; } k = SA + BUCKET_B(c2 = c0, c1); } assert(k < j); assert(k != NULL); *k-- = s; } else if(s != 0) { *j = ~s; #ifndef NDEBUG } else { assert(T[s] == c1); #endif } } } } /* Construct the BWTed string by using the sorted order of type B suffixes. */ k = SA + BUCKET_A(c2 = T[n - 1]); if (T[n - 2] < c2) { if (((n - 1) & mod) == 0) indexes[(n - 1) / (mod + 1) - 1] = k - SA; *k++ = ~((int)T[n - 2]); } else { *k++ = n - 1; } /* Scan the suffix array from left to right. 
*/ for(i = SA, j = SA + n, orig = SA; i < j; ++i) { if(0 < (s = *i)) { assert(T[s - 1] >= T[s]); if ((s & mod) == 0) indexes[s / (mod + 1) - 1] = i - SA; c0 = T[--s]; *i = c0; if(c0 != c2) { BUCKET_A(c2) = k - SA; k = SA + BUCKET_A(c2 = c0); } assert(i < k); if((0 < s) && (T[s - 1] < c0)) { if ((s & mod) == 0) indexes[s / (mod + 1) - 1] = k - SA; *k++ = ~((int)T[s - 1]); } else *k++ = s; } else if(s != 0) { *i = ~s; } else { orig = i; } } return orig - SA; } /*---------------------------------------------------------------------------*/ /*- Function -*/ int divsufsort(const unsigned char *T, int *SA, int n, int openMP) { int *bucket_A, *bucket_B; int m; int err = 0; /* Check arguments. */ if((T == NULL) || (SA == NULL) || (n < 0)) { return -1; } else if(n == 0) { return 0; } else if(n == 1) { SA[0] = 0; return 0; } else if(n == 2) { m = (T[0] < T[1]); SA[m ^ 1] = 0, SA[m] = 1; return 0; } bucket_A = (int *)malloc(BUCKET_A_SIZE * sizeof(int)); bucket_B = (int *)malloc(BUCKET_B_SIZE * sizeof(int)); /* Suffixsort. */ if((bucket_A != NULL) && (bucket_B != NULL)) { m = sort_typeBstar(T, SA, bucket_A, bucket_B, n, openMP); construct_SA(T, SA, bucket_A, bucket_B, n, m); } else { err = -2; } free(bucket_B); free(bucket_A); return err; } int divbwt(const unsigned char *T, unsigned char *U, int *A, int n, unsigned char * num_indexes, int * indexes, int openMP) { int *B; int *bucket_A, *bucket_B; int m, pidx, i; /* Check arguments. */ if((T == NULL) || (U == NULL) || (n < 0)) { return -1; } else if(n <= 1) { if(n == 1) { U[0] = T[0]; } return n; } if((B = A) == NULL) { B = (int *)malloc((size_t)(n + 1) * sizeof(int)); } bucket_A = (int *)malloc(BUCKET_A_SIZE * sizeof(int)); bucket_B = (int *)malloc(BUCKET_B_SIZE * sizeof(int)); /* Burrows-Wheeler Transform. */ if((B != NULL) && (bucket_A != NULL) && (bucket_B != NULL)) { m = sort_typeBstar(T, B, bucket_A, bucket_B, n, openMP); if (num_indexes == NULL || indexes == NULL) { pidx = construct_BWT(T, B, bucket_A, bucket_B, n, m); } else { pidx = construct_BWT_indexes(T, B, bucket_A, bucket_B, n, m, num_indexes, indexes); } /* Copy to output string. */ U[0] = T[n - 1]; for(i = 0; i < pidx; ++i) { U[i + 1] = (unsigned char)B[i]; } for(i += 1; i < n; ++i) { U[i] = (unsigned char)B[i]; } pidx += 1; } else { pidx = -2; } free(bucket_B); free(bucket_A); if(A == NULL) { free(B); } return pidx; } borgbackup-1.1.15/src/borg/algorithms/zstd/lib/dictBuilder/divsufsort.h0000644000175000017500000000456313771325506026153 0ustar useruser00000000000000/* * divsufsort.h for libdivsufsort-lite * Copyright (c) 2003-2008 Yuta Mori All Rights Reserved. * * Permission is hereby granted, free of charge, to any person * obtaining a copy of this software and associated documentation * files (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, * copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following * conditions: * * The above copyright notice and this permission notice shall be * included in all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. */ #ifndef _DIVSUFSORT_H #define _DIVSUFSORT_H 1 #ifdef __cplusplus extern "C" { #endif /* __cplusplus */ /*- Prototypes -*/ /** * Constructs the suffix array of a given string. * @param T [0..n-1] The input string. * @param SA [0..n-1] The output array of suffixes. * @param n The length of the given string. * @param openMP enables OpenMP optimization. * @return 0 if no error occurred, -1 or -2 otherwise. */ int divsufsort(const unsigned char *T, int *SA, int n, int openMP); /** * Constructs the burrows-wheeler transformed string of a given string. * @param T [0..n-1] The input string. * @param U [0..n-1] The output string. (can be T) * @param A [0..n-1] The temporary array. (can be NULL) * @param n The length of the given string. * @param num_indexes The length of secondary indexes array. (can be NULL) * @param indexes The secondary indexes array. (can be NULL) * @param openMP enables OpenMP optimization. * @return The primary index if no error occurred, -1 or -2 otherwise. */ int divbwt(const unsigned char *T, unsigned char *U, int *A, int n, unsigned char * num_indexes, int * indexes, int openMP); #ifdef __cplusplus } /* extern "C" */ #endif /* __cplusplus */ #endif /* _DIVSUFSORT_H */ borgbackup-1.1.15/src/borg/algorithms/zstd/lib/dictBuilder/zdict.c0000644000175000017500000012734113771325506025053 0ustar useruser00000000000000/* * Copyright (c) 2016-2020, Yann Collet, Facebook, Inc. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the * LICENSE file in the root directory of this source tree) and the GPLv2 (found * in the COPYING file in the root directory of this source tree). * You may select, at your option, one of the above-listed licenses. */ /*-************************************** * Tuning parameters ****************************************/ #define MINRATIO 4 /* minimum nb of apparition to be selected in dictionary */ #define ZDICT_MAX_SAMPLES_SIZE (2000U << 20) #define ZDICT_MIN_SAMPLES_SIZE (ZDICT_CONTENTSIZE_MIN * MINRATIO) /*-************************************** * Compiler Options ****************************************/ /* Unix Large Files support (>4GB) */ #define _FILE_OFFSET_BITS 64 #if (defined(__sun__) && (!defined(__LP64__))) /* Sun Solaris 32-bits requires specific definitions */ # define _LARGEFILE_SOURCE #elif ! 
defined(__LP64__) /* No point defining Large file for 64 bit */ # define _LARGEFILE64_SOURCE #endif /*-************************************* * Dependencies ***************************************/ #include /* malloc, free */ #include /* memset */ #include /* fprintf, fopen, ftello64 */ #include /* clock */ #include "../common/mem.h" /* read */ #include "../common/fse.h" /* FSE_normalizeCount, FSE_writeNCount */ #define HUF_STATIC_LINKING_ONLY #include "../common/huf.h" /* HUF_buildCTable, HUF_writeCTable */ #include "../common/zstd_internal.h" /* includes zstd.h */ #include "../common/xxhash.h" /* XXH64 */ #include "divsufsort.h" #ifndef ZDICT_STATIC_LINKING_ONLY # define ZDICT_STATIC_LINKING_ONLY #endif #include "zdict.h" #include "../compress/zstd_compress_internal.h" /* ZSTD_loadCEntropy() */ /*-************************************* * Constants ***************************************/ #define KB *(1 <<10) #define MB *(1 <<20) #define GB *(1U<<30) #define DICTLISTSIZE_DEFAULT 10000 #define NOISELENGTH 32 static const int g_compressionLevel_default = 3; static const U32 g_selectivity_default = 9; /*-************************************* * Console display ***************************************/ #define DISPLAY(...) { fprintf(stderr, __VA_ARGS__); fflush( stderr ); } #define DISPLAYLEVEL(l, ...) if (notificationLevel>=l) { DISPLAY(__VA_ARGS__); } /* 0 : no display; 1: errors; 2: default; 3: details; 4: debug */ static clock_t ZDICT_clockSpan(clock_t nPrevious) { return clock() - nPrevious; } static void ZDICT_printHex(const void* ptr, size_t length) { const BYTE* const b = (const BYTE*)ptr; size_t u; for (u=0; u126) c = '.'; /* non-printable char */ DISPLAY("%c", c); } } /*-******************************************************** * Helper functions **********************************************************/ unsigned ZDICT_isError(size_t errorCode) { return ERR_isError(errorCode); } const char* ZDICT_getErrorName(size_t errorCode) { return ERR_getErrorName(errorCode); } unsigned ZDICT_getDictID(const void* dictBuffer, size_t dictSize) { if (dictSize < 8) return 0; if (MEM_readLE32(dictBuffer) != ZSTD_MAGIC_DICTIONARY) return 0; return MEM_readLE32((const char*)dictBuffer + 4); } size_t ZDICT_getDictHeaderSize(const void* dictBuffer, size_t dictSize) { size_t headerSize; if (dictSize <= 8 || MEM_readLE32(dictBuffer) != ZSTD_MAGIC_DICTIONARY) return ERROR(dictionary_corrupted); { unsigned offcodeMaxValue = MaxOff; ZSTD_compressedBlockState_t* bs = (ZSTD_compressedBlockState_t*)malloc(sizeof(ZSTD_compressedBlockState_t)); U32* wksp = (U32*)malloc(HUF_WORKSPACE_SIZE); short* offcodeNCount = (short*)malloc((MaxOff+1)*sizeof(short)); if (!bs || !wksp || !offcodeNCount) { headerSize = ERROR(memory_allocation); } else { ZSTD_reset_compressedBlockState(bs); headerSize = ZSTD_loadCEntropy(bs, wksp, offcodeNCount, &offcodeMaxValue, dictBuffer, dictSize); } free(bs); free(wksp); free(offcodeNCount); } return headerSize; } /*-******************************************************** * Dictionary training functions **********************************************************/ static unsigned ZDICT_NbCommonBytes (size_t val) { if (MEM_isLittleEndian()) { if (MEM_64bits()) { # if defined(_MSC_VER) && defined(_WIN64) unsigned long r = 0; _BitScanForward64( &r, (U64)val ); return (unsigned)(r>>3); # elif defined(__GNUC__) && (__GNUC__ >= 3) return (__builtin_ctzll((U64)val) >> 3); # else static const int DeBruijnBytePos[64] = { 0, 0, 0, 0, 0, 1, 1, 2, 0, 3, 1, 3, 1, 4, 2, 7, 0, 2, 3, 6, 1, 5, 3, 5, 1, 3, 4, 4, 
2, 5, 6, 7, 7, 0, 1, 2, 3, 3, 4, 6, 2, 6, 5, 5, 3, 4, 5, 6, 7, 1, 2, 4, 6, 4, 4, 5, 7, 2, 6, 5, 7, 6, 7, 7 }; return DeBruijnBytePos[((U64)((val & -(long long)val) * 0x0218A392CDABBD3FULL)) >> 58]; # endif } else { /* 32 bits */ # if defined(_MSC_VER) unsigned long r=0; _BitScanForward( &r, (U32)val ); return (unsigned)(r>>3); # elif defined(__GNUC__) && (__GNUC__ >= 3) return (__builtin_ctz((U32)val) >> 3); # else static const int DeBruijnBytePos[32] = { 0, 0, 3, 0, 3, 1, 3, 0, 3, 2, 2, 1, 3, 2, 0, 1, 3, 3, 1, 2, 2, 2, 2, 0, 3, 1, 2, 0, 1, 0, 1, 1 }; return DeBruijnBytePos[((U32)((val & -(S32)val) * 0x077CB531U)) >> 27]; # endif } } else { /* Big Endian CPU */ if (MEM_64bits()) { # if defined(_MSC_VER) && defined(_WIN64) unsigned long r = 0; _BitScanReverse64( &r, val ); return (unsigned)(r>>3); # elif defined(__GNUC__) && (__GNUC__ >= 3) return (__builtin_clzll(val) >> 3); # else unsigned r; const unsigned n32 = sizeof(size_t)*4; /* calculate this way due to compiler complaining in 32-bits mode */ if (!(val>>n32)) { r=4; } else { r=0; val>>=n32; } if (!(val>>16)) { r+=2; val>>=8; } else { val>>=24; } r += (!val); return r; # endif } else { /* 32 bits */ # if defined(_MSC_VER) unsigned long r = 0; _BitScanReverse( &r, (unsigned long)val ); return (unsigned)(r>>3); # elif defined(__GNUC__) && (__GNUC__ >= 3) return (__builtin_clz((U32)val) >> 3); # else unsigned r; if (!(val>>16)) { r=2; val>>=8; } else { r=0; val>>=24; } r += (!val); return r; # endif } } } /*! ZDICT_count() : Count the nb of common bytes between 2 pointers. Note : this function presumes end of buffer followed by noisy guard band. */ static size_t ZDICT_count(const void* pIn, const void* pMatch) { const char* const pStart = (const char*)pIn; for (;;) { size_t const diff = MEM_readST(pMatch) ^ MEM_readST(pIn); if (!diff) { pIn = (const char*)pIn+sizeof(size_t); pMatch = (const char*)pMatch+sizeof(size_t); continue; } pIn = (const char*)pIn+ZDICT_NbCommonBytes(diff); return (size_t)((const char*)pIn - pStart); } } typedef struct { U32 pos; U32 length; U32 savings; } dictItem; static void ZDICT_initDictItem(dictItem* d) { d->pos = 1; d->length = 0; d->savings = (U32)(-1); } #define LLIMIT 64 /* heuristic determined experimentally */ #define MINMATCHLENGTH 7 /* heuristic determined experimentally */ static dictItem ZDICT_analyzePos( BYTE* doneMarks, const int* suffix, U32 start, const void* buffer, U32 minRatio, U32 notificationLevel) { U32 lengthList[LLIMIT] = {0}; U32 cumulLength[LLIMIT] = {0}; U32 savings[LLIMIT] = {0}; const BYTE* b = (const BYTE*)buffer; size_t maxLength = LLIMIT; size_t pos = suffix[start]; U32 end = start; dictItem solution; /* init */ memset(&solution, 0, sizeof(solution)); doneMarks[pos] = 1; /* trivial repetition cases */ if ( (MEM_read16(b+pos+0) == MEM_read16(b+pos+2)) ||(MEM_read16(b+pos+1) == MEM_read16(b+pos+3)) ||(MEM_read16(b+pos+2) == MEM_read16(b+pos+4)) ) { /* skip and mark segment */ U16 const pattern16 = MEM_read16(b+pos+4); U32 u, patternEnd = 6; while (MEM_read16(b+pos+patternEnd) == pattern16) patternEnd+=2 ; if (b[pos+patternEnd] == b[pos+patternEnd-1]) patternEnd++; for (u=1; u= MINMATCHLENGTH); } /* look backward */ { size_t length; do { length = ZDICT_count(b + pos, b + *(suffix+start-1)); if (length >=MINMATCHLENGTH) start--; } while(length >= MINMATCHLENGTH); } /* exit if not found a minimum nb of repetitions */ if (end-start < minRatio) { U32 idx; for(idx=start; idx= %i at pos %7u ", (unsigned)(end-start), MINMATCHLENGTH, (unsigned)pos); DISPLAYLEVEL(4, "\n"); for (mml = 
MINMATCHLENGTH ; ; mml++) { BYTE currentChar = 0; U32 currentCount = 0; U32 currentID = refinedStart; U32 id; U32 selectedCount = 0; U32 selectedID = currentID; for (id =refinedStart; id < refinedEnd; id++) { if (b[suffix[id] + mml] != currentChar) { if (currentCount > selectedCount) { selectedCount = currentCount; selectedID = currentID; } currentID = id; currentChar = b[ suffix[id] + mml]; currentCount = 0; } currentCount ++; } if (currentCount > selectedCount) { /* for last */ selectedCount = currentCount; selectedID = currentID; } if (selectedCount < minRatio) break; refinedStart = selectedID; refinedEnd = refinedStart + selectedCount; } /* evaluate gain based on new dict */ start = refinedStart; pos = suffix[refinedStart]; end = start; memset(lengthList, 0, sizeof(lengthList)); /* look forward */ { size_t length; do { end++; length = ZDICT_count(b + pos, b + suffix[end]); if (length >= LLIMIT) length = LLIMIT-1; lengthList[length]++; } while (length >=MINMATCHLENGTH); } /* look backward */ { size_t length = MINMATCHLENGTH; while ((length >= MINMATCHLENGTH) & (start > 0)) { length = ZDICT_count(b + pos, b + suffix[start - 1]); if (length >= LLIMIT) length = LLIMIT - 1; lengthList[length]++; if (length >= MINMATCHLENGTH) start--; } } /* largest useful length */ memset(cumulLength, 0, sizeof(cumulLength)); cumulLength[maxLength-1] = lengthList[maxLength-1]; for (i=(int)(maxLength-2); i>=0; i--) cumulLength[i] = cumulLength[i+1] + lengthList[i]; for (i=LLIMIT-1; i>=MINMATCHLENGTH; i--) if (cumulLength[i]>=minRatio) break; maxLength = i; /* reduce maxLength in case of final into repetitive data */ { U32 l = (U32)maxLength; BYTE const c = b[pos + maxLength-1]; while (b[pos+l-2]==c) l--; maxLength = l; } if (maxLength < MINMATCHLENGTH) return solution; /* skip : no long-enough solution */ /* calculate savings */ savings[5] = 0; for (i=MINMATCHLENGTH; i<=(int)maxLength; i++) savings[i] = savings[i-1] + (lengthList[i] * (i-3)); DISPLAYLEVEL(4, "Selected dict at position %u, of length %u : saves %u (ratio: %.2f) \n", (unsigned)pos, (unsigned)maxLength, (unsigned)savings[maxLength], (double)savings[maxLength] / maxLength); solution.pos = (U32)pos; solution.length = (U32)maxLength; solution.savings = savings[maxLength]; /* mark positions done */ { U32 id; for (id=start; id solution.length) length = solution.length; } pEnd = (U32)(testedPos + length); for (p=testedPos; ppos; const U32 eltEnd = elt.pos + elt.length; const char* const buf = (const char*) buffer; /* tail overlap */ U32 u; for (u=1; u elt.pos) && (table[u].pos <= eltEnd)) { /* overlap, existing > new */ /* append */ U32 const addedLength = table[u].pos - elt.pos; table[u].length += addedLength; table[u].pos = elt.pos; table[u].savings += elt.savings * addedLength / elt.length; /* rough approx */ table[u].savings += elt.length / 8; /* rough approx bonus */ elt = table[u]; /* sort : improve rank */ while ((u>1) && (table[u-1].savings < elt.savings)) table[u] = table[u-1], u--; table[u] = elt; return u; } } /* front overlap */ for (u=1; u= elt.pos) && (table[u].pos < elt.pos)) { /* overlap, existing < new */ /* append */ int const addedLength = (int)eltEnd - (table[u].pos + table[u].length); table[u].savings += elt.length / 8; /* rough approx bonus */ if (addedLength > 0) { /* otherwise, elt fully included into existing */ table[u].length += addedLength; table[u].savings += elt.savings * addedLength / elt.length; /* rough approx */ } /* sort : improve rank */ elt = table[u]; while ((u>1) && (table[u-1].savings < elt.savings)) table[u] = 
table[u-1], u--; table[u] = elt; return u; } if (MEM_read64(buf + table[u].pos) == MEM_read64(buf + elt.pos + 1)) { if (isIncluded(buf + table[u].pos, buf + elt.pos + 1, table[u].length)) { size_t const addedLength = MAX( (int)elt.length - (int)table[u].length , 1 ); table[u].pos = elt.pos; table[u].savings += (U32)(elt.savings * addedLength / elt.length); table[u].length = MIN(elt.length, table[u].length + 1); return u; } } } return 0; } static void ZDICT_removeDictItem(dictItem* table, U32 id) { /* convention : table[0].pos stores nb of elts */ U32 const max = table[0].pos; U32 u; if (!id) return; /* protection, should never happen */ for (u=id; upos--; } static void ZDICT_insertDictItem(dictItem* table, U32 maxSize, dictItem elt, const void* buffer) { /* merge if possible */ U32 mergeId = ZDICT_tryMerge(table, elt, 0, buffer); if (mergeId) { U32 newMerge = 1; while (newMerge) { newMerge = ZDICT_tryMerge(table, table[mergeId], mergeId, buffer); if (newMerge) ZDICT_removeDictItem(table, mergeId); mergeId = newMerge; } return; } /* insert */ { U32 current; U32 nextElt = table->pos; if (nextElt >= maxSize) nextElt = maxSize-1; current = nextElt-1; while (table[current].savings < elt.savings) { table[current+1] = table[current]; current--; } table[current+1] = elt; table->pos = nextElt+1; } } static U32 ZDICT_dictSize(const dictItem* dictList) { U32 u, dictSize = 0; for (u=1; u=l) { \ if (ZDICT_clockSpan(displayClock) > refreshRate) \ { displayClock = clock(); DISPLAY(__VA_ARGS__); \ if (notificationLevel>=4) fflush(stderr); } } /* init */ DISPLAYLEVEL(2, "\r%70s\r", ""); /* clean display line */ if (!suffix0 || !reverseSuffix || !doneMarks || !filePos) { result = ERROR(memory_allocation); goto _cleanup; } if (minRatio < MINRATIO) minRatio = MINRATIO; memset(doneMarks, 0, bufferSize+16); /* limit sample set size (divsufsort limitation)*/ if (bufferSize > ZDICT_MAX_SAMPLES_SIZE) DISPLAYLEVEL(3, "sample set too large : reduced to %u MB ...\n", (unsigned)(ZDICT_MAX_SAMPLES_SIZE>>20)); while (bufferSize > ZDICT_MAX_SAMPLES_SIZE) bufferSize -= fileSizes[--nbFiles]; /* sort */ DISPLAYLEVEL(2, "sorting %u files of total size %u MB ...\n", nbFiles, (unsigned)(bufferSize>>20)); { int const divSuftSortResult = divsufsort((const unsigned char*)buffer, suffix, (int)bufferSize, 0); if (divSuftSortResult != 0) { result = ERROR(GENERIC); goto _cleanup; } } suffix[bufferSize] = (int)bufferSize; /* leads into noise */ suffix0[0] = (int)bufferSize; /* leads into noise */ /* build reverse suffix sort */ { size_t pos; for (pos=0; pos < bufferSize; pos++) reverseSuffix[suffix[pos]] = (U32)pos; /* note filePos tracks borders between samples. 
It's not used at this stage, but planned to become useful in a later update */ filePos[0] = 0; for (pos=1; pos> 21); } } typedef struct { ZSTD_CDict* dict; /* dictionary */ ZSTD_CCtx* zc; /* working context */ void* workPlace; /* must be ZSTD_BLOCKSIZE_MAX allocated */ } EStats_ress_t; #define MAXREPOFFSET 1024 static void ZDICT_countEStats(EStats_ress_t esr, const ZSTD_parameters* params, unsigned* countLit, unsigned* offsetcodeCount, unsigned* matchlengthCount, unsigned* litlengthCount, U32* repOffsets, const void* src, size_t srcSize, U32 notificationLevel) { size_t const blockSizeMax = MIN (ZSTD_BLOCKSIZE_MAX, 1 << params->cParams.windowLog); size_t cSize; if (srcSize > blockSizeMax) srcSize = blockSizeMax; /* protection vs large samples */ { size_t const errorCode = ZSTD_compressBegin_usingCDict(esr.zc, esr.dict); if (ZSTD_isError(errorCode)) { DISPLAYLEVEL(1, "warning : ZSTD_compressBegin_usingCDict failed \n"); return; } } cSize = ZSTD_compressBlock(esr.zc, esr.workPlace, ZSTD_BLOCKSIZE_MAX, src, srcSize); if (ZSTD_isError(cSize)) { DISPLAYLEVEL(3, "warning : could not compress sample size %u \n", (unsigned)srcSize); return; } if (cSize) { /* if == 0; block is not compressible */ const seqStore_t* const seqStorePtr = ZSTD_getSeqStore(esr.zc); /* literals stats */ { const BYTE* bytePtr; for(bytePtr = seqStorePtr->litStart; bytePtr < seqStorePtr->lit; bytePtr++) countLit[*bytePtr]++; } /* seqStats */ { U32 const nbSeq = (U32)(seqStorePtr->sequences - seqStorePtr->sequencesStart); ZSTD_seqToCodes(seqStorePtr); { const BYTE* codePtr = seqStorePtr->ofCode; U32 u; for (u=0; umlCode; U32 u; for (u=0; ullCode; U32 u; for (u=0; u= 2) { /* rep offsets */ const seqDef* const seq = seqStorePtr->sequencesStart; U32 offset1 = seq[0].offset - 3; U32 offset2 = seq[1].offset - 3; if (offset1 >= MAXREPOFFSET) offset1 = 0; if (offset2 >= MAXREPOFFSET) offset2 = 0; repOffsets[offset1] += 3; repOffsets[offset2] += 1; } } } } static size_t ZDICT_totalSampleSize(const size_t* fileSizes, unsigned nbFiles) { size_t total=0; unsigned u; for (u=0; u0; u--) { offsetCount_t tmp; if (table[u-1].count >= table[u].count) break; tmp = table[u-1]; table[u-1] = table[u]; table[u] = tmp; } } /* ZDICT_flatLit() : * rewrite `countLit` to contain a mostly flat but still compressible distribution of literals. * necessary to avoid generating a non-compressible distribution that HUF_writeCTable() cannot encode. 
*/ static void ZDICT_flatLit(unsigned* countLit) { int u; for (u=1; u<256; u++) countLit[u] = 2; countLit[0] = 4; countLit[253] = 1; countLit[254] = 1; } #define OFFCODE_MAX 30 /* only applicable to first block */ static size_t ZDICT_analyzeEntropy(void* dstBuffer, size_t maxDstSize, unsigned compressionLevel, const void* srcBuffer, const size_t* fileSizes, unsigned nbFiles, const void* dictBuffer, size_t dictBufferSize, unsigned notificationLevel) { unsigned countLit[256]; HUF_CREATE_STATIC_CTABLE(hufTable, 255); unsigned offcodeCount[OFFCODE_MAX+1]; short offcodeNCount[OFFCODE_MAX+1]; U32 offcodeMax = ZSTD_highbit32((U32)(dictBufferSize + 128 KB)); unsigned matchLengthCount[MaxML+1]; short matchLengthNCount[MaxML+1]; unsigned litLengthCount[MaxLL+1]; short litLengthNCount[MaxLL+1]; U32 repOffset[MAXREPOFFSET]; offsetCount_t bestRepOffset[ZSTD_REP_NUM+1]; EStats_ress_t esr = { NULL, NULL, NULL }; ZSTD_parameters params; U32 u, huffLog = 11, Offlog = OffFSELog, mlLog = MLFSELog, llLog = LLFSELog, total; size_t pos = 0, errorCode; size_t eSize = 0; size_t const totalSrcSize = ZDICT_totalSampleSize(fileSizes, nbFiles); size_t const averageSampleSize = totalSrcSize / (nbFiles + !nbFiles); BYTE* dstPtr = (BYTE*)dstBuffer; /* init */ DEBUGLOG(4, "ZDICT_analyzeEntropy"); if (offcodeMax>OFFCODE_MAX) { eSize = ERROR(dictionaryCreation_failed); goto _cleanup; } /* too large dictionary */ for (u=0; u<256; u++) countLit[u] = 1; /* any character must be described */ for (u=0; u<=offcodeMax; u++) offcodeCount[u] = 1; for (u=0; u<=MaxML; u++) matchLengthCount[u] = 1; for (u=0; u<=MaxLL; u++) litLengthCount[u] = 1; memset(repOffset, 0, sizeof(repOffset)); repOffset[1] = repOffset[4] = repOffset[8] = 1; memset(bestRepOffset, 0, sizeof(bestRepOffset)); if (compressionLevel==0) compressionLevel = g_compressionLevel_default; params = ZSTD_getParams(compressionLevel, averageSampleSize, dictBufferSize); esr.dict = ZSTD_createCDict_advanced(dictBuffer, dictBufferSize, ZSTD_dlm_byRef, ZSTD_dct_rawContent, params.cParams, ZSTD_defaultCMem); esr.zc = ZSTD_createCCtx(); esr.workPlace = malloc(ZSTD_BLOCKSIZE_MAX); if (!esr.dict || !esr.zc || !esr.workPlace) { eSize = ERROR(memory_allocation); DISPLAYLEVEL(1, "Not enough memory \n"); goto _cleanup; } /* collect stats on all samples */ for (u=0; u dictBufferCapacity) dictContentSize = dictBufferCapacity - hSize; { size_t const dictSize = hSize + dictContentSize; char* dictEnd = (char*)dictBuffer + dictSize; memmove(dictEnd - dictContentSize, customDictContent, dictContentSize); memcpy(dictBuffer, header, hSize); return dictSize; } } static size_t ZDICT_addEntropyTablesFromBuffer_advanced( void* dictBuffer, size_t dictContentSize, size_t dictBufferCapacity, const void* samplesBuffer, const size_t* samplesSizes, unsigned nbSamples, ZDICT_params_t params) { int const compressionLevel = (params.compressionLevel == 0) ? g_compressionLevel_default : params.compressionLevel; U32 const notificationLevel = params.notificationLevel; size_t hSize = 8; /* calculate entropy tables */ DISPLAYLEVEL(2, "\r%70s\r", ""); /* clean display line */ DISPLAYLEVEL(2, "statistics ... 
\n"); { size_t const eSize = ZDICT_analyzeEntropy((char*)dictBuffer+hSize, dictBufferCapacity-hSize, compressionLevel, samplesBuffer, samplesSizes, nbSamples, (char*)dictBuffer + dictBufferCapacity - dictContentSize, dictContentSize, notificationLevel); if (ZDICT_isError(eSize)) return eSize; hSize += eSize; } /* add dictionary header (after entropy tables) */ MEM_writeLE32(dictBuffer, ZSTD_MAGIC_DICTIONARY); { U64 const randomID = XXH64((char*)dictBuffer + dictBufferCapacity - dictContentSize, dictContentSize, 0); U32 const compliantID = (randomID % ((1U<<31)-32768)) + 32768; U32 const dictID = params.dictID ? params.dictID : compliantID; MEM_writeLE32((char*)dictBuffer+4, dictID); } if (hSize + dictContentSize < dictBufferCapacity) memmove((char*)dictBuffer + hSize, (char*)dictBuffer + dictBufferCapacity - dictContentSize, dictContentSize); return MIN(dictBufferCapacity, hSize+dictContentSize); } /* Hidden declaration for dbio.c */ size_t ZDICT_trainFromBuffer_unsafe_legacy( void* dictBuffer, size_t maxDictSize, const void* samplesBuffer, const size_t* samplesSizes, unsigned nbSamples, ZDICT_legacy_params_t params); /*! ZDICT_trainFromBuffer_unsafe_legacy() : * Warning : `samplesBuffer` must be followed by noisy guard band. * @return : size of dictionary, or an error code which can be tested with ZDICT_isError() */ size_t ZDICT_trainFromBuffer_unsafe_legacy( void* dictBuffer, size_t maxDictSize, const void* samplesBuffer, const size_t* samplesSizes, unsigned nbSamples, ZDICT_legacy_params_t params) { U32 const dictListSize = MAX(MAX(DICTLISTSIZE_DEFAULT, nbSamples), (U32)(maxDictSize/16)); dictItem* const dictList = (dictItem*)malloc(dictListSize * sizeof(*dictList)); unsigned const selectivity = params.selectivityLevel == 0 ? g_selectivity_default : params.selectivityLevel; unsigned const minRep = (selectivity > 30) ? 
MINRATIO : nbSamples >> selectivity; size_t const targetDictSize = maxDictSize; size_t const samplesBuffSize = ZDICT_totalSampleSize(samplesSizes, nbSamples); size_t dictSize = 0; U32 const notificationLevel = params.zParams.notificationLevel; /* checks */ if (!dictList) return ERROR(memory_allocation); if (maxDictSize < ZDICT_DICTSIZE_MIN) { free(dictList); return ERROR(dstSize_tooSmall); } /* requested dictionary size is too small */ if (samplesBuffSize < ZDICT_MIN_SAMPLES_SIZE) { free(dictList); return ERROR(dictionaryCreation_failed); } /* not enough source to create dictionary */ /* init */ ZDICT_initDictItem(dictList); /* build dictionary */ ZDICT_trainBuffer_legacy(dictList, dictListSize, samplesBuffer, samplesBuffSize, samplesSizes, nbSamples, minRep, notificationLevel); /* display best matches */ if (params.zParams.notificationLevel>= 3) { unsigned const nb = MIN(25, dictList[0].pos); unsigned const dictContentSize = ZDICT_dictSize(dictList); unsigned u; DISPLAYLEVEL(3, "\n %u segments found, of total size %u \n", (unsigned)dictList[0].pos-1, dictContentSize); DISPLAYLEVEL(3, "list %u best segments \n", nb-1); for (u=1; u samplesBuffSize) || ((pos + length) > samplesBuffSize)) { free(dictList); return ERROR(GENERIC); /* should never happen */ } DISPLAYLEVEL(3, "%3u:%3u bytes at pos %8u, savings %7u bytes |", u, length, pos, (unsigned)dictList[u].savings); ZDICT_printHex((const char*)samplesBuffer+pos, printedLength); DISPLAYLEVEL(3, "| \n"); } } /* create dictionary */ { unsigned dictContentSize = ZDICT_dictSize(dictList); if (dictContentSize < ZDICT_CONTENTSIZE_MIN) { free(dictList); return ERROR(dictionaryCreation_failed); } /* dictionary content too small */ if (dictContentSize < targetDictSize/4) { DISPLAYLEVEL(2, "! warning : selected content significantly smaller than requested (%u < %u) \n", dictContentSize, (unsigned)maxDictSize); if (samplesBuffSize < 10 * targetDictSize) DISPLAYLEVEL(2, "! consider increasing the number of samples (total size : %u MB)\n", (unsigned)(samplesBuffSize>>20)); if (minRep > MINRATIO) { DISPLAYLEVEL(2, "! consider increasing selectivity to produce larger dictionary (-s%u) \n", selectivity+1); DISPLAYLEVEL(2, "! note : larger dictionaries are not necessarily better, test its efficiency on samples \n"); } } if ((dictContentSize > targetDictSize*3) && (nbSamples > 2*MINRATIO) && (selectivity>1)) { unsigned proposedSelectivity = selectivity-1; while ((nbSamples >> proposedSelectivity) <= MINRATIO) { proposedSelectivity--; } DISPLAYLEVEL(2, "! note : calculated dictionary significantly larger than requested (%u > %u) \n", dictContentSize, (unsigned)maxDictSize); DISPLAYLEVEL(2, "! consider increasing dictionary size, or produce denser dictionary (-s%u) \n", proposedSelectivity); DISPLAYLEVEL(2, "! 
always test dictionary efficiency on real samples \n"); } /* limit dictionary size */ { U32 const max = dictList->pos; /* convention : nb of useful elts within dictList */ U32 currentSize = 0; U32 n; for (n=1; n targetDictSize) { currentSize -= dictList[n].length; break; } } dictList->pos = n; dictContentSize = currentSize; } /* build dict content */ { U32 u; BYTE* ptr = (BYTE*)dictBuffer + maxDictSize; for (u=1; upos; u++) { U32 l = dictList[u].length; ptr -= l; if (ptr<(BYTE*)dictBuffer) { free(dictList); return ERROR(GENERIC); } /* should not happen */ memcpy(ptr, (const char*)samplesBuffer+dictList[u].pos, l); } } dictSize = ZDICT_addEntropyTablesFromBuffer_advanced(dictBuffer, dictContentSize, maxDictSize, samplesBuffer, samplesSizes, nbSamples, params.zParams); } /* clean up */ free(dictList); return dictSize; } /* ZDICT_trainFromBuffer_legacy() : * issue : samplesBuffer need to be followed by a noisy guard band. * work around : duplicate the buffer, and add the noise */ size_t ZDICT_trainFromBuffer_legacy(void* dictBuffer, size_t dictBufferCapacity, const void* samplesBuffer, const size_t* samplesSizes, unsigned nbSamples, ZDICT_legacy_params_t params) { size_t result; void* newBuff; size_t const sBuffSize = ZDICT_totalSampleSize(samplesSizes, nbSamples); if (sBuffSize < ZDICT_MIN_SAMPLES_SIZE) return 0; /* not enough content => no dictionary */ newBuff = malloc(sBuffSize + NOISELENGTH); if (!newBuff) return ERROR(memory_allocation); memcpy(newBuff, samplesBuffer, sBuffSize); ZDICT_fillNoise((char*)newBuff + sBuffSize, NOISELENGTH); /* guard band, for end of buffer condition */ result = ZDICT_trainFromBuffer_unsafe_legacy(dictBuffer, dictBufferCapacity, newBuff, samplesSizes, nbSamples, params); free(newBuff); return result; } size_t ZDICT_trainFromBuffer(void* dictBuffer, size_t dictBufferCapacity, const void* samplesBuffer, const size_t* samplesSizes, unsigned nbSamples) { ZDICT_fastCover_params_t params; DEBUGLOG(3, "ZDICT_trainFromBuffer"); memset(¶ms, 0, sizeof(params)); params.d = 8; params.steps = 4; /* Default to level 6 since no compression level information is available */ params.zParams.compressionLevel = 3; #if defined(DEBUGLEVEL) && (DEBUGLEVEL>=1) params.zParams.notificationLevel = DEBUGLEVEL; #endif return ZDICT_optimizeTrainFromBuffer_fastCover(dictBuffer, dictBufferCapacity, samplesBuffer, samplesSizes, nbSamples, ¶ms); } size_t ZDICT_addEntropyTablesFromBuffer(void* dictBuffer, size_t dictContentSize, size_t dictBufferCapacity, const void* samplesBuffer, const size_t* samplesSizes, unsigned nbSamples) { ZDICT_params_t params; memset(¶ms, 0, sizeof(params)); return ZDICT_addEntropyTablesFromBuffer_advanced(dictBuffer, dictContentSize, dictBufferCapacity, samplesBuffer, samplesSizes, nbSamples, params); } borgbackup-1.1.15/src/borg/algorithms/zstd/lib/dictBuilder/cover.h0000644000175000017500000001156613771325506025062 0ustar useruser00000000000000/* * Copyright (c) 2017-2020, Facebook, Inc. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the * LICENSE file in the root directory of this source tree) and the GPLv2 (found * in the COPYING file in the root directory of this source tree). * You may select, at your option, one of the above-listed licenses. 
*/ #include /* fprintf */ #include /* malloc, free, qsort */ #include /* memset */ #include /* clock */ #include "../common/mem.h" /* read */ #include "../common/pool.h" #include "../common/threading.h" #include "../common/zstd_internal.h" /* includes zstd.h */ #ifndef ZDICT_STATIC_LINKING_ONLY #define ZDICT_STATIC_LINKING_ONLY #endif #include "zdict.h" /** * COVER_best_t is used for two purposes: * 1. Synchronizing threads. * 2. Saving the best parameters and dictionary. * * All of the methods except COVER_best_init() are thread safe if zstd is * compiled with multithreaded support. */ typedef struct COVER_best_s { ZSTD_pthread_mutex_t mutex; ZSTD_pthread_cond_t cond; size_t liveJobs; void *dict; size_t dictSize; ZDICT_cover_params_t parameters; size_t compressedSize; } COVER_best_t; /** * A segment is a range in the source as well as the score of the segment. */ typedef struct { U32 begin; U32 end; U32 score; } COVER_segment_t; /** *Number of epochs and size of each epoch. */ typedef struct { U32 num; U32 size; } COVER_epoch_info_t; /** * Struct used for the dictionary selection function. */ typedef struct COVER_dictSelection { BYTE* dictContent; size_t dictSize; size_t totalCompressedSize; } COVER_dictSelection_t; /** * Computes the number of epochs and the size of each epoch. * We will make sure that each epoch gets at least 10 * k bytes. * * The COVER algorithms divide the data up into epochs of equal size and * select one segment from each epoch. * * @param maxDictSize The maximum allowed dictionary size. * @param nbDmers The number of dmers we are training on. * @param k The parameter k (segment size). * @param passes The target number of passes over the dmer corpus. * More passes means a better dictionary. */ COVER_epoch_info_t COVER_computeEpochs(U32 maxDictSize, U32 nbDmers, U32 k, U32 passes); /** * Warns the user when their corpus is too small. */ void COVER_warnOnSmallCorpus(size_t maxDictSize, size_t nbDmers, int displayLevel); /** * Checks total compressed size of a dictionary */ size_t COVER_checkTotalCompressedSize(const ZDICT_cover_params_t parameters, const size_t *samplesSizes, const BYTE *samples, size_t *offsets, size_t nbTrainSamples, size_t nbSamples, BYTE *const dict, size_t dictBufferCapacity); /** * Returns the sum of the sample sizes. */ size_t COVER_sum(const size_t *samplesSizes, unsigned nbSamples) ; /** * Initialize the `COVER_best_t`. */ void COVER_best_init(COVER_best_t *best); /** * Wait until liveJobs == 0. */ void COVER_best_wait(COVER_best_t *best); /** * Call COVER_best_wait() and then destroy the COVER_best_t. */ void COVER_best_destroy(COVER_best_t *best); /** * Called when a thread is about to be launched. * Increments liveJobs. */ void COVER_best_start(COVER_best_t *best); /** * Called when a thread finishes executing, both on error or success. * Decrements liveJobs and signals any waiting threads if liveJobs == 0. * If this dictionary is the best so far save it and its parameters. */ void COVER_best_finish(COVER_best_t *best, ZDICT_cover_params_t parameters, COVER_dictSelection_t selection); /** * Error function for COVER_selectDict function. Checks if the return * value is an error. */ unsigned COVER_dictSelectionIsError(COVER_dictSelection_t selection); /** * Error function for COVER_selectDict function. Returns a struct where * return.totalCompressedSize is a ZSTD error. */ COVER_dictSelection_t COVER_dictSelectionError(size_t error); /** * Always call after selectDict is called to free up used memory from * newly created dictionary. 
*/ void COVER_dictSelectionFree(COVER_dictSelection_t selection); /** * Called to finalize the dictionary and select one based on whether or not * the shrink-dict flag was enabled. If enabled the dictionary used is the * smallest dictionary within a specified regression of the compressed size * from the largest dictionary. */ COVER_dictSelection_t COVER_selectDict(BYTE* customDictContent, size_t dictContentSize, const BYTE* samplesBuffer, const size_t* samplesSizes, unsigned nbFinalizeSamples, size_t nbCheckSamples, size_t nbSamples, ZDICT_cover_params_t params, size_t* offsets, size_t totalCompressedSize); borgbackup-1.1.15/src/borg/algorithms/zstd/lib/decompress/0000755000175000017500000000000013771325773023502 5ustar useruser00000000000000borgbackup-1.1.15/src/borg/algorithms/zstd/lib/decompress/zstd_ddict.c0000644000175000017500000002161513771325506026000 0ustar useruser00000000000000/* * Copyright (c) 2016-2020, Yann Collet, Facebook, Inc. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the * LICENSE file in the root directory of this source tree) and the GPLv2 (found * in the COPYING file in the root directory of this source tree). * You may select, at your option, one of the above-listed licenses. */ /* zstd_ddict.c : * concentrates all logic that needs to know the internals of ZSTD_DDict object */ /*-******************************************************* * Dependencies *********************************************************/ #include /* memcpy, memmove, memset */ #include "../common/cpu.h" /* bmi2 */ #include "../common/mem.h" /* low level memory routines */ #define FSE_STATIC_LINKING_ONLY #include "../common/fse.h" #define HUF_STATIC_LINKING_ONLY #include "../common/huf.h" #include "zstd_decompress_internal.h" #include "zstd_ddict.h" #if defined(ZSTD_LEGACY_SUPPORT) && (ZSTD_LEGACY_SUPPORT>=1) # include "../legacy/zstd_legacy.h" #endif /*-******************************************************* * Types *********************************************************/ struct ZSTD_DDict_s { void* dictBuffer; const void* dictContent; size_t dictSize; ZSTD_entropyDTables_t entropy; U32 dictID; U32 entropyPresent; ZSTD_customMem cMem; }; /* typedef'd to ZSTD_DDict within "zstd.h" */ const void* ZSTD_DDict_dictContent(const ZSTD_DDict* ddict) { assert(ddict != NULL); return ddict->dictContent; } size_t ZSTD_DDict_dictSize(const ZSTD_DDict* ddict) { assert(ddict != NULL); return ddict->dictSize; } void ZSTD_copyDDictParameters(ZSTD_DCtx* dctx, const ZSTD_DDict* ddict) { DEBUGLOG(4, "ZSTD_copyDDictParameters"); assert(dctx != NULL); assert(ddict != NULL); dctx->dictID = ddict->dictID; dctx->prefixStart = ddict->dictContent; dctx->virtualStart = ddict->dictContent; dctx->dictEnd = (const BYTE*)ddict->dictContent + ddict->dictSize; dctx->previousDstEnd = dctx->dictEnd; #ifdef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION dctx->dictContentBeginForFuzzing = dctx->prefixStart; dctx->dictContentEndForFuzzing = dctx->previousDstEnd; #endif if (ddict->entropyPresent) { dctx->litEntropy = 1; dctx->fseEntropy = 1; dctx->LLTptr = ddict->entropy.LLTable; dctx->MLTptr = ddict->entropy.MLTable; dctx->OFTptr = ddict->entropy.OFTable; dctx->HUFptr = ddict->entropy.hufTable; dctx->entropy.rep[0] = ddict->entropy.rep[0]; dctx->entropy.rep[1] = ddict->entropy.rep[1]; dctx->entropy.rep[2] = ddict->entropy.rep[2]; } else { dctx->litEntropy = 0; dctx->fseEntropy = 0; } } static size_t ZSTD_loadEntropy_intoDDict(ZSTD_DDict* ddict, ZSTD_dictContentType_e dictContentType) 
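/* Parses the dictionary header for the DDict: rawContent dictionaries skip
 * entropy loading entirely; a buffer shorter than 8 bytes or without the
 * ZSTD_MAGIC_DICTIONARY magic is treated as pure content (an error only when
 * a full dictionary was requested); otherwise the dictID and the entropy
 * tables are loaded from it. */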
{ ddict->dictID = 0; ddict->entropyPresent = 0; if (dictContentType == ZSTD_dct_rawContent) return 0; if (ddict->dictSize < 8) { if (dictContentType == ZSTD_dct_fullDict) return ERROR(dictionary_corrupted); /* only accept specified dictionaries */ return 0; /* pure content mode */ } { U32 const magic = MEM_readLE32(ddict->dictContent); if (magic != ZSTD_MAGIC_DICTIONARY) { if (dictContentType == ZSTD_dct_fullDict) return ERROR(dictionary_corrupted); /* only accept specified dictionaries */ return 0; /* pure content mode */ } } ddict->dictID = MEM_readLE32((const char*)ddict->dictContent + ZSTD_FRAMEIDSIZE); /* load entropy tables */ RETURN_ERROR_IF(ZSTD_isError(ZSTD_loadDEntropy( &ddict->entropy, ddict->dictContent, ddict->dictSize)), dictionary_corrupted, ""); ddict->entropyPresent = 1; return 0; } static size_t ZSTD_initDDict_internal(ZSTD_DDict* ddict, const void* dict, size_t dictSize, ZSTD_dictLoadMethod_e dictLoadMethod, ZSTD_dictContentType_e dictContentType) { if ((dictLoadMethod == ZSTD_dlm_byRef) || (!dict) || (!dictSize)) { ddict->dictBuffer = NULL; ddict->dictContent = dict; if (!dict) dictSize = 0; } else { void* const internalBuffer = ZSTD_malloc(dictSize, ddict->cMem); ddict->dictBuffer = internalBuffer; ddict->dictContent = internalBuffer; if (!internalBuffer) return ERROR(memory_allocation); memcpy(internalBuffer, dict, dictSize); } ddict->dictSize = dictSize; ddict->entropy.hufTable[0] = (HUF_DTable)((HufLog)*0x1000001); /* cover both little and big endian */ /* parse dictionary content */ FORWARD_IF_ERROR( ZSTD_loadEntropy_intoDDict(ddict, dictContentType) , ""); return 0; } ZSTD_DDict* ZSTD_createDDict_advanced(const void* dict, size_t dictSize, ZSTD_dictLoadMethod_e dictLoadMethod, ZSTD_dictContentType_e dictContentType, ZSTD_customMem customMem) { if (!customMem.customAlloc ^ !customMem.customFree) return NULL; { ZSTD_DDict* const ddict = (ZSTD_DDict*) ZSTD_malloc(sizeof(ZSTD_DDict), customMem); if (ddict == NULL) return NULL; ddict->cMem = customMem; { size_t const initResult = ZSTD_initDDict_internal(ddict, dict, dictSize, dictLoadMethod, dictContentType); if (ZSTD_isError(initResult)) { ZSTD_freeDDict(ddict); return NULL; } } return ddict; } } /*! ZSTD_createDDict() : * Create a digested dictionary, to start decompression without startup delay. * `dict` content is copied inside DDict. * Consequently, `dict` can be released after `ZSTD_DDict` creation */ ZSTD_DDict* ZSTD_createDDict(const void* dict, size_t dictSize) { ZSTD_customMem const allocator = { NULL, NULL, NULL }; return ZSTD_createDDict_advanced(dict, dictSize, ZSTD_dlm_byCopy, ZSTD_dct_auto, allocator); } /*! ZSTD_createDDict_byReference() : * Create a digested dictionary, to start decompression without startup delay. * Dictionary content is simply referenced, it will be accessed during decompression. * Warning : dictBuffer must outlive DDict (DDict must be freed before dictBuffer) */ ZSTD_DDict* ZSTD_createDDict_byReference(const void* dictBuffer, size_t dictSize) { ZSTD_customMem const allocator = { NULL, NULL, NULL }; return ZSTD_createDDict_advanced(dictBuffer, dictSize, ZSTD_dlm_byRef, ZSTD_dct_auto, allocator); } const ZSTD_DDict* ZSTD_initStaticDDict( void* sBuffer, size_t sBufferSize, const void* dict, size_t dictSize, ZSTD_dictLoadMethod_e dictLoadMethod, ZSTD_dictContentType_e dictContentType) { size_t const neededSpace = sizeof(ZSTD_DDict) + (dictLoadMethod == ZSTD_dlm_byRef ? 
0 : dictSize); ZSTD_DDict* const ddict = (ZSTD_DDict*)sBuffer; assert(sBuffer != NULL); assert(dict != NULL); if ((size_t)sBuffer & 7) return NULL; /* 8-aligned */ if (sBufferSize < neededSpace) return NULL; if (dictLoadMethod == ZSTD_dlm_byCopy) { memcpy(ddict+1, dict, dictSize); /* local copy */ dict = ddict+1; } if (ZSTD_isError( ZSTD_initDDict_internal(ddict, dict, dictSize, ZSTD_dlm_byRef, dictContentType) )) return NULL; return ddict; } size_t ZSTD_freeDDict(ZSTD_DDict* ddict) { if (ddict==NULL) return 0; /* support free on NULL */ { ZSTD_customMem const cMem = ddict->cMem; ZSTD_free(ddict->dictBuffer, cMem); ZSTD_free(ddict, cMem); return 0; } } /*! ZSTD_estimateDDictSize() : * Estimate amount of memory that will be needed to create a dictionary for decompression. * Note : dictionary created by reference using ZSTD_dlm_byRef are smaller */ size_t ZSTD_estimateDDictSize(size_t dictSize, ZSTD_dictLoadMethod_e dictLoadMethod) { return sizeof(ZSTD_DDict) + (dictLoadMethod == ZSTD_dlm_byRef ? 0 : dictSize); } size_t ZSTD_sizeof_DDict(const ZSTD_DDict* ddict) { if (ddict==NULL) return 0; /* support sizeof on NULL */ return sizeof(*ddict) + (ddict->dictBuffer ? ddict->dictSize : 0) ; } /*! ZSTD_getDictID_fromDDict() : * Provides the dictID of the dictionary loaded into `ddict`. * If @return == 0, the dictionary is not conformant to Zstandard specification, or empty. * Non-conformant dictionaries can still be loaded, but as content-only dictionaries. */ unsigned ZSTD_getDictID_fromDDict(const ZSTD_DDict* ddict) { if (ddict==NULL) return 0; return ZSTD_getDictID_fromDict(ddict->dictContent, ddict->dictSize); } borgbackup-1.1.15/src/borg/algorithms/zstd/lib/decompress/zstd_decompress.c0000644000175000017500000023124413771325506027056 0ustar useruser00000000000000/* * Copyright (c) 2016-2020, Yann Collet, Facebook, Inc. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the * LICENSE file in the root directory of this source tree) and the GPLv2 (found * in the COPYING file in the root directory of this source tree). * You may select, at your option, one of the above-listed licenses. */ /* *************************************************************** * Tuning parameters *****************************************************************/ /*! * HEAPMODE : * Select how default decompression function ZSTD_decompress() allocates its context, * on stack (0), or into heap (1, default; requires malloc()). * Note that functions with explicit context such as ZSTD_decompressDCtx() are unaffected. */ #ifndef ZSTD_HEAPMODE # define ZSTD_HEAPMODE 1 #endif /*! * LEGACY_SUPPORT : * if set to 1+, ZSTD_decompress() can decode older formats (v0.1+) */ #ifndef ZSTD_LEGACY_SUPPORT # define ZSTD_LEGACY_SUPPORT 0 #endif /*! * MAXWINDOWSIZE_DEFAULT : * maximum window size accepted by DStream __by default__. * Frames requiring more memory will be rejected. * It's possible to set a different limit using ZSTD_DCtx_setMaxWindowSize(). */ #ifndef ZSTD_MAXWINDOWSIZE_DEFAULT # define ZSTD_MAXWINDOWSIZE_DEFAULT (((U32)1 << ZSTD_WINDOWLOG_LIMIT_DEFAULT) + 1) #endif /*! * NO_FORWARD_PROGRESS_MAX : * maximum allowed nb of calls to ZSTD_decompressStream() * without any forward progress * (defined as: no byte read from input, and no byte flushed to output) * before triggering an error. 
*/ #ifndef ZSTD_NO_FORWARD_PROGRESS_MAX # define ZSTD_NO_FORWARD_PROGRESS_MAX 16 #endif /*-******************************************************* * Dependencies *********************************************************/ #include /* memcpy, memmove, memset */ #include "../common/cpu.h" /* bmi2 */ #include "../common/mem.h" /* low level memory routines */ #define FSE_STATIC_LINKING_ONLY #include "../common/fse.h" #define HUF_STATIC_LINKING_ONLY #include "../common/huf.h" #include "../common/zstd_internal.h" /* blockProperties_t */ #include "zstd_decompress_internal.h" /* ZSTD_DCtx */ #include "zstd_ddict.h" /* ZSTD_DDictDictContent */ #include "zstd_decompress_block.h" /* ZSTD_decompressBlock_internal */ #if defined(ZSTD_LEGACY_SUPPORT) && (ZSTD_LEGACY_SUPPORT>=1) # include "../legacy/zstd_legacy.h" #endif /*-************************************************************* * Context management ***************************************************************/ size_t ZSTD_sizeof_DCtx (const ZSTD_DCtx* dctx) { if (dctx==NULL) return 0; /* support sizeof NULL */ return sizeof(*dctx) + ZSTD_sizeof_DDict(dctx->ddictLocal) + dctx->inBuffSize + dctx->outBuffSize; } size_t ZSTD_estimateDCtxSize(void) { return sizeof(ZSTD_DCtx); } static size_t ZSTD_startingInputLength(ZSTD_format_e format) { size_t const startingInputLength = ZSTD_FRAMEHEADERSIZE_PREFIX(format); /* only supports formats ZSTD_f_zstd1 and ZSTD_f_zstd1_magicless */ assert( (format == ZSTD_f_zstd1) || (format == ZSTD_f_zstd1_magicless) ); return startingInputLength; } static void ZSTD_initDCtx_internal(ZSTD_DCtx* dctx) { dctx->format = ZSTD_f_zstd1; /* ZSTD_decompressBegin() invokes ZSTD_startingInputLength() with argument dctx->format */ dctx->staticSize = 0; dctx->maxWindowSize = ZSTD_MAXWINDOWSIZE_DEFAULT; dctx->ddict = NULL; dctx->ddictLocal = NULL; dctx->dictEnd = NULL; dctx->ddictIsCold = 0; dctx->dictUses = ZSTD_dont_use; dctx->inBuff = NULL; dctx->inBuffSize = 0; dctx->outBuffSize = 0; dctx->streamStage = zdss_init; dctx->legacyContext = NULL; dctx->previousLegacyVersion = 0; dctx->noForwardProgress = 0; dctx->oversizedDuration = 0; dctx->bmi2 = ZSTD_cpuid_bmi2(ZSTD_cpuid()); dctx->outBufferMode = ZSTD_obm_buffered; #ifdef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION dctx->dictContentEndForFuzzing = NULL; #endif } ZSTD_DCtx* ZSTD_initStaticDCtx(void *workspace, size_t workspaceSize) { ZSTD_DCtx* const dctx = (ZSTD_DCtx*) workspace; if ((size_t)workspace & 7) return NULL; /* 8-aligned */ if (workspaceSize < sizeof(ZSTD_DCtx)) return NULL; /* minimum size */ ZSTD_initDCtx_internal(dctx); dctx->staticSize = workspaceSize; dctx->inBuff = (char*)(dctx+1); return dctx; } ZSTD_DCtx* ZSTD_createDCtx_advanced(ZSTD_customMem customMem) { if (!customMem.customAlloc ^ !customMem.customFree) return NULL; { ZSTD_DCtx* const dctx = (ZSTD_DCtx*)ZSTD_malloc(sizeof(*dctx), customMem); if (!dctx) return NULL; dctx->customMem = customMem; ZSTD_initDCtx_internal(dctx); return dctx; } } ZSTD_DCtx* ZSTD_createDCtx(void) { DEBUGLOG(3, "ZSTD_createDCtx"); return ZSTD_createDCtx_advanced(ZSTD_defaultCMem); } static void ZSTD_clearDict(ZSTD_DCtx* dctx) { ZSTD_freeDDict(dctx->ddictLocal); dctx->ddictLocal = NULL; dctx->ddict = NULL; dctx->dictUses = ZSTD_dont_use; } size_t ZSTD_freeDCtx(ZSTD_DCtx* dctx) { if (dctx==NULL) return 0; /* support free on NULL */ RETURN_ERROR_IF(dctx->staticSize, memory_allocation, "not compatible with static DCtx"); { ZSTD_customMem const cMem = dctx->customMem; ZSTD_clearDict(dctx); ZSTD_free(dctx->inBuff, cMem); dctx->inBuff = NULL; 
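/* release any legacy streaming context that may still be allocated before freeing the DCtx itself */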
#if defined(ZSTD_LEGACY_SUPPORT) && (ZSTD_LEGACY_SUPPORT >= 1) if (dctx->legacyContext) ZSTD_freeLegacyStreamContext(dctx->legacyContext, dctx->previousLegacyVersion); #endif ZSTD_free(dctx, cMem); return 0; } } /* no longer useful */ void ZSTD_copyDCtx(ZSTD_DCtx* dstDCtx, const ZSTD_DCtx* srcDCtx) { size_t const toCopy = (size_t)((char*)(&dstDCtx->inBuff) - (char*)dstDCtx); memcpy(dstDCtx, srcDCtx, toCopy); /* no need to copy workspace */ } /*-************************************************************* * Frame header decoding ***************************************************************/ /*! ZSTD_isFrame() : * Tells if the content of `buffer` starts with a valid Frame Identifier. * Note : Frame Identifier is 4 bytes. If `size < 4`, @return will always be 0. * Note 2 : Legacy Frame Identifiers are considered valid only if Legacy Support is enabled. * Note 3 : Skippable Frame Identifiers are considered valid. */ unsigned ZSTD_isFrame(const void* buffer, size_t size) { if (size < ZSTD_FRAMEIDSIZE) return 0; { U32 const magic = MEM_readLE32(buffer); if (magic == ZSTD_MAGICNUMBER) return 1; if ((magic & ZSTD_MAGIC_SKIPPABLE_MASK) == ZSTD_MAGIC_SKIPPABLE_START) return 1; } #if defined(ZSTD_LEGACY_SUPPORT) && (ZSTD_LEGACY_SUPPORT >= 1) if (ZSTD_isLegacy(buffer, size)) return 1; #endif return 0; } /** ZSTD_frameHeaderSize_internal() : * srcSize must be large enough to reach header size fields. * note : only works for formats ZSTD_f_zstd1 and ZSTD_f_zstd1_magicless. * @return : size of the Frame Header * or an error code, which can be tested with ZSTD_isError() */ static size_t ZSTD_frameHeaderSize_internal(const void* src, size_t srcSize, ZSTD_format_e format) { size_t const minInputSize = ZSTD_startingInputLength(format); RETURN_ERROR_IF(srcSize < minInputSize, srcSize_wrong, ""); { BYTE const fhd = ((const BYTE*)src)[minInputSize-1]; U32 const dictID= fhd & 3; U32 const singleSegment = (fhd >> 5) & 1; U32 const fcsId = fhd >> 6; return minInputSize + !singleSegment + ZSTD_did_fieldSize[dictID] + ZSTD_fcs_fieldSize[fcsId] + (singleSegment && !fcsId); } } /** ZSTD_frameHeaderSize() : * srcSize must be >= ZSTD_frameHeaderSize_prefix. * @return : size of the Frame Header, * or an error code (if srcSize is too small) */ size_t ZSTD_frameHeaderSize(const void* src, size_t srcSize) { return ZSTD_frameHeaderSize_internal(src, srcSize, ZSTD_f_zstd1); } /** ZSTD_getFrameHeader_advanced() : * decode Frame Header, or require larger `srcSize`. 
* note : only works for formats ZSTD_f_zstd1 and ZSTD_f_zstd1_magicless * @return : 0, `zfhPtr` is correctly filled, * >0, `srcSize` is too small, value is wanted `srcSize` amount, * or an error code, which can be tested using ZSTD_isError() */ size_t ZSTD_getFrameHeader_advanced(ZSTD_frameHeader* zfhPtr, const void* src, size_t srcSize, ZSTD_format_e format) { const BYTE* ip = (const BYTE*)src; size_t const minInputSize = ZSTD_startingInputLength(format); memset(zfhPtr, 0, sizeof(*zfhPtr)); /* not strictly necessary, but static analyzer do not understand that zfhPtr is only going to be read only if return value is zero, since they are 2 different signals */ if (srcSize < minInputSize) return minInputSize; RETURN_ERROR_IF(src==NULL, GENERIC, "invalid parameter"); if ( (format != ZSTD_f_zstd1_magicless) && (MEM_readLE32(src) != ZSTD_MAGICNUMBER) ) { if ((MEM_readLE32(src) & ZSTD_MAGIC_SKIPPABLE_MASK) == ZSTD_MAGIC_SKIPPABLE_START) { /* skippable frame */ if (srcSize < ZSTD_SKIPPABLEHEADERSIZE) return ZSTD_SKIPPABLEHEADERSIZE; /* magic number + frame length */ memset(zfhPtr, 0, sizeof(*zfhPtr)); zfhPtr->frameContentSize = MEM_readLE32((const char *)src + ZSTD_FRAMEIDSIZE); zfhPtr->frameType = ZSTD_skippableFrame; return 0; } RETURN_ERROR(prefix_unknown, ""); } /* ensure there is enough `srcSize` to fully read/decode frame header */ { size_t const fhsize = ZSTD_frameHeaderSize_internal(src, srcSize, format); if (srcSize < fhsize) return fhsize; zfhPtr->headerSize = (U32)fhsize; } { BYTE const fhdByte = ip[minInputSize-1]; size_t pos = minInputSize; U32 const dictIDSizeCode = fhdByte&3; U32 const checksumFlag = (fhdByte>>2)&1; U32 const singleSegment = (fhdByte>>5)&1; U32 const fcsID = fhdByte>>6; U64 windowSize = 0; U32 dictID = 0; U64 frameContentSize = ZSTD_CONTENTSIZE_UNKNOWN; RETURN_ERROR_IF((fhdByte & 0x08) != 0, frameParameter_unsupported, "reserved bits, must be zero"); if (!singleSegment) { BYTE const wlByte = ip[pos++]; U32 const windowLog = (wlByte >> 3) + ZSTD_WINDOWLOG_ABSOLUTEMIN; RETURN_ERROR_IF(windowLog > ZSTD_WINDOWLOG_MAX, frameParameter_windowTooLarge, ""); windowSize = (1ULL << windowLog); windowSize += (windowSize >> 3) * (wlByte&7); } switch(dictIDSizeCode) { default: assert(0); /* impossible */ case 0 : break; case 1 : dictID = ip[pos]; pos++; break; case 2 : dictID = MEM_readLE16(ip+pos); pos+=2; break; case 3 : dictID = MEM_readLE32(ip+pos); pos+=4; break; } switch(fcsID) { default: assert(0); /* impossible */ case 0 : if (singleSegment) frameContentSize = ip[pos]; break; case 1 : frameContentSize = MEM_readLE16(ip+pos)+256; break; case 2 : frameContentSize = MEM_readLE32(ip+pos); break; case 3 : frameContentSize = MEM_readLE64(ip+pos); break; } if (singleSegment) windowSize = frameContentSize; zfhPtr->frameType = ZSTD_frame; zfhPtr->frameContentSize = frameContentSize; zfhPtr->windowSize = windowSize; zfhPtr->blockSizeMax = (unsigned) MIN(windowSize, ZSTD_BLOCKSIZE_MAX); zfhPtr->dictID = dictID; zfhPtr->checksumFlag = checksumFlag; } return 0; } /** ZSTD_getFrameHeader() : * decode Frame Header, or require larger `srcSize`. * note : this function does not consume input, it only reads it. 
* @return : 0, `zfhPtr` is correctly filled, * >0, `srcSize` is too small, value is wanted `srcSize` amount, * or an error code, which can be tested using ZSTD_isError() */ size_t ZSTD_getFrameHeader(ZSTD_frameHeader* zfhPtr, const void* src, size_t srcSize) { return ZSTD_getFrameHeader_advanced(zfhPtr, src, srcSize, ZSTD_f_zstd1); } /** ZSTD_getFrameContentSize() : * compatible with legacy mode * @return : decompressed size of the single frame pointed to be `src` if known, otherwise * - ZSTD_CONTENTSIZE_UNKNOWN if the size cannot be determined * - ZSTD_CONTENTSIZE_ERROR if an error occurred (e.g. invalid magic number, srcSize too small) */ unsigned long long ZSTD_getFrameContentSize(const void *src, size_t srcSize) { #if defined(ZSTD_LEGACY_SUPPORT) && (ZSTD_LEGACY_SUPPORT >= 1) if (ZSTD_isLegacy(src, srcSize)) { unsigned long long const ret = ZSTD_getDecompressedSize_legacy(src, srcSize); return ret == 0 ? ZSTD_CONTENTSIZE_UNKNOWN : ret; } #endif { ZSTD_frameHeader zfh; if (ZSTD_getFrameHeader(&zfh, src, srcSize) != 0) return ZSTD_CONTENTSIZE_ERROR; if (zfh.frameType == ZSTD_skippableFrame) { return 0; } else { return zfh.frameContentSize; } } } static size_t readSkippableFrameSize(void const* src, size_t srcSize) { size_t const skippableHeaderSize = ZSTD_SKIPPABLEHEADERSIZE; U32 sizeU32; RETURN_ERROR_IF(srcSize < ZSTD_SKIPPABLEHEADERSIZE, srcSize_wrong, ""); sizeU32 = MEM_readLE32((BYTE const*)src + ZSTD_FRAMEIDSIZE); RETURN_ERROR_IF((U32)(sizeU32 + ZSTD_SKIPPABLEHEADERSIZE) < sizeU32, frameParameter_unsupported, ""); { size_t const skippableSize = skippableHeaderSize + sizeU32; RETURN_ERROR_IF(skippableSize > srcSize, srcSize_wrong, ""); return skippableSize; } } /** ZSTD_findDecompressedSize() : * compatible with legacy mode * `srcSize` must be the exact length of some number of ZSTD compressed and/or * skippable frames * @return : decompressed size of the frames contained */ unsigned long long ZSTD_findDecompressedSize(const void* src, size_t srcSize) { unsigned long long totalDstSize = 0; while (srcSize >= ZSTD_startingInputLength(ZSTD_f_zstd1)) { U32 const magicNumber = MEM_readLE32(src); if ((magicNumber & ZSTD_MAGIC_SKIPPABLE_MASK) == ZSTD_MAGIC_SKIPPABLE_START) { size_t const skippableSize = readSkippableFrameSize(src, srcSize); if (ZSTD_isError(skippableSize)) { return ZSTD_CONTENTSIZE_ERROR; } assert(skippableSize <= srcSize); src = (const BYTE *)src + skippableSize; srcSize -= skippableSize; continue; } { unsigned long long const ret = ZSTD_getFrameContentSize(src, srcSize); if (ret >= ZSTD_CONTENTSIZE_ERROR) return ret; /* check for overflow */ if (totalDstSize + ret < totalDstSize) return ZSTD_CONTENTSIZE_ERROR; totalDstSize += ret; } { size_t const frameSrcSize = ZSTD_findFrameCompressedSize(src, srcSize); if (ZSTD_isError(frameSrcSize)) { return ZSTD_CONTENTSIZE_ERROR; } src = (const BYTE *)src + frameSrcSize; srcSize -= frameSrcSize; } } /* while (srcSize >= ZSTD_frameHeaderSize_prefix) */ if (srcSize) return ZSTD_CONTENTSIZE_ERROR; return totalDstSize; } /** ZSTD_getDecompressedSize() : * compatible with legacy mode * @return : decompressed size if known, 0 otherwise note : 0 can mean any of the following : - frame content is empty - decompressed size field is not present in frame header - frame header unknown / not supported - frame header not complete (`srcSize` too small) */ unsigned long long ZSTD_getDecompressedSize(const void* src, size_t srcSize) { unsigned long long const ret = ZSTD_getFrameContentSize(src, srcSize); ZSTD_STATIC_ASSERT(ZSTD_CONTENTSIZE_ERROR < 
ZSTD_CONTENTSIZE_UNKNOWN); return (ret >= ZSTD_CONTENTSIZE_ERROR) ? 0 : ret; } /** ZSTD_decodeFrameHeader() : * `headerSize` must be the size provided by ZSTD_frameHeaderSize(). * @return : 0 if success, or an error code, which can be tested using ZSTD_isError() */ static size_t ZSTD_decodeFrameHeader(ZSTD_DCtx* dctx, const void* src, size_t headerSize) { size_t const result = ZSTD_getFrameHeader_advanced(&(dctx->fParams), src, headerSize, dctx->format); if (ZSTD_isError(result)) return result; /* invalid header */ RETURN_ERROR_IF(result>0, srcSize_wrong, "headerSize too small"); #ifndef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION /* Skip the dictID check in fuzzing mode, because it makes the search * harder. */ RETURN_ERROR_IF(dctx->fParams.dictID && (dctx->dictID != dctx->fParams.dictID), dictionary_wrong, ""); #endif if (dctx->fParams.checksumFlag) XXH64_reset(&dctx->xxhState, 0); return 0; } static ZSTD_frameSizeInfo ZSTD_errorFrameSizeInfo(size_t ret) { ZSTD_frameSizeInfo frameSizeInfo; frameSizeInfo.compressedSize = ret; frameSizeInfo.decompressedBound = ZSTD_CONTENTSIZE_ERROR; return frameSizeInfo; } static ZSTD_frameSizeInfo ZSTD_findFrameSizeInfo(const void* src, size_t srcSize) { ZSTD_frameSizeInfo frameSizeInfo; memset(&frameSizeInfo, 0, sizeof(ZSTD_frameSizeInfo)); #if defined(ZSTD_LEGACY_SUPPORT) && (ZSTD_LEGACY_SUPPORT >= 1) if (ZSTD_isLegacy(src, srcSize)) return ZSTD_findFrameSizeInfoLegacy(src, srcSize); #endif if ((srcSize >= ZSTD_SKIPPABLEHEADERSIZE) && (MEM_readLE32(src) & ZSTD_MAGIC_SKIPPABLE_MASK) == ZSTD_MAGIC_SKIPPABLE_START) { frameSizeInfo.compressedSize = readSkippableFrameSize(src, srcSize); assert(ZSTD_isError(frameSizeInfo.compressedSize) || frameSizeInfo.compressedSize <= srcSize); return frameSizeInfo; } else { const BYTE* ip = (const BYTE*)src; const BYTE* const ipstart = ip; size_t remainingSize = srcSize; size_t nbBlocks = 0; ZSTD_frameHeader zfh; /* Extract Frame Header */ { size_t const ret = ZSTD_getFrameHeader(&zfh, src, srcSize); if (ZSTD_isError(ret)) return ZSTD_errorFrameSizeInfo(ret); if (ret > 0) return ZSTD_errorFrameSizeInfo(ERROR(srcSize_wrong)); } ip += zfh.headerSize; remainingSize -= zfh.headerSize; /* Iterate over each block */ while (1) { blockProperties_t blockProperties; size_t const cBlockSize = ZSTD_getcBlockSize(ip, remainingSize, &blockProperties); if (ZSTD_isError(cBlockSize)) return ZSTD_errorFrameSizeInfo(cBlockSize); if (ZSTD_blockHeaderSize + cBlockSize > remainingSize) return ZSTD_errorFrameSizeInfo(ERROR(srcSize_wrong)); ip += ZSTD_blockHeaderSize + cBlockSize; remainingSize -= ZSTD_blockHeaderSize + cBlockSize; nbBlocks++; if (blockProperties.lastBlock) break; } /* Final frame content checksum */ if (zfh.checksumFlag) { if (remainingSize < 4) return ZSTD_errorFrameSizeInfo(ERROR(srcSize_wrong)); ip += 4; } frameSizeInfo.compressedSize = ip - ipstart; frameSizeInfo.decompressedBound = (zfh.frameContentSize != ZSTD_CONTENTSIZE_UNKNOWN) ? 
zfh.frameContentSize : nbBlocks * zfh.blockSizeMax; return frameSizeInfo; } } /** ZSTD_findFrameCompressedSize() : * compatible with legacy mode * `src` must point to the start of a ZSTD frame, ZSTD legacy frame, or skippable frame * `srcSize` must be at least as large as the frame contained * @return : the compressed size of the frame starting at `src` */ size_t ZSTD_findFrameCompressedSize(const void *src, size_t srcSize) { ZSTD_frameSizeInfo const frameSizeInfo = ZSTD_findFrameSizeInfo(src, srcSize); return frameSizeInfo.compressedSize; } /** ZSTD_decompressBound() : * compatible with legacy mode * `src` must point to the start of a ZSTD frame or a skippeable frame * `srcSize` must be at least as large as the frame contained * @return : the maximum decompressed size of the compressed source */ unsigned long long ZSTD_decompressBound(const void* src, size_t srcSize) { unsigned long long bound = 0; /* Iterate over each frame */ while (srcSize > 0) { ZSTD_frameSizeInfo const frameSizeInfo = ZSTD_findFrameSizeInfo(src, srcSize); size_t const compressedSize = frameSizeInfo.compressedSize; unsigned long long const decompressedBound = frameSizeInfo.decompressedBound; if (ZSTD_isError(compressedSize) || decompressedBound == ZSTD_CONTENTSIZE_ERROR) return ZSTD_CONTENTSIZE_ERROR; assert(srcSize >= compressedSize); src = (const BYTE*)src + compressedSize; srcSize -= compressedSize; bound += decompressedBound; } return bound; } /*-************************************************************* * Frame decoding ***************************************************************/ /** ZSTD_insertBlock() : * insert `src` block into `dctx` history. Useful to track uncompressed blocks. */ size_t ZSTD_insertBlock(ZSTD_DCtx* dctx, const void* blockStart, size_t blockSize) { DEBUGLOG(5, "ZSTD_insertBlock: %u bytes", (unsigned)blockSize); ZSTD_checkContinuity(dctx, blockStart); dctx->previousDstEnd = (const char*)blockStart + blockSize; return blockSize; } static size_t ZSTD_copyRawBlock(void* dst, size_t dstCapacity, const void* src, size_t srcSize) { DEBUGLOG(5, "ZSTD_copyRawBlock"); if (dst == NULL) { if (srcSize == 0) return 0; RETURN_ERROR(dstBuffer_null, ""); } RETURN_ERROR_IF(srcSize > dstCapacity, dstSize_tooSmall, ""); memcpy(dst, src, srcSize); return srcSize; } static size_t ZSTD_setRleBlock(void* dst, size_t dstCapacity, BYTE b, size_t regenSize) { if (dst == NULL) { if (regenSize == 0) return 0; RETURN_ERROR(dstBuffer_null, ""); } RETURN_ERROR_IF(regenSize > dstCapacity, dstSize_tooSmall, ""); memset(dst, b, regenSize); return regenSize; } /*! ZSTD_decompressFrame() : * @dctx must be properly initialized * will update *srcPtr and *srcSizePtr, * to make *srcPtr progress by one frame. */ static size_t ZSTD_decompressFrame(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, const void** srcPtr, size_t *srcSizePtr) { const BYTE* ip = (const BYTE*)(*srcPtr); BYTE* const ostart = (BYTE* const)dst; BYTE* const oend = dstCapacity != 0 ? 
ostart + dstCapacity : ostart; BYTE* op = ostart; size_t remainingSrcSize = *srcSizePtr; DEBUGLOG(4, "ZSTD_decompressFrame (srcSize:%i)", (int)*srcSizePtr); /* check */ RETURN_ERROR_IF( remainingSrcSize < ZSTD_FRAMEHEADERSIZE_MIN(dctx->format)+ZSTD_blockHeaderSize, srcSize_wrong, ""); /* Frame Header */ { size_t const frameHeaderSize = ZSTD_frameHeaderSize_internal( ip, ZSTD_FRAMEHEADERSIZE_PREFIX(dctx->format), dctx->format); if (ZSTD_isError(frameHeaderSize)) return frameHeaderSize; RETURN_ERROR_IF(remainingSrcSize < frameHeaderSize+ZSTD_blockHeaderSize, srcSize_wrong, ""); FORWARD_IF_ERROR( ZSTD_decodeFrameHeader(dctx, ip, frameHeaderSize) , ""); ip += frameHeaderSize; remainingSrcSize -= frameHeaderSize; } /* Loop on each block */ while (1) { size_t decodedSize; blockProperties_t blockProperties; size_t const cBlockSize = ZSTD_getcBlockSize(ip, remainingSrcSize, &blockProperties); if (ZSTD_isError(cBlockSize)) return cBlockSize; ip += ZSTD_blockHeaderSize; remainingSrcSize -= ZSTD_blockHeaderSize; RETURN_ERROR_IF(cBlockSize > remainingSrcSize, srcSize_wrong, ""); switch(blockProperties.blockType) { case bt_compressed: decodedSize = ZSTD_decompressBlock_internal(dctx, op, oend-op, ip, cBlockSize, /* frame */ 1); break; case bt_raw : decodedSize = ZSTD_copyRawBlock(op, oend-op, ip, cBlockSize); break; case bt_rle : decodedSize = ZSTD_setRleBlock(op, oend-op, *ip, blockProperties.origSize); break; case bt_reserved : default: RETURN_ERROR(corruption_detected, "invalid block type"); } if (ZSTD_isError(decodedSize)) return decodedSize; if (dctx->fParams.checksumFlag) XXH64_update(&dctx->xxhState, op, decodedSize); if (decodedSize != 0) op += decodedSize; assert(ip != NULL); ip += cBlockSize; remainingSrcSize -= cBlockSize; if (blockProperties.lastBlock) break; } if (dctx->fParams.frameContentSize != ZSTD_CONTENTSIZE_UNKNOWN) { RETURN_ERROR_IF((U64)(op-ostart) != dctx->fParams.frameContentSize, corruption_detected, ""); } if (dctx->fParams.checksumFlag) { /* Frame content checksum verification */ U32 const checkCalc = (U32)XXH64_digest(&dctx->xxhState); U32 checkRead; RETURN_ERROR_IF(remainingSrcSize<4, checksum_wrong, ""); checkRead = MEM_readLE32(ip); RETURN_ERROR_IF(checkRead != checkCalc, checksum_wrong, ""); ip += 4; remainingSrcSize -= 4; } /* Allow caller to get size read */ *srcPtr = ip; *srcSizePtr = remainingSrcSize; return op-ostart; } static size_t ZSTD_decompressMultiFrame(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize, const void* dict, size_t dictSize, const ZSTD_DDict* ddict) { void* const dststart = dst; int moreThan1Frame = 0; DEBUGLOG(5, "ZSTD_decompressMultiFrame"); assert(dict==NULL || ddict==NULL); /* either dict or ddict set, not both */ if (ddict) { dict = ZSTD_DDict_dictContent(ddict); dictSize = ZSTD_DDict_dictSize(ddict); } while (srcSize >= ZSTD_startingInputLength(dctx->format)) { #if defined(ZSTD_LEGACY_SUPPORT) && (ZSTD_LEGACY_SUPPORT >= 1) if (ZSTD_isLegacy(src, srcSize)) { size_t decodedSize; size_t const frameSize = ZSTD_findFrameCompressedSizeLegacy(src, srcSize); if (ZSTD_isError(frameSize)) return frameSize; RETURN_ERROR_IF(dctx->staticSize, memory_allocation, "legacy support is not compatible with static dctx"); decodedSize = ZSTD_decompressLegacy(dst, dstCapacity, src, frameSize, dict, dictSize); if (ZSTD_isError(decodedSize)) return decodedSize; assert(decodedSize <= dstCapacity); dst = (BYTE*)dst + decodedSize; dstCapacity -= decodedSize; src = (const BYTE*)src + frameSize; srcSize -= frameSize; continue; } #endif {
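/* not a legacy frame : read the 4-byte magic number, skip over skippable frames, and decode regular zstd frames below */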
U32 const magicNumber = MEM_readLE32(src); DEBUGLOG(4, "reading magic number %08X (expecting %08X)", (unsigned)magicNumber, ZSTD_MAGICNUMBER); if ((magicNumber & ZSTD_MAGIC_SKIPPABLE_MASK) == ZSTD_MAGIC_SKIPPABLE_START) { size_t const skippableSize = readSkippableFrameSize(src, srcSize); FORWARD_IF_ERROR(skippableSize, "readSkippableFrameSize failed"); assert(skippableSize <= srcSize); src = (const BYTE *)src + skippableSize; srcSize -= skippableSize; continue; } } if (ddict) { /* we were called from ZSTD_decompress_usingDDict */ FORWARD_IF_ERROR(ZSTD_decompressBegin_usingDDict(dctx, ddict), ""); } else { /* this will initialize correctly with no dict if dict == NULL, so * use this in all cases but ddict */ FORWARD_IF_ERROR(ZSTD_decompressBegin_usingDict(dctx, dict, dictSize), ""); } ZSTD_checkContinuity(dctx, dst); { const size_t res = ZSTD_decompressFrame(dctx, dst, dstCapacity, &src, &srcSize); RETURN_ERROR_IF( (ZSTD_getErrorCode(res) == ZSTD_error_prefix_unknown) && (moreThan1Frame==1), srcSize_wrong, "at least one frame successfully completed, but following " "bytes are garbage: it's more likely to be a srcSize error, " "specifying more bytes than compressed size of frame(s). This " "error message replaces ERROR(prefix_unknown), which would be " "confusing, as the first header is actually correct. Note that " "one could be unlucky, it might be a corruption error instead, " "happening right at the place where we expect zstd magic " "bytes. But this is _much_ less likely than a srcSize field " "error."); if (ZSTD_isError(res)) return res; assert(res <= dstCapacity); if (res != 0) dst = (BYTE*)dst + res; dstCapacity -= res; } moreThan1Frame = 1; } /* while (srcSize >= ZSTD_frameHeaderSize_prefix) */ RETURN_ERROR_IF(srcSize, srcSize_wrong, "input not entirely consumed"); return (BYTE*)dst - (BYTE*)dststart; } size_t ZSTD_decompress_usingDict(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize, const void* dict, size_t dictSize) { return ZSTD_decompressMultiFrame(dctx, dst, dstCapacity, src, srcSize, dict, dictSize, NULL); } static ZSTD_DDict const* ZSTD_getDDict(ZSTD_DCtx* dctx) { switch (dctx->dictUses) { default: assert(0 /* Impossible */); /* fall-through */ case ZSTD_dont_use: ZSTD_clearDict(dctx); return NULL; case ZSTD_use_indefinitely: return dctx->ddict; case ZSTD_use_once: dctx->dictUses = ZSTD_dont_use; return dctx->ddict; } } size_t ZSTD_decompressDCtx(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize) { return ZSTD_decompress_usingDDict(dctx, dst, dstCapacity, src, srcSize, ZSTD_getDDict(dctx)); } size_t ZSTD_decompress(void* dst, size_t dstCapacity, const void* src, size_t srcSize) { #if defined(ZSTD_HEAPMODE) && (ZSTD_HEAPMODE>=1) size_t regenSize; ZSTD_DCtx* const dctx = ZSTD_createDCtx(); RETURN_ERROR_IF(dctx==NULL, memory_allocation, "NULL pointer!"); regenSize = ZSTD_decompressDCtx(dctx, dst, dstCapacity, src, srcSize); ZSTD_freeDCtx(dctx); return regenSize; #else /* stack mode */ ZSTD_DCtx dctx; ZSTD_initDCtx_internal(&dctx); return ZSTD_decompressDCtx(&dctx, dst, dstCapacity, src, srcSize); #endif } /*-************************************** * Advanced Streaming Decompression API * Bufferless and synchronous ****************************************/ size_t ZSTD_nextSrcSizeToDecompress(ZSTD_DCtx* dctx) { return dctx->expected; } /** * Similar to ZSTD_nextSrcSizeToDecompress(), but when when a block input can be streamed, * we allow taking a partial block as the input. 
Currently only raw uncompressed blocks can * be streamed. * * For blocks that can be streamed, this allows us to reduce the latency until we produce * output, and avoid copying the input. * * @param inputSize - The total amount of input that the caller currently has. */ static size_t ZSTD_nextSrcSizeToDecompressWithInputSize(ZSTD_DCtx* dctx, size_t inputSize) { if (!(dctx->stage == ZSTDds_decompressBlock || dctx->stage == ZSTDds_decompressLastBlock)) return dctx->expected; if (dctx->bType != bt_raw) return dctx->expected; return MIN(MAX(inputSize, 1), dctx->expected); } ZSTD_nextInputType_e ZSTD_nextInputType(ZSTD_DCtx* dctx) { switch(dctx->stage) { default: /* should not happen */ assert(0); case ZSTDds_getFrameHeaderSize: case ZSTDds_decodeFrameHeader: return ZSTDnit_frameHeader; case ZSTDds_decodeBlockHeader: return ZSTDnit_blockHeader; case ZSTDds_decompressBlock: return ZSTDnit_block; case ZSTDds_decompressLastBlock: return ZSTDnit_lastBlock; case ZSTDds_checkChecksum: return ZSTDnit_checksum; case ZSTDds_decodeSkippableHeader: case ZSTDds_skipFrame: return ZSTDnit_skippableFrame; } } static int ZSTD_isSkipFrame(ZSTD_DCtx* dctx) { return dctx->stage == ZSTDds_skipFrame; } /** ZSTD_decompressContinue() : * srcSize : must be the exact nb of bytes expected (see ZSTD_nextSrcSizeToDecompress()) * @return : nb of bytes generated into `dst` (necessarily <= `dstCapacity) * or an error code, which can be tested using ZSTD_isError() */ size_t ZSTD_decompressContinue(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize) { DEBUGLOG(5, "ZSTD_decompressContinue (srcSize:%u)", (unsigned)srcSize); /* Sanity check */ RETURN_ERROR_IF(srcSize != ZSTD_nextSrcSizeToDecompressWithInputSize(dctx, srcSize), srcSize_wrong, "not allowed"); if (dstCapacity) ZSTD_checkContinuity(dctx, dst); switch (dctx->stage) { case ZSTDds_getFrameHeaderSize : assert(src != NULL); if (dctx->format == ZSTD_f_zstd1) { /* allows header */ assert(srcSize >= ZSTD_FRAMEIDSIZE); /* to read skippable magic number */ if ((MEM_readLE32(src) & ZSTD_MAGIC_SKIPPABLE_MASK) == ZSTD_MAGIC_SKIPPABLE_START) { /* skippable frame */ memcpy(dctx->headerBuffer, src, srcSize); dctx->expected = ZSTD_SKIPPABLEHEADERSIZE - srcSize; /* remaining to load to get full skippable frame header */ dctx->stage = ZSTDds_decodeSkippableHeader; return 0; } } dctx->headerSize = ZSTD_frameHeaderSize_internal(src, srcSize, dctx->format); if (ZSTD_isError(dctx->headerSize)) return dctx->headerSize; memcpy(dctx->headerBuffer, src, srcSize); dctx->expected = dctx->headerSize - srcSize; dctx->stage = ZSTDds_decodeFrameHeader; return 0; case ZSTDds_decodeFrameHeader: assert(src != NULL); memcpy(dctx->headerBuffer + (dctx->headerSize - srcSize), src, srcSize); FORWARD_IF_ERROR(ZSTD_decodeFrameHeader(dctx, dctx->headerBuffer, dctx->headerSize), ""); dctx->expected = ZSTD_blockHeaderSize; dctx->stage = ZSTDds_decodeBlockHeader; return 0; case ZSTDds_decodeBlockHeader: { blockProperties_t bp; size_t const cBlockSize = ZSTD_getcBlockSize(src, ZSTD_blockHeaderSize, &bp); if (ZSTD_isError(cBlockSize)) return cBlockSize; RETURN_ERROR_IF(cBlockSize > dctx->fParams.blockSizeMax, corruption_detected, "Block Size Exceeds Maximum"); dctx->expected = cBlockSize; dctx->bType = bp.blockType; dctx->rleSize = bp.origSize; if (cBlockSize) { dctx->stage = bp.lastBlock ? 
ZSTDds_decompressLastBlock : ZSTDds_decompressBlock; return 0; } /* empty block */ if (bp.lastBlock) { if (dctx->fParams.checksumFlag) { dctx->expected = 4; dctx->stage = ZSTDds_checkChecksum; } else { dctx->expected = 0; /* end of frame */ dctx->stage = ZSTDds_getFrameHeaderSize; } } else { dctx->expected = ZSTD_blockHeaderSize; /* jump to next header */ dctx->stage = ZSTDds_decodeBlockHeader; } return 0; } case ZSTDds_decompressLastBlock: case ZSTDds_decompressBlock: DEBUGLOG(5, "ZSTD_decompressContinue: case ZSTDds_decompressBlock"); { size_t rSize; switch(dctx->bType) { case bt_compressed: DEBUGLOG(5, "ZSTD_decompressContinue: case bt_compressed"); rSize = ZSTD_decompressBlock_internal(dctx, dst, dstCapacity, src, srcSize, /* frame */ 1); dctx->expected = 0; /* Streaming not supported */ break; case bt_raw : assert(srcSize <= dctx->expected); rSize = ZSTD_copyRawBlock(dst, dstCapacity, src, srcSize); FORWARD_IF_ERROR(rSize, "ZSTD_copyRawBlock failed"); assert(rSize == srcSize); dctx->expected -= rSize; break; case bt_rle : rSize = ZSTD_setRleBlock(dst, dstCapacity, *(const BYTE*)src, dctx->rleSize); dctx->expected = 0; /* Streaming not supported */ break; case bt_reserved : /* should never happen */ default: RETURN_ERROR(corruption_detected, "invalid block type"); } FORWARD_IF_ERROR(rSize, ""); RETURN_ERROR_IF(rSize > dctx->fParams.blockSizeMax, corruption_detected, "Decompressed Block Size Exceeds Maximum"); DEBUGLOG(5, "ZSTD_decompressContinue: decoded size from block : %u", (unsigned)rSize); dctx->decodedSize += rSize; if (dctx->fParams.checksumFlag) XXH64_update(&dctx->xxhState, dst, rSize); dctx->previousDstEnd = (char*)dst + rSize; /* Stay on the same stage until we are finished streaming the block. */ if (dctx->expected > 0) { return rSize; } if (dctx->stage == ZSTDds_decompressLastBlock) { /* end of frame */ DEBUGLOG(4, "ZSTD_decompressContinue: decoded size from frame : %u", (unsigned)dctx->decodedSize); RETURN_ERROR_IF( dctx->fParams.frameContentSize != ZSTD_CONTENTSIZE_UNKNOWN && dctx->decodedSize != dctx->fParams.frameContentSize, corruption_detected, ""); if (dctx->fParams.checksumFlag) { /* another round for frame checksum */ dctx->expected = 4; dctx->stage = ZSTDds_checkChecksum; } else { dctx->expected = 0; /* ends here */ dctx->stage = ZSTDds_getFrameHeaderSize; } } else { dctx->stage = ZSTDds_decodeBlockHeader; dctx->expected = ZSTD_blockHeaderSize; } return rSize; } case ZSTDds_checkChecksum: assert(srcSize == 4); /* guaranteed by dctx->expected */ { U32 const h32 = (U32)XXH64_digest(&dctx->xxhState); U32 const check32 = MEM_readLE32(src); DEBUGLOG(4, "ZSTD_decompressContinue: checksum : calculated %08X :: %08X read", (unsigned)h32, (unsigned)check32); RETURN_ERROR_IF(check32 != h32, checksum_wrong, ""); dctx->expected = 0; dctx->stage = ZSTDds_getFrameHeaderSize; return 0; } case ZSTDds_decodeSkippableHeader: assert(src != NULL); assert(srcSize <= ZSTD_SKIPPABLEHEADERSIZE); memcpy(dctx->headerBuffer + (ZSTD_SKIPPABLEHEADERSIZE - srcSize), src, srcSize); /* complete skippable header */ dctx->expected = MEM_readLE32(dctx->headerBuffer + ZSTD_FRAMEIDSIZE); /* note : dctx->expected can grow seriously large, beyond local buffer size */ dctx->stage = ZSTDds_skipFrame; return 0; case ZSTDds_skipFrame: dctx->expected = 0; dctx->stage = ZSTDds_getFrameHeaderSize; return 0; default: assert(0); /* impossible */ RETURN_ERROR(GENERIC, "impossible to reach"); /* some compiler require default to do something */ } } static size_t ZSTD_refDictContent(ZSTD_DCtx* dctx, const void* 
dict, size_t dictSize) { dctx->dictEnd = dctx->previousDstEnd; dctx->virtualStart = (const char*)dict - ((const char*)(dctx->previousDstEnd) - (const char*)(dctx->prefixStart)); dctx->prefixStart = dict; dctx->previousDstEnd = (const char*)dict + dictSize; #ifdef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION dctx->dictContentBeginForFuzzing = dctx->prefixStart; dctx->dictContentEndForFuzzing = dctx->previousDstEnd; #endif return 0; } /*! ZSTD_loadDEntropy() : * dict : must point at beginning of a valid zstd dictionary. * @return : size of entropy tables read */ size_t ZSTD_loadDEntropy(ZSTD_entropyDTables_t* entropy, const void* const dict, size_t const dictSize) { const BYTE* dictPtr = (const BYTE*)dict; const BYTE* const dictEnd = dictPtr + dictSize; RETURN_ERROR_IF(dictSize <= 8, dictionary_corrupted, "dict is too small"); assert(MEM_readLE32(dict) == ZSTD_MAGIC_DICTIONARY); /* dict must be valid */ dictPtr += 8; /* skip header = magic + dictID */ ZSTD_STATIC_ASSERT(offsetof(ZSTD_entropyDTables_t, OFTable) == offsetof(ZSTD_entropyDTables_t, LLTable) + sizeof(entropy->LLTable)); ZSTD_STATIC_ASSERT(offsetof(ZSTD_entropyDTables_t, MLTable) == offsetof(ZSTD_entropyDTables_t, OFTable) + sizeof(entropy->OFTable)); ZSTD_STATIC_ASSERT(sizeof(entropy->LLTable) + sizeof(entropy->OFTable) + sizeof(entropy->MLTable) >= HUF_DECOMPRESS_WORKSPACE_SIZE); { void* const workspace = &entropy->LLTable; /* use fse tables as temporary workspace; implies fse tables are grouped together */ size_t const workspaceSize = sizeof(entropy->LLTable) + sizeof(entropy->OFTable) + sizeof(entropy->MLTable); #ifdef HUF_FORCE_DECOMPRESS_X1 /* in minimal huffman, we always use X1 variants */ size_t const hSize = HUF_readDTableX1_wksp(entropy->hufTable, dictPtr, dictEnd - dictPtr, workspace, workspaceSize); #else size_t const hSize = HUF_readDTableX2_wksp(entropy->hufTable, dictPtr, dictEnd - dictPtr, workspace, workspaceSize); #endif RETURN_ERROR_IF(HUF_isError(hSize), dictionary_corrupted, ""); dictPtr += hSize; } { short offcodeNCount[MaxOff+1]; unsigned offcodeMaxValue = MaxOff, offcodeLog; size_t const offcodeHeaderSize = FSE_readNCount(offcodeNCount, &offcodeMaxValue, &offcodeLog, dictPtr, dictEnd-dictPtr); RETURN_ERROR_IF(FSE_isError(offcodeHeaderSize), dictionary_corrupted, ""); RETURN_ERROR_IF(offcodeMaxValue > MaxOff, dictionary_corrupted, ""); RETURN_ERROR_IF(offcodeLog > OffFSELog, dictionary_corrupted, ""); ZSTD_buildFSETable( entropy->OFTable, offcodeNCount, offcodeMaxValue, OF_base, OF_bits, offcodeLog); dictPtr += offcodeHeaderSize; } { short matchlengthNCount[MaxML+1]; unsigned matchlengthMaxValue = MaxML, matchlengthLog; size_t const matchlengthHeaderSize = FSE_readNCount(matchlengthNCount, &matchlengthMaxValue, &matchlengthLog, dictPtr, dictEnd-dictPtr); RETURN_ERROR_IF(FSE_isError(matchlengthHeaderSize), dictionary_corrupted, ""); RETURN_ERROR_IF(matchlengthMaxValue > MaxML, dictionary_corrupted, ""); RETURN_ERROR_IF(matchlengthLog > MLFSELog, dictionary_corrupted, ""); ZSTD_buildFSETable( entropy->MLTable, matchlengthNCount, matchlengthMaxValue, ML_base, ML_bits, matchlengthLog); dictPtr += matchlengthHeaderSize; } { short litlengthNCount[MaxLL+1]; unsigned litlengthMaxValue = MaxLL, litlengthLog; size_t const litlengthHeaderSize = FSE_readNCount(litlengthNCount, &litlengthMaxValue, &litlengthLog, dictPtr, dictEnd-dictPtr); RETURN_ERROR_IF(FSE_isError(litlengthHeaderSize), dictionary_corrupted, ""); RETURN_ERROR_IF(litlengthMaxValue > MaxLL, dictionary_corrupted, ""); RETURN_ERROR_IF(litlengthLog > LLFSELog, 
dictionary_corrupted, ""); ZSTD_buildFSETable( entropy->LLTable, litlengthNCount, litlengthMaxValue, LL_base, LL_bits, litlengthLog); dictPtr += litlengthHeaderSize; } RETURN_ERROR_IF(dictPtr+12 > dictEnd, dictionary_corrupted, ""); { int i; size_t const dictContentSize = (size_t)(dictEnd - (dictPtr+12)); for (i=0; i<3; i++) { U32 const rep = MEM_readLE32(dictPtr); dictPtr += 4; RETURN_ERROR_IF(rep==0 || rep > dictContentSize, dictionary_corrupted, ""); entropy->rep[i] = rep; } } return dictPtr - (const BYTE*)dict; } static size_t ZSTD_decompress_insertDictionary(ZSTD_DCtx* dctx, const void* dict, size_t dictSize) { if (dictSize < 8) return ZSTD_refDictContent(dctx, dict, dictSize); { U32 const magic = MEM_readLE32(dict); if (magic != ZSTD_MAGIC_DICTIONARY) { return ZSTD_refDictContent(dctx, dict, dictSize); /* pure content mode */ } } dctx->dictID = MEM_readLE32((const char*)dict + ZSTD_FRAMEIDSIZE); /* load entropy tables */ { size_t const eSize = ZSTD_loadDEntropy(&dctx->entropy, dict, dictSize); RETURN_ERROR_IF(ZSTD_isError(eSize), dictionary_corrupted, ""); dict = (const char*)dict + eSize; dictSize -= eSize; } dctx->litEntropy = dctx->fseEntropy = 1; /* reference dictionary content */ return ZSTD_refDictContent(dctx, dict, dictSize); } size_t ZSTD_decompressBegin(ZSTD_DCtx* dctx) { assert(dctx != NULL); dctx->expected = ZSTD_startingInputLength(dctx->format); /* dctx->format must be properly set */ dctx->stage = ZSTDds_getFrameHeaderSize; dctx->decodedSize = 0; dctx->previousDstEnd = NULL; dctx->prefixStart = NULL; dctx->virtualStart = NULL; dctx->dictEnd = NULL; dctx->entropy.hufTable[0] = (HUF_DTable)((HufLog)*0x1000001); /* cover both little and big endian */ dctx->litEntropy = dctx->fseEntropy = 0; dctx->dictID = 0; dctx->bType = bt_reserved; ZSTD_STATIC_ASSERT(sizeof(dctx->entropy.rep) == sizeof(repStartValue)); memcpy(dctx->entropy.rep, repStartValue, sizeof(repStartValue)); /* initial repcodes */ dctx->LLTptr = dctx->entropy.LLTable; dctx->MLTptr = dctx->entropy.MLTable; dctx->OFTptr = dctx->entropy.OFTable; dctx->HUFptr = dctx->entropy.hufTable; return 0; } size_t ZSTD_decompressBegin_usingDict(ZSTD_DCtx* dctx, const void* dict, size_t dictSize) { FORWARD_IF_ERROR( ZSTD_decompressBegin(dctx) , ""); if (dict && dictSize) RETURN_ERROR_IF( ZSTD_isError(ZSTD_decompress_insertDictionary(dctx, dict, dictSize)), dictionary_corrupted, ""); return 0; } /* ====== ZSTD_DDict ====== */ size_t ZSTD_decompressBegin_usingDDict(ZSTD_DCtx* dctx, const ZSTD_DDict* ddict) { DEBUGLOG(4, "ZSTD_decompressBegin_usingDDict"); assert(dctx != NULL); if (ddict) { const char* const dictStart = (const char*)ZSTD_DDict_dictContent(ddict); size_t const dictSize = ZSTD_DDict_dictSize(ddict); const void* const dictEnd = dictStart + dictSize; dctx->ddictIsCold = (dctx->dictEnd != dictEnd); DEBUGLOG(4, "DDict is %s", dctx->ddictIsCold ? "~cold~" : "hot!"); } FORWARD_IF_ERROR( ZSTD_decompressBegin(dctx) , ""); if (ddict) { /* NULL ddict is equivalent to no dictionary */ ZSTD_copyDDictParameters(dctx, ddict); } return 0; } /*! ZSTD_getDictID_fromDict() : * Provides the dictID stored within dictionary. * if @return == 0, the dictionary is not conformant with Zstandard specification. * It can still be loaded, but as a content-only dictionary. */ unsigned ZSTD_getDictID_fromDict(const void* dict, size_t dictSize) { if (dictSize < 8) return 0; if (MEM_readLE32(dict) != ZSTD_MAGIC_DICTIONARY) return 0; return MEM_readLE32((const char*)dict + ZSTD_FRAMEIDSIZE); } /*! 
ZSTD_getDictID_fromFrame() : * Provides the dictID required to decompress frame stored within `src`. * If @return == 0, the dictID could not be decoded. * This could for one of the following reasons : * - The frame does not require a dictionary (most common case). * - The frame was built with dictID intentionally removed. * Needed dictionary is a hidden information. * Note : this use case also happens when using a non-conformant dictionary. * - `srcSize` is too small, and as a result, frame header could not be decoded. * Note : possible if `srcSize < ZSTD_FRAMEHEADERSIZE_MAX`. * - This is not a Zstandard frame. * When identifying the exact failure cause, it's possible to use * ZSTD_getFrameHeader(), which will provide a more precise error code. */ unsigned ZSTD_getDictID_fromFrame(const void* src, size_t srcSize) { ZSTD_frameHeader zfp = { 0, 0, 0, ZSTD_frame, 0, 0, 0 }; size_t const hError = ZSTD_getFrameHeader(&zfp, src, srcSize); if (ZSTD_isError(hError)) return 0; return zfp.dictID; } /*! ZSTD_decompress_usingDDict() : * Decompression using a pre-digested Dictionary * Use dictionary without significant overhead. */ size_t ZSTD_decompress_usingDDict(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize, const ZSTD_DDict* ddict) { /* pass content and size in case legacy frames are encountered */ return ZSTD_decompressMultiFrame(dctx, dst, dstCapacity, src, srcSize, NULL, 0, ddict); } /*===================================== * Streaming decompression *====================================*/ ZSTD_DStream* ZSTD_createDStream(void) { DEBUGLOG(3, "ZSTD_createDStream"); return ZSTD_createDStream_advanced(ZSTD_defaultCMem); } ZSTD_DStream* ZSTD_initStaticDStream(void *workspace, size_t workspaceSize) { return ZSTD_initStaticDCtx(workspace, workspaceSize); } ZSTD_DStream* ZSTD_createDStream_advanced(ZSTD_customMem customMem) { return ZSTD_createDCtx_advanced(customMem); } size_t ZSTD_freeDStream(ZSTD_DStream* zds) { return ZSTD_freeDCtx(zds); } /* *** Initialization *** */ size_t ZSTD_DStreamInSize(void) { return ZSTD_BLOCKSIZE_MAX + ZSTD_blockHeaderSize; } size_t ZSTD_DStreamOutSize(void) { return ZSTD_BLOCKSIZE_MAX; } size_t ZSTD_DCtx_loadDictionary_advanced(ZSTD_DCtx* dctx, const void* dict, size_t dictSize, ZSTD_dictLoadMethod_e dictLoadMethod, ZSTD_dictContentType_e dictContentType) { RETURN_ERROR_IF(dctx->streamStage != zdss_init, stage_wrong, ""); ZSTD_clearDict(dctx); if (dict && dictSize != 0) { dctx->ddictLocal = ZSTD_createDDict_advanced(dict, dictSize, dictLoadMethod, dictContentType, dctx->customMem); RETURN_ERROR_IF(dctx->ddictLocal == NULL, memory_allocation, "NULL pointer!"); dctx->ddict = dctx->ddictLocal; dctx->dictUses = ZSTD_use_indefinitely; } return 0; } size_t ZSTD_DCtx_loadDictionary_byReference(ZSTD_DCtx* dctx, const void* dict, size_t dictSize) { return ZSTD_DCtx_loadDictionary_advanced(dctx, dict, dictSize, ZSTD_dlm_byRef, ZSTD_dct_auto); } size_t ZSTD_DCtx_loadDictionary(ZSTD_DCtx* dctx, const void* dict, size_t dictSize) { return ZSTD_DCtx_loadDictionary_advanced(dctx, dict, dictSize, ZSTD_dlm_byCopy, ZSTD_dct_auto); } size_t ZSTD_DCtx_refPrefix_advanced(ZSTD_DCtx* dctx, const void* prefix, size_t prefixSize, ZSTD_dictContentType_e dictContentType) { FORWARD_IF_ERROR(ZSTD_DCtx_loadDictionary_advanced(dctx, prefix, prefixSize, ZSTD_dlm_byRef, dictContentType), ""); dctx->dictUses = ZSTD_use_once; return 0; } size_t ZSTD_DCtx_refPrefix(ZSTD_DCtx* dctx, const void* prefix, size_t prefixSize) { return ZSTD_DCtx_refPrefix_advanced(dctx, prefix, 
prefixSize, ZSTD_dct_rawContent); } /* ZSTD_initDStream_usingDict() : * return : expected size, aka ZSTD_startingInputLength(). * this function cannot fail */ size_t ZSTD_initDStream_usingDict(ZSTD_DStream* zds, const void* dict, size_t dictSize) { DEBUGLOG(4, "ZSTD_initDStream_usingDict"); FORWARD_IF_ERROR( ZSTD_DCtx_reset(zds, ZSTD_reset_session_only) , ""); FORWARD_IF_ERROR( ZSTD_DCtx_loadDictionary(zds, dict, dictSize) , ""); return ZSTD_startingInputLength(zds->format); } /* note : this variant can't fail */ size_t ZSTD_initDStream(ZSTD_DStream* zds) { DEBUGLOG(4, "ZSTD_initDStream"); return ZSTD_initDStream_usingDDict(zds, NULL); } /* ZSTD_initDStream_usingDDict() : * ddict will just be referenced, and must outlive decompression session * this function cannot fail */ size_t ZSTD_initDStream_usingDDict(ZSTD_DStream* dctx, const ZSTD_DDict* ddict) { FORWARD_IF_ERROR( ZSTD_DCtx_reset(dctx, ZSTD_reset_session_only) , ""); FORWARD_IF_ERROR( ZSTD_DCtx_refDDict(dctx, ddict) , ""); return ZSTD_startingInputLength(dctx->format); } /* ZSTD_resetDStream() : * return : expected size, aka ZSTD_startingInputLength(). * this function cannot fail */ size_t ZSTD_resetDStream(ZSTD_DStream* dctx) { FORWARD_IF_ERROR(ZSTD_DCtx_reset(dctx, ZSTD_reset_session_only), ""); return ZSTD_startingInputLength(dctx->format); } size_t ZSTD_DCtx_refDDict(ZSTD_DCtx* dctx, const ZSTD_DDict* ddict) { RETURN_ERROR_IF(dctx->streamStage != zdss_init, stage_wrong, ""); ZSTD_clearDict(dctx); if (ddict) { dctx->ddict = ddict; dctx->dictUses = ZSTD_use_indefinitely; } return 0; } /* ZSTD_DCtx_setMaxWindowSize() : * note : no direct equivalence in ZSTD_DCtx_setParameter, * since this version sets windowSize, and the other sets windowLog */ size_t ZSTD_DCtx_setMaxWindowSize(ZSTD_DCtx* dctx, size_t maxWindowSize) { ZSTD_bounds const bounds = ZSTD_dParam_getBounds(ZSTD_d_windowLogMax); size_t const min = (size_t)1 << bounds.lowerBound; size_t const max = (size_t)1 << bounds.upperBound; RETURN_ERROR_IF(dctx->streamStage != zdss_init, stage_wrong, ""); RETURN_ERROR_IF(maxWindowSize < min, parameter_outOfBound, ""); RETURN_ERROR_IF(maxWindowSize > max, parameter_outOfBound, ""); dctx->maxWindowSize = maxWindowSize; return 0; } size_t ZSTD_DCtx_setFormat(ZSTD_DCtx* dctx, ZSTD_format_e format) { return ZSTD_DCtx_setParameter(dctx, ZSTD_d_format, format); } ZSTD_bounds ZSTD_dParam_getBounds(ZSTD_dParameter dParam) { ZSTD_bounds bounds = { 0, 0, 0 }; switch(dParam) { case ZSTD_d_windowLogMax: bounds.lowerBound = ZSTD_WINDOWLOG_ABSOLUTEMIN; bounds.upperBound = ZSTD_WINDOWLOG_MAX; return bounds; case ZSTD_d_format: bounds.lowerBound = (int)ZSTD_f_zstd1; bounds.upperBound = (int)ZSTD_f_zstd1_magicless; ZSTD_STATIC_ASSERT(ZSTD_f_zstd1 < ZSTD_f_zstd1_magicless); return bounds; case ZSTD_d_stableOutBuffer: bounds.lowerBound = (int)ZSTD_obm_buffered; bounds.upperBound = (int)ZSTD_obm_stable; return bounds; default:; } bounds.error = ERROR(parameter_unsupported); return bounds; } /* ZSTD_dParam_withinBounds: * @return 1 if value is within dParam bounds, * 0 otherwise */ static int ZSTD_dParam_withinBounds(ZSTD_dParameter dParam, int value) { ZSTD_bounds const bounds = ZSTD_dParam_getBounds(dParam); if (ZSTD_isError(bounds.error)) return 0; if (value < bounds.lowerBound) return 0; if (value > bounds.upperBound) return 0; return 1; } #define CHECK_DBOUNDS(p,v) { \ RETURN_ERROR_IF(!ZSTD_dParam_withinBounds(p, v), parameter_outOfBound, ""); \ } size_t ZSTD_DCtx_setParameter(ZSTD_DCtx* dctx, ZSTD_dParameter dParam, int value) { 
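/* advanced parameters may only be modified while the context is in the zdss_init stage, i.e. between decompression sessions */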
RETURN_ERROR_IF(dctx->streamStage != zdss_init, stage_wrong, ""); switch(dParam) { case ZSTD_d_windowLogMax: if (value == 0) value = ZSTD_WINDOWLOG_LIMIT_DEFAULT; CHECK_DBOUNDS(ZSTD_d_windowLogMax, value); dctx->maxWindowSize = ((size_t)1) << value; return 0; case ZSTD_d_format: CHECK_DBOUNDS(ZSTD_d_format, value); dctx->format = (ZSTD_format_e)value; return 0; case ZSTD_d_stableOutBuffer: CHECK_DBOUNDS(ZSTD_d_stableOutBuffer, value); dctx->outBufferMode = (ZSTD_outBufferMode_e)value; return 0; default:; } RETURN_ERROR(parameter_unsupported, ""); } size_t ZSTD_DCtx_reset(ZSTD_DCtx* dctx, ZSTD_ResetDirective reset) { if ( (reset == ZSTD_reset_session_only) || (reset == ZSTD_reset_session_and_parameters) ) { dctx->streamStage = zdss_init; dctx->noForwardProgress = 0; } if ( (reset == ZSTD_reset_parameters) || (reset == ZSTD_reset_session_and_parameters) ) { RETURN_ERROR_IF(dctx->streamStage != zdss_init, stage_wrong, ""); ZSTD_clearDict(dctx); dctx->format = ZSTD_f_zstd1; dctx->maxWindowSize = ZSTD_MAXWINDOWSIZE_DEFAULT; } return 0; } size_t ZSTD_sizeof_DStream(const ZSTD_DStream* dctx) { return ZSTD_sizeof_DCtx(dctx); } size_t ZSTD_decodingBufferSize_min(unsigned long long windowSize, unsigned long long frameContentSize) { size_t const blockSize = (size_t) MIN(windowSize, ZSTD_BLOCKSIZE_MAX); unsigned long long const neededRBSize = windowSize + blockSize + (WILDCOPY_OVERLENGTH * 2); unsigned long long const neededSize = MIN(frameContentSize, neededRBSize); size_t const minRBSize = (size_t) neededSize; RETURN_ERROR_IF((unsigned long long)minRBSize != neededSize, frameParameter_windowTooLarge, ""); return minRBSize; } size_t ZSTD_estimateDStreamSize(size_t windowSize) { size_t const blockSize = MIN(windowSize, ZSTD_BLOCKSIZE_MAX); size_t const inBuffSize = blockSize; /* no block can be larger */ size_t const outBuffSize = ZSTD_decodingBufferSize_min(windowSize, ZSTD_CONTENTSIZE_UNKNOWN); return ZSTD_estimateDCtxSize() + inBuffSize + outBuffSize; } size_t ZSTD_estimateDStreamSize_fromFrame(const void* src, size_t srcSize) { U32 const windowSizeMax = 1U << ZSTD_WINDOWLOG_MAX; /* note : should be user-selectable, but requires an additional parameter (or a dctx) */ ZSTD_frameHeader zfh; size_t const err = ZSTD_getFrameHeader(&zfh, src, srcSize); if (ZSTD_isError(err)) return err; RETURN_ERROR_IF(err>0, srcSize_wrong, ""); RETURN_ERROR_IF(zfh.windowSize > windowSizeMax, frameParameter_windowTooLarge, ""); return ZSTD_estimateDStreamSize((size_t)zfh.windowSize); } /* ***** Decompression ***** */ static int ZSTD_DCtx_isOverflow(ZSTD_DStream* zds, size_t const neededInBuffSize, size_t const neededOutBuffSize) { return (zds->inBuffSize + zds->outBuffSize) >= (neededInBuffSize + neededOutBuffSize) * ZSTD_WORKSPACETOOLARGE_FACTOR; } static void ZSTD_DCtx_updateOversizedDuration(ZSTD_DStream* zds, size_t const neededInBuffSize, size_t const neededOutBuffSize) { if (ZSTD_DCtx_isOverflow(zds, neededInBuffSize, neededOutBuffSize)) zds->oversizedDuration++; else zds->oversizedDuration = 0; } static int ZSTD_DCtx_isOversizedTooLong(ZSTD_DStream* zds) { return zds->oversizedDuration >= ZSTD_WORKSPACETOOLARGE_MAXDURATION; } /* Checks that the output buffer hasn't changed if ZSTD_obm_stable is used. */ static size_t ZSTD_checkOutBuffer(ZSTD_DStream const* zds, ZSTD_outBuffer const* output) { ZSTD_outBuffer const expect = zds->expectedOutBuffer; /* No requirement when ZSTD_obm_stable is not enabled. 
*/ if (zds->outBufferMode != ZSTD_obm_stable) return 0; /* Any buffer is allowed in zdss_init, this must be the same for every other call until * the context is reset. */ if (zds->streamStage == zdss_init) return 0; /* The buffer must match our expectation exactly. */ if (expect.dst == output->dst && expect.pos == output->pos && expect.size == output->size) return 0; RETURN_ERROR(dstBuffer_wrong, "ZSTD_obm_stable enabled but output differs!"); } /* Calls ZSTD_decompressContinue() with the right parameters for ZSTD_decompressStream() * and updates the stage and the output buffer state. This call is extracted so it can be * used both when reading directly from the ZSTD_inBuffer, and in buffered input mode. * NOTE: You must break after calling this function since the streamStage is modified. */ static size_t ZSTD_decompressContinueStream( ZSTD_DStream* zds, char** op, char* oend, void const* src, size_t srcSize) { int const isSkipFrame = ZSTD_isSkipFrame(zds); if (zds->outBufferMode == ZSTD_obm_buffered) { size_t const dstSize = isSkipFrame ? 0 : zds->outBuffSize - zds->outStart; size_t const decodedSize = ZSTD_decompressContinue(zds, zds->outBuff + zds->outStart, dstSize, src, srcSize); FORWARD_IF_ERROR(decodedSize, ""); if (!decodedSize && !isSkipFrame) { zds->streamStage = zdss_read; } else { zds->outEnd = zds->outStart + decodedSize; zds->streamStage = zdss_flush; } } else { /* Write directly into the output buffer */ size_t const dstSize = isSkipFrame ? 0 : oend - *op; size_t const decodedSize = ZSTD_decompressContinue(zds, *op, dstSize, src, srcSize); FORWARD_IF_ERROR(decodedSize, ""); *op += decodedSize; /* Flushing is not needed. */ zds->streamStage = zdss_read; assert(*op <= oend); assert(zds->outBufferMode == ZSTD_obm_stable); } return 0; } size_t ZSTD_decompressStream(ZSTD_DStream* zds, ZSTD_outBuffer* output, ZSTD_inBuffer* input) { const char* const src = (const char*)input->src; const char* const istart = input->pos != 0 ? src + input->pos : src; const char* const iend = input->size != 0 ? src + input->size : src; const char* ip = istart; char* const dst = (char*)output->dst; char* const ostart = output->pos != 0 ? dst + output->pos : dst; char* const oend = output->size != 0 ? dst + output->size : dst; char* op = ostart; U32 someMoreWork = 1; DEBUGLOG(5, "ZSTD_decompressStream"); RETURN_ERROR_IF( input->pos > input->size, srcSize_wrong, "forbidden. in: pos: %u vs size: %u", (U32)input->pos, (U32)input->size); RETURN_ERROR_IF( output->pos > output->size, dstSize_tooSmall, "forbidden. 
out: pos: %u vs size: %u", (U32)output->pos, (U32)output->size); DEBUGLOG(5, "input size : %u", (U32)(input->size - input->pos)); FORWARD_IF_ERROR(ZSTD_checkOutBuffer(zds, output), ""); while (someMoreWork) { switch(zds->streamStage) { case zdss_init : DEBUGLOG(5, "stage zdss_init => transparent reset "); zds->streamStage = zdss_loadHeader; zds->lhSize = zds->inPos = zds->outStart = zds->outEnd = 0; zds->legacyVersion = 0; zds->hostageByte = 0; zds->expectedOutBuffer = *output; /* fall-through */ case zdss_loadHeader : DEBUGLOG(5, "stage zdss_loadHeader (srcSize : %u)", (U32)(iend - ip)); #if defined(ZSTD_LEGACY_SUPPORT) && (ZSTD_LEGACY_SUPPORT>=1) if (zds->legacyVersion) { RETURN_ERROR_IF(zds->staticSize, memory_allocation, "legacy support is incompatible with static dctx"); { size_t const hint = ZSTD_decompressLegacyStream(zds->legacyContext, zds->legacyVersion, output, input); if (hint==0) zds->streamStage = zdss_init; return hint; } } #endif { size_t const hSize = ZSTD_getFrameHeader_advanced(&zds->fParams, zds->headerBuffer, zds->lhSize, zds->format); DEBUGLOG(5, "header size : %u", (U32)hSize); if (ZSTD_isError(hSize)) { #if defined(ZSTD_LEGACY_SUPPORT) && (ZSTD_LEGACY_SUPPORT>=1) U32 const legacyVersion = ZSTD_isLegacy(istart, iend-istart); if (legacyVersion) { ZSTD_DDict const* const ddict = ZSTD_getDDict(zds); const void* const dict = ddict ? ZSTD_DDict_dictContent(ddict) : NULL; size_t const dictSize = ddict ? ZSTD_DDict_dictSize(ddict) : 0; DEBUGLOG(5, "ZSTD_decompressStream: detected legacy version v0.%u", legacyVersion); RETURN_ERROR_IF(zds->staticSize, memory_allocation, "legacy support is incompatible with static dctx"); FORWARD_IF_ERROR(ZSTD_initLegacyStream(&zds->legacyContext, zds->previousLegacyVersion, legacyVersion, dict, dictSize), ""); zds->legacyVersion = zds->previousLegacyVersion = legacyVersion; { size_t const hint = ZSTD_decompressLegacyStream(zds->legacyContext, legacyVersion, output, input); if (hint==0) zds->streamStage = zdss_init; /* or stay in stage zdss_loadHeader */ return hint; } } #endif return hSize; /* error */ } if (hSize != 0) { /* need more input */ size_t const toLoad = hSize - zds->lhSize; /* if hSize!=0, hSize > zds->lhSize */ size_t const remainingInput = (size_t)(iend-ip); assert(iend >= ip); if (toLoad > remainingInput) { /* not enough input to load full header */ if (remainingInput > 0) { memcpy(zds->headerBuffer + zds->lhSize, ip, remainingInput); zds->lhSize += remainingInput; } input->pos = input->size; return (MAX((size_t)ZSTD_FRAMEHEADERSIZE_MIN(zds->format), hSize) - zds->lhSize) + ZSTD_blockHeaderSize; /* remaining header bytes + next block header */ } assert(ip != NULL); memcpy(zds->headerBuffer + zds->lhSize, ip, toLoad); zds->lhSize = hSize; ip += toLoad; break; } } /* check for single-pass mode opportunity */ if (zds->fParams.frameContentSize != ZSTD_CONTENTSIZE_UNKNOWN && zds->fParams.frameType != ZSTD_skippableFrame && (U64)(size_t)(oend-op) >= zds->fParams.frameContentSize) { size_t const cSize = ZSTD_findFrameCompressedSize(istart, iend-istart); if (cSize <= (size_t)(iend-istart)) { /* shortcut : using single-pass mode */ size_t const decompressedSize = ZSTD_decompress_usingDDict(zds, op, oend-op, istart, cSize, ZSTD_getDDict(zds)); if (ZSTD_isError(decompressedSize)) return decompressedSize; DEBUGLOG(4, "shortcut to single-pass ZSTD_decompress_usingDDict()") ip = istart + cSize; op += decompressedSize; zds->expected = 0; zds->streamStage = zdss_init; someMoreWork = 0; break; } } /* Check output buffer is large enough for 
ZSTD_odm_stable. */ if (zds->outBufferMode == ZSTD_obm_stable && zds->fParams.frameType != ZSTD_skippableFrame && zds->fParams.frameContentSize != ZSTD_CONTENTSIZE_UNKNOWN && (U64)(size_t)(oend-op) < zds->fParams.frameContentSize) { RETURN_ERROR(dstSize_tooSmall, "ZSTD_obm_stable passed but ZSTD_outBuffer is too small"); } /* Consume header (see ZSTDds_decodeFrameHeader) */ DEBUGLOG(4, "Consume header"); FORWARD_IF_ERROR(ZSTD_decompressBegin_usingDDict(zds, ZSTD_getDDict(zds)), ""); if ((MEM_readLE32(zds->headerBuffer) & ZSTD_MAGIC_SKIPPABLE_MASK) == ZSTD_MAGIC_SKIPPABLE_START) { /* skippable frame */ zds->expected = MEM_readLE32(zds->headerBuffer + ZSTD_FRAMEIDSIZE); zds->stage = ZSTDds_skipFrame; } else { FORWARD_IF_ERROR(ZSTD_decodeFrameHeader(zds, zds->headerBuffer, zds->lhSize), ""); zds->expected = ZSTD_blockHeaderSize; zds->stage = ZSTDds_decodeBlockHeader; } /* control buffer memory usage */ DEBUGLOG(4, "Control max memory usage (%u KB <= max %u KB)", (U32)(zds->fParams.windowSize >>10), (U32)(zds->maxWindowSize >> 10) ); zds->fParams.windowSize = MAX(zds->fParams.windowSize, 1U << ZSTD_WINDOWLOG_ABSOLUTEMIN); RETURN_ERROR_IF(zds->fParams.windowSize > zds->maxWindowSize, frameParameter_windowTooLarge, ""); /* Adapt buffer sizes to frame header instructions */ { size_t const neededInBuffSize = MAX(zds->fParams.blockSizeMax, 4 /* frame checksum */); size_t const neededOutBuffSize = zds->outBufferMode == ZSTD_obm_buffered ? ZSTD_decodingBufferSize_min(zds->fParams.windowSize, zds->fParams.frameContentSize) : 0; ZSTD_DCtx_updateOversizedDuration(zds, neededInBuffSize, neededOutBuffSize); { int const tooSmall = (zds->inBuffSize < neededInBuffSize) || (zds->outBuffSize < neededOutBuffSize); int const tooLarge = ZSTD_DCtx_isOversizedTooLong(zds); if (tooSmall || tooLarge) { size_t const bufferSize = neededInBuffSize + neededOutBuffSize; DEBUGLOG(4, "inBuff : from %u to %u", (U32)zds->inBuffSize, (U32)neededInBuffSize); DEBUGLOG(4, "outBuff : from %u to %u", (U32)zds->outBuffSize, (U32)neededOutBuffSize); if (zds->staticSize) { /* static DCtx */ DEBUGLOG(4, "staticSize : %u", (U32)zds->staticSize); assert(zds->staticSize >= sizeof(ZSTD_DCtx)); /* controlled at init */ RETURN_ERROR_IF( bufferSize > zds->staticSize - sizeof(ZSTD_DCtx), memory_allocation, ""); } else { ZSTD_free(zds->inBuff, zds->customMem); zds->inBuffSize = 0; zds->outBuffSize = 0; zds->inBuff = (char*)ZSTD_malloc(bufferSize, zds->customMem); RETURN_ERROR_IF(zds->inBuff == NULL, memory_allocation, ""); } zds->inBuffSize = neededInBuffSize; zds->outBuff = zds->inBuff + zds->inBuffSize; zds->outBuffSize = neededOutBuffSize; } } } zds->streamStage = zdss_read; /* fall-through */ case zdss_read: DEBUGLOG(5, "stage zdss_read"); { size_t const neededInSize = ZSTD_nextSrcSizeToDecompressWithInputSize(zds, iend - ip); DEBUGLOG(5, "neededInSize = %u", (U32)neededInSize); if (neededInSize==0) { /* end of frame */ zds->streamStage = zdss_init; someMoreWork = 0; break; } if ((size_t)(iend-ip) >= neededInSize) { /* decode directly from src */ FORWARD_IF_ERROR(ZSTD_decompressContinueStream(zds, &op, oend, ip, neededInSize), ""); ip += neededInSize; /* Function modifies the stage so we must break */ break; } } if (ip==iend) { someMoreWork = 0; break; } /* no more input */ zds->streamStage = zdss_load; /* fall-through */ case zdss_load: { size_t const neededInSize = ZSTD_nextSrcSizeToDecompress(zds); size_t const toLoad = neededInSize - zds->inPos; int const isSkipFrame = ZSTD_isSkipFrame(zds); size_t loadedSize; /* At this point we 
shouldn't be decompressing a block that we can stream. */ assert(neededInSize == ZSTD_nextSrcSizeToDecompressWithInputSize(zds, iend - ip)); if (isSkipFrame) { loadedSize = MIN(toLoad, (size_t)(iend-ip)); } else { RETURN_ERROR_IF(toLoad > zds->inBuffSize - zds->inPos, corruption_detected, "should never happen"); loadedSize = ZSTD_limitCopy(zds->inBuff + zds->inPos, toLoad, ip, iend-ip); } ip += loadedSize; zds->inPos += loadedSize; if (loadedSize < toLoad) { someMoreWork = 0; break; } /* not enough input, wait for more */ /* decode loaded input */ zds->inPos = 0; /* input is consumed */ FORWARD_IF_ERROR(ZSTD_decompressContinueStream(zds, &op, oend, zds->inBuff, neededInSize), ""); /* Function modifies the stage so we must break */ break; } case zdss_flush: { size_t const toFlushSize = zds->outEnd - zds->outStart; size_t const flushedSize = ZSTD_limitCopy(op, oend-op, zds->outBuff + zds->outStart, toFlushSize); op += flushedSize; zds->outStart += flushedSize; if (flushedSize == toFlushSize) { /* flush completed */ zds->streamStage = zdss_read; if ( (zds->outBuffSize < zds->fParams.frameContentSize) && (zds->outStart + zds->fParams.blockSizeMax > zds->outBuffSize) ) { DEBUGLOG(5, "restart filling outBuff from beginning (left:%i, needed:%u)", (int)(zds->outBuffSize - zds->outStart), (U32)zds->fParams.blockSizeMax); zds->outStart = zds->outEnd = 0; } break; } } /* cannot complete flush */ someMoreWork = 0; break; default: assert(0); /* impossible */ RETURN_ERROR(GENERIC, "impossible to reach"); /* some compiler require default to do something */ } } /* result */ input->pos = (size_t)(ip - (const char*)(input->src)); output->pos = (size_t)(op - (char*)(output->dst)); /* Update the expected output buffer for ZSTD_obm_stable. */ zds->expectedOutBuffer = *output; if ((ip==istart) && (op==ostart)) { /* no forward progress */ zds->noForwardProgress ++; if (zds->noForwardProgress >= ZSTD_NO_FORWARD_PROGRESS_MAX) { RETURN_ERROR_IF(op==oend, dstSize_tooSmall, ""); RETURN_ERROR_IF(ip==iend, srcSize_wrong, ""); assert(0); } } else { zds->noForwardProgress = 0; } { size_t nextSrcSizeHint = ZSTD_nextSrcSizeToDecompress(zds); if (!nextSrcSizeHint) { /* frame fully decoded */ if (zds->outEnd == zds->outStart) { /* output fully flushed */ if (zds->hostageByte) { if (input->pos >= input->size) { /* can't release hostage (not present) */ zds->streamStage = zdss_read; return 1; } input->pos++; /* release hostage */ } /* zds->hostageByte */ return 0; } /* zds->outEnd == zds->outStart */ if (!zds->hostageByte) { /* output not fully flushed; keep last byte as hostage; will be released when all output is flushed */ input->pos--; /* note : pos > 0, otherwise, impossible to finish reading last block */ zds->hostageByte=1; } return 1; } /* nextSrcSizeHint==0 */ nextSrcSizeHint += ZSTD_blockHeaderSize * (ZSTD_nextInputType(zds) == ZSTDnit_block); /* preload header of next block */ assert(zds->inPos <= nextSrcSizeHint); nextSrcSizeHint -= zds->inPos; /* part already loaded*/ return nextSrcSizeHint; } } size_t ZSTD_decompressStream_simpleArgs ( ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, size_t* dstPos, const void* src, size_t srcSize, size_t* srcPos) { ZSTD_outBuffer output = { dst, dstCapacity, *dstPos }; ZSTD_inBuffer input = { src, srcSize, *srcPos }; /* ZSTD_compress_generic() will check validity of dstPos and srcPos */ size_t const cErr = ZSTD_decompressStream(dctx, &output, &input); *dstPos = output.pos; *srcPos = input.pos; return cErr; } 
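/*
 * Illustrative sketch (not part of the upstream zstd sources): a typical driver
 * loop for ZSTD_decompressStream(), decompressing one or more frames from `fin`
 * into `fout`. The helper name and the use of stdio/malloc are assumptions made
 * for the example; only public zstd.h entry points are used, and the whole block
 * is guarded out of the library build.
 */
#if 0
#include <stdio.h>
#include <stdlib.h>
#include "zstd.h"

static int example_decompress_file(FILE* fin, FILE* fout)
{
    size_t const inSize  = ZSTD_DStreamInSize();   /* recommended input chunk size */
    size_t const outSize = ZSTD_DStreamOutSize();  /* large enough to flush at least one block */
    void* const inBuf  = malloc(inSize);
    void* const outBuf = malloc(outSize);
    ZSTD_DStream* const zds = ZSTD_createDStream();
    size_t lastRet = 0;
    int err = (inBuf == NULL) || (outBuf == NULL) || (zds == NULL);

    while (!err) {
        size_t const readSize = fread(inBuf, 1, inSize, fin);
        ZSTD_inBuffer input = { inBuf, readSize, 0 };
        if (readSize == 0) break;                      /* no more input */
        while (!err && input.pos < input.size) {
            ZSTD_outBuffer output = { outBuf, outSize, 0 };
            lastRet = ZSTD_decompressStream(zds, &output, &input);  /* 0 => frame done, >0 => hint for more input */
            if (ZSTD_isError(lastRet)) err = 1;
            else if (fwrite(outBuf, 1, output.pos, fout) != output.pos) err = 1;
        }
    }
    if (!err && lastRet != 0) err = 1;   /* input ended in the middle of a frame */

    ZSTD_freeDStream(zds);
    free(inBuf);
    free(outBuf);
    return err;
}
#endif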
borgbackup-1.1.15/src/borg/algorithms/zstd/lib/decompress/huf_decompress.c0000644000175000017500000014405013771325506026652 0ustar useruser00000000000000/* ****************************************************************** * huff0 huffman decoder, * part of Finite State Entropy library * Copyright (c) 2013-2020, Yann Collet, Facebook, Inc. * * You can contact the author at : * - FSE+HUF source repository : https://github.com/Cyan4973/FiniteStateEntropy * * This source code is licensed under both the BSD-style license (found in the * LICENSE file in the root directory of this source tree) and the GPLv2 (found * in the COPYING file in the root directory of this source tree). * You may select, at your option, one of the above-listed licenses. ****************************************************************** */ /* ************************************************************** * Dependencies ****************************************************************/ #include /* memcpy, memset */ #include "../common/compiler.h" #include "../common/bitstream.h" /* BIT_* */ #include "../common/fse.h" /* to compress headers */ #define HUF_STATIC_LINKING_ONLY #include "../common/huf.h" #include "../common/error_private.h" /* ************************************************************** * Macros ****************************************************************/ /* These two optional macros force the use one way or another of the two * Huffman decompression implementations. You can't force in both directions * at the same time. */ #if defined(HUF_FORCE_DECOMPRESS_X1) && \ defined(HUF_FORCE_DECOMPRESS_X2) #error "Cannot force the use of the X1 and X2 decoders at the same time!" #endif /* ************************************************************** * Error Management ****************************************************************/ #define HUF_isError ERR_isError /* ************************************************************** * Byte alignment for workSpace management ****************************************************************/ #define HUF_ALIGN(x, a) HUF_ALIGN_MASK((x), (a) - 1) #define HUF_ALIGN_MASK(x, mask) (((x) + (mask)) & ~(mask)) /* ************************************************************** * BMI2 Variant Wrappers ****************************************************************/ #if DYNAMIC_BMI2 #define HUF_DGEN(fn) \ \ static size_t fn##_default( \ void* dst, size_t dstSize, \ const void* cSrc, size_t cSrcSize, \ const HUF_DTable* DTable) \ { \ return fn##_body(dst, dstSize, cSrc, cSrcSize, DTable); \ } \ \ static TARGET_ATTRIBUTE("bmi2") size_t fn##_bmi2( \ void* dst, size_t dstSize, \ const void* cSrc, size_t cSrcSize, \ const HUF_DTable* DTable) \ { \ return fn##_body(dst, dstSize, cSrc, cSrcSize, DTable); \ } \ \ static size_t fn(void* dst, size_t dstSize, void const* cSrc, \ size_t cSrcSize, HUF_DTable const* DTable, int bmi2) \ { \ if (bmi2) { \ return fn##_bmi2(dst, dstSize, cSrc, cSrcSize, DTable); \ } \ return fn##_default(dst, dstSize, cSrc, cSrcSize, DTable); \ } #else #define HUF_DGEN(fn) \ static size_t fn(void* dst, size_t dstSize, void const* cSrc, \ size_t cSrcSize, HUF_DTable const* DTable, int bmi2) \ { \ (void)bmi2; \ return fn##_body(dst, dstSize, cSrc, cSrcSize, DTable); \ } #endif /*-***************************/ /* generic DTableDesc */ /*-***************************/ typedef struct { BYTE maxTableLog; BYTE tableType; BYTE tableLog; BYTE reserved; } DTableDesc; static DTableDesc HUF_getDTableDesc(const HUF_DTable* table) { DTableDesc dtd; memcpy(&dtd, table, 
sizeof(dtd)); return dtd; } #ifndef HUF_FORCE_DECOMPRESS_X2 /*-***************************/ /* single-symbol decoding */ /*-***************************/ typedef struct { BYTE byte; BYTE nbBits; } HUF_DEltX1; /* single-symbol decoding */ size_t HUF_readDTableX1_wksp(HUF_DTable* DTable, const void* src, size_t srcSize, void* workSpace, size_t wkspSize) { U32 tableLog = 0; U32 nbSymbols = 0; size_t iSize; void* const dtPtr = DTable + 1; HUF_DEltX1* const dt = (HUF_DEltX1*)dtPtr; U32* rankVal; BYTE* huffWeight; size_t spaceUsed32 = 0; rankVal = (U32 *)workSpace + spaceUsed32; spaceUsed32 += HUF_TABLELOG_ABSOLUTEMAX + 1; huffWeight = (BYTE *)((U32 *)workSpace + spaceUsed32); spaceUsed32 += HUF_ALIGN(HUF_SYMBOLVALUE_MAX + 1, sizeof(U32)) >> 2; if ((spaceUsed32 << 2) > wkspSize) return ERROR(tableLog_tooLarge); DEBUG_STATIC_ASSERT(sizeof(DTableDesc) == sizeof(HUF_DTable)); /* memset(huffWeight, 0, sizeof(huffWeight)); */ /* is not necessary, even though some analyzer complain ... */ iSize = HUF_readStats(huffWeight, HUF_SYMBOLVALUE_MAX + 1, rankVal, &nbSymbols, &tableLog, src, srcSize); if (HUF_isError(iSize)) return iSize; /* Table header */ { DTableDesc dtd = HUF_getDTableDesc(DTable); if (tableLog > (U32)(dtd.maxTableLog+1)) return ERROR(tableLog_tooLarge); /* DTable too small, Huffman tree cannot fit in */ dtd.tableType = 0; dtd.tableLog = (BYTE)tableLog; memcpy(DTable, &dtd, sizeof(dtd)); } /* Calculate starting value for each rank */ { U32 n, nextRankStart = 0; for (n=1; n> 1; size_t const uStart = rankVal[w]; size_t const uEnd = uStart + length; size_t u; HUF_DEltX1 D; D.byte = (BYTE)n; D.nbBits = (BYTE)(tableLog + 1 - w); rankVal[w] = (U32)uEnd; if (length < 4) { /* Use length in the loop bound so the compiler knows it is short. */ for (u = 0; u < length; ++u) dt[uStart + u] = D; } else { /* Unroll the loop 4 times, we know it is a power of 2. 
*/ for (u = uStart; u < uEnd; u += 4) { dt[u + 0] = D; dt[u + 1] = D; dt[u + 2] = D; dt[u + 3] = D; } } } } return iSize; } size_t HUF_readDTableX1(HUF_DTable* DTable, const void* src, size_t srcSize) { U32 workSpace[HUF_DECOMPRESS_WORKSPACE_SIZE_U32]; return HUF_readDTableX1_wksp(DTable, src, srcSize, workSpace, sizeof(workSpace)); } FORCE_INLINE_TEMPLATE BYTE HUF_decodeSymbolX1(BIT_DStream_t* Dstream, const HUF_DEltX1* dt, const U32 dtLog) { size_t const val = BIT_lookBitsFast(Dstream, dtLog); /* note : dtLog >= 1 */ BYTE const c = dt[val].byte; BIT_skipBits(Dstream, dt[val].nbBits); return c; } #define HUF_DECODE_SYMBOLX1_0(ptr, DStreamPtr) \ *ptr++ = HUF_decodeSymbolX1(DStreamPtr, dt, dtLog) #define HUF_DECODE_SYMBOLX1_1(ptr, DStreamPtr) \ if (MEM_64bits() || (HUF_TABLELOG_MAX<=12)) \ HUF_DECODE_SYMBOLX1_0(ptr, DStreamPtr) #define HUF_DECODE_SYMBOLX1_2(ptr, DStreamPtr) \ if (MEM_64bits()) \ HUF_DECODE_SYMBOLX1_0(ptr, DStreamPtr) HINT_INLINE size_t HUF_decodeStreamX1(BYTE* p, BIT_DStream_t* const bitDPtr, BYTE* const pEnd, const HUF_DEltX1* const dt, const U32 dtLog) { BYTE* const pStart = p; /* up to 4 symbols at a time */ while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) & (p < pEnd-3)) { HUF_DECODE_SYMBOLX1_2(p, bitDPtr); HUF_DECODE_SYMBOLX1_1(p, bitDPtr); HUF_DECODE_SYMBOLX1_2(p, bitDPtr); HUF_DECODE_SYMBOLX1_0(p, bitDPtr); } /* [0-3] symbols remaining */ if (MEM_32bits()) while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) & (p < pEnd)) HUF_DECODE_SYMBOLX1_0(p, bitDPtr); /* no more data to retrieve from bitstream, no need to reload */ while (p < pEnd) HUF_DECODE_SYMBOLX1_0(p, bitDPtr); return pEnd-pStart; } FORCE_INLINE_TEMPLATE size_t HUF_decompress1X1_usingDTable_internal_body( void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable) { BYTE* op = (BYTE*)dst; BYTE* const oend = op + dstSize; const void* dtPtr = DTable + 1; const HUF_DEltX1* const dt = (const HUF_DEltX1*)dtPtr; BIT_DStream_t bitD; DTableDesc const dtd = HUF_getDTableDesc(DTable); U32 const dtLog = dtd.tableLog; CHECK_F( BIT_initDStream(&bitD, cSrc, cSrcSize) ); HUF_decodeStreamX1(op, &bitD, oend, dt, dtLog); if (!BIT_endOfDStream(&bitD)) return ERROR(corruption_detected); return dstSize; } FORCE_INLINE_TEMPLATE size_t HUF_decompress4X1_usingDTable_internal_body( void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable) { /* Check */ if (cSrcSize < 10) return ERROR(corruption_detected); /* strict minimum : jump table + 1 byte per stream */ { const BYTE* const istart = (const BYTE*) cSrc; BYTE* const ostart = (BYTE*) dst; BYTE* const oend = ostart + dstSize; BYTE* const olimit = oend - 3; const void* const dtPtr = DTable + 1; const HUF_DEltX1* const dt = (const HUF_DEltX1*)dtPtr; /* Init */ BIT_DStream_t bitD1; BIT_DStream_t bitD2; BIT_DStream_t bitD3; BIT_DStream_t bitD4; size_t const length1 = MEM_readLE16(istart); size_t const length2 = MEM_readLE16(istart+2); size_t const length3 = MEM_readLE16(istart+4); size_t const length4 = cSrcSize - (length1 + length2 + length3 + 6); const BYTE* const istart1 = istart + 6; /* jumpTable */ const BYTE* const istart2 = istart1 + length1; const BYTE* const istart3 = istart2 + length2; const BYTE* const istart4 = istart3 + length3; const size_t segmentSize = (dstSize+3) / 4; BYTE* const opStart2 = ostart + segmentSize; BYTE* const opStart3 = opStart2 + segmentSize; BYTE* const opStart4 = opStart3 + segmentSize; BYTE* op1 = ostart; BYTE* op2 = opStart2; BYTE* op3 = opStart3; BYTE* op4 = opStart4; 
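    /* The compressed payload begins with a 6-byte jump table giving the sizes of
     * the first three bit-streams; the fourth size is implied by cSrcSize. Each
     * stream decodes one quarter of the destination (segmentSize), and op1..op4
     * advance in lock step so the main loop below can interleave all four decoders. */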
DTableDesc const dtd = HUF_getDTableDesc(DTable); U32 const dtLog = dtd.tableLog; U32 endSignal = 1; if (length4 > cSrcSize) return ERROR(corruption_detected); /* overflow */ CHECK_F( BIT_initDStream(&bitD1, istart1, length1) ); CHECK_F( BIT_initDStream(&bitD2, istart2, length2) ); CHECK_F( BIT_initDStream(&bitD3, istart3, length3) ); CHECK_F( BIT_initDStream(&bitD4, istart4, length4) ); /* up to 16 symbols per loop (4 symbols per stream) in 64-bit mode */ for ( ; (endSignal) & (op4 < olimit) ; ) { HUF_DECODE_SYMBOLX1_2(op1, &bitD1); HUF_DECODE_SYMBOLX1_2(op2, &bitD2); HUF_DECODE_SYMBOLX1_2(op3, &bitD3); HUF_DECODE_SYMBOLX1_2(op4, &bitD4); HUF_DECODE_SYMBOLX1_1(op1, &bitD1); HUF_DECODE_SYMBOLX1_1(op2, &bitD2); HUF_DECODE_SYMBOLX1_1(op3, &bitD3); HUF_DECODE_SYMBOLX1_1(op4, &bitD4); HUF_DECODE_SYMBOLX1_2(op1, &bitD1); HUF_DECODE_SYMBOLX1_2(op2, &bitD2); HUF_DECODE_SYMBOLX1_2(op3, &bitD3); HUF_DECODE_SYMBOLX1_2(op4, &bitD4); HUF_DECODE_SYMBOLX1_0(op1, &bitD1); HUF_DECODE_SYMBOLX1_0(op2, &bitD2); HUF_DECODE_SYMBOLX1_0(op3, &bitD3); HUF_DECODE_SYMBOLX1_0(op4, &bitD4); endSignal &= BIT_reloadDStreamFast(&bitD1) == BIT_DStream_unfinished; endSignal &= BIT_reloadDStreamFast(&bitD2) == BIT_DStream_unfinished; endSignal &= BIT_reloadDStreamFast(&bitD3) == BIT_DStream_unfinished; endSignal &= BIT_reloadDStreamFast(&bitD4) == BIT_DStream_unfinished; } /* check corruption */ /* note : should not be necessary : op# advance in lock step, and we control op4. * but curiously, binary generated by gcc 7.2 & 7.3 with -mbmi2 runs faster when >=1 test is present */ if (op1 > opStart2) return ERROR(corruption_detected); if (op2 > opStart3) return ERROR(corruption_detected); if (op3 > opStart4) return ERROR(corruption_detected); /* note : op4 supposed already verified within main loop */ /* finish bitStreams one by one */ HUF_decodeStreamX1(op1, &bitD1, opStart2, dt, dtLog); HUF_decodeStreamX1(op2, &bitD2, opStart3, dt, dtLog); HUF_decodeStreamX1(op3, &bitD3, opStart4, dt, dtLog); HUF_decodeStreamX1(op4, &bitD4, oend, dt, dtLog); /* check */ { U32 const endCheck = BIT_endOfDStream(&bitD1) & BIT_endOfDStream(&bitD2) & BIT_endOfDStream(&bitD3) & BIT_endOfDStream(&bitD4); if (!endCheck) return ERROR(corruption_detected); } /* decoded size */ return dstSize; } } typedef size_t (*HUF_decompress_usingDTable_t)(void *dst, size_t dstSize, const void *cSrc, size_t cSrcSize, const HUF_DTable *DTable); HUF_DGEN(HUF_decompress1X1_usingDTable_internal) HUF_DGEN(HUF_decompress4X1_usingDTable_internal) size_t HUF_decompress1X1_usingDTable( void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable) { DTableDesc dtd = HUF_getDTableDesc(DTable); if (dtd.tableType != 0) return ERROR(GENERIC); return HUF_decompress1X1_usingDTable_internal(dst, dstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0); } size_t HUF_decompress1X1_DCtx_wksp(HUF_DTable* DCtx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize) { const BYTE* ip = (const BYTE*) cSrc; size_t const hSize = HUF_readDTableX1_wksp(DCtx, cSrc, cSrcSize, workSpace, wkspSize); if (HUF_isError(hSize)) return hSize; if (hSize >= cSrcSize) return ERROR(srcSize_wrong); ip += hSize; cSrcSize -= hSize; return HUF_decompress1X1_usingDTable_internal(dst, dstSize, ip, cSrcSize, DCtx, /* bmi2 */ 0); } size_t HUF_decompress1X1_DCtx(HUF_DTable* DCtx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize) { U32 workSpace[HUF_DECOMPRESS_WORKSPACE_SIZE_U32]; return HUF_decompress1X1_DCtx_wksp(DCtx, dst, dstSize, cSrc, 
cSrcSize, workSpace, sizeof(workSpace)); } size_t HUF_decompress1X1 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize) { HUF_CREATE_STATIC_DTABLEX1(DTable, HUF_TABLELOG_MAX); return HUF_decompress1X1_DCtx (DTable, dst, dstSize, cSrc, cSrcSize); } size_t HUF_decompress4X1_usingDTable( void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable) { DTableDesc dtd = HUF_getDTableDesc(DTable); if (dtd.tableType != 0) return ERROR(GENERIC); return HUF_decompress4X1_usingDTable_internal(dst, dstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0); } static size_t HUF_decompress4X1_DCtx_wksp_bmi2(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize, int bmi2) { const BYTE* ip = (const BYTE*) cSrc; size_t const hSize = HUF_readDTableX1_wksp (dctx, cSrc, cSrcSize, workSpace, wkspSize); if (HUF_isError(hSize)) return hSize; if (hSize >= cSrcSize) return ERROR(srcSize_wrong); ip += hSize; cSrcSize -= hSize; return HUF_decompress4X1_usingDTable_internal(dst, dstSize, ip, cSrcSize, dctx, bmi2); } size_t HUF_decompress4X1_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize) { return HUF_decompress4X1_DCtx_wksp_bmi2(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize, 0); } size_t HUF_decompress4X1_DCtx (HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize) { U32 workSpace[HUF_DECOMPRESS_WORKSPACE_SIZE_U32]; return HUF_decompress4X1_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, sizeof(workSpace)); } size_t HUF_decompress4X1 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize) { HUF_CREATE_STATIC_DTABLEX1(DTable, HUF_TABLELOG_MAX); return HUF_decompress4X1_DCtx(DTable, dst, dstSize, cSrc, cSrcSize); } #endif /* HUF_FORCE_DECOMPRESS_X2 */ #ifndef HUF_FORCE_DECOMPRESS_X1 /* *************************/ /* double-symbols decoding */ /* *************************/ typedef struct { U16 sequence; BYTE nbBits; BYTE length; } HUF_DEltX2; /* double-symbols decoding */ typedef struct { BYTE symbol; BYTE weight; } sortedSymbol_t; typedef U32 rankValCol_t[HUF_TABLELOG_MAX + 1]; typedef rankValCol_t rankVal_t[HUF_TABLELOG_MAX]; /* HUF_fillDTableX2Level2() : * `rankValOrigin` must be a table of at least (HUF_TABLELOG_MAX + 1) U32 */ static void HUF_fillDTableX2Level2(HUF_DEltX2* DTable, U32 sizeLog, const U32 consumed, const U32* rankValOrigin, const int minWeight, const sortedSymbol_t* sortedSymbols, const U32 sortedListSize, U32 nbBitsBaseline, U16 baseSeq) { HUF_DEltX2 DElt; U32 rankVal[HUF_TABLELOG_MAX + 1]; /* get pre-calculated rankVal */ memcpy(rankVal, rankValOrigin, sizeof(rankVal)); /* fill skipped values */ if (minWeight>1) { U32 i, skipSize = rankVal[minWeight]; MEM_writeLE16(&(DElt.sequence), baseSeq); DElt.nbBits = (BYTE)(consumed); DElt.length = 1; for (i = 0; i < skipSize; i++) DTable[i] = DElt; } /* fill DTable */ { U32 s; for (s=0; s= 1 */ rankVal[weight] += length; } } } static void HUF_fillDTableX2(HUF_DEltX2* DTable, const U32 targetLog, const sortedSymbol_t* sortedList, const U32 sortedListSize, const U32* rankStart, rankVal_t rankValOrigin, const U32 maxWeight, const U32 nbBitsBaseline) { U32 rankVal[HUF_TABLELOG_MAX + 1]; const int scaleLog = nbBitsBaseline - targetLog; /* note : targetLog >= srcLog, hence scaleLog <= 1 */ const U32 minBits = nbBitsBaseline - maxWeight; U32 s; memcpy(rankVal, rankValOrigin, sizeof(rankVal)); /* fill DTable */ for (s=0; s= minBits) { /* enough room for a 
second symbol */ U32 sortedRank; int minWeight = nbBits + scaleLog; if (minWeight < 1) minWeight = 1; sortedRank = rankStart[minWeight]; HUF_fillDTableX2Level2(DTable+start, targetLog-nbBits, nbBits, rankValOrigin[nbBits], minWeight, sortedList+sortedRank, sortedListSize-sortedRank, nbBitsBaseline, symbol); } else { HUF_DEltX2 DElt; MEM_writeLE16(&(DElt.sequence), symbol); DElt.nbBits = (BYTE)(nbBits); DElt.length = 1; { U32 const end = start + length; U32 u; for (u = start; u < end; u++) DTable[u] = DElt; } } rankVal[weight] += length; } } size_t HUF_readDTableX2_wksp(HUF_DTable* DTable, const void* src, size_t srcSize, void* workSpace, size_t wkspSize) { U32 tableLog, maxW, sizeOfSort, nbSymbols; DTableDesc dtd = HUF_getDTableDesc(DTable); U32 const maxTableLog = dtd.maxTableLog; size_t iSize; void* dtPtr = DTable+1; /* force compiler to avoid strict-aliasing */ HUF_DEltX2* const dt = (HUF_DEltX2*)dtPtr; U32 *rankStart; rankValCol_t* rankVal; U32* rankStats; U32* rankStart0; sortedSymbol_t* sortedSymbol; BYTE* weightList; size_t spaceUsed32 = 0; rankVal = (rankValCol_t *)((U32 *)workSpace + spaceUsed32); spaceUsed32 += (sizeof(rankValCol_t) * HUF_TABLELOG_MAX) >> 2; rankStats = (U32 *)workSpace + spaceUsed32; spaceUsed32 += HUF_TABLELOG_MAX + 1; rankStart0 = (U32 *)workSpace + spaceUsed32; spaceUsed32 += HUF_TABLELOG_MAX + 2; sortedSymbol = (sortedSymbol_t *)workSpace + (spaceUsed32 * sizeof(U32)) / sizeof(sortedSymbol_t); spaceUsed32 += HUF_ALIGN(sizeof(sortedSymbol_t) * (HUF_SYMBOLVALUE_MAX + 1), sizeof(U32)) >> 2; weightList = (BYTE *)((U32 *)workSpace + spaceUsed32); spaceUsed32 += HUF_ALIGN(HUF_SYMBOLVALUE_MAX + 1, sizeof(U32)) >> 2; if ((spaceUsed32 << 2) > wkspSize) return ERROR(tableLog_tooLarge); rankStart = rankStart0 + 1; memset(rankStats, 0, sizeof(U32) * (2 * HUF_TABLELOG_MAX + 2 + 1)); DEBUG_STATIC_ASSERT(sizeof(HUF_DEltX2) == sizeof(HUF_DTable)); /* if compiler fails here, assertion is wrong */ if (maxTableLog > HUF_TABLELOG_MAX) return ERROR(tableLog_tooLarge); /* memset(weightList, 0, sizeof(weightList)); */ /* is not necessary, even though some analyzer complain ... 
*/ iSize = HUF_readStats(weightList, HUF_SYMBOLVALUE_MAX + 1, rankStats, &nbSymbols, &tableLog, src, srcSize); if (HUF_isError(iSize)) return iSize; /* check result */ if (tableLog > maxTableLog) return ERROR(tableLog_tooLarge); /* DTable can't fit code depth */ /* find maxWeight */ for (maxW = tableLog; rankStats[maxW]==0; maxW--) {} /* necessarily finds a solution before 0 */ /* Get start index of each weight */ { U32 w, nextRankStart = 0; for (w=1; w> consumed; } } } } HUF_fillDTableX2(dt, maxTableLog, sortedSymbol, sizeOfSort, rankStart0, rankVal, maxW, tableLog+1); dtd.tableLog = (BYTE)maxTableLog; dtd.tableType = 1; memcpy(DTable, &dtd, sizeof(dtd)); return iSize; } size_t HUF_readDTableX2(HUF_DTable* DTable, const void* src, size_t srcSize) { U32 workSpace[HUF_DECOMPRESS_WORKSPACE_SIZE_U32]; return HUF_readDTableX2_wksp(DTable, src, srcSize, workSpace, sizeof(workSpace)); } FORCE_INLINE_TEMPLATE U32 HUF_decodeSymbolX2(void* op, BIT_DStream_t* DStream, const HUF_DEltX2* dt, const U32 dtLog) { size_t const val = BIT_lookBitsFast(DStream, dtLog); /* note : dtLog >= 1 */ memcpy(op, dt+val, 2); BIT_skipBits(DStream, dt[val].nbBits); return dt[val].length; } FORCE_INLINE_TEMPLATE U32 HUF_decodeLastSymbolX2(void* op, BIT_DStream_t* DStream, const HUF_DEltX2* dt, const U32 dtLog) { size_t const val = BIT_lookBitsFast(DStream, dtLog); /* note : dtLog >= 1 */ memcpy(op, dt+val, 1); if (dt[val].length==1) BIT_skipBits(DStream, dt[val].nbBits); else { if (DStream->bitsConsumed < (sizeof(DStream->bitContainer)*8)) { BIT_skipBits(DStream, dt[val].nbBits); if (DStream->bitsConsumed > (sizeof(DStream->bitContainer)*8)) /* ugly hack; works only because it's the last symbol. Note : can't easily extract nbBits from just this symbol */ DStream->bitsConsumed = (sizeof(DStream->bitContainer)*8); } } return 1; } #define HUF_DECODE_SYMBOLX2_0(ptr, DStreamPtr) \ ptr += HUF_decodeSymbolX2(ptr, DStreamPtr, dt, dtLog) #define HUF_DECODE_SYMBOLX2_1(ptr, DStreamPtr) \ if (MEM_64bits() || (HUF_TABLELOG_MAX<=12)) \ ptr += HUF_decodeSymbolX2(ptr, DStreamPtr, dt, dtLog) #define HUF_DECODE_SYMBOLX2_2(ptr, DStreamPtr) \ if (MEM_64bits()) \ ptr += HUF_decodeSymbolX2(ptr, DStreamPtr, dt, dtLog) HINT_INLINE size_t HUF_decodeStreamX2(BYTE* p, BIT_DStream_t* bitDPtr, BYTE* const pEnd, const HUF_DEltX2* const dt, const U32 dtLog) { BYTE* const pStart = p; /* up to 8 symbols at a time */ while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) & (p < pEnd-(sizeof(bitDPtr->bitContainer)-1))) { HUF_DECODE_SYMBOLX2_2(p, bitDPtr); HUF_DECODE_SYMBOLX2_1(p, bitDPtr); HUF_DECODE_SYMBOLX2_2(p, bitDPtr); HUF_DECODE_SYMBOLX2_0(p, bitDPtr); } /* closer to end : up to 2 symbols at a time */ while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) & (p <= pEnd-2)) HUF_DECODE_SYMBOLX2_0(p, bitDPtr); while (p <= pEnd-2) HUF_DECODE_SYMBOLX2_0(p, bitDPtr); /* no need to reload : reached the end of DStream */ if (p < pEnd) p += HUF_decodeLastSymbolX2(p, bitDPtr, dt, dtLog); return p-pStart; } FORCE_INLINE_TEMPLATE size_t HUF_decompress1X2_usingDTable_internal_body( void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable) { BIT_DStream_t bitD; /* Init */ CHECK_F( BIT_initDStream(&bitD, cSrc, cSrcSize) ); /* decode */ { BYTE* const ostart = (BYTE*) dst; BYTE* const oend = ostart + dstSize; const void* const dtPtr = DTable+1; /* force compiler to not use strict-aliasing */ const HUF_DEltX2* const dt = (const HUF_DEltX2*)dtPtr; DTableDesc const dtd = HUF_getDTableDesc(DTable); HUF_decodeStreamX2(ostart, 
&bitD, oend, dt, dtd.tableLog); } /* check */ if (!BIT_endOfDStream(&bitD)) return ERROR(corruption_detected); /* decoded size */ return dstSize; } FORCE_INLINE_TEMPLATE size_t HUF_decompress4X2_usingDTable_internal_body( void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable) { if (cSrcSize < 10) return ERROR(corruption_detected); /* strict minimum : jump table + 1 byte per stream */ { const BYTE* const istart = (const BYTE*) cSrc; BYTE* const ostart = (BYTE*) dst; BYTE* const oend = ostart + dstSize; BYTE* const olimit = oend - (sizeof(size_t)-1); const void* const dtPtr = DTable+1; const HUF_DEltX2* const dt = (const HUF_DEltX2*)dtPtr; /* Init */ BIT_DStream_t bitD1; BIT_DStream_t bitD2; BIT_DStream_t bitD3; BIT_DStream_t bitD4; size_t const length1 = MEM_readLE16(istart); size_t const length2 = MEM_readLE16(istart+2); size_t const length3 = MEM_readLE16(istart+4); size_t const length4 = cSrcSize - (length1 + length2 + length3 + 6); const BYTE* const istart1 = istart + 6; /* jumpTable */ const BYTE* const istart2 = istart1 + length1; const BYTE* const istart3 = istart2 + length2; const BYTE* const istart4 = istart3 + length3; size_t const segmentSize = (dstSize+3) / 4; BYTE* const opStart2 = ostart + segmentSize; BYTE* const opStart3 = opStart2 + segmentSize; BYTE* const opStart4 = opStart3 + segmentSize; BYTE* op1 = ostart; BYTE* op2 = opStart2; BYTE* op3 = opStart3; BYTE* op4 = opStart4; U32 endSignal = 1; DTableDesc const dtd = HUF_getDTableDesc(DTable); U32 const dtLog = dtd.tableLog; if (length4 > cSrcSize) return ERROR(corruption_detected); /* overflow */ CHECK_F( BIT_initDStream(&bitD1, istart1, length1) ); CHECK_F( BIT_initDStream(&bitD2, istart2, length2) ); CHECK_F( BIT_initDStream(&bitD3, istart3, length3) ); CHECK_F( BIT_initDStream(&bitD4, istart4, length4) ); /* 16-32 symbols per loop (4-8 symbols per stream) */ for ( ; (endSignal) & (op4 < olimit); ) { #if defined(__clang__) && (defined(__x86_64__) || defined(__i386__)) HUF_DECODE_SYMBOLX2_2(op1, &bitD1); HUF_DECODE_SYMBOLX2_1(op1, &bitD1); HUF_DECODE_SYMBOLX2_2(op1, &bitD1); HUF_DECODE_SYMBOLX2_0(op1, &bitD1); HUF_DECODE_SYMBOLX2_2(op2, &bitD2); HUF_DECODE_SYMBOLX2_1(op2, &bitD2); HUF_DECODE_SYMBOLX2_2(op2, &bitD2); HUF_DECODE_SYMBOLX2_0(op2, &bitD2); endSignal &= BIT_reloadDStreamFast(&bitD1) == BIT_DStream_unfinished; endSignal &= BIT_reloadDStreamFast(&bitD2) == BIT_DStream_unfinished; HUF_DECODE_SYMBOLX2_2(op3, &bitD3); HUF_DECODE_SYMBOLX2_1(op3, &bitD3); HUF_DECODE_SYMBOLX2_2(op3, &bitD3); HUF_DECODE_SYMBOLX2_0(op3, &bitD3); HUF_DECODE_SYMBOLX2_2(op4, &bitD4); HUF_DECODE_SYMBOLX2_1(op4, &bitD4); HUF_DECODE_SYMBOLX2_2(op4, &bitD4); HUF_DECODE_SYMBOLX2_0(op4, &bitD4); endSignal &= BIT_reloadDStreamFast(&bitD3) == BIT_DStream_unfinished; endSignal &= BIT_reloadDStreamFast(&bitD4) == BIT_DStream_unfinished; #else HUF_DECODE_SYMBOLX2_2(op1, &bitD1); HUF_DECODE_SYMBOLX2_2(op2, &bitD2); HUF_DECODE_SYMBOLX2_2(op3, &bitD3); HUF_DECODE_SYMBOLX2_2(op4, &bitD4); HUF_DECODE_SYMBOLX2_1(op1, &bitD1); HUF_DECODE_SYMBOLX2_1(op2, &bitD2); HUF_DECODE_SYMBOLX2_1(op3, &bitD3); HUF_DECODE_SYMBOLX2_1(op4, &bitD4); HUF_DECODE_SYMBOLX2_2(op1, &bitD1); HUF_DECODE_SYMBOLX2_2(op2, &bitD2); HUF_DECODE_SYMBOLX2_2(op3, &bitD3); HUF_DECODE_SYMBOLX2_2(op4, &bitD4); HUF_DECODE_SYMBOLX2_0(op1, &bitD1); HUF_DECODE_SYMBOLX2_0(op2, &bitD2); HUF_DECODE_SYMBOLX2_0(op3, &bitD3); HUF_DECODE_SYMBOLX2_0(op4, &bitD4); endSignal = (U32)LIKELY( (BIT_reloadDStreamFast(&bitD1) == BIT_DStream_unfinished) & 
(BIT_reloadDStreamFast(&bitD2) == BIT_DStream_unfinished) & (BIT_reloadDStreamFast(&bitD3) == BIT_DStream_unfinished) & (BIT_reloadDStreamFast(&bitD4) == BIT_DStream_unfinished)); #endif } /* check corruption */ if (op1 > opStart2) return ERROR(corruption_detected); if (op2 > opStart3) return ERROR(corruption_detected); if (op3 > opStart4) return ERROR(corruption_detected); /* note : op4 already verified within main loop */ /* finish bitStreams one by one */ HUF_decodeStreamX2(op1, &bitD1, opStart2, dt, dtLog); HUF_decodeStreamX2(op2, &bitD2, opStart3, dt, dtLog); HUF_decodeStreamX2(op3, &bitD3, opStart4, dt, dtLog); HUF_decodeStreamX2(op4, &bitD4, oend, dt, dtLog); /* check */ { U32 const endCheck = BIT_endOfDStream(&bitD1) & BIT_endOfDStream(&bitD2) & BIT_endOfDStream(&bitD3) & BIT_endOfDStream(&bitD4); if (!endCheck) return ERROR(corruption_detected); } /* decoded size */ return dstSize; } } HUF_DGEN(HUF_decompress1X2_usingDTable_internal) HUF_DGEN(HUF_decompress4X2_usingDTable_internal) size_t HUF_decompress1X2_usingDTable( void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable) { DTableDesc dtd = HUF_getDTableDesc(DTable); if (dtd.tableType != 1) return ERROR(GENERIC); return HUF_decompress1X2_usingDTable_internal(dst, dstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0); } size_t HUF_decompress1X2_DCtx_wksp(HUF_DTable* DCtx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize) { const BYTE* ip = (const BYTE*) cSrc; size_t const hSize = HUF_readDTableX2_wksp(DCtx, cSrc, cSrcSize, workSpace, wkspSize); if (HUF_isError(hSize)) return hSize; if (hSize >= cSrcSize) return ERROR(srcSize_wrong); ip += hSize; cSrcSize -= hSize; return HUF_decompress1X2_usingDTable_internal(dst, dstSize, ip, cSrcSize, DCtx, /* bmi2 */ 0); } size_t HUF_decompress1X2_DCtx(HUF_DTable* DCtx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize) { U32 workSpace[HUF_DECOMPRESS_WORKSPACE_SIZE_U32]; return HUF_decompress1X2_DCtx_wksp(DCtx, dst, dstSize, cSrc, cSrcSize, workSpace, sizeof(workSpace)); } size_t HUF_decompress1X2 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize) { HUF_CREATE_STATIC_DTABLEX2(DTable, HUF_TABLELOG_MAX); return HUF_decompress1X2_DCtx(DTable, dst, dstSize, cSrc, cSrcSize); } size_t HUF_decompress4X2_usingDTable( void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable) { DTableDesc dtd = HUF_getDTableDesc(DTable); if (dtd.tableType != 1) return ERROR(GENERIC); return HUF_decompress4X2_usingDTable_internal(dst, dstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0); } static size_t HUF_decompress4X2_DCtx_wksp_bmi2(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize, int bmi2) { const BYTE* ip = (const BYTE*) cSrc; size_t hSize = HUF_readDTableX2_wksp(dctx, cSrc, cSrcSize, workSpace, wkspSize); if (HUF_isError(hSize)) return hSize; if (hSize >= cSrcSize) return ERROR(srcSize_wrong); ip += hSize; cSrcSize -= hSize; return HUF_decompress4X2_usingDTable_internal(dst, dstSize, ip, cSrcSize, dctx, bmi2); } size_t HUF_decompress4X2_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize) { return HUF_decompress4X2_DCtx_wksp_bmi2(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize, /* bmi2 */ 0); } size_t HUF_decompress4X2_DCtx(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize) { U32 workSpace[HUF_DECOMPRESS_WORKSPACE_SIZE_U32]; 
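    /* On-stack scratch area consumed by HUF_readDTableX2_wksp() while it rebuilds
     * the double-symbol DTable (rank counters, sorted symbols, weight list);
     * using it keeps this convenience wrapper free of heap allocation. */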
return HUF_decompress4X2_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, sizeof(workSpace)); } size_t HUF_decompress4X2 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize) { HUF_CREATE_STATIC_DTABLEX2(DTable, HUF_TABLELOG_MAX); return HUF_decompress4X2_DCtx(DTable, dst, dstSize, cSrc, cSrcSize); } #endif /* HUF_FORCE_DECOMPRESS_X1 */ /* ***********************************/ /* Universal decompression selectors */ /* ***********************************/ size_t HUF_decompress1X_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable) { DTableDesc const dtd = HUF_getDTableDesc(DTable); #if defined(HUF_FORCE_DECOMPRESS_X1) (void)dtd; assert(dtd.tableType == 0); return HUF_decompress1X1_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0); #elif defined(HUF_FORCE_DECOMPRESS_X2) (void)dtd; assert(dtd.tableType == 1); return HUF_decompress1X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0); #else return dtd.tableType ? HUF_decompress1X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0) : HUF_decompress1X1_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0); #endif } size_t HUF_decompress4X_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable) { DTableDesc const dtd = HUF_getDTableDesc(DTable); #if defined(HUF_FORCE_DECOMPRESS_X1) (void)dtd; assert(dtd.tableType == 0); return HUF_decompress4X1_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0); #elif defined(HUF_FORCE_DECOMPRESS_X2) (void)dtd; assert(dtd.tableType == 1); return HUF_decompress4X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0); #else return dtd.tableType ? HUF_decompress4X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0) : HUF_decompress4X1_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0); #endif } #if !defined(HUF_FORCE_DECOMPRESS_X1) && !defined(HUF_FORCE_DECOMPRESS_X2) typedef struct { U32 tableTime; U32 decode256Time; } algo_time_t; static const algo_time_t algoTime[16 /* Quantization */][3 /* single, double, quad */] = { /* single, double, quad */ {{0,0}, {1,1}, {2,2}}, /* Q==0 : impossible */ {{0,0}, {1,1}, {2,2}}, /* Q==1 : impossible */ {{ 38,130}, {1313, 74}, {2151, 38}}, /* Q == 2 : 12-18% */ {{ 448,128}, {1353, 74}, {2238, 41}}, /* Q == 3 : 18-25% */ {{ 556,128}, {1353, 74}, {2238, 47}}, /* Q == 4 : 25-32% */ {{ 714,128}, {1418, 74}, {2436, 53}}, /* Q == 5 : 32-38% */ {{ 883,128}, {1437, 74}, {2464, 61}}, /* Q == 6 : 38-44% */ {{ 897,128}, {1515, 75}, {2622, 68}}, /* Q == 7 : 44-50% */ {{ 926,128}, {1613, 75}, {2730, 75}}, /* Q == 8 : 50-56% */ {{ 947,128}, {1729, 77}, {3359, 77}}, /* Q == 9 : 56-62% */ {{1107,128}, {2083, 81}, {4006, 84}}, /* Q ==10 : 62-69% */ {{1177,128}, {2379, 87}, {4785, 88}}, /* Q ==11 : 69-75% */ {{1242,128}, {2415, 93}, {5155, 84}}, /* Q ==12 : 75-81% */ {{1349,128}, {2644,106}, {5260,106}}, /* Q ==13 : 81-87% */ {{1455,128}, {2422,124}, {4174,124}}, /* Q ==14 : 87-93% */ {{ 722,128}, {1891,145}, {1936,146}}, /* Q ==15 : 93-99% */ }; #endif /** HUF_selectDecoder() : * Tells which decoder is likely to decode faster, * based on a set of pre-computed metrics. * @return : 0==HUF_decompress4X1, 1==HUF_decompress4X2 . 
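 *  The choice uses a small cost model: the ratio is quantized into 16 buckets
 *  (Q = cSrcSize*16/dstSize, capped at 15) and each decoder's estimated time is
 *  tableTime + decode256Time * (dstSize>>8) from the algoTime[] table above,
 *  with a 1/8 penalty added to the double-symbol decoder, favoring the variant
 *  that uses less memory.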
* Assumption : 0 < dstSize <= 128 KB */ U32 HUF_selectDecoder (size_t dstSize, size_t cSrcSize) { assert(dstSize > 0); assert(dstSize <= 128*1024); #if defined(HUF_FORCE_DECOMPRESS_X1) (void)dstSize; (void)cSrcSize; return 0; #elif defined(HUF_FORCE_DECOMPRESS_X2) (void)dstSize; (void)cSrcSize; return 1; #else /* decoder timing evaluation */ { U32 const Q = (cSrcSize >= dstSize) ? 15 : (U32)(cSrcSize * 16 / dstSize); /* Q < 16 */ U32 const D256 = (U32)(dstSize >> 8); U32 const DTime0 = algoTime[Q][0].tableTime + (algoTime[Q][0].decode256Time * D256); U32 DTime1 = algoTime[Q][1].tableTime + (algoTime[Q][1].decode256Time * D256); DTime1 += DTime1 >> 3; /* advantage to algorithm using less memory, to reduce cache eviction */ return DTime1 < DTime0; } #endif } typedef size_t (*decompressionAlgo)(void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); size_t HUF_decompress (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize) { #if !defined(HUF_FORCE_DECOMPRESS_X1) && !defined(HUF_FORCE_DECOMPRESS_X2) static const decompressionAlgo decompress[2] = { HUF_decompress4X1, HUF_decompress4X2 }; #endif /* validation checks */ if (dstSize == 0) return ERROR(dstSize_tooSmall); if (cSrcSize > dstSize) return ERROR(corruption_detected); /* invalid */ if (cSrcSize == dstSize) { memcpy(dst, cSrc, dstSize); return dstSize; } /* not compressed */ if (cSrcSize == 1) { memset(dst, *(const BYTE*)cSrc, dstSize); return dstSize; } /* RLE */ { U32 const algoNb = HUF_selectDecoder(dstSize, cSrcSize); #if defined(HUF_FORCE_DECOMPRESS_X1) (void)algoNb; assert(algoNb == 0); return HUF_decompress4X1(dst, dstSize, cSrc, cSrcSize); #elif defined(HUF_FORCE_DECOMPRESS_X2) (void)algoNb; assert(algoNb == 1); return HUF_decompress4X2(dst, dstSize, cSrc, cSrcSize); #else return decompress[algoNb](dst, dstSize, cSrc, cSrcSize); #endif } } size_t HUF_decompress4X_DCtx (HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize) { /* validation checks */ if (dstSize == 0) return ERROR(dstSize_tooSmall); if (cSrcSize > dstSize) return ERROR(corruption_detected); /* invalid */ if (cSrcSize == dstSize) { memcpy(dst, cSrc, dstSize); return dstSize; } /* not compressed */ if (cSrcSize == 1) { memset(dst, *(const BYTE*)cSrc, dstSize); return dstSize; } /* RLE */ { U32 const algoNb = HUF_selectDecoder(dstSize, cSrcSize); #if defined(HUF_FORCE_DECOMPRESS_X1) (void)algoNb; assert(algoNb == 0); return HUF_decompress4X1_DCtx(dctx, dst, dstSize, cSrc, cSrcSize); #elif defined(HUF_FORCE_DECOMPRESS_X2) (void)algoNb; assert(algoNb == 1); return HUF_decompress4X2_DCtx(dctx, dst, dstSize, cSrc, cSrcSize); #else return algoNb ? 
HUF_decompress4X2_DCtx(dctx, dst, dstSize, cSrc, cSrcSize) : HUF_decompress4X1_DCtx(dctx, dst, dstSize, cSrc, cSrcSize) ; #endif } } size_t HUF_decompress4X_hufOnly(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize) { U32 workSpace[HUF_DECOMPRESS_WORKSPACE_SIZE_U32]; return HUF_decompress4X_hufOnly_wksp(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, sizeof(workSpace)); } size_t HUF_decompress4X_hufOnly_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize) { /* validation checks */ if (dstSize == 0) return ERROR(dstSize_tooSmall); if (cSrcSize == 0) return ERROR(corruption_detected); { U32 const algoNb = HUF_selectDecoder(dstSize, cSrcSize); #if defined(HUF_FORCE_DECOMPRESS_X1) (void)algoNb; assert(algoNb == 0); return HUF_decompress4X1_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize); #elif defined(HUF_FORCE_DECOMPRESS_X2) (void)algoNb; assert(algoNb == 1); return HUF_decompress4X2_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize); #else return algoNb ? HUF_decompress4X2_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize): HUF_decompress4X1_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize); #endif } } size_t HUF_decompress1X_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize) { /* validation checks */ if (dstSize == 0) return ERROR(dstSize_tooSmall); if (cSrcSize > dstSize) return ERROR(corruption_detected); /* invalid */ if (cSrcSize == dstSize) { memcpy(dst, cSrc, dstSize); return dstSize; } /* not compressed */ if (cSrcSize == 1) { memset(dst, *(const BYTE*)cSrc, dstSize); return dstSize; } /* RLE */ { U32 const algoNb = HUF_selectDecoder(dstSize, cSrcSize); #if defined(HUF_FORCE_DECOMPRESS_X1) (void)algoNb; assert(algoNb == 0); return HUF_decompress1X1_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize); #elif defined(HUF_FORCE_DECOMPRESS_X2) (void)algoNb; assert(algoNb == 1); return HUF_decompress1X2_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize); #else return algoNb ? HUF_decompress1X2_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize): HUF_decompress1X1_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize); #endif } } size_t HUF_decompress1X_DCtx(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize) { U32 workSpace[HUF_DECOMPRESS_WORKSPACE_SIZE_U32]; return HUF_decompress1X_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, sizeof(workSpace)); } size_t HUF_decompress1X_usingDTable_bmi2(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable, int bmi2) { DTableDesc const dtd = HUF_getDTableDesc(DTable); #if defined(HUF_FORCE_DECOMPRESS_X1) (void)dtd; assert(dtd.tableType == 0); return HUF_decompress1X1_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, bmi2); #elif defined(HUF_FORCE_DECOMPRESS_X2) (void)dtd; assert(dtd.tableType == 1); return HUF_decompress1X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, bmi2); #else return dtd.tableType ? 
HUF_decompress1X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, bmi2) : HUF_decompress1X1_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, bmi2); #endif } #ifndef HUF_FORCE_DECOMPRESS_X2 size_t HUF_decompress1X1_DCtx_wksp_bmi2(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize, int bmi2) { const BYTE* ip = (const BYTE*) cSrc; size_t const hSize = HUF_readDTableX1_wksp(dctx, cSrc, cSrcSize, workSpace, wkspSize); if (HUF_isError(hSize)) return hSize; if (hSize >= cSrcSize) return ERROR(srcSize_wrong); ip += hSize; cSrcSize -= hSize; return HUF_decompress1X1_usingDTable_internal(dst, dstSize, ip, cSrcSize, dctx, bmi2); } #endif size_t HUF_decompress4X_usingDTable_bmi2(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable, int bmi2) { DTableDesc const dtd = HUF_getDTableDesc(DTable); #if defined(HUF_FORCE_DECOMPRESS_X1) (void)dtd; assert(dtd.tableType == 0); return HUF_decompress4X1_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, bmi2); #elif defined(HUF_FORCE_DECOMPRESS_X2) (void)dtd; assert(dtd.tableType == 1); return HUF_decompress4X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, bmi2); #else return dtd.tableType ? HUF_decompress4X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, bmi2) : HUF_decompress4X1_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, bmi2); #endif } size_t HUF_decompress4X_hufOnly_wksp_bmi2(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize, int bmi2) { /* validation checks */ if (dstSize == 0) return ERROR(dstSize_tooSmall); if (cSrcSize == 0) return ERROR(corruption_detected); { U32 const algoNb = HUF_selectDecoder(dstSize, cSrcSize); #if defined(HUF_FORCE_DECOMPRESS_X1) (void)algoNb; assert(algoNb == 0); return HUF_decompress4X1_DCtx_wksp_bmi2(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize, bmi2); #elif defined(HUF_FORCE_DECOMPRESS_X2) (void)algoNb; assert(algoNb == 1); return HUF_decompress4X2_DCtx_wksp_bmi2(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize, bmi2); #else return algoNb ? HUF_decompress4X2_DCtx_wksp_bmi2(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize, bmi2) : HUF_decompress4X1_DCtx_wksp_bmi2(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize, bmi2); #endif } } borgbackup-1.1.15/src/borg/algorithms/zstd/lib/decompress/zstd_decompress_internal.h0000644000175000017500000001644313771325506030761 0ustar useruser00000000000000/* * Copyright (c) 2016-2020, Yann Collet, Facebook, Inc. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the * LICENSE file in the root directory of this source tree) and the GPLv2 (found * in the COPYING file in the root directory of this source tree). * You may select, at your option, one of the above-listed licenses. 
*/ /* zstd_decompress_internal: * objects and definitions shared within lib/decompress modules */ #ifndef ZSTD_DECOMPRESS_INTERNAL_H #define ZSTD_DECOMPRESS_INTERNAL_H /*-******************************************************* * Dependencies *********************************************************/ #include "../common/mem.h" /* BYTE, U16, U32 */ #include "../common/zstd_internal.h" /* ZSTD_seqSymbol */ /*-******************************************************* * Constants *********************************************************/ static const U32 LL_base[MaxLL+1] = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 18, 20, 22, 24, 28, 32, 40, 48, 64, 0x80, 0x100, 0x200, 0x400, 0x800, 0x1000, 0x2000, 0x4000, 0x8000, 0x10000 }; static const U32 OF_base[MaxOff+1] = { 0, 1, 1, 5, 0xD, 0x1D, 0x3D, 0x7D, 0xFD, 0x1FD, 0x3FD, 0x7FD, 0xFFD, 0x1FFD, 0x3FFD, 0x7FFD, 0xFFFD, 0x1FFFD, 0x3FFFD, 0x7FFFD, 0xFFFFD, 0x1FFFFD, 0x3FFFFD, 0x7FFFFD, 0xFFFFFD, 0x1FFFFFD, 0x3FFFFFD, 0x7FFFFFD, 0xFFFFFFD, 0x1FFFFFFD, 0x3FFFFFFD, 0x7FFFFFFD }; static const U32 OF_bits[MaxOff+1] = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31 }; static const U32 ML_base[MaxML+1] = { 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 37, 39, 41, 43, 47, 51, 59, 67, 83, 99, 0x83, 0x103, 0x203, 0x403, 0x803, 0x1003, 0x2003, 0x4003, 0x8003, 0x10003 }; /*-******************************************************* * Decompression types *********************************************************/ typedef struct { U32 fastMode; U32 tableLog; } ZSTD_seqSymbol_header; typedef struct { U16 nextState; BYTE nbAdditionalBits; BYTE nbBits; U32 baseValue; } ZSTD_seqSymbol; #define SEQSYMBOL_TABLE_SIZE(log) (1 + (1 << (log))) typedef struct { ZSTD_seqSymbol LLTable[SEQSYMBOL_TABLE_SIZE(LLFSELog)]; /* Note : Space reserved for FSE Tables */ ZSTD_seqSymbol OFTable[SEQSYMBOL_TABLE_SIZE(OffFSELog)]; /* is also used as temporary workspace while building hufTable during DDict creation */ ZSTD_seqSymbol MLTable[SEQSYMBOL_TABLE_SIZE(MLFSELog)]; /* and therefore must be at least HUF_DECOMPRESS_WORKSPACE_SIZE large */ HUF_DTable hufTable[HUF_DTABLE_SIZE(HufLog)]; /* can accommodate HUF_decompress4X */ U32 rep[ZSTD_REP_NUM]; } ZSTD_entropyDTables_t; typedef enum { ZSTDds_getFrameHeaderSize, ZSTDds_decodeFrameHeader, ZSTDds_decodeBlockHeader, ZSTDds_decompressBlock, ZSTDds_decompressLastBlock, ZSTDds_checkChecksum, ZSTDds_decodeSkippableHeader, ZSTDds_skipFrame } ZSTD_dStage; typedef enum { zdss_init=0, zdss_loadHeader, zdss_read, zdss_load, zdss_flush } ZSTD_dStreamStage; typedef enum { ZSTD_use_indefinitely = -1, /* Use the dictionary indefinitely */ ZSTD_dont_use = 0, /* Do not use the dictionary (if one exists free it) */ ZSTD_use_once = 1 /* Use the dictionary once and set to ZSTD_dont_use */ } ZSTD_dictUses_e; typedef enum { ZSTD_obm_buffered = 0, /* Buffer the output */ ZSTD_obm_stable = 1 /* ZSTD_outBuffer is stable */ } ZSTD_outBufferMode_e; struct ZSTD_DCtx_s { const ZSTD_seqSymbol* LLTptr; const ZSTD_seqSymbol* MLTptr; const ZSTD_seqSymbol* OFTptr; const HUF_DTable* HUFptr; ZSTD_entropyDTables_t entropy; U32 workspace[HUF_DECOMPRESS_WORKSPACE_SIZE_U32]; /* space needed when building huffman tables */ const void* previousDstEnd; /* detect continuity */ const void* prefixStart; /* start of current segment */ const void* virtualStart; /* virtual start of previous segment if it was just before current one 
*/ const void* dictEnd; /* end of previous segment */ size_t expected; ZSTD_frameHeader fParams; U64 decodedSize; blockType_e bType; /* used in ZSTD_decompressContinue(), store blockType between block header decoding and block decompression stages */ ZSTD_dStage stage; U32 litEntropy; U32 fseEntropy; XXH64_state_t xxhState; size_t headerSize; ZSTD_format_e format; const BYTE* litPtr; ZSTD_customMem customMem; size_t litSize; size_t rleSize; size_t staticSize; int bmi2; /* == 1 if the CPU supports BMI2 and 0 otherwise. CPU support is determined dynamically once per context lifetime. */ /* dictionary */ ZSTD_DDict* ddictLocal; const ZSTD_DDict* ddict; /* set by ZSTD_initDStream_usingDDict(), or ZSTD_DCtx_refDDict() */ U32 dictID; int ddictIsCold; /* if == 1 : dictionary is "new" for working context, and presumed "cold" (not in cpu cache) */ ZSTD_dictUses_e dictUses; /* streaming */ ZSTD_dStreamStage streamStage; char* inBuff; size_t inBuffSize; size_t inPos; size_t maxWindowSize; char* outBuff; size_t outBuffSize; size_t outStart; size_t outEnd; size_t lhSize; void* legacyContext; U32 previousLegacyVersion; U32 legacyVersion; U32 hostageByte; int noForwardProgress; ZSTD_outBufferMode_e outBufferMode; ZSTD_outBuffer expectedOutBuffer; /* workspace */ BYTE litBuffer[ZSTD_BLOCKSIZE_MAX + WILDCOPY_OVERLENGTH]; BYTE headerBuffer[ZSTD_FRAMEHEADERSIZE_MAX]; size_t oversizedDuration; #ifdef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION void const* dictContentBeginForFuzzing; void const* dictContentEndForFuzzing; #endif }; /* typedef'd to ZSTD_DCtx within "zstd.h" */ /*-******************************************************* * Shared internal functions *********************************************************/ /*! ZSTD_loadDEntropy() : * dict : must point at beginning of a valid zstd dictionary. * @return : size of dictionary header (size of magic number + dict ID + entropy tables) */ size_t ZSTD_loadDEntropy(ZSTD_entropyDTables_t* entropy, const void* const dict, size_t const dictSize); /*! ZSTD_checkContinuity() : * check if next `dst` follows previous position, where decompression ended. * If yes, do nothing (continue on current segment). * If not, classify previous segment as "external dictionary", and start a new segment. * This function cannot fail. */ void ZSTD_checkContinuity(ZSTD_DCtx* dctx, const void* dst); #endif /* ZSTD_DECOMPRESS_INTERNAL_H */ borgbackup-1.1.15/src/borg/algorithms/zstd/lib/decompress/zstd_decompress_block.c0000644000175000017500000017117513771325506030236 0ustar useruser00000000000000/* * Copyright (c) 2016-2020, Yann Collet, Facebook, Inc. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the * LICENSE file in the root directory of this source tree) and the GPLv2 (found * in the COPYING file in the root directory of this source tree). * You may select, at your option, one of the above-listed licenses. 
*/ /* zstd_decompress_block : * this module takes care of decompressing _compressed_ block */ /*-******************************************************* * Dependencies *********************************************************/ #include /* memcpy, memmove, memset */ #include "../common/compiler.h" /* prefetch */ #include "../common/cpu.h" /* bmi2 */ #include "../common/mem.h" /* low level memory routines */ #define FSE_STATIC_LINKING_ONLY #include "../common/fse.h" #define HUF_STATIC_LINKING_ONLY #include "../common/huf.h" #include "../common/zstd_internal.h" #include "zstd_decompress_internal.h" /* ZSTD_DCtx */ #include "zstd_ddict.h" /* ZSTD_DDictDictContent */ #include "zstd_decompress_block.h" /*_******************************************************* * Macros **********************************************************/ /* These two optional macros force the use one way or another of the two * ZSTD_decompressSequences implementations. You can't force in both directions * at the same time. */ #if defined(ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT) && \ defined(ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG) #error "Cannot force the use of the short and the long ZSTD_decompressSequences variants!" #endif /*_******************************************************* * Memory operations **********************************************************/ static void ZSTD_copy4(void* dst, const void* src) { memcpy(dst, src, 4); } /*-************************************************************* * Block decoding ***************************************************************/ /*! ZSTD_getcBlockSize() : * Provides the size of compressed block from block header `src` */ size_t ZSTD_getcBlockSize(const void* src, size_t srcSize, blockProperties_t* bpPtr) { RETURN_ERROR_IF(srcSize < ZSTD_blockHeaderSize, srcSize_wrong, ""); { U32 const cBlockHeader = MEM_readLE24(src); U32 const cSize = cBlockHeader >> 3; bpPtr->lastBlock = cBlockHeader & 1; bpPtr->blockType = (blockType_e)((cBlockHeader >> 1) & 3); bpPtr->origSize = cSize; /* only useful for RLE */ if (bpPtr->blockType == bt_rle) return 1; RETURN_ERROR_IF(bpPtr->blockType == bt_reserved, corruption_detected, ""); return cSize; } } /* Hidden declaration for fullbench */ size_t ZSTD_decodeLiteralsBlock(ZSTD_DCtx* dctx, const void* src, size_t srcSize); /*! 
ZSTD_decodeLiteralsBlock() : * @return : nb of bytes read from src (< srcSize ) * note : symbol not declared but exposed for fullbench */ size_t ZSTD_decodeLiteralsBlock(ZSTD_DCtx* dctx, const void* src, size_t srcSize) /* note : srcSize < BLOCKSIZE */ { DEBUGLOG(5, "ZSTD_decodeLiteralsBlock"); RETURN_ERROR_IF(srcSize < MIN_CBLOCK_SIZE, corruption_detected, ""); { const BYTE* const istart = (const BYTE*) src; symbolEncodingType_e const litEncType = (symbolEncodingType_e)(istart[0] & 3); switch(litEncType) { case set_repeat: DEBUGLOG(5, "set_repeat flag : re-using stats from previous compressed literals block"); RETURN_ERROR_IF(dctx->litEntropy==0, dictionary_corrupted, ""); /* fall-through */ case set_compressed: RETURN_ERROR_IF(srcSize < 5, corruption_detected, "srcSize >= MIN_CBLOCK_SIZE == 3; here we need up to 5 for case 3"); { size_t lhSize, litSize, litCSize; U32 singleStream=0; U32 const lhlCode = (istart[0] >> 2) & 3; U32 const lhc = MEM_readLE32(istart); size_t hufSuccess; switch(lhlCode) { case 0: case 1: default: /* note : default is impossible, since lhlCode into [0..3] */ /* 2 - 2 - 10 - 10 */ singleStream = !lhlCode; lhSize = 3; litSize = (lhc >> 4) & 0x3FF; litCSize = (lhc >> 14) & 0x3FF; break; case 2: /* 2 - 2 - 14 - 14 */ lhSize = 4; litSize = (lhc >> 4) & 0x3FFF; litCSize = lhc >> 18; break; case 3: /* 2 - 2 - 18 - 18 */ lhSize = 5; litSize = (lhc >> 4) & 0x3FFFF; litCSize = (lhc >> 22) + ((size_t)istart[4] << 10); break; } RETURN_ERROR_IF(litSize > ZSTD_BLOCKSIZE_MAX, corruption_detected, ""); RETURN_ERROR_IF(litCSize + lhSize > srcSize, corruption_detected, ""); /* prefetch huffman table if cold */ if (dctx->ddictIsCold && (litSize > 768 /* heuristic */)) { PREFETCH_AREA(dctx->HUFptr, sizeof(dctx->entropy.hufTable)); } if (litEncType==set_repeat) { if (singleStream) { hufSuccess = HUF_decompress1X_usingDTable_bmi2( dctx->litBuffer, litSize, istart+lhSize, litCSize, dctx->HUFptr, dctx->bmi2); } else { hufSuccess = HUF_decompress4X_usingDTable_bmi2( dctx->litBuffer, litSize, istart+lhSize, litCSize, dctx->HUFptr, dctx->bmi2); } } else { if (singleStream) { #if defined(HUF_FORCE_DECOMPRESS_X2) hufSuccess = HUF_decompress1X_DCtx_wksp( dctx->entropy.hufTable, dctx->litBuffer, litSize, istart+lhSize, litCSize, dctx->workspace, sizeof(dctx->workspace)); #else hufSuccess = HUF_decompress1X1_DCtx_wksp_bmi2( dctx->entropy.hufTable, dctx->litBuffer, litSize, istart+lhSize, litCSize, dctx->workspace, sizeof(dctx->workspace), dctx->bmi2); #endif } else { hufSuccess = HUF_decompress4X_hufOnly_wksp_bmi2( dctx->entropy.hufTable, dctx->litBuffer, litSize, istart+lhSize, litCSize, dctx->workspace, sizeof(dctx->workspace), dctx->bmi2); } } RETURN_ERROR_IF(HUF_isError(hufSuccess), corruption_detected, ""); dctx->litPtr = dctx->litBuffer; dctx->litSize = litSize; dctx->litEntropy = 1; if (litEncType==set_compressed) dctx->HUFptr = dctx->entropy.hufTable; memset(dctx->litBuffer + dctx->litSize, 0, WILDCOPY_OVERLENGTH); return litCSize + lhSize; } case set_basic: { size_t litSize, lhSize; U32 const lhlCode = ((istart[0]) >> 2) & 3; switch(lhlCode) { case 0: case 2: default: /* note : default is impossible, since lhlCode into [0..3] */ lhSize = 1; litSize = istart[0] >> 3; break; case 1: lhSize = 2; litSize = MEM_readLE16(istart) >> 4; break; case 3: lhSize = 3; litSize = MEM_readLE24(istart) >> 4; break; } if (lhSize+litSize+WILDCOPY_OVERLENGTH > srcSize) { /* risk reading beyond src buffer with wildcopy */ RETURN_ERROR_IF(litSize+lhSize > srcSize, corruption_detected, ""); 
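                /* Literals that sit too close to the end of the source are copied
                 * into litBuffer and padded with WILDCOPY_OVERLENGTH zero bytes, so
                 * later wildcopies cannot read past the input; literals further from
                 * the end are referenced in place below. */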
memcpy(dctx->litBuffer, istart+lhSize, litSize); dctx->litPtr = dctx->litBuffer; dctx->litSize = litSize; memset(dctx->litBuffer + dctx->litSize, 0, WILDCOPY_OVERLENGTH); return lhSize+litSize; } /* direct reference into compressed stream */ dctx->litPtr = istart+lhSize; dctx->litSize = litSize; return lhSize+litSize; } case set_rle: { U32 const lhlCode = ((istart[0]) >> 2) & 3; size_t litSize, lhSize; switch(lhlCode) { case 0: case 2: default: /* note : default is impossible, since lhlCode into [0..3] */ lhSize = 1; litSize = istart[0] >> 3; break; case 1: lhSize = 2; litSize = MEM_readLE16(istart) >> 4; break; case 3: lhSize = 3; litSize = MEM_readLE24(istart) >> 4; RETURN_ERROR_IF(srcSize<4, corruption_detected, "srcSize >= MIN_CBLOCK_SIZE == 3; here we need lhSize+1 = 4"); break; } RETURN_ERROR_IF(litSize > ZSTD_BLOCKSIZE_MAX, corruption_detected, ""); memset(dctx->litBuffer, istart[lhSize], litSize + WILDCOPY_OVERLENGTH); dctx->litPtr = dctx->litBuffer; dctx->litSize = litSize; return lhSize+1; } default: RETURN_ERROR(corruption_detected, "impossible"); } } } /* Default FSE distribution tables. * These are pre-calculated FSE decoding tables using default distributions as defined in specification : * https://github.com/facebook/zstd/blob/master/doc/zstd_compression_format.md#default-distributions * They were generated programmatically with following method : * - start from default distributions, present in /lib/common/zstd_internal.h * - generate tables normally, using ZSTD_buildFSETable() * - printout the content of tables * - pretify output, report below, test with fuzzer to ensure it's correct */ /* Default FSE distribution table for Literal Lengths */ static const ZSTD_seqSymbol LL_defaultDTable[(1<tableLog = 0; DTableH->fastMode = 0; cell->nbBits = 0; cell->nextState = 0; assert(nbAddBits < 255); cell->nbAdditionalBits = (BYTE)nbAddBits; cell->baseValue = baseValue; } /* ZSTD_buildFSETable() : * generate FSE decoding table for one symbol (ll, ml or off) * cannot fail if input is valid => * all inputs are presumed validated at this stage */ void ZSTD_buildFSETable(ZSTD_seqSymbol* dt, const short* normalizedCounter, unsigned maxSymbolValue, const U32* baseValue, const U32* nbAdditionalBits, unsigned tableLog) { ZSTD_seqSymbol* const tableDecode = dt+1; U16 symbolNext[MaxSeq+1]; U32 const maxSV1 = maxSymbolValue + 1; U32 const tableSize = 1 << tableLog; U32 highThreshold = tableSize-1; /* Sanity Checks */ assert(maxSymbolValue <= MaxSeq); assert(tableLog <= MaxFSELog); /* Init, lay down lowprob symbols */ { ZSTD_seqSymbol_header DTableH; DTableH.tableLog = tableLog; DTableH.fastMode = 1; { S16 const largeLimit= (S16)(1 << (tableLog-1)); U32 s; for (s=0; s= largeLimit) DTableH.fastMode=0; assert(normalizedCounter[s]>=0); symbolNext[s] = (U16)normalizedCounter[s]; } } } memcpy(dt, &DTableH, sizeof(DTableH)); } /* Spread symbols */ { U32 const tableMask = tableSize-1; U32 const step = FSE_TABLESTEP(tableSize); U32 s, position = 0; for (s=0; s highThreshold) position = (position + step) & tableMask; /* lowprob area */ } } assert(position == 0); /* position must reach all cells once, otherwise normalizedCounter is incorrect */ } /* Build Decoding table */ { U32 u; for (u=0; u max, corruption_detected, ""); { U32 const symbol = *(const BYTE*)src; U32 const baseline = baseValue[symbol]; U32 const nbBits = nbAdditionalBits[symbol]; ZSTD_buildSeqTable_rle(DTableSpace, baseline, nbBits); } *DTablePtr = DTableSpace; return 1; case set_basic : *DTablePtr = defaultTable; return 0; case 
set_repeat: RETURN_ERROR_IF(!flagRepeatTable, corruption_detected, ""); /* prefetch FSE table if used */ if (ddictIsCold && (nbSeq > 24 /* heuristic */)) { const void* const pStart = *DTablePtr; size_t const pSize = sizeof(ZSTD_seqSymbol) * (SEQSYMBOL_TABLE_SIZE(maxLog)); PREFETCH_AREA(pStart, pSize); } return 0; case set_compressed : { unsigned tableLog; S16 norm[MaxSeq+1]; size_t const headerSize = FSE_readNCount(norm, &max, &tableLog, src, srcSize); RETURN_ERROR_IF(FSE_isError(headerSize), corruption_detected, ""); RETURN_ERROR_IF(tableLog > maxLog, corruption_detected, ""); ZSTD_buildFSETable(DTableSpace, norm, max, baseValue, nbAdditionalBits, tableLog); *DTablePtr = DTableSpace; return headerSize; } default : assert(0); RETURN_ERROR(GENERIC, "impossible"); } } size_t ZSTD_decodeSeqHeaders(ZSTD_DCtx* dctx, int* nbSeqPtr, const void* src, size_t srcSize) { const BYTE* const istart = (const BYTE* const)src; const BYTE* const iend = istart + srcSize; const BYTE* ip = istart; int nbSeq; DEBUGLOG(5, "ZSTD_decodeSeqHeaders"); /* check */ RETURN_ERROR_IF(srcSize < MIN_SEQUENCES_SIZE, srcSize_wrong, ""); /* SeqHead */ nbSeq = *ip++; if (!nbSeq) { *nbSeqPtr=0; RETURN_ERROR_IF(srcSize != 1, srcSize_wrong, ""); return 1; } if (nbSeq > 0x7F) { if (nbSeq == 0xFF) { RETURN_ERROR_IF(ip+2 > iend, srcSize_wrong, ""); nbSeq = MEM_readLE16(ip) + LONGNBSEQ, ip+=2; } else { RETURN_ERROR_IF(ip >= iend, srcSize_wrong, ""); nbSeq = ((nbSeq-0x80)<<8) + *ip++; } } *nbSeqPtr = nbSeq; /* FSE table descriptors */ RETURN_ERROR_IF(ip+1 > iend, srcSize_wrong, ""); /* minimum possible size: 1 byte for symbol encoding types */ { symbolEncodingType_e const LLtype = (symbolEncodingType_e)(*ip >> 6); symbolEncodingType_e const OFtype = (symbolEncodingType_e)((*ip >> 4) & 3); symbolEncodingType_e const MLtype = (symbolEncodingType_e)((*ip >> 2) & 3); ip++; /* Build DTables */ { size_t const llhSize = ZSTD_buildSeqTable(dctx->entropy.LLTable, &dctx->LLTptr, LLtype, MaxLL, LLFSELog, ip, iend-ip, LL_base, LL_bits, LL_defaultDTable, dctx->fseEntropy, dctx->ddictIsCold, nbSeq); RETURN_ERROR_IF(ZSTD_isError(llhSize), corruption_detected, "ZSTD_buildSeqTable failed"); ip += llhSize; } { size_t const ofhSize = ZSTD_buildSeqTable(dctx->entropy.OFTable, &dctx->OFTptr, OFtype, MaxOff, OffFSELog, ip, iend-ip, OF_base, OF_bits, OF_defaultDTable, dctx->fseEntropy, dctx->ddictIsCold, nbSeq); RETURN_ERROR_IF(ZSTD_isError(ofhSize), corruption_detected, "ZSTD_buildSeqTable failed"); ip += ofhSize; } { size_t const mlhSize = ZSTD_buildSeqTable(dctx->entropy.MLTable, &dctx->MLTptr, MLtype, MaxML, MLFSELog, ip, iend-ip, ML_base, ML_bits, ML_defaultDTable, dctx->fseEntropy, dctx->ddictIsCold, nbSeq); RETURN_ERROR_IF(ZSTD_isError(mlhSize), corruption_detected, "ZSTD_buildSeqTable failed"); ip += mlhSize; } } return ip-istart; } typedef struct { size_t litLength; size_t matchLength; size_t offset; const BYTE* match; } seq_t; typedef struct { size_t state; const ZSTD_seqSymbol* table; } ZSTD_fseState; typedef struct { BIT_DStream_t DStream; ZSTD_fseState stateLL; ZSTD_fseState stateOffb; ZSTD_fseState stateML; size_t prevOffset[ZSTD_REP_NUM]; const BYTE* prefixStart; const BYTE* dictEnd; size_t pos; } seqState_t; /*! ZSTD_overlapCopy8() : * Copies 8 bytes from ip to op and updates op and ip where ip <= op. * If the offset is < 8 then the offset is spread to at least 8 bytes. 
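* (mechanism, as implemented below : the first 4 bytes are copied one at a time, *ip is then advanced by dec32table[offset] so the repeated pattern can be copied again with ZSTD_copy4, and finally pulled back by dec64table[offset] ; once both pointers advance by 8, the distance *op - *ip is at least 8, allowing plain 8-byte copies afterwards)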
* * Precondition: *ip <= *op * Postcondition: *op - *op >= 8 */ HINT_INLINE void ZSTD_overlapCopy8(BYTE** op, BYTE const** ip, size_t offset) { assert(*ip <= *op); if (offset < 8) { /* close range match, overlap */ static const U32 dec32table[] = { 0, 1, 2, 1, 4, 4, 4, 4 }; /* added */ static const int dec64table[] = { 8, 8, 8, 7, 8, 9,10,11 }; /* subtracted */ int const sub2 = dec64table[offset]; (*op)[0] = (*ip)[0]; (*op)[1] = (*ip)[1]; (*op)[2] = (*ip)[2]; (*op)[3] = (*ip)[3]; *ip += dec32table[offset]; ZSTD_copy4(*op+4, *ip); *ip -= sub2; } else { ZSTD_copy8(*op, *ip); } *ip += 8; *op += 8; assert(*op - *ip >= 8); } /*! ZSTD_safecopy() : * Specialized version of memcpy() that is allowed to READ up to WILDCOPY_OVERLENGTH past the input buffer * and write up to 16 bytes past oend_w (op >= oend_w is allowed). * This function is only called in the uncommon case where the sequence is near the end of the block. It * should be fast for a single long sequence, but can be slow for several short sequences. * * @param ovtype controls the overlap detection * - ZSTD_no_overlap: The source and destination are guaranteed to be at least WILDCOPY_VECLEN bytes apart. * - ZSTD_overlap_src_before_dst: The src and dst may overlap and may be any distance apart. * The src buffer must be before the dst buffer. */ static void ZSTD_safecopy(BYTE* op, BYTE* const oend_w, BYTE const* ip, ptrdiff_t length, ZSTD_overlap_e ovtype) { ptrdiff_t const diff = op - ip; BYTE* const oend = op + length; assert((ovtype == ZSTD_no_overlap && (diff <= -8 || diff >= 8 || op >= oend_w)) || (ovtype == ZSTD_overlap_src_before_dst && diff >= 0)); if (length < 8) { /* Handle short lengths. */ while (op < oend) *op++ = *ip++; return; } if (ovtype == ZSTD_overlap_src_before_dst) { /* Copy 8 bytes and ensure the offset >= 8 when there can be overlap. */ assert(length >= 8); ZSTD_overlapCopy8(&op, &ip, diff); assert(op - ip >= 8); assert(op <= oend); } if (oend <= oend_w) { /* No risk of overwrite. */ ZSTD_wildcopy(op, ip, length, ovtype); return; } if (op <= oend_w) { /* Wildcopy until we get close to the end. */ assert(oend > oend_w); ZSTD_wildcopy(op, ip, oend_w - op, ovtype); ip += oend_w - op; op = oend_w; } /* Handle the leftovers. */ while (op < oend) *op++ = *ip++; } /* ZSTD_execSequenceEnd(): * This version handles cases that are near the end of the output buffer. It requires * more careful checks to make sure there is no overflow. By separating out these hard * and unlikely cases, we can speed up the common cases. * * NOTE: This function needs to be fast for a single long sequence, but doesn't need * to be optimized for many small sequences, since those fall into ZSTD_execSequence(). 
*/ FORCE_NOINLINE size_t ZSTD_execSequenceEnd(BYTE* op, BYTE* const oend, seq_t sequence, const BYTE** litPtr, const BYTE* const litLimit, const BYTE* const prefixStart, const BYTE* const virtualStart, const BYTE* const dictEnd) { BYTE* const oLitEnd = op + sequence.litLength; size_t const sequenceLength = sequence.litLength + sequence.matchLength; const BYTE* const iLitEnd = *litPtr + sequence.litLength; const BYTE* match = oLitEnd - sequence.offset; BYTE* const oend_w = oend - WILDCOPY_OVERLENGTH; /* bounds checks : careful of address space overflow in 32-bit mode */ RETURN_ERROR_IF(sequenceLength > (size_t)(oend - op), dstSize_tooSmall, "last match must fit within dstBuffer"); RETURN_ERROR_IF(sequence.litLength > (size_t)(litLimit - *litPtr), corruption_detected, "try to read beyond literal buffer"); assert(op < op + sequenceLength); assert(oLitEnd < op + sequenceLength); /* copy literals */ ZSTD_safecopy(op, oend_w, *litPtr, sequence.litLength, ZSTD_no_overlap); op = oLitEnd; *litPtr = iLitEnd; /* copy Match */ if (sequence.offset > (size_t)(oLitEnd - prefixStart)) { /* offset beyond prefix */ RETURN_ERROR_IF(sequence.offset > (size_t)(oLitEnd - virtualStart), corruption_detected, ""); match = dictEnd - (prefixStart-match); if (match + sequence.matchLength <= dictEnd) { memmove(oLitEnd, match, sequence.matchLength); return sequenceLength; } /* span extDict & currentPrefixSegment */ { size_t const length1 = dictEnd - match; memmove(oLitEnd, match, length1); op = oLitEnd + length1; sequence.matchLength -= length1; match = prefixStart; } } ZSTD_safecopy(op, oend_w, match, sequence.matchLength, ZSTD_overlap_src_before_dst); return sequenceLength; } HINT_INLINE size_t ZSTD_execSequence(BYTE* op, BYTE* const oend, seq_t sequence, const BYTE** litPtr, const BYTE* const litLimit, const BYTE* const prefixStart, const BYTE* const virtualStart, const BYTE* const dictEnd) { BYTE* const oLitEnd = op + sequence.litLength; size_t const sequenceLength = sequence.litLength + sequence.matchLength; BYTE* const oMatchEnd = op + sequenceLength; /* risk : address space overflow (32-bits) */ BYTE* const oend_w = oend - WILDCOPY_OVERLENGTH; /* risk : address space underflow on oend=NULL */ const BYTE* const iLitEnd = *litPtr + sequence.litLength; const BYTE* match = oLitEnd - sequence.offset; assert(op != NULL /* Precondition */); assert(oend_w < oend /* No underflow */); /* Handle edge cases in a slow path: * - Read beyond end of literals * - Match end is within WILDCOPY_OVERLIMIT of oend * - 32-bit mode and the match length overflows */ if (UNLIKELY( iLitEnd > litLimit || oMatchEnd > oend_w || (MEM_32bits() && (size_t)(oend - op) < sequenceLength + WILDCOPY_OVERLENGTH))) return ZSTD_execSequenceEnd(op, oend, sequence, litPtr, litLimit, prefixStart, virtualStart, dictEnd); /* Assumptions (everything else goes into ZSTD_execSequenceEnd()) */ assert(op <= oLitEnd /* No overflow */); assert(oLitEnd < oMatchEnd /* Non-zero match & no overflow */); assert(oMatchEnd <= oend /* No underflow */); assert(iLitEnd <= litLimit /* Literal length is in bounds */); assert(oLitEnd <= oend_w /* Can wildcopy literals */); assert(oMatchEnd <= oend_w /* Can wildcopy matches */); /* Copy Literals: * Split out litLength <= 16 since it is nearly always true. +1.6% on gcc-9. * We likely don't need the full 32-byte wildcopy. 
*/ assert(WILDCOPY_OVERLENGTH >= 16); ZSTD_copy16(op, (*litPtr)); if (UNLIKELY(sequence.litLength > 16)) { ZSTD_wildcopy(op+16, (*litPtr)+16, sequence.litLength-16, ZSTD_no_overlap); } op = oLitEnd; *litPtr = iLitEnd; /* update for next sequence */ /* Copy Match */ if (sequence.offset > (size_t)(oLitEnd - prefixStart)) { /* offset beyond prefix -> go into extDict */ RETURN_ERROR_IF(UNLIKELY(sequence.offset > (size_t)(oLitEnd - virtualStart)), corruption_detected, ""); match = dictEnd + (match - prefixStart); if (match + sequence.matchLength <= dictEnd) { memmove(oLitEnd, match, sequence.matchLength); return sequenceLength; } /* span extDict & currentPrefixSegment */ { size_t const length1 = dictEnd - match; memmove(oLitEnd, match, length1); op = oLitEnd + length1; sequence.matchLength -= length1; match = prefixStart; } } /* Match within prefix of 1 or more bytes */ assert(op <= oMatchEnd); assert(oMatchEnd <= oend_w); assert(match >= prefixStart); assert(sequence.matchLength >= 1); /* Nearly all offsets are >= WILDCOPY_VECLEN bytes, which means we can use wildcopy * without overlap checking. */ if (LIKELY(sequence.offset >= WILDCOPY_VECLEN)) { /* We bet on a full wildcopy for matches, since we expect matches to be * longer than literals (in general). In silesia, ~10% of matches are longer * than 16 bytes. */ ZSTD_wildcopy(op, match, (ptrdiff_t)sequence.matchLength, ZSTD_no_overlap); return sequenceLength; } assert(sequence.offset < WILDCOPY_VECLEN); /* Copy 8 bytes and spread the offset to be >= 8. */ ZSTD_overlapCopy8(&op, &match, sequence.offset); /* If the match length is > 8 bytes, then continue with the wildcopy. */ if (sequence.matchLength > 8) { assert(op < oMatchEnd); ZSTD_wildcopy(op, match, (ptrdiff_t)sequence.matchLength-8, ZSTD_overlap_src_before_dst); } return sequenceLength; } static void ZSTD_initFseState(ZSTD_fseState* DStatePtr, BIT_DStream_t* bitD, const ZSTD_seqSymbol* dt) { const void* ptr = dt; const ZSTD_seqSymbol_header* const DTableH = (const ZSTD_seqSymbol_header*)ptr; DStatePtr->state = BIT_readBits(bitD, DTableH->tableLog); DEBUGLOG(6, "ZSTD_initFseState : val=%u using %u bits", (U32)DStatePtr->state, DTableH->tableLog); BIT_reloadDStream(bitD); DStatePtr->table = dt + 1; } FORCE_INLINE_TEMPLATE void ZSTD_updateFseState(ZSTD_fseState* DStatePtr, BIT_DStream_t* bitD) { ZSTD_seqSymbol const DInfo = DStatePtr->table[DStatePtr->state]; U32 const nbBits = DInfo.nbBits; size_t const lowBits = BIT_readBits(bitD, nbBits); DStatePtr->state = DInfo.nextState + lowBits; } FORCE_INLINE_TEMPLATE void ZSTD_updateFseStateWithDInfo(ZSTD_fseState* DStatePtr, BIT_DStream_t* bitD, ZSTD_seqSymbol const DInfo) { U32 const nbBits = DInfo.nbBits; size_t const lowBits = BIT_readBits(bitD, nbBits); DStatePtr->state = DInfo.nextState + lowBits; } /* We need to add at most (ZSTD_WINDOWLOG_MAX_32 - 1) bits to read the maximum * offset bits. But we can only read at most (STREAM_ACCUMULATOR_MIN_32 - 1) * bits before reloading. This value is the maximum number of bytes we read * after reloading when we are decoding long offsets. */ #define LONG_OFFSETS_MAX_EXTRA_BITS_32 \ (ZSTD_WINDOWLOG_MAX_32 > STREAM_ACCUMULATOR_MIN_32 \ ? 
ZSTD_WINDOWLOG_MAX_32 - STREAM_ACCUMULATOR_MIN_32 \ : 0) typedef enum { ZSTD_lo_isRegularOffset, ZSTD_lo_isLongOffset=1 } ZSTD_longOffset_e; typedef enum { ZSTD_p_noPrefetch=0, ZSTD_p_prefetch=1 } ZSTD_prefetch_e; FORCE_INLINE_TEMPLATE seq_t ZSTD_decodeSequence(seqState_t* seqState, const ZSTD_longOffset_e longOffsets, const ZSTD_prefetch_e prefetch) { seq_t seq; ZSTD_seqSymbol const llDInfo = seqState->stateLL.table[seqState->stateLL.state]; ZSTD_seqSymbol const mlDInfo = seqState->stateML.table[seqState->stateML.state]; ZSTD_seqSymbol const ofDInfo = seqState->stateOffb.table[seqState->stateOffb.state]; U32 const llBase = llDInfo.baseValue; U32 const mlBase = mlDInfo.baseValue; U32 const ofBase = ofDInfo.baseValue; BYTE const llBits = llDInfo.nbAdditionalBits; BYTE const mlBits = mlDInfo.nbAdditionalBits; BYTE const ofBits = ofDInfo.nbAdditionalBits; BYTE const totalBits = llBits+mlBits+ofBits; /* sequence */ { size_t offset; if (ofBits > 1) { ZSTD_STATIC_ASSERT(ZSTD_lo_isLongOffset == 1); ZSTD_STATIC_ASSERT(LONG_OFFSETS_MAX_EXTRA_BITS_32 == 5); assert(ofBits <= MaxOff); if (MEM_32bits() && longOffsets && (ofBits >= STREAM_ACCUMULATOR_MIN_32)) { U32 const extraBits = ofBits - MIN(ofBits, 32 - seqState->DStream.bitsConsumed); offset = ofBase + (BIT_readBitsFast(&seqState->DStream, ofBits - extraBits) << extraBits); BIT_reloadDStream(&seqState->DStream); if (extraBits) offset += BIT_readBitsFast(&seqState->DStream, extraBits); assert(extraBits <= LONG_OFFSETS_MAX_EXTRA_BITS_32); /* to avoid another reload */ } else { offset = ofBase + BIT_readBitsFast(&seqState->DStream, ofBits/*>0*/); /* <= (ZSTD_WINDOWLOG_MAX-1) bits */ if (MEM_32bits()) BIT_reloadDStream(&seqState->DStream); } seqState->prevOffset[2] = seqState->prevOffset[1]; seqState->prevOffset[1] = seqState->prevOffset[0]; seqState->prevOffset[0] = offset; } else { U32 const ll0 = (llBase == 0); if (LIKELY((ofBits == 0))) { if (LIKELY(!ll0)) offset = seqState->prevOffset[0]; else { offset = seqState->prevOffset[1]; seqState->prevOffset[1] = seqState->prevOffset[0]; seqState->prevOffset[0] = offset; } } else { offset = ofBase + ll0 + BIT_readBitsFast(&seqState->DStream, 1); { size_t temp = (offset==3) ? seqState->prevOffset[0] - 1 : seqState->prevOffset[offset]; temp += !temp; /* 0 is not valid; input is corrupted; force offset to 1 */ if (offset != 1) seqState->prevOffset[2] = seqState->prevOffset[1]; seqState->prevOffset[1] = seqState->prevOffset[0]; seqState->prevOffset[0] = offset = temp; } } } seq.offset = offset; } seq.matchLength = mlBase; if (mlBits > 0) seq.matchLength += BIT_readBitsFast(&seqState->DStream, mlBits/*>0*/); if (MEM_32bits() && (mlBits+llBits >= STREAM_ACCUMULATOR_MIN_32-LONG_OFFSETS_MAX_EXTRA_BITS_32)) BIT_reloadDStream(&seqState->DStream); if (MEM_64bits() && UNLIKELY(totalBits >= STREAM_ACCUMULATOR_MIN_64-(LLFSELog+MLFSELog+OffFSELog))) BIT_reloadDStream(&seqState->DStream); /* Ensure there are enough bits to read the rest of data in 64-bit mode. */ ZSTD_STATIC_ASSERT(16+LLFSELog+MLFSELog+OffFSELog < STREAM_ACCUMULATOR_MIN_64); seq.litLength = llBase; if (llBits > 0) seq.litLength += BIT_readBitsFast(&seqState->DStream, llBits/*>0*/); if (MEM_32bits()) BIT_reloadDStream(&seqState->DStream); DEBUGLOG(6, "seq: litL=%u, matchL=%u, offset=%u", (U32)seq.litLength, (U32)seq.matchLength, (U32)seq.offset); if (prefetch == ZSTD_p_prefetch) { size_t const pos = seqState->pos + seq.litLength; const BYTE* const matchBase = (seq.offset > pos) ? 
seqState->dictEnd : seqState->prefixStart; seq.match = matchBase + pos - seq.offset; /* note : this operation can overflow when seq.offset is really too large, which can only happen when input is corrupted. * No consequence though : no memory access will occur, offset is only used for prefetching */ seqState->pos = pos + seq.matchLength; } /* ANS state update * gcc-9.0.0 does 2.5% worse with ZSTD_updateFseStateWithDInfo(). * clang-9.2.0 does 7% worse with ZSTD_updateFseState(). * Naturally it seems like ZSTD_updateFseStateWithDInfo() should be the * better option, so it is the default for other compilers. But, if you * measure that it is worse, please put up a pull request. */ { #if defined(__GNUC__) && !defined(__clang__) const int kUseUpdateFseState = 1; #else const int kUseUpdateFseState = 0; #endif if (kUseUpdateFseState) { ZSTD_updateFseState(&seqState->stateLL, &seqState->DStream); /* <= 9 bits */ ZSTD_updateFseState(&seqState->stateML, &seqState->DStream); /* <= 9 bits */ if (MEM_32bits()) BIT_reloadDStream(&seqState->DStream); /* <= 18 bits */ ZSTD_updateFseState(&seqState->stateOffb, &seqState->DStream); /* <= 8 bits */ } else { ZSTD_updateFseStateWithDInfo(&seqState->stateLL, &seqState->DStream, llDInfo); /* <= 9 bits */ ZSTD_updateFseStateWithDInfo(&seqState->stateML, &seqState->DStream, mlDInfo); /* <= 9 bits */ if (MEM_32bits()) BIT_reloadDStream(&seqState->DStream); /* <= 18 bits */ ZSTD_updateFseStateWithDInfo(&seqState->stateOffb, &seqState->DStream, ofDInfo); /* <= 8 bits */ } } return seq; } #ifdef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION static int ZSTD_dictionaryIsActive(ZSTD_DCtx const* dctx, BYTE const* prefixStart, BYTE const* oLitEnd) { size_t const windowSize = dctx->fParams.windowSize; /* No dictionary used. */ if (dctx->dictContentEndForFuzzing == NULL) return 0; /* Dictionary is our prefix. */ if (prefixStart == dctx->dictContentBeginForFuzzing) return 1; /* Dictionary is not our ext-dict. */ if (dctx->dictEnd != dctx->dictContentEndForFuzzing) return 0; /* Dictionary is not within our window size. */ if ((size_t)(oLitEnd - prefixStart) >= windowSize) return 0; /* Dictionary is active. */ return 1; } MEM_STATIC void ZSTD_assertValidSequence( ZSTD_DCtx const* dctx, BYTE const* op, BYTE const* oend, seq_t const seq, BYTE const* prefixStart, BYTE const* virtualStart) { size_t const windowSize = dctx->fParams.windowSize; size_t const sequenceSize = seq.litLength + seq.matchLength; BYTE const* const oLitEnd = op + seq.litLength; DEBUGLOG(6, "Checking sequence: litL=%u matchL=%u offset=%u", (U32)seq.litLength, (U32)seq.matchLength, (U32)seq.offset); assert(op <= oend); assert((size_t)(oend - op) >= sequenceSize); assert(sequenceSize <= ZSTD_BLOCKSIZE_MAX); if (ZSTD_dictionaryIsActive(dctx, prefixStart, oLitEnd)) { size_t const dictSize = (size_t)((char const*)dctx->dictContentEndForFuzzing - (char const*)dctx->dictContentBeginForFuzzing); /* Offset must be within the dictionary. */ assert(seq.offset <= (size_t)(oLitEnd - virtualStart)); assert(seq.offset <= windowSize + dictSize); } else { /* Offset must be within our window. 
*/ assert(seq.offset <= windowSize); } } #endif #ifndef ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG FORCE_INLINE_TEMPLATE size_t DONT_VECTORIZE ZSTD_decompressSequences_body( ZSTD_DCtx* dctx, void* dst, size_t maxDstSize, const void* seqStart, size_t seqSize, int nbSeq, const ZSTD_longOffset_e isLongOffset, const int frame) { const BYTE* ip = (const BYTE*)seqStart; const BYTE* const iend = ip + seqSize; BYTE* const ostart = (BYTE* const)dst; BYTE* const oend = ostart + maxDstSize; BYTE* op = ostart; const BYTE* litPtr = dctx->litPtr; const BYTE* const litEnd = litPtr + dctx->litSize; const BYTE* const prefixStart = (const BYTE*) (dctx->prefixStart); const BYTE* const vBase = (const BYTE*) (dctx->virtualStart); const BYTE* const dictEnd = (const BYTE*) (dctx->dictEnd); DEBUGLOG(5, "ZSTD_decompressSequences_body"); (void)frame; /* Regen sequences */ if (nbSeq) { seqState_t seqState; size_t error = 0; dctx->fseEntropy = 1; { U32 i; for (i=0; i<ZSTD_REP_NUM; i++) seqState.prevOffset[i] = dctx->entropy.rep[i]; } RETURN_ERROR_IF( ERR_isError(BIT_initDStream(&seqState.DStream, ip, iend-ip)), corruption_detected, ""); ZSTD_initFseState(&seqState.stateLL, &seqState.DStream, dctx->LLTptr); ZSTD_initFseState(&seqState.stateOffb, &seqState.DStream, dctx->OFTptr); ZSTD_initFseState(&seqState.stateML, &seqState.DStream, dctx->MLTptr); assert(dst != NULL); ZSTD_STATIC_ASSERT( BIT_DStream_unfinished < BIT_DStream_completed && BIT_DStream_endOfBuffer < BIT_DStream_completed && BIT_DStream_completed < BIT_DStream_overflow); #if defined(__GNUC__) && defined(__x86_64__) /* Align the decompression loop to 32 + 16 bytes. * * zstd compiled with gcc-9 on an Intel i9-9900k shows 10% decompression * speed swings based on the alignment of the decompression loop. This * performance swing is caused by parts of the decompression loop falling * out of the DSB. The entire decompression loop should fit in the DSB, * when it can't we get much worse performance. You can measure if you've * hit the good case or the bad case with this perf command for some * compressed file test.zst: * * perf stat -e cycles -e instructions -e idq.all_dsb_cycles_any_uops \ * -e idq.all_mite_cycles_any_uops -- ./zstd -tq test.zst * * If you see most cycles served out of the MITE you've hit the bad case. * If you see most cycles served out of the DSB you've hit the good case. * If it is pretty even then you may be in an okay case. * * I've been able to reproduce this issue on the following CPUs: * - Kabylake: Macbook Pro (15-inch, 2019) 2.4 GHz Intel Core i9 * Use Instruments->Counters to get DSB/MITE cycles. * I never got performance swings, but I was able to * go from the good case of mostly DSB to half of the * cycles served from MITE. * - Coffeelake: Intel i9-9900k * * I haven't been able to reproduce the instability or DSB misses on any * of the following CPUS: * - Haswell * - Broadwell: Intel(R) Xeon(R) CPU E5-2680 v4 @ 2.40GH * - Skylake * * If you are seeing performance stability this script can help test. * It tests on 4 commits in zstd where I saw performance change.
* * https://gist.github.com/terrelln/9889fc06a423fd5ca6e99351564473f4 */ __asm__(".p2align 5"); __asm__("nop"); __asm__(".p2align 4"); #endif for ( ; ; ) { seq_t const sequence = ZSTD_decodeSequence(&seqState, isLongOffset, ZSTD_p_noPrefetch); size_t const oneSeqSize = ZSTD_execSequence(op, oend, sequence, &litPtr, litEnd, prefixStart, vBase, dictEnd); #if defined(FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION) && defined(FUZZING_ASSERT_VALID_SEQUENCE) assert(!ZSTD_isError(oneSeqSize)); if (frame) ZSTD_assertValidSequence(dctx, op, oend, sequence, prefixStart, vBase); #endif DEBUGLOG(6, "regenerated sequence size : %u", (U32)oneSeqSize); BIT_reloadDStream(&(seqState.DStream)); /* gcc and clang both don't like early returns in this loop. * gcc doesn't like early breaks either. * Instead save an error and report it at the end. * When there is an error, don't increment op, so we don't * overwrite. */ if (UNLIKELY(ZSTD_isError(oneSeqSize))) error = oneSeqSize; else op += oneSeqSize; if (UNLIKELY(!--nbSeq)) break; } /* check if reached exact end */ DEBUGLOG(5, "ZSTD_decompressSequences_body: after decode loop, remaining nbSeq : %i", nbSeq); if (ZSTD_isError(error)) return error; RETURN_ERROR_IF(nbSeq, corruption_detected, ""); RETURN_ERROR_IF(BIT_reloadDStream(&seqState.DStream) < BIT_DStream_completed, corruption_detected, ""); /* save reps for next block */ { U32 i; for (i=0; i<ZSTD_REP_NUM; i++) dctx->entropy.rep[i] = (U32)(seqState.prevOffset[i]); } } /* last literal segment */ { size_t const lastLLSize = litEnd - litPtr; RETURN_ERROR_IF(lastLLSize > (size_t)(oend-op), dstSize_tooSmall, ""); if (op != NULL) { memcpy(op, litPtr, lastLLSize); op += lastLLSize; } } return op-ostart; } static size_t ZSTD_decompressSequences_default(ZSTD_DCtx* dctx, void* dst, size_t maxDstSize, const void* seqStart, size_t seqSize, int nbSeq, const ZSTD_longOffset_e isLongOffset, const int frame) { return ZSTD_decompressSequences_body(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset, frame); } #endif /* ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG */ #ifndef ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT FORCE_INLINE_TEMPLATE size_t ZSTD_decompressSequencesLong_body( ZSTD_DCtx* dctx, void* dst, size_t maxDstSize, const void* seqStart, size_t seqSize, int nbSeq, const ZSTD_longOffset_e isLongOffset, const int frame) { const BYTE* ip = (const BYTE*)seqStart; const BYTE* const iend = ip + seqSize; BYTE* const ostart = (BYTE* const)dst; BYTE* const oend = ostart + maxDstSize; BYTE* op = ostart; const BYTE* litPtr = dctx->litPtr; const BYTE* const litEnd = litPtr + dctx->litSize; const BYTE* const prefixStart = (const BYTE*) (dctx->prefixStart); const BYTE* const dictStart = (const BYTE*) (dctx->virtualStart); const BYTE* const dictEnd = (const BYTE*) (dctx->dictEnd); (void)frame; /* Regen sequences */ if (nbSeq) { #define STORED_SEQS 4 #define STORED_SEQS_MASK (STORED_SEQS-1) #define ADVANCED_SEQS 4 seq_t sequences[STORED_SEQS]; int const seqAdvance = MIN(nbSeq, ADVANCED_SEQS); seqState_t seqState; int seqNb; dctx->fseEntropy = 1; { int i; for (i=0; i<ZSTD_REP_NUM; i++) seqState.prevOffset[i] = dctx->entropy.rep[i]; } seqState.prefixStart = prefixStart; seqState.pos = (size_t)(op-prefixStart); seqState.dictEnd = dictEnd; assert(dst != NULL); assert(iend >= ip); RETURN_ERROR_IF( ERR_isError(BIT_initDStream(&seqState.DStream, ip, iend-ip)), corruption_detected, ""); ZSTD_initFseState(&seqState.stateLL, &seqState.DStream, dctx->LLTptr); ZSTD_initFseState(&seqState.stateOffb, &seqState.DStream, dctx->OFTptr); ZSTD_initFseState(&seqState.stateML, &seqState.DStream, dctx->MLTptr); /* prepare in advance */
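/* The long variant pipelines decoding and execution : up to ADVANCED_SEQS sequences are decoded ahead of the one being executed into the sequences[] ring buffer, and their match addresses are prefetched, so that by the time a buffered sequence is executed its match data is likely already in cache. */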
for (seqNb=0; (BIT_reloadDStream(&seqState.DStream) <= BIT_DStream_completed) && (seqNbentropy.rep[i] = (U32)(seqState.prevOffset[i]); } } /* last literal segment */ { size_t const lastLLSize = litEnd - litPtr; RETURN_ERROR_IF(lastLLSize > (size_t)(oend-op), dstSize_tooSmall, ""); if (op != NULL) { memcpy(op, litPtr, lastLLSize); op += lastLLSize; } } return op-ostart; } static size_t ZSTD_decompressSequencesLong_default(ZSTD_DCtx* dctx, void* dst, size_t maxDstSize, const void* seqStart, size_t seqSize, int nbSeq, const ZSTD_longOffset_e isLongOffset, const int frame) { return ZSTD_decompressSequencesLong_body(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset, frame); } #endif /* ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT */ #if DYNAMIC_BMI2 #ifndef ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG static TARGET_ATTRIBUTE("bmi2") size_t DONT_VECTORIZE ZSTD_decompressSequences_bmi2(ZSTD_DCtx* dctx, void* dst, size_t maxDstSize, const void* seqStart, size_t seqSize, int nbSeq, const ZSTD_longOffset_e isLongOffset, const int frame) { return ZSTD_decompressSequences_body(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset, frame); } #endif /* ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG */ #ifndef ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT static TARGET_ATTRIBUTE("bmi2") size_t ZSTD_decompressSequencesLong_bmi2(ZSTD_DCtx* dctx, void* dst, size_t maxDstSize, const void* seqStart, size_t seqSize, int nbSeq, const ZSTD_longOffset_e isLongOffset, const int frame) { return ZSTD_decompressSequencesLong_body(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset, frame); } #endif /* ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT */ #endif /* DYNAMIC_BMI2 */ typedef size_t (*ZSTD_decompressSequences_t)( ZSTD_DCtx* dctx, void* dst, size_t maxDstSize, const void* seqStart, size_t seqSize, int nbSeq, const ZSTD_longOffset_e isLongOffset, const int frame); #ifndef ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG static size_t ZSTD_decompressSequences(ZSTD_DCtx* dctx, void* dst, size_t maxDstSize, const void* seqStart, size_t seqSize, int nbSeq, const ZSTD_longOffset_e isLongOffset, const int frame) { DEBUGLOG(5, "ZSTD_decompressSequences"); #if DYNAMIC_BMI2 if (dctx->bmi2) { return ZSTD_decompressSequences_bmi2(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset, frame); } #endif return ZSTD_decompressSequences_default(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset, frame); } #endif /* ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG */ #ifndef ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT /* ZSTD_decompressSequencesLong() : * decompression function triggered when a minimum share of offsets is considered "long", * aka out of cache. * note : "long" definition seems overloaded here, sometimes meaning "wider than bitstream register", and sometimes meaning "farther than memory cache distance". 
* This function will try to mitigate main memory latency through the use of prefetching */ static size_t ZSTD_decompressSequencesLong(ZSTD_DCtx* dctx, void* dst, size_t maxDstSize, const void* seqStart, size_t seqSize, int nbSeq, const ZSTD_longOffset_e isLongOffset, const int frame) { DEBUGLOG(5, "ZSTD_decompressSequencesLong"); #if DYNAMIC_BMI2 if (dctx->bmi2) { return ZSTD_decompressSequencesLong_bmi2(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset, frame); } #endif return ZSTD_decompressSequencesLong_default(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset, frame); } #endif /* ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT */ #if !defined(ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT) && \ !defined(ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG) /* ZSTD_getLongOffsetsShare() : * condition : offTable must be valid * @return : "share" of long offsets (arbitrarily defined as > (1<<23)) * compared to maximum possible of (1< 22) total += 1; } assert(tableLog <= OffFSELog); total <<= (OffFSELog - tableLog); /* scale to OffFSELog */ return total; } #endif size_t ZSTD_decompressBlock_internal(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize, const int frame) { /* blockType == blockCompressed */ const BYTE* ip = (const BYTE*)src; /* isLongOffset must be true if there are long offsets. * Offsets are long if they are larger than 2^STREAM_ACCUMULATOR_MIN. * We don't expect that to be the case in 64-bit mode. * In block mode, window size is not known, so we have to be conservative. * (note: but it could be evaluated from current-lowLimit) */ ZSTD_longOffset_e const isLongOffset = (ZSTD_longOffset_e)(MEM_32bits() && (!frame || (dctx->fParams.windowSize > (1ULL << STREAM_ACCUMULATOR_MIN)))); DEBUGLOG(5, "ZSTD_decompressBlock_internal (size : %u)", (U32)srcSize); RETURN_ERROR_IF(srcSize >= ZSTD_BLOCKSIZE_MAX, srcSize_wrong, ""); /* Decode literals section */ { size_t const litCSize = ZSTD_decodeLiteralsBlock(dctx, src, srcSize); DEBUGLOG(5, "ZSTD_decodeLiteralsBlock : %u", (U32)litCSize); if (ZSTD_isError(litCSize)) return litCSize; ip += litCSize; srcSize -= litCSize; } /* Build Decoding Tables */ { /* These macros control at build-time which decompressor implementation * we use. If neither is defined, we do some inspection and dispatch at * runtime. */ #if !defined(ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT) && \ !defined(ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG) int usePrefetchDecoder = dctx->ddictIsCold; #endif int nbSeq; size_t const seqHSize = ZSTD_decodeSeqHeaders(dctx, &nbSeq, ip, srcSize); if (ZSTD_isError(seqHSize)) return seqHSize; ip += seqHSize; srcSize -= seqHSize; RETURN_ERROR_IF(dst == NULL && nbSeq > 0, dstSize_tooSmall, "NULL not handled"); #if !defined(ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT) && \ !defined(ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG) if ( !usePrefetchDecoder && (!frame || (dctx->fParams.windowSize > (1<<24))) && (nbSeq>ADVANCED_SEQS) ) { /* could probably use a larger nbSeq limit */ U32 const shareLongOffsets = ZSTD_getLongOffsetsShare(dctx->OFTptr); U32 const minShare = MEM_64bits() ? 
7 : 20; /* heuristic values, correspond to 2.73% and 7.81% */ usePrefetchDecoder = (shareLongOffsets >= minShare); } #endif dctx->ddictIsCold = 0; #if !defined(ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT) && \ !defined(ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG) if (usePrefetchDecoder) #endif #ifndef ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT return ZSTD_decompressSequencesLong(dctx, dst, dstCapacity, ip, srcSize, nbSeq, isLongOffset, frame); #endif #ifndef ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG /* else */ return ZSTD_decompressSequences(dctx, dst, dstCapacity, ip, srcSize, nbSeq, isLongOffset, frame); #endif } } void ZSTD_checkContinuity(ZSTD_DCtx* dctx, const void* dst) { if (dst != dctx->previousDstEnd) { /* not contiguous */ dctx->dictEnd = dctx->previousDstEnd; dctx->virtualStart = (const char*)dst - ((const char*)(dctx->previousDstEnd) - (const char*)(dctx->prefixStart)); dctx->prefixStart = dst; dctx->previousDstEnd = dst; } } size_t ZSTD_decompressBlock(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize) { size_t dSize; ZSTD_checkContinuity(dctx, dst); dSize = ZSTD_decompressBlock_internal(dctx, dst, dstCapacity, src, srcSize, /* frame */ 0); dctx->previousDstEnd = (char*)dst + dSize; return dSize; } borgbackup-1.1.15/src/borg/algorithms/zstd/lib/decompress/zstd_ddict.h0000644000175000017500000000243413771325506026003 0ustar useruser00000000000000/* * Copyright (c) 2016-2020, Yann Collet, Facebook, Inc. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the * LICENSE file in the root directory of this source tree) and the GPLv2 (found * in the COPYING file in the root directory of this source tree). * You may select, at your option, one of the above-listed licenses. */ #ifndef ZSTD_DDICT_H #define ZSTD_DDICT_H /*-******************************************************* * Dependencies *********************************************************/ #include /* size_t */ #include "../zstd.h" /* ZSTD_DDict, and several public functions */ /*-******************************************************* * Interface *********************************************************/ /* note: several prototypes are already published in `zstd.h` : * ZSTD_createDDict() * ZSTD_createDDict_byReference() * ZSTD_createDDict_advanced() * ZSTD_freeDDict() * ZSTD_initStaticDDict() * ZSTD_sizeof_DDict() * ZSTD_estimateDDictSize() * ZSTD_getDictID_fromDict() */ const void* ZSTD_DDict_dictContent(const ZSTD_DDict* ddict); size_t ZSTD_DDict_dictSize(const ZSTD_DDict* ddict); void ZSTD_copyDDictParameters(ZSTD_DCtx* dctx, const ZSTD_DDict* ddict); #endif /* ZSTD_DDICT_H */ borgbackup-1.1.15/src/borg/algorithms/zstd/lib/decompress/zstd_decompress_block.h0000644000175000017500000000377013771325506030236 0ustar useruser00000000000000/* * Copyright (c) 2016-2020, Yann Collet, Facebook, Inc. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the * LICENSE file in the root directory of this source tree) and the GPLv2 (found * in the COPYING file in the root directory of this source tree). * You may select, at your option, one of the above-listed licenses. 
*/ #ifndef ZSTD_DEC_BLOCK_H #define ZSTD_DEC_BLOCK_H /*-******************************************************* * Dependencies *********************************************************/ #include /* size_t */ #include "../zstd.h" /* DCtx, and some public functions */ #include "../common/zstd_internal.h" /* blockProperties_t, and some public functions */ #include "zstd_decompress_internal.h" /* ZSTD_seqSymbol */ /* === Prototypes === */ /* note: prototypes already published within `zstd.h` : * ZSTD_decompressBlock() */ /* note: prototypes already published within `zstd_internal.h` : * ZSTD_getcBlockSize() * ZSTD_decodeSeqHeaders() */ /* ZSTD_decompressBlock_internal() : * decompress block, starting at `src`, * into destination buffer `dst`. * @return : decompressed block size, * or an error code (which can be tested using ZSTD_isError()) */ size_t ZSTD_decompressBlock_internal(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize, const int frame); /* ZSTD_buildFSETable() : * generate FSE decoding table for one symbol (ll, ml or off) * this function must be called with valid parameters only * (dt is large enough, normalizedCounter distribution total is a power of 2, max is within range, etc.) * in which case it cannot fail. * Internal use only. */ void ZSTD_buildFSETable(ZSTD_seqSymbol* dt, const short* normalizedCounter, unsigned maxSymbolValue, const U32* baseValue, const U32* nbAdditionalBits, unsigned tableLog); #endif /* ZSTD_DEC_BLOCK_H */ borgbackup-1.1.15/src/borg/algorithms/zstd/lib/common/0000755000175000017500000000000013771325773022626 5ustar useruser00000000000000borgbackup-1.1.15/src/borg/algorithms/zstd/lib/common/error_private.c0000644000175000017500000000557613771325506025664 0ustar useruser00000000000000/* * Copyright (c) 2016-2020, Yann Collet, Facebook, Inc. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the * LICENSE file in the root directory of this source tree) and the GPLv2 (found * in the COPYING file in the root directory of this source tree). * You may select, at your option, one of the above-listed licenses. 
*/ /* The purpose of this file is to have a single list of error strings embedded in binary */ #include "error_private.h" const char* ERR_getErrorString(ERR_enum code) { #ifdef ZSTD_STRIP_ERROR_STRINGS (void)code; return "Error strings stripped"; #else static const char* const notErrorCode = "Unspecified error code"; switch( code ) { case PREFIX(no_error): return "No error detected"; case PREFIX(GENERIC): return "Error (generic)"; case PREFIX(prefix_unknown): return "Unknown frame descriptor"; case PREFIX(version_unsupported): return "Version not supported"; case PREFIX(frameParameter_unsupported): return "Unsupported frame parameter"; case PREFIX(frameParameter_windowTooLarge): return "Frame requires too much memory for decoding"; case PREFIX(corruption_detected): return "Corrupted block detected"; case PREFIX(checksum_wrong): return "Restored data doesn't match checksum"; case PREFIX(parameter_unsupported): return "Unsupported parameter"; case PREFIX(parameter_outOfBound): return "Parameter is out of bound"; case PREFIX(init_missing): return "Context should be init first"; case PREFIX(memory_allocation): return "Allocation error : not enough memory"; case PREFIX(workSpace_tooSmall): return "workSpace buffer is not large enough"; case PREFIX(stage_wrong): return "Operation not authorized at current processing stage"; case PREFIX(tableLog_tooLarge): return "tableLog requires too much memory : unsupported"; case PREFIX(maxSymbolValue_tooLarge): return "Unsupported max Symbol Value : too large"; case PREFIX(maxSymbolValue_tooSmall): return "Specified maxSymbolValue is too small"; case PREFIX(dictionary_corrupted): return "Dictionary is corrupted"; case PREFIX(dictionary_wrong): return "Dictionary mismatch"; case PREFIX(dictionaryCreation_failed): return "Cannot create Dictionary from provided samples"; case PREFIX(dstSize_tooSmall): return "Destination buffer is too small"; case PREFIX(srcSize_wrong): return "Src size is incorrect"; case PREFIX(dstBuffer_null): return "Operation on NULL destination buffer"; /* following error codes are not stable and may be removed or changed in a future version */ case PREFIX(frameIndex_tooLarge): return "Frame index is too large"; case PREFIX(seekableIO): return "An I/O error occurred when reading/seeking"; case PREFIX(dstBuffer_wrong): return "Destination buffer is wrong"; case PREFIX(maxCode): default: return notErrorCode; } #endif } borgbackup-1.1.15/src/borg/algorithms/zstd/lib/common/threading.c0000644000175000017500000000551013771325506024732 0ustar useruser00000000000000/** * Copyright (c) 2016 Tino Reichardt * All rights reserved. * * You can contact the author at: * - zstdmt source repository: https://github.com/mcmilk/zstdmt * * This source code is licensed under both the BSD-style license (found in the * LICENSE file in the root directory of this source tree) and the GPLv2 (found * in the COPYING file in the root directory of this source tree). * You may select, at your option, one of the above-listed licenses. 
*/ /** * This file will hold wrapper for systems, which do not support pthreads */ #include "threading.h" /* create fake symbol to avoid empty translation unit warning */ int g_ZSTD_threading_useless_symbol; #if defined(ZSTD_MULTITHREAD) && defined(_WIN32) /** * Windows minimalist Pthread Wrapper, based on : * http://www.cse.wustl.edu/~schmidt/win32-cv-1.html */ /* === Dependencies === */ #include <process.h> #include <errno.h> /* === Implementation === */ static unsigned __stdcall worker(void *arg) { ZSTD_pthread_t* const thread = (ZSTD_pthread_t*) arg; thread->arg = thread->start_routine(thread->arg); return 0; } int ZSTD_pthread_create(ZSTD_pthread_t* thread, const void* unused, void* (*start_routine) (void*), void* arg) { (void)unused; thread->arg = arg; thread->start_routine = start_routine; thread->handle = (HANDLE) _beginthreadex(NULL, 0, worker, thread, 0, NULL); if (!thread->handle) return errno; else return 0; } int ZSTD_pthread_join(ZSTD_pthread_t thread, void **value_ptr) { DWORD result; if (!thread.handle) return 0; result = WaitForSingleObject(thread.handle, INFINITE); switch (result) { case WAIT_OBJECT_0: if (value_ptr) *value_ptr = thread.arg; return 0; case WAIT_ABANDONED: return EINVAL; default: return GetLastError(); } } #endif /* ZSTD_MULTITHREAD */ #if defined(ZSTD_MULTITHREAD) && DEBUGLEVEL >= 1 && !defined(_WIN32) #include <stdlib.h> int ZSTD_pthread_mutex_init(ZSTD_pthread_mutex_t* mutex, pthread_mutexattr_t const* attr) { *mutex = (pthread_mutex_t*)malloc(sizeof(pthread_mutex_t)); if (!*mutex) return 1; return pthread_mutex_init(*mutex, attr); } int ZSTD_pthread_mutex_destroy(ZSTD_pthread_mutex_t* mutex) { if (!*mutex) return 0; { int const ret = pthread_mutex_destroy(*mutex); free(*mutex); return ret; } } int ZSTD_pthread_cond_init(ZSTD_pthread_cond_t* cond, pthread_condattr_t const* attr) { *cond = (pthread_cond_t*)malloc(sizeof(pthread_cond_t)); if (!*cond) return 1; return pthread_cond_init(*cond, attr); } int ZSTD_pthread_cond_destroy(ZSTD_pthread_cond_t* cond) { if (!*cond) return 0; { int const ret = pthread_cond_destroy(*cond); free(*cond); return ret; } } #endif borgbackup-1.1.15/src/borg/algorithms/zstd/lib/common/mem.h0000644000175000017500000003512213771325506023552 0ustar useruser00000000000000/* * Copyright (c) 2016-2020, Yann Collet, Facebook, Inc. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the * LICENSE file in the root directory of this source tree) and the GPLv2 (found * in the COPYING file in the root directory of this source tree). * You may select, at your option, one of the above-listed licenses.
*/ #ifndef MEM_H_MODULE #define MEM_H_MODULE #if defined (__cplusplus) extern "C" { #endif /*-**************************************** * Dependencies ******************************************/ #include <stddef.h> /* size_t, ptrdiff_t */ #include <string.h> /* memcpy */ /*-**************************************** * Compiler specifics ******************************************/ #if defined(_MSC_VER) /* Visual Studio */ # include <stdlib.h> /* _byteswap_ulong */ # include <intrin.h> /* _byteswap_* */ #endif #if defined(__GNUC__) # define MEM_STATIC static __inline __attribute__((unused)) #elif defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) # define MEM_STATIC static inline #elif defined(_MSC_VER) # define MEM_STATIC static __inline #else # define MEM_STATIC static /* this version may generate warnings for unused static functions; disable the relevant warning */ #endif #ifndef __has_builtin # define __has_builtin(x) 0 /* compat. with non-clang compilers */ #endif /* code only tested on 32 and 64 bits systems */ #define MEM_STATIC_ASSERT(c) { enum { MEM_static_assert = 1/(int)(!!(c)) }; } MEM_STATIC void MEM_check(void) { MEM_STATIC_ASSERT((sizeof(size_t)==4) || (sizeof(size_t)==8)); } /* detects whether we are being compiled under msan */ #if defined (__has_feature) # if __has_feature(memory_sanitizer) # define MEMORY_SANITIZER 1 # endif #endif #if defined (MEMORY_SANITIZER) /* Not all platforms that support msan provide sanitizers/msan_interface.h. * We therefore declare the functions we need ourselves, rather than trying to * include the header file... */ #include <stdint.h> /* intptr_t */ /* Make memory region fully initialized (without changing its contents). */ void __msan_unpoison(const volatile void *a, size_t size); /* Make memory region fully uninitialized (without changing its contents). This is a legacy interface that does not update origin information. Use __msan_allocated_memory() instead. */ void __msan_poison(const volatile void *a, size_t size); /* Returns the offset of the first (at least partially) poisoned byte in the memory range, or -1 if the whole range is good. */ intptr_t __msan_test_shadow(const volatile void *x, size_t size); #endif /* detects whether we are being compiled under asan */ #if defined (__has_feature) # if __has_feature(address_sanitizer) # define ADDRESS_SANITIZER 1 # endif #elif defined(__SANITIZE_ADDRESS__) # define ADDRESS_SANITIZER 1 #endif #if defined (ADDRESS_SANITIZER) /* Not all platforms that support asan provide sanitizers/asan_interface.h. * We therefore declare the functions we need ourselves, rather than trying to * include the header file... */ /** * Marks a memory region ([addr, addr+size)) as unaddressable. * * This memory must be previously allocated by your program. Instrumented * code is forbidden from accessing addresses in this region until it is * unpoisoned. This function is not guaranteed to poison the entire region - * it could poison only a subregion of [addr, addr+size) due to ASan * alignment restrictions. * * \note This function is not thread-safe because no two threads can poison or * unpoison memory in the same memory region simultaneously. * * \param addr Start of memory region. * \param size Size of memory region. */ void __asan_poison_memory_region(void const volatile *addr, size_t size); /** * Marks a memory region ([addr, addr+size)) as addressable. * * This memory must be previously allocated by your program. Accessing * addresses in this region is allowed until this region is poisoned again.
* This function could unpoison a super-region of [addr, addr+size) due * to ASan alignment restrictions. * * \note This function is not thread-safe because no two threads can * poison or unpoison memory in the same memory region simultaneously. * * \param addr Start of memory region. * \param size Size of memory region. */ void __asan_unpoison_memory_region(void const volatile *addr, size_t size); #endif /*-************************************************************** * Basic Types *****************************************************************/ #if !defined (__VMS) && (defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) ) # include <stdint.h> typedef uint8_t BYTE; typedef uint16_t U16; typedef int16_t S16; typedef uint32_t U32; typedef int32_t S32; typedef uint64_t U64; typedef int64_t S64; #else # include <limits.h> #if CHAR_BIT != 8 # error "this implementation requires char to be exactly 8-bit type" #endif typedef unsigned char BYTE; #if USHRT_MAX != 65535 # error "this implementation requires short to be exactly 16-bit type" #endif typedef unsigned short U16; typedef signed short S16; #if UINT_MAX != 4294967295 # error "this implementation requires int to be exactly 32-bit type" #endif typedef unsigned int U32; typedef signed int S32; /* note : there are no limits defined for long long type in C90. * limits exist in C99, however, in such case, <stdint.h> is preferred */ typedef unsigned long long U64; typedef signed long long S64; #endif /*-************************************************************** * Memory I/O *****************************************************************/ /* MEM_FORCE_MEMORY_ACCESS : * By default, access to unaligned memory is controlled by `memcpy()`, which is safe and portable. * Unfortunately, on some target/compiler combinations, the generated assembly is sub-optimal. * The below switch allow to select different access method for improved performance. * Method 0 (default) : use `memcpy()`. Safe and portable. * Method 1 : `__packed` statement. It depends on compiler extension (i.e., not portable). * This method is safe if your compiler supports it, and *generally* as fast or faster than `memcpy`. * Method 2 : direct access. This method is portable but violate C standard. * It can generate buggy code on targets depending on alignment. * In some circumstances, it's the only known way to get the most performance (i.e. GCC + ARMv6) * See http://fastcompression.blogspot.fr/2015/08/accessing-unaligned-memory.html for details. * Prefer these methods in priority order (0 > 1 > 2) */ #ifndef MEM_FORCE_MEMORY_ACCESS /* can be defined externally, on command line for example */ # if defined(__GNUC__) && ( defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) || defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6Z__) || defined(__ARM_ARCH_6ZK__) || defined(__ARM_ARCH_6T2__) ) # define MEM_FORCE_MEMORY_ACCESS 2 # elif defined(__INTEL_COMPILER) || defined(__GNUC__) || defined(__ICCARM__) # define MEM_FORCE_MEMORY_ACCESS 1 # endif #endif MEM_STATIC unsigned MEM_32bits(void) { return sizeof(size_t)==4; } MEM_STATIC unsigned MEM_64bits(void) { return sizeof(size_t)==8; } MEM_STATIC unsigned MEM_isLittleEndian(void) { const union { U32 u; BYTE c[4]; } one = { 1 }; /* don't use static : performance detrimental */ return one.c[0]; } #if defined(MEM_FORCE_MEMORY_ACCESS) && (MEM_FORCE_MEMORY_ACCESS==2) /* violates C standard, by lying on structure alignment.
Only use if no other choice to achieve best performance on target platform */ MEM_STATIC U16 MEM_read16(const void* memPtr) { return *(const U16*) memPtr; } MEM_STATIC U32 MEM_read32(const void* memPtr) { return *(const U32*) memPtr; } MEM_STATIC U64 MEM_read64(const void* memPtr) { return *(const U64*) memPtr; } MEM_STATIC size_t MEM_readST(const void* memPtr) { return *(const size_t*) memPtr; } MEM_STATIC void MEM_write16(void* memPtr, U16 value) { *(U16*)memPtr = value; } MEM_STATIC void MEM_write32(void* memPtr, U32 value) { *(U32*)memPtr = value; } MEM_STATIC void MEM_write64(void* memPtr, U64 value) { *(U64*)memPtr = value; } #elif defined(MEM_FORCE_MEMORY_ACCESS) && (MEM_FORCE_MEMORY_ACCESS==1) /* __pack instructions are safer, but compiler specific, hence potentially problematic for some compilers */ /* currently only defined for gcc and icc */ #if defined(_MSC_VER) || (defined(__INTEL_COMPILER) && defined(WIN32)) __pragma( pack(push, 1) ) typedef struct { U16 v; } unalign16; typedef struct { U32 v; } unalign32; typedef struct { U64 v; } unalign64; typedef struct { size_t v; } unalignArch; __pragma( pack(pop) ) #else typedef struct { U16 v; } __attribute__((packed)) unalign16; typedef struct { U32 v; } __attribute__((packed)) unalign32; typedef struct { U64 v; } __attribute__((packed)) unalign64; typedef struct { size_t v; } __attribute__((packed)) unalignArch; #endif MEM_STATIC U16 MEM_read16(const void* ptr) { return ((const unalign16*)ptr)->v; } MEM_STATIC U32 MEM_read32(const void* ptr) { return ((const unalign32*)ptr)->v; } MEM_STATIC U64 MEM_read64(const void* ptr) { return ((const unalign64*)ptr)->v; } MEM_STATIC size_t MEM_readST(const void* ptr) { return ((const unalignArch*)ptr)->v; } MEM_STATIC void MEM_write16(void* memPtr, U16 value) { ((unalign16*)memPtr)->v = value; } MEM_STATIC void MEM_write32(void* memPtr, U32 value) { ((unalign32*)memPtr)->v = value; } MEM_STATIC void MEM_write64(void* memPtr, U64 value) { ((unalign64*)memPtr)->v = value; } #else /* default method, safe and standard. 
can sometimes prove slower */ MEM_STATIC U16 MEM_read16(const void* memPtr) { U16 val; memcpy(&val, memPtr, sizeof(val)); return val; } MEM_STATIC U32 MEM_read32(const void* memPtr) { U32 val; memcpy(&val, memPtr, sizeof(val)); return val; } MEM_STATIC U64 MEM_read64(const void* memPtr) { U64 val; memcpy(&val, memPtr, sizeof(val)); return val; } MEM_STATIC size_t MEM_readST(const void* memPtr) { size_t val; memcpy(&val, memPtr, sizeof(val)); return val; } MEM_STATIC void MEM_write16(void* memPtr, U16 value) { memcpy(memPtr, &value, sizeof(value)); } MEM_STATIC void MEM_write32(void* memPtr, U32 value) { memcpy(memPtr, &value, sizeof(value)); } MEM_STATIC void MEM_write64(void* memPtr, U64 value) { memcpy(memPtr, &value, sizeof(value)); } #endif /* MEM_FORCE_MEMORY_ACCESS */ MEM_STATIC U32 MEM_swap32(U32 in) { #if defined(_MSC_VER) /* Visual Studio */ return _byteswap_ulong(in); #elif (defined (__GNUC__) && (__GNUC__ * 100 + __GNUC_MINOR__ >= 403)) \ || (defined(__clang__) && __has_builtin(__builtin_bswap32)) return __builtin_bswap32(in); #else return ((in << 24) & 0xff000000 ) | ((in << 8) & 0x00ff0000 ) | ((in >> 8) & 0x0000ff00 ) | ((in >> 24) & 0x000000ff ); #endif } MEM_STATIC U64 MEM_swap64(U64 in) { #if defined(_MSC_VER) /* Visual Studio */ return _byteswap_uint64(in); #elif (defined (__GNUC__) && (__GNUC__ * 100 + __GNUC_MINOR__ >= 403)) \ || (defined(__clang__) && __has_builtin(__builtin_bswap64)) return __builtin_bswap64(in); #else return ((in << 56) & 0xff00000000000000ULL) | ((in << 40) & 0x00ff000000000000ULL) | ((in << 24) & 0x0000ff0000000000ULL) | ((in << 8) & 0x000000ff00000000ULL) | ((in >> 8) & 0x00000000ff000000ULL) | ((in >> 24) & 0x0000000000ff0000ULL) | ((in >> 40) & 0x000000000000ff00ULL) | ((in >> 56) & 0x00000000000000ffULL); #endif } MEM_STATIC size_t MEM_swapST(size_t in) { if (MEM_32bits()) return (size_t)MEM_swap32((U32)in); else return (size_t)MEM_swap64((U64)in); } /*=== Little endian r/w ===*/ MEM_STATIC U16 MEM_readLE16(const void* memPtr) { if (MEM_isLittleEndian()) return MEM_read16(memPtr); else { const BYTE* p = (const BYTE*)memPtr; return (U16)(p[0] + (p[1]<<8)); } } MEM_STATIC void MEM_writeLE16(void* memPtr, U16 val) { if (MEM_isLittleEndian()) { MEM_write16(memPtr, val); } else { BYTE* p = (BYTE*)memPtr; p[0] = (BYTE)val; p[1] = (BYTE)(val>>8); } } MEM_STATIC U32 MEM_readLE24(const void* memPtr) { return MEM_readLE16(memPtr) + (((const BYTE*)memPtr)[2] << 16); } MEM_STATIC void MEM_writeLE24(void* memPtr, U32 val) { MEM_writeLE16(memPtr, (U16)val); ((BYTE*)memPtr)[2] = (BYTE)(val>>16); } MEM_STATIC U32 MEM_readLE32(const void* memPtr) { if (MEM_isLittleEndian()) return MEM_read32(memPtr); else return MEM_swap32(MEM_read32(memPtr)); } MEM_STATIC void MEM_writeLE32(void* memPtr, U32 val32) { if (MEM_isLittleEndian()) MEM_write32(memPtr, val32); else MEM_write32(memPtr, MEM_swap32(val32)); } MEM_STATIC U64 MEM_readLE64(const void* memPtr) { if (MEM_isLittleEndian()) return MEM_read64(memPtr); else return MEM_swap64(MEM_read64(memPtr)); } MEM_STATIC void MEM_writeLE64(void* memPtr, U64 val64) { if (MEM_isLittleEndian()) MEM_write64(memPtr, val64); else MEM_write64(memPtr, MEM_swap64(val64)); } MEM_STATIC size_t MEM_readLEST(const void* memPtr) { if (MEM_32bits()) return (size_t)MEM_readLE32(memPtr); else return (size_t)MEM_readLE64(memPtr); } MEM_STATIC void MEM_writeLEST(void* memPtr, size_t val) { if (MEM_32bits()) MEM_writeLE32(memPtr, (U32)val); else MEM_writeLE64(memPtr, (U64)val); } /*=== Big endian r/w ===*/ MEM_STATIC U32 MEM_readBE32(const 
void* memPtr) { if (MEM_isLittleEndian()) return MEM_swap32(MEM_read32(memPtr)); else return MEM_read32(memPtr); } MEM_STATIC void MEM_writeBE32(void* memPtr, U32 val32) { if (MEM_isLittleEndian()) MEM_write32(memPtr, MEM_swap32(val32)); else MEM_write32(memPtr, val32); } MEM_STATIC U64 MEM_readBE64(const void* memPtr) { if (MEM_isLittleEndian()) return MEM_swap64(MEM_read64(memPtr)); else return MEM_read64(memPtr); } MEM_STATIC void MEM_writeBE64(void* memPtr, U64 val64) { if (MEM_isLittleEndian()) MEM_write64(memPtr, MEM_swap64(val64)); else MEM_write64(memPtr, val64); } MEM_STATIC size_t MEM_readBEST(const void* memPtr) { if (MEM_32bits()) return (size_t)MEM_readBE32(memPtr); else return (size_t)MEM_readBE64(memPtr); } MEM_STATIC void MEM_writeBEST(void* memPtr, size_t val) { if (MEM_32bits()) MEM_writeBE32(memPtr, (U32)val); else MEM_writeBE64(memPtr, (U64)val); } #if defined (__cplusplus) } #endif #endif /* MEM_H_MODULE */ borgbackup-1.1.15/src/borg/algorithms/zstd/lib/common/compiler.h0000644000175000017500000001412113771325506024602 0ustar useruser00000000000000/* * Copyright (c) 2016-2020, Yann Collet, Facebook, Inc. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the * LICENSE file in the root directory of this source tree) and the GPLv2 (found * in the COPYING file in the root directory of this source tree). * You may select, at your option, one of the above-listed licenses. */ #ifndef ZSTD_COMPILER_H #define ZSTD_COMPILER_H /*-******************************************************* * Compiler specifics *********************************************************/ /* force inlining */ #if !defined(ZSTD_NO_INLINE) #if (defined(__GNUC__) && !defined(__STRICT_ANSI__)) || defined(__cplusplus) || defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199901L /* C99 */ # define INLINE_KEYWORD inline #else # define INLINE_KEYWORD #endif #if defined(__GNUC__) || defined(__ICCARM__) # define FORCE_INLINE_ATTR __attribute__((always_inline)) #elif defined(_MSC_VER) # define FORCE_INLINE_ATTR __forceinline #else # define FORCE_INLINE_ATTR #endif #else #define INLINE_KEYWORD #define FORCE_INLINE_ATTR #endif /** * FORCE_INLINE_TEMPLATE is used to define C "templates", which take constant * parameters. They must be inlined for the compiler to eliminate the constant * branches. */ #define FORCE_INLINE_TEMPLATE static INLINE_KEYWORD FORCE_INLINE_ATTR /** * HINT_INLINE is used to help the compiler generate better code. It is *not* * used for "templates", so it can be tweaked based on the compilers * performance. * * gcc-4.8 and gcc-4.9 have been shown to benefit from leaving off the * always_inline attribute. * * clang up to 5.0.0 (trunk) benefit tremendously from the always_inline * attribute. */ #if !defined(__clang__) && defined(__GNUC__) && __GNUC__ >= 4 && __GNUC_MINOR__ >= 8 && __GNUC__ < 5 # define HINT_INLINE static INLINE_KEYWORD #else # define HINT_INLINE static INLINE_KEYWORD FORCE_INLINE_ATTR #endif /* UNUSED_ATTR tells the compiler it is okay if the function is unused. */ #if defined(__GNUC__) # define UNUSED_ATTR __attribute__((unused)) #else # define UNUSED_ATTR #endif /* force no inlining */ #ifdef _MSC_VER # define FORCE_NOINLINE static __declspec(noinline) #else # if defined(__GNUC__) || defined(__ICCARM__) # define FORCE_NOINLINE static __attribute__((__noinline__)) # else # define FORCE_NOINLINE static # endif #endif /* target attribute */ #ifndef __has_attribute #define __has_attribute(x) 0 /* Compatibility with non-clang compilers. 
*/ #endif #if defined(__GNUC__) || defined(__ICCARM__) # define TARGET_ATTRIBUTE(target) __attribute__((__target__(target))) #else # define TARGET_ATTRIBUTE(target) #endif /* Enable runtime BMI2 dispatch based on the CPU. * Enabled for clang & gcc >=4.8 on x86 when BMI2 isn't enabled by default. */ #ifndef DYNAMIC_BMI2 #if ((defined(__clang__) && __has_attribute(__target__)) \ || (defined(__GNUC__) \ && (__GNUC__ >= 5 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8)))) \ && (defined(__x86_64__) || defined(_M_X86)) \ && !defined(__BMI2__) # define DYNAMIC_BMI2 1 #else # define DYNAMIC_BMI2 0 #endif #endif /* prefetch * can be disabled, by declaring NO_PREFETCH build macro */ #if defined(NO_PREFETCH) # define PREFETCH_L1(ptr) (void)(ptr) /* disabled */ # define PREFETCH_L2(ptr) (void)(ptr) /* disabled */ #else # if defined(_MSC_VER) && (defined(_M_X64) || defined(_M_I86)) /* _mm_prefetch() is not defined outside of x86/x64 */ # include /* https://msdn.microsoft.com/fr-fr/library/84szxsww(v=vs.90).aspx */ # define PREFETCH_L1(ptr) _mm_prefetch((const char*)(ptr), _MM_HINT_T0) # define PREFETCH_L2(ptr) _mm_prefetch((const char*)(ptr), _MM_HINT_T1) # elif defined(__aarch64__) # define PREFETCH_L1(ptr) __asm__ __volatile__("prfm pldl1keep, %0" ::"Q"(*(ptr))) # define PREFETCH_L2(ptr) __asm__ __volatile__("prfm pldl2keep, %0" ::"Q"(*(ptr))) # elif defined(__GNUC__) && ( (__GNUC__ >= 4) || ( (__GNUC__ == 3) && (__GNUC_MINOR__ >= 1) ) ) # define PREFETCH_L1(ptr) __builtin_prefetch((ptr), 0 /* rw==read */, 3 /* locality */) # define PREFETCH_L2(ptr) __builtin_prefetch((ptr), 0 /* rw==read */, 2 /* locality */) # else # define PREFETCH_L1(ptr) (void)(ptr) /* disabled */ # define PREFETCH_L2(ptr) (void)(ptr) /* disabled */ # endif #endif /* NO_PREFETCH */ #define CACHELINE_SIZE 64 #define PREFETCH_AREA(p, s) { \ const char* const _ptr = (const char*)(p); \ size_t const _size = (size_t)(s); \ size_t _pos; \ for (_pos=0; _pos<_size; _pos+=CACHELINE_SIZE) { \ PREFETCH_L2(_ptr + _pos); \ } \ } /* vectorization * older GCC (pre gcc-4.3 picked as the cutoff) uses a different syntax */ #if !defined(__INTEL_COMPILER) && !defined(__clang__) && defined(__GNUC__) # if (__GNUC__ == 4 && __GNUC_MINOR__ > 3) || (__GNUC__ >= 5) # define DONT_VECTORIZE __attribute__((optimize("no-tree-vectorize"))) # else # define DONT_VECTORIZE _Pragma("GCC optimize(\"no-tree-vectorize\")") # endif #else # define DONT_VECTORIZE #endif /* Tell the compiler that a branch is likely or unlikely. * Only use these macros if it causes the compiler to generate better code. * If you can remove a LIKELY/UNLIKELY annotation without speed changes in gcc * and clang, please do. 
*/ #if defined(__GNUC__) #define LIKELY(x) (__builtin_expect((x), 1)) #define UNLIKELY(x) (__builtin_expect((x), 0)) #else #define LIKELY(x) (x) #define UNLIKELY(x) (x) #endif /* disable warnings */ #ifdef _MSC_VER /* Visual Studio */ # include /* For Visual 2005 */ # pragma warning(disable : 4100) /* disable: C4100: unreferenced formal parameter */ # pragma warning(disable : 4127) /* disable: C4127: conditional expression is constant */ # pragma warning(disable : 4204) /* disable: C4204: non-constant aggregate initializer */ # pragma warning(disable : 4214) /* disable: C4214: non-int bitfields */ # pragma warning(disable : 4324) /* disable: C4324: padded structure */ #endif #endif /* ZSTD_COMPILER_H */ borgbackup-1.1.15/src/borg/algorithms/zstd/lib/common/debug.h0000644000175000017500000000746513771325506024073 0ustar useruser00000000000000/* ****************************************************************** * debug * Part of FSE library * Copyright (c) 2013-2020, Yann Collet, Facebook, Inc. * * You can contact the author at : * - Source repository : https://github.com/Cyan4973/FiniteStateEntropy * * This source code is licensed under both the BSD-style license (found in the * LICENSE file in the root directory of this source tree) and the GPLv2 (found * in the COPYING file in the root directory of this source tree). * You may select, at your option, one of the above-listed licenses. ****************************************************************** */ /* * The purpose of this header is to enable debug functions. * They regroup assert(), DEBUGLOG() and RAWLOG() for run-time, * and DEBUG_STATIC_ASSERT() for compile-time. * * By default, DEBUGLEVEL==0, which means run-time debug is disabled. * * Level 1 enables assert() only. * Starting level 2, traces can be generated and pushed to stderr. * The higher the level, the more verbose the traces. * * It's possible to dynamically adjust level using variable g_debug_level, * which is only declared if DEBUGLEVEL>=2, * and is a global variable, not multi-thread protected (use with care) */ #ifndef DEBUG_H_12987983217 #define DEBUG_H_12987983217 #if defined (__cplusplus) extern "C" { #endif /* static assert is triggered at compile time, leaving no runtime artefact. * static assert only works with compile-time constants. * Also, this variant can only be used inside a function. */ #define DEBUG_STATIC_ASSERT(c) (void)sizeof(char[(c) ? 1 : -1]) /* DEBUGLEVEL is expected to be defined externally, * typically through compiler command line. * Value must be a number. */ #ifndef DEBUGLEVEL # define DEBUGLEVEL 0 #endif /* DEBUGFILE can be defined externally, * typically through compiler command line. * note : currently useless. * Value must be stderr or stdout */ #ifndef DEBUGFILE # define DEBUGFILE stderr #endif /* recommended values for DEBUGLEVEL : * 0 : release mode, no debug, all run-time checks disabled * 1 : enables assert() only, no display * 2 : reserved, for currently active debug path * 3 : events once per object lifetime (CCtx, CDict, etc.) * 4 : events once per frame * 5 : events once per block * 6 : events once per sequence (verbose) * 7+: events at every position (*very* verbose) * * It's generally inconvenient to output traces > 5. * In which case, it's possible to selectively trigger high verbosity levels * by modifying g_debug_level. 
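 *
 * A minimal usage sketch (the messages and sizes below are placeholders, not
 * code from this library), assuming compilation with -DDEBUGLEVEL=4 :
 *
 *     assert(srcSize <= dstCapacity);                      // enabled, since DEBUGLEVEL >= 1
 *     DEBUGLOG(4, "compressing frame of %u bytes", 123u);  // printed: 4 <= g_debuglevel (default 4)
 *     DEBUGLOG(5, "block header = %u", 7u);                // compiled in, but silent until
 *                                                          // g_debuglevel is raised to 5 or more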
*/ #if (DEBUGLEVEL>=1) # include #else # ifndef assert /* assert may be already defined, due to prior #include */ # define assert(condition) ((void)0) /* disable assert (default) */ # endif #endif #if (DEBUGLEVEL>=2) # include extern int g_debuglevel; /* the variable is only declared, it actually lives in debug.c, and is shared by the whole process. It's not thread-safe. It's useful when enabling very verbose levels on selective conditions (such as position in src) */ # define RAWLOG(l, ...) { \ if (l<=g_debuglevel) { \ fprintf(stderr, __VA_ARGS__); \ } } # define DEBUGLOG(l, ...) { \ if (l<=g_debuglevel) { \ fprintf(stderr, __FILE__ ": " __VA_ARGS__); \ fprintf(stderr, " \n"); \ } } #else # define RAWLOG(l, ...) {} /* disabled */ # define DEBUGLOG(l, ...) {} /* disabled */ #endif #if defined (__cplusplus) } #endif #endif /* DEBUG_H_12987983217 */ borgbackup-1.1.15/src/borg/algorithms/zstd/lib/common/debug.c0000644000175000017500000000151513771325506024054 0ustar useruser00000000000000/* ****************************************************************** * debug * Part of FSE library * Copyright (c) 2013-2020, Yann Collet, Facebook, Inc. * * You can contact the author at : * - Source repository : https://github.com/Cyan4973/FiniteStateEntropy * * This source code is licensed under both the BSD-style license (found in the * LICENSE file in the root directory of this source tree) and the GPLv2 (found * in the COPYING file in the root directory of this source tree). * You may select, at your option, one of the above-listed licenses. ****************************************************************** */ /* * This module only hosts one global variable * which can be used to dynamically influence the verbosity of traces, * such as DEBUGLOG and RAWLOG */ #include "debug.h" int g_debuglevel = DEBUGLEVEL; borgbackup-1.1.15/src/borg/algorithms/zstd/lib/common/pool.c0000644000175000017500000002546013771325506023744 0ustar useruser00000000000000/* * Copyright (c) 2016-2020, Yann Collet, Facebook, Inc. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the * LICENSE file in the root directory of this source tree) and the GPLv2 (found * in the COPYING file in the root directory of this source tree). * You may select, at your option, one of the above-listed licenses. 
*/ /* ====== Dependencies ======= */ #include /* size_t */ #include "debug.h" /* assert */ #include "zstd_internal.h" /* ZSTD_malloc, ZSTD_free */ #include "pool.h" /* ====== Compiler specifics ====== */ #if defined(_MSC_VER) # pragma warning(disable : 4204) /* disable: C4204: non-constant aggregate initializer */ #endif #ifdef ZSTD_MULTITHREAD #include "threading.h" /* pthread adaptation */ /* A job is a function and an opaque argument */ typedef struct POOL_job_s { POOL_function function; void *opaque; } POOL_job; struct POOL_ctx_s { ZSTD_customMem customMem; /* Keep track of the threads */ ZSTD_pthread_t* threads; size_t threadCapacity; size_t threadLimit; /* The queue is a circular buffer */ POOL_job *queue; size_t queueHead; size_t queueTail; size_t queueSize; /* The number of threads working on jobs */ size_t numThreadsBusy; /* Indicates if the queue is empty */ int queueEmpty; /* The mutex protects the queue */ ZSTD_pthread_mutex_t queueMutex; /* Condition variable for pushers to wait on when the queue is full */ ZSTD_pthread_cond_t queuePushCond; /* Condition variables for poppers to wait on when the queue is empty */ ZSTD_pthread_cond_t queuePopCond; /* Indicates if the queue is shutting down */ int shutdown; }; /* POOL_thread() : * Work thread for the thread pool. * Waits for jobs and executes them. * @returns : NULL on failure else non-null. */ static void* POOL_thread(void* opaque) { POOL_ctx* const ctx = (POOL_ctx*)opaque; if (!ctx) { return NULL; } for (;;) { /* Lock the mutex and wait for a non-empty queue or until shutdown */ ZSTD_pthread_mutex_lock(&ctx->queueMutex); while ( ctx->queueEmpty || (ctx->numThreadsBusy >= ctx->threadLimit) ) { if (ctx->shutdown) { /* even if !queueEmpty, (possible if numThreadsBusy >= threadLimit), * a few threads will be shutdown while !queueEmpty, * but enough threads will remain active to finish the queue */ ZSTD_pthread_mutex_unlock(&ctx->queueMutex); return opaque; } ZSTD_pthread_cond_wait(&ctx->queuePopCond, &ctx->queueMutex); } /* Pop a job off the queue */ { POOL_job const job = ctx->queue[ctx->queueHead]; ctx->queueHead = (ctx->queueHead + 1) % ctx->queueSize; ctx->numThreadsBusy++; ctx->queueEmpty = ctx->queueHead == ctx->queueTail; /* Unlock the mutex, signal a pusher, and run the job */ ZSTD_pthread_cond_signal(&ctx->queuePushCond); ZSTD_pthread_mutex_unlock(&ctx->queueMutex); job.function(job.opaque); /* If the intended queue size was 0, signal after finishing job */ ZSTD_pthread_mutex_lock(&ctx->queueMutex); ctx->numThreadsBusy--; if (ctx->queueSize == 1) { ZSTD_pthread_cond_signal(&ctx->queuePushCond); } ZSTD_pthread_mutex_unlock(&ctx->queueMutex); } } /* for (;;) */ assert(0); /* Unreachable */ } POOL_ctx* POOL_create(size_t numThreads, size_t queueSize) { return POOL_create_advanced(numThreads, queueSize, ZSTD_defaultCMem); } POOL_ctx* POOL_create_advanced(size_t numThreads, size_t queueSize, ZSTD_customMem customMem) { POOL_ctx* ctx; /* Check parameters */ if (!numThreads) { return NULL; } /* Allocate the context and zero initialize */ ctx = (POOL_ctx*)ZSTD_calloc(sizeof(POOL_ctx), customMem); if (!ctx) { return NULL; } /* Initialize the job queue. * It needs one extra space since one space is wasted to differentiate * empty and full queues. 
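     * For example, a pool created with an intended queueSize of 2 allocates 3
     * slots below: the queue is treated as empty when queueHead == queueTail
     * and as full when queueHead == (queueTail + 1) % queueSize, so at most
     * 2 jobs are ever waiting at once (see isQueueFull() further down).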
*/ ctx->queueSize = queueSize + 1; ctx->queue = (POOL_job*)ZSTD_malloc(ctx->queueSize * sizeof(POOL_job), customMem); ctx->queueHead = 0; ctx->queueTail = 0; ctx->numThreadsBusy = 0; ctx->queueEmpty = 1; { int error = 0; error |= ZSTD_pthread_mutex_init(&ctx->queueMutex, NULL); error |= ZSTD_pthread_cond_init(&ctx->queuePushCond, NULL); error |= ZSTD_pthread_cond_init(&ctx->queuePopCond, NULL); if (error) { POOL_free(ctx); return NULL; } } ctx->shutdown = 0; /* Allocate space for the thread handles */ ctx->threads = (ZSTD_pthread_t*)ZSTD_malloc(numThreads * sizeof(ZSTD_pthread_t), customMem); ctx->threadCapacity = 0; ctx->customMem = customMem; /* Check for errors */ if (!ctx->threads || !ctx->queue) { POOL_free(ctx); return NULL; } /* Initialize the threads */ { size_t i; for (i = 0; i < numThreads; ++i) { if (ZSTD_pthread_create(&ctx->threads[i], NULL, &POOL_thread, ctx)) { ctx->threadCapacity = i; POOL_free(ctx); return NULL; } } ctx->threadCapacity = numThreads; ctx->threadLimit = numThreads; } return ctx; } /*! POOL_join() : Shutdown the queue, wake any sleeping threads, and join all of the threads. */ static void POOL_join(POOL_ctx* ctx) { /* Shut down the queue */ ZSTD_pthread_mutex_lock(&ctx->queueMutex); ctx->shutdown = 1; ZSTD_pthread_mutex_unlock(&ctx->queueMutex); /* Wake up sleeping threads */ ZSTD_pthread_cond_broadcast(&ctx->queuePushCond); ZSTD_pthread_cond_broadcast(&ctx->queuePopCond); /* Join all of the threads */ { size_t i; for (i = 0; i < ctx->threadCapacity; ++i) { ZSTD_pthread_join(ctx->threads[i], NULL); /* note : could fail */ } } } void POOL_free(POOL_ctx *ctx) { if (!ctx) { return; } POOL_join(ctx); ZSTD_pthread_mutex_destroy(&ctx->queueMutex); ZSTD_pthread_cond_destroy(&ctx->queuePushCond); ZSTD_pthread_cond_destroy(&ctx->queuePopCond); ZSTD_free(ctx->queue, ctx->customMem); ZSTD_free(ctx->threads, ctx->customMem); ZSTD_free(ctx, ctx->customMem); } size_t POOL_sizeof(POOL_ctx *ctx) { if (ctx==NULL) return 0; /* supports sizeof NULL */ return sizeof(*ctx) + ctx->queueSize * sizeof(POOL_job) + ctx->threadCapacity * sizeof(ZSTD_pthread_t); } /* @return : 0 on success, 1 on error */ static int POOL_resize_internal(POOL_ctx* ctx, size_t numThreads) { if (numThreads <= ctx->threadCapacity) { if (!numThreads) return 1; ctx->threadLimit = numThreads; return 0; } /* numThreads > threadCapacity */ { ZSTD_pthread_t* const threadPool = (ZSTD_pthread_t*)ZSTD_malloc(numThreads * sizeof(ZSTD_pthread_t), ctx->customMem); if (!threadPool) return 1; /* replace existing thread pool */ memcpy(threadPool, ctx->threads, ctx->threadCapacity * sizeof(*threadPool)); ZSTD_free(ctx->threads, ctx->customMem); ctx->threads = threadPool; /* Initialize additional threads */ { size_t threadId; for (threadId = ctx->threadCapacity; threadId < numThreads; ++threadId) { if (ZSTD_pthread_create(&threadPool[threadId], NULL, &POOL_thread, ctx)) { ctx->threadCapacity = threadId; return 1; } } } } /* successfully expanded */ ctx->threadCapacity = numThreads; ctx->threadLimit = numThreads; return 0; } /* @return : 0 on success, 1 on error */ int POOL_resize(POOL_ctx* ctx, size_t numThreads) { int result; if (ctx==NULL) return 1; ZSTD_pthread_mutex_lock(&ctx->queueMutex); result = POOL_resize_internal(ctx, numThreads); ZSTD_pthread_cond_broadcast(&ctx->queuePopCond); ZSTD_pthread_mutex_unlock(&ctx->queueMutex); return result; } /** * Returns 1 if the queue is full and 0 otherwise. 
* * When queueSize is 1 (pool was created with an intended queueSize of 0), * then a queue is empty if there is a thread free _and_ no job is waiting. */ static int isQueueFull(POOL_ctx const* ctx) { if (ctx->queueSize > 1) { return ctx->queueHead == ((ctx->queueTail + 1) % ctx->queueSize); } else { return (ctx->numThreadsBusy == ctx->threadLimit) || !ctx->queueEmpty; } } static void POOL_add_internal(POOL_ctx* ctx, POOL_function function, void *opaque) { POOL_job const job = {function, opaque}; assert(ctx != NULL); if (ctx->shutdown) return; ctx->queueEmpty = 0; ctx->queue[ctx->queueTail] = job; ctx->queueTail = (ctx->queueTail + 1) % ctx->queueSize; ZSTD_pthread_cond_signal(&ctx->queuePopCond); } void POOL_add(POOL_ctx* ctx, POOL_function function, void* opaque) { assert(ctx != NULL); ZSTD_pthread_mutex_lock(&ctx->queueMutex); /* Wait until there is space in the queue for the new job */ while (isQueueFull(ctx) && (!ctx->shutdown)) { ZSTD_pthread_cond_wait(&ctx->queuePushCond, &ctx->queueMutex); } POOL_add_internal(ctx, function, opaque); ZSTD_pthread_mutex_unlock(&ctx->queueMutex); } int POOL_tryAdd(POOL_ctx* ctx, POOL_function function, void* opaque) { assert(ctx != NULL); ZSTD_pthread_mutex_lock(&ctx->queueMutex); if (isQueueFull(ctx)) { ZSTD_pthread_mutex_unlock(&ctx->queueMutex); return 0; } POOL_add_internal(ctx, function, opaque); ZSTD_pthread_mutex_unlock(&ctx->queueMutex); return 1; } #else /* ZSTD_MULTITHREAD not defined */ /* ========================== */ /* No multi-threading support */ /* ========================== */ /* We don't need any data, but if it is empty, malloc() might return NULL. */ struct POOL_ctx_s { int dummy; }; static POOL_ctx g_ctx; POOL_ctx* POOL_create(size_t numThreads, size_t queueSize) { return POOL_create_advanced(numThreads, queueSize, ZSTD_defaultCMem); } POOL_ctx* POOL_create_advanced(size_t numThreads, size_t queueSize, ZSTD_customMem customMem) { (void)numThreads; (void)queueSize; (void)customMem; return &g_ctx; } void POOL_free(POOL_ctx* ctx) { assert(!ctx || ctx == &g_ctx); (void)ctx; } int POOL_resize(POOL_ctx* ctx, size_t numThreads) { (void)ctx; (void)numThreads; return 0; } void POOL_add(POOL_ctx* ctx, POOL_function function, void* opaque) { (void)ctx; function(opaque); } int POOL_tryAdd(POOL_ctx* ctx, POOL_function function, void* opaque) { (void)ctx; function(opaque); return 1; } size_t POOL_sizeof(POOL_ctx* ctx) { if (ctx==NULL) return 0; /* supports sizeof NULL */ assert(ctx == &g_ctx); return sizeof(*ctx); } #endif /* ZSTD_MULTITHREAD */ borgbackup-1.1.15/src/borg/algorithms/zstd/lib/common/zstd_internal.h0000644000175000017500000003571413771325506025663 0ustar useruser00000000000000/* * Copyright (c) 2016-2020, Yann Collet, Facebook, Inc. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the * LICENSE file in the root directory of this source tree) and the GPLv2 (found * in the COPYING file in the root directory of this source tree). * You may select, at your option, one of the above-listed licenses. */ #ifndef ZSTD_CCOMMON_H_MODULE #define ZSTD_CCOMMON_H_MODULE /* this module contains definitions which must be identical * across compression, decompression and dictBuilder. 
* It also contains a few functions useful to at least 2 of them * and which benefit from being inlined */ /*-************************************* * Dependencies ***************************************/ #ifdef __aarch64__ #include #endif #include "compiler.h" #include "mem.h" #include "debug.h" /* assert, DEBUGLOG, RAWLOG, g_debuglevel */ #include "error_private.h" #define ZSTD_STATIC_LINKING_ONLY #include "../zstd.h" #define FSE_STATIC_LINKING_ONLY #include "fse.h" #define HUF_STATIC_LINKING_ONLY #include "huf.h" #ifndef XXH_STATIC_LINKING_ONLY # define XXH_STATIC_LINKING_ONLY /* XXH64_state_t */ #endif #include "xxhash.h" /* XXH_reset, update, digest */ #if defined (__cplusplus) extern "C" { #endif /* ---- static assert (debug) --- */ #define ZSTD_STATIC_ASSERT(c) DEBUG_STATIC_ASSERT(c) #define ZSTD_isError ERR_isError /* for inlining */ #define FSE_isError ERR_isError #define HUF_isError ERR_isError /*-************************************* * shared macros ***************************************/ #undef MIN #undef MAX #define MIN(a,b) ((a)<(b) ? (a) : (b)) #define MAX(a,b) ((a)>(b) ? (a) : (b)) /** * Ignore: this is an internal helper. * * This is a helper function to help force C99-correctness during compilation. * Under strict compilation modes, variadic macro arguments can't be empty. * However, variadic function arguments can be. Using a function therefore lets * us statically check that at least one (string) argument was passed, * independent of the compilation flags. */ static INLINE_KEYWORD UNUSED_ATTR void _force_has_format_string(const char *format, ...) { (void)format; } /** * Ignore: this is an internal helper. * * We want to force this function invocation to be syntactically correct, but * we don't want to force runtime evaluation of its arguments. */ #define _FORCE_HAS_FORMAT_STRING(...) \ if (0) { \ _force_has_format_string(__VA_ARGS__); \ } /** * Return the specified error if the condition evaluates to true. * * In debug modes, prints additional information. * In order to do that (particularly, printing the conditional that failed), * this can't just wrap RETURN_ERROR(). */ #define RETURN_ERROR_IF(cond, err, ...) \ if (cond) { \ RAWLOG(3, "%s:%d: ERROR!: check %s failed, returning %s", \ __FILE__, __LINE__, ZSTD_QUOTE(cond), ZSTD_QUOTE(ERROR(err))); \ _FORCE_HAS_FORMAT_STRING(__VA_ARGS__); \ RAWLOG(3, ": " __VA_ARGS__); \ RAWLOG(3, "\n"); \ return ERROR(err); \ } /** * Unconditionally return the specified error. * * In debug modes, prints additional information. */ #define RETURN_ERROR(err, ...) \ do { \ RAWLOG(3, "%s:%d: ERROR!: unconditional check failed, returning %s", \ __FILE__, __LINE__, ZSTD_QUOTE(ERROR(err))); \ _FORCE_HAS_FORMAT_STRING(__VA_ARGS__); \ RAWLOG(3, ": " __VA_ARGS__); \ RAWLOG(3, "\n"); \ return ERROR(err); \ } while(0); /** * If the provided expression evaluates to an error code, returns that error code. * * In debug modes, prints additional information. */ #define FORWARD_IF_ERROR(err, ...) 
\ do { \ size_t const err_code = (err); \ if (ERR_isError(err_code)) { \ RAWLOG(3, "%s:%d: ERROR!: forwarding error in %s: %s", \ __FILE__, __LINE__, ZSTD_QUOTE(err), ERR_getErrorName(err_code)); \ _FORCE_HAS_FORMAT_STRING(__VA_ARGS__); \ RAWLOG(3, ": " __VA_ARGS__); \ RAWLOG(3, "\n"); \ return err_code; \ } \ } while(0); /*-************************************* * Common constants ***************************************/ #define ZSTD_OPT_NUM (1<<12) #define ZSTD_REP_NUM 3 /* number of repcodes */ #define ZSTD_REP_MOVE (ZSTD_REP_NUM-1) static const U32 repStartValue[ZSTD_REP_NUM] = { 1, 4, 8 }; #define KB *(1 <<10) #define MB *(1 <<20) #define GB *(1U<<30) #define BIT7 128 #define BIT6 64 #define BIT5 32 #define BIT4 16 #define BIT1 2 #define BIT0 1 #define ZSTD_WINDOWLOG_ABSOLUTEMIN 10 static const size_t ZSTD_fcs_fieldSize[4] = { 0, 2, 4, 8 }; static const size_t ZSTD_did_fieldSize[4] = { 0, 1, 2, 4 }; #define ZSTD_FRAMEIDSIZE 4 /* magic number size */ #define ZSTD_BLOCKHEADERSIZE 3 /* C standard doesn't allow `static const` variable to be init using another `static const` variable */ static const size_t ZSTD_blockHeaderSize = ZSTD_BLOCKHEADERSIZE; typedef enum { bt_raw, bt_rle, bt_compressed, bt_reserved } blockType_e; #define ZSTD_FRAMECHECKSUMSIZE 4 #define MIN_SEQUENCES_SIZE 1 /* nbSeq==0 */ #define MIN_CBLOCK_SIZE (1 /*litCSize*/ + 1 /* RLE or RAW */ + MIN_SEQUENCES_SIZE /* nbSeq==0 */) /* for a non-null block */ #define HufLog 12 typedef enum { set_basic, set_rle, set_compressed, set_repeat } symbolEncodingType_e; #define LONGNBSEQ 0x7F00 #define MINMATCH 3 #define Litbits 8 #define MaxLit ((1<= 8 || (ovtype == ZSTD_no_overlap && diff <= -WILDCOPY_VECLEN)); if (ovtype == ZSTD_overlap_src_before_dst && diff < WILDCOPY_VECLEN) { /* Handle short offset copies. */ do { COPY8(op, ip) } while (op < oend); } else { assert(diff >= WILDCOPY_VECLEN || diff <= -WILDCOPY_VECLEN); /* Separate out the first COPY16() call because the copy length is * almost certain to be short, so the branches have different * probabilities. Since it is almost certain to be short, only do * one COPY16() in the first call. Then, do two calls per loop since * at that point it is more likely to have a high trip count. */ #ifndef __aarch64__ do { COPY16(op, ip); } while (op < oend); #else COPY16(op, ip); if (op >= oend) return; do { COPY16(op, ip); COPY16(op, ip); } while (op < oend); #endif } } MEM_STATIC size_t ZSTD_limitCopy(void* dst, size_t dstCapacity, const void* src, size_t srcSize) { size_t const length = MIN(dstCapacity, srcSize); if (length > 0) { memcpy(dst, src, length); } return length; } /* define "workspace is too large" as this number of times larger than needed */ #define ZSTD_WORKSPACETOOLARGE_FACTOR 3 /* when workspace is continuously too large * during at least this number of times, * context's memory usage is considered wasteful, * because it's sized to handle a worst case scenario which rarely happens. 
* In which case, resize it down to free some memory */ #define ZSTD_WORKSPACETOOLARGE_MAXDURATION 128 /*-******************************************* * Private declarations *********************************************/ typedef struct seqDef_s { U32 offset; U16 litLength; U16 matchLength; } seqDef; typedef struct { seqDef* sequencesStart; seqDef* sequences; BYTE* litStart; BYTE* lit; BYTE* llCode; BYTE* mlCode; BYTE* ofCode; size_t maxNbSeq; size_t maxNbLit; U32 longLengthID; /* 0 == no longLength; 1 == Lit.longLength; 2 == Match.longLength; */ U32 longLengthPos; } seqStore_t; typedef struct { U32 litLength; U32 matchLength; } ZSTD_sequenceLength; /** * Returns the ZSTD_sequenceLength for the given sequences. It handles the decoding of long sequences * indicated by longLengthPos and longLengthID, and adds MINMATCH back to matchLength. */ MEM_STATIC ZSTD_sequenceLength ZSTD_getSequenceLength(seqStore_t const* seqStore, seqDef const* seq) { ZSTD_sequenceLength seqLen; seqLen.litLength = seq->litLength; seqLen.matchLength = seq->matchLength + MINMATCH; if (seqStore->longLengthPos == (U32)(seq - seqStore->sequencesStart)) { if (seqStore->longLengthID == 1) { seqLen.litLength += 0xFFFF; } if (seqStore->longLengthID == 2) { seqLen.matchLength += 0xFFFF; } } return seqLen; } /** * Contains the compressed frame size and an upper-bound for the decompressed frame size. * Note: before using `compressedSize`, check for errors using ZSTD_isError(). * similarly, before using `decompressedBound`, check for errors using: * `decompressedBound != ZSTD_CONTENTSIZE_ERROR` */ typedef struct { size_t compressedSize; unsigned long long decompressedBound; } ZSTD_frameSizeInfo; /* decompress & legacy */ const seqStore_t* ZSTD_getSeqStore(const ZSTD_CCtx* ctx); /* compress & dictBuilder */ void ZSTD_seqToCodes(const seqStore_t* seqStorePtr); /* compress, dictBuilder, decodeCorpus (shouldn't get its definition from here) */ /* custom memory allocation functions */ void* ZSTD_malloc(size_t size, ZSTD_customMem customMem); void* ZSTD_calloc(size_t size, ZSTD_customMem customMem); void ZSTD_free(void* ptr, ZSTD_customMem customMem); MEM_STATIC U32 ZSTD_highbit32(U32 val) /* compress, dictBuilder, decodeCorpus */ { assert(val != 0); { # if defined(_MSC_VER) /* Visual */ unsigned long r=0; return _BitScanReverse(&r, val) ? (unsigned)r : 0; # elif defined(__GNUC__) && (__GNUC__ >= 3) /* GCC Intrinsic */ return __builtin_clz (val) ^ 31; # elif defined(__ICCARM__) /* IAR Intrinsic */ return 31 - __CLZ(val); # else /* Software version */ static const U32 DeBruijnClz[32] = { 0, 9, 1, 10, 13, 21, 2, 29, 11, 14, 16, 18, 22, 25, 3, 30, 8, 12, 20, 28, 15, 17, 24, 7, 19, 27, 23, 6, 26, 5, 4, 31 }; U32 v = val; v |= v >> 1; v |= v >> 2; v |= v >> 4; v |= v >> 8; v |= v >> 16; return DeBruijnClz[(v * 0x07C4ACDDU) >> 27]; # endif } } /* ZSTD_invalidateRepCodes() : * ensures next compression will not use repcodes from previous block. * Note : only works with regular variant; * do not use with extDict variant ! */ void ZSTD_invalidateRepCodes(ZSTD_CCtx* cctx); /* zstdmt, adaptive_compression (shouldn't get this definition from here) */ typedef struct { blockType_e blockType; U32 lastBlock; U32 origSize; } blockProperties_t; /* declared here for decompress and fullbench */ /*! ZSTD_getcBlockSize() : * Provides the size of compressed block from block header `src` */ /* Used by: decompress, fullbench (does not get its definition from here) */ size_t ZSTD_getcBlockSize(const void* src, size_t srcSize, blockProperties_t* bpPtr); /*! 
ZSTD_decodeSeqHeaders() : * decode sequence header from src */ /* Used by: decompress, fullbench (does not get its definition from here) */ size_t ZSTD_decodeSeqHeaders(ZSTD_DCtx* dctx, int* nbSeqPtr, const void* src, size_t srcSize); #if defined (__cplusplus) } #endif #endif /* ZSTD_CCOMMON_H_MODULE */ borgbackup-1.1.15/src/borg/algorithms/zstd/lib/common/zstd_errors.h0000644000175000017500000000731413771325506025356 0ustar useruser00000000000000/* * Copyright (c) 2016-2020, Yann Collet, Facebook, Inc. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the * LICENSE file in the root directory of this source tree) and the GPLv2 (found * in the COPYING file in the root directory of this source tree). * You may select, at your option, one of the above-listed licenses. */ #ifndef ZSTD_ERRORS_H_398273423 #define ZSTD_ERRORS_H_398273423 #if defined (__cplusplus) extern "C" { #endif /*===== dependency =====*/ #include /* size_t */ /* ===== ZSTDERRORLIB_API : control library symbols visibility ===== */ #ifndef ZSTDERRORLIB_VISIBILITY # if defined(__GNUC__) && (__GNUC__ >= 4) # define ZSTDERRORLIB_VISIBILITY __attribute__ ((visibility ("default"))) # else # define ZSTDERRORLIB_VISIBILITY # endif #endif #if defined(ZSTD_DLL_EXPORT) && (ZSTD_DLL_EXPORT==1) # define ZSTDERRORLIB_API __declspec(dllexport) ZSTDERRORLIB_VISIBILITY #elif defined(ZSTD_DLL_IMPORT) && (ZSTD_DLL_IMPORT==1) # define ZSTDERRORLIB_API __declspec(dllimport) ZSTDERRORLIB_VISIBILITY /* It isn't required but allows to generate better code, saving a function pointer load from the IAT and an indirect jump.*/ #else # define ZSTDERRORLIB_API ZSTDERRORLIB_VISIBILITY #endif /*-********************************************* * Error codes list *-********************************************* * Error codes _values_ are pinned down since v1.3.1 only. * Therefore, don't rely on values if you may link to any version < v1.3.1. * * Only values < 100 are considered stable. * * note 1 : this API shall be used with static linking only. * dynamic linking is not yet officially supported. * note 2 : Prefer relying on the enum than on its value whenever possible * This is the only supported way to use the error list < v1.3.1 * note 3 : ZSTD_isError() is always correct, whatever the library version. **********************************************/ typedef enum { ZSTD_error_no_error = 0, ZSTD_error_GENERIC = 1, ZSTD_error_prefix_unknown = 10, ZSTD_error_version_unsupported = 12, ZSTD_error_frameParameter_unsupported = 14, ZSTD_error_frameParameter_windowTooLarge = 16, ZSTD_error_corruption_detected = 20, ZSTD_error_checksum_wrong = 22, ZSTD_error_dictionary_corrupted = 30, ZSTD_error_dictionary_wrong = 32, ZSTD_error_dictionaryCreation_failed = 34, ZSTD_error_parameter_unsupported = 40, ZSTD_error_parameter_outOfBound = 42, ZSTD_error_tableLog_tooLarge = 44, ZSTD_error_maxSymbolValue_tooLarge = 46, ZSTD_error_maxSymbolValue_tooSmall = 48, ZSTD_error_stage_wrong = 60, ZSTD_error_init_missing = 62, ZSTD_error_memory_allocation = 64, ZSTD_error_workSpace_tooSmall= 66, ZSTD_error_dstSize_tooSmall = 70, ZSTD_error_srcSize_wrong = 72, ZSTD_error_dstBuffer_null = 74, /* following error codes are __NOT STABLE__, they can be removed or changed in future versions */ ZSTD_error_frameIndex_tooLarge = 100, ZSTD_error_seekableIO = 102, ZSTD_error_dstBuffer_wrong = 104, ZSTD_error_maxCode = 120 /* never EVER use this value directly, it can change in future versions! Use ZSTD_isError() instead */ } ZSTD_ErrorCode; /*! 
ZSTD_getErrorCode() : convert a `size_t` function result into a `ZSTD_ErrorCode` enum type, which can be used to compare with enum list published above */ ZSTDERRORLIB_API ZSTD_ErrorCode ZSTD_getErrorCode(size_t functionResult); ZSTDERRORLIB_API const char* ZSTD_getErrorString(ZSTD_ErrorCode code); /**< Same as ZSTD_getErrorName, but using a `ZSTD_ErrorCode` enum argument */ #if defined (__cplusplus) } #endif #endif /* ZSTD_ERRORS_H_398273423 */ borgbackup-1.1.15/src/borg/algorithms/zstd/lib/common/xxhash.c0000644000175000017500000006672613771325506024310 0ustar useruser00000000000000/* * xxHash - Fast Hash algorithm * Copyright (c) 2012-2020, Yann Collet, Facebook, Inc. * * You can contact the author at : * - xxHash homepage: http://www.xxhash.com * - xxHash source repository : https://github.com/Cyan4973/xxHash * * This source code is licensed under both the BSD-style license (found in the * LICENSE file in the root directory of this source tree) and the GPLv2 (found * in the COPYING file in the root directory of this source tree). * You may select, at your option, one of the above-listed licenses. */ /* ************************************* * Tuning parameters ***************************************/ /*!XXH_FORCE_MEMORY_ACCESS : * By default, access to unaligned memory is controlled by `memcpy()`, which is safe and portable. * Unfortunately, on some target/compiler combinations, the generated assembly is sub-optimal. * The below switch allow to select different access method for improved performance. * Method 0 (default) : use `memcpy()`. Safe and portable. * Method 1 : `__packed` statement. It depends on compiler extension (ie, not portable). * This method is safe if your compiler supports it, and *generally* as fast or faster than `memcpy`. * Method 2 : direct access. This method doesn't depend on compiler but violate C standard. * It can generate buggy code on targets which do not support unaligned memory accesses. * But in some circumstances, it's the only known way to get the most performance (ie GCC + ARMv6) * See http://stackoverflow.com/a/32095106/646947 for details. * Prefer these methods in priority order (0 > 1 > 2) */ #ifndef XXH_FORCE_MEMORY_ACCESS /* can be defined externally, on command line for example */ # if defined(__GNUC__) && ( defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) || defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6Z__) || defined(__ARM_ARCH_6ZK__) || defined(__ARM_ARCH_6T2__) ) # define XXH_FORCE_MEMORY_ACCESS 2 # elif (defined(__INTEL_COMPILER) && !defined(WIN32)) || \ (defined(__GNUC__) && ( defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__) || defined(__ARM_ARCH_7R__) || defined(__ARM_ARCH_7M__) || defined(__ARM_ARCH_7S__) )) || \ defined(__ICCARM__) # define XXH_FORCE_MEMORY_ACCESS 1 # endif #endif /*!XXH_ACCEPT_NULL_INPUT_POINTER : * If the input pointer is a null pointer, xxHash default behavior is to trigger a memory access error, since it is a bad pointer. * When this option is enabled, xxHash output for null input pointers will be the same as a null-length input. * By default, this option is disabled. To enable it, uncomment below define : */ /* #define XXH_ACCEPT_NULL_INPUT_POINTER 1 */ /*!XXH_FORCE_NATIVE_FORMAT : * By default, xxHash library provides endian-independent Hash values, based on little-endian convention. * Results are therefore identical for little-endian and big-endian CPU. * This comes at a performance cost for big-endian CPU, since some swapping is required to emulate little-endian format. 
* Should endian-independence be of no importance for your application, you may set the #define below to 1, * to improve speed for Big-endian CPU. * This option has no impact on Little_Endian CPU. */ #ifndef XXH_FORCE_NATIVE_FORMAT /* can be defined externally */ # define XXH_FORCE_NATIVE_FORMAT 0 #endif /*!XXH_FORCE_ALIGN_CHECK : * This is a minor performance trick, only useful with lots of very small keys. * It means : check for aligned/unaligned input. * The check costs one initial branch per hash; set to 0 when the input data * is guaranteed to be aligned. */ #ifndef XXH_FORCE_ALIGN_CHECK /* can be defined externally */ # if defined(__i386) || defined(_M_IX86) || defined(__x86_64__) || defined(_M_X64) # define XXH_FORCE_ALIGN_CHECK 0 # else # define XXH_FORCE_ALIGN_CHECK 1 # endif #endif /* ************************************* * Includes & Memory related functions ***************************************/ /* Modify the local functions below should you wish to use some other memory routines */ /* for malloc(), free() */ #include #include /* size_t */ static void* XXH_malloc(size_t s) { return malloc(s); } static void XXH_free (void* p) { free(p); } /* for memcpy() */ #include static void* XXH_memcpy(void* dest, const void* src, size_t size) { return memcpy(dest,src,size); } #ifndef XXH_STATIC_LINKING_ONLY # define XXH_STATIC_LINKING_ONLY #endif #include "xxhash.h" /* ************************************* * Compiler Specific Options ***************************************/ #if (defined(__GNUC__) && !defined(__STRICT_ANSI__)) || defined(__cplusplus) || defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199901L /* C99 */ # define INLINE_KEYWORD inline #else # define INLINE_KEYWORD #endif #if defined(__GNUC__) || defined(__ICCARM__) # define FORCE_INLINE_ATTR __attribute__((always_inline)) #elif defined(_MSC_VER) # define FORCE_INLINE_ATTR __forceinline #else # define FORCE_INLINE_ATTR #endif #define FORCE_INLINE_TEMPLATE static INLINE_KEYWORD FORCE_INLINE_ATTR #ifdef _MSC_VER # pragma warning(disable : 4127) /* disable: C4127: conditional expression is constant */ #endif /* ************************************* * Basic Types ***************************************/ #ifndef MEM_MODULE # define MEM_MODULE # if !defined (__VMS) && (defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) ) # include typedef uint8_t BYTE; typedef uint16_t U16; typedef uint32_t U32; typedef int32_t S32; typedef uint64_t U64; # else typedef unsigned char BYTE; typedef unsigned short U16; typedef unsigned int U32; typedef signed int S32; typedef unsigned long long U64; /* if your compiler doesn't support unsigned long long, replace by another 64-bit type here. Note that xxhash.h will also need to be updated. */ # endif #endif #if (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==2)) /* Force direct memory access. 
Only works on CPU which support unaligned memory access in hardware */ static U32 XXH_read32(const void* memPtr) { return *(const U32*) memPtr; } static U64 XXH_read64(const void* memPtr) { return *(const U64*) memPtr; } #elif (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==1)) /* __pack instructions are safer, but compiler specific, hence potentially problematic for some compilers */ /* currently only defined for gcc and icc */ typedef union { U32 u32; U64 u64; } __attribute__((packed)) unalign; static U32 XXH_read32(const void* ptr) { return ((const unalign*)ptr)->u32; } static U64 XXH_read64(const void* ptr) { return ((const unalign*)ptr)->u64; } #else /* portable and safe solution. Generally efficient. * see : http://stackoverflow.com/a/32095106/646947 */ static U32 XXH_read32(const void* memPtr) { U32 val; memcpy(&val, memPtr, sizeof(val)); return val; } static U64 XXH_read64(const void* memPtr) { U64 val; memcpy(&val, memPtr, sizeof(val)); return val; } #endif /* XXH_FORCE_DIRECT_MEMORY_ACCESS */ /* **************************************** * Compiler-specific Functions and Macros ******************************************/ #define GCC_VERSION (__GNUC__ * 100 + __GNUC_MINOR__) /* Note : although _rotl exists for minGW (GCC under windows), performance seems poor */ #if defined(_MSC_VER) # define XXH_rotl32(x,r) _rotl(x,r) # define XXH_rotl64(x,r) _rotl64(x,r) #else #if defined(__ICCARM__) # include # define XXH_rotl32(x,r) __ROR(x,(32 - r)) #else # define XXH_rotl32(x,r) ((x << r) | (x >> (32 - r))) #endif # define XXH_rotl64(x,r) ((x << r) | (x >> (64 - r))) #endif #if defined(_MSC_VER) /* Visual Studio */ # define XXH_swap32 _byteswap_ulong # define XXH_swap64 _byteswap_uint64 #elif GCC_VERSION >= 403 # define XXH_swap32 __builtin_bswap32 # define XXH_swap64 __builtin_bswap64 #else static U32 XXH_swap32 (U32 x) { return ((x << 24) & 0xff000000 ) | ((x << 8) & 0x00ff0000 ) | ((x >> 8) & 0x0000ff00 ) | ((x >> 24) & 0x000000ff ); } static U64 XXH_swap64 (U64 x) { return ((x << 56) & 0xff00000000000000ULL) | ((x << 40) & 0x00ff000000000000ULL) | ((x << 24) & 0x0000ff0000000000ULL) | ((x << 8) & 0x000000ff00000000ULL) | ((x >> 8) & 0x00000000ff000000ULL) | ((x >> 24) & 0x0000000000ff0000ULL) | ((x >> 40) & 0x000000000000ff00ULL) | ((x >> 56) & 0x00000000000000ffULL); } #endif /* ************************************* * Architecture Macros ***************************************/ typedef enum { XXH_bigEndian=0, XXH_littleEndian=1 } XXH_endianess; /* XXH_CPU_LITTLE_ENDIAN can be defined externally, for example on the compiler command line */ #ifndef XXH_CPU_LITTLE_ENDIAN static const int g_one = 1; # define XXH_CPU_LITTLE_ENDIAN (*(const char*)(&g_one)) #endif /* *************************** * Memory reads *****************************/ typedef enum { XXH_aligned, XXH_unaligned } XXH_alignment; FORCE_INLINE_TEMPLATE U32 XXH_readLE32_align(const void* ptr, XXH_endianess endian, XXH_alignment align) { if (align==XXH_unaligned) return endian==XXH_littleEndian ? XXH_read32(ptr) : XXH_swap32(XXH_read32(ptr)); else return endian==XXH_littleEndian ? *(const U32*)ptr : XXH_swap32(*(const U32*)ptr); } FORCE_INLINE_TEMPLATE U32 XXH_readLE32(const void* ptr, XXH_endianess endian) { return XXH_readLE32_align(ptr, endian, XXH_unaligned); } static U32 XXH_readBE32(const void* ptr) { return XXH_CPU_LITTLE_ENDIAN ? 
XXH_swap32(XXH_read32(ptr)) : XXH_read32(ptr); } FORCE_INLINE_TEMPLATE U64 XXH_readLE64_align(const void* ptr, XXH_endianess endian, XXH_alignment align) { if (align==XXH_unaligned) return endian==XXH_littleEndian ? XXH_read64(ptr) : XXH_swap64(XXH_read64(ptr)); else return endian==XXH_littleEndian ? *(const U64*)ptr : XXH_swap64(*(const U64*)ptr); } FORCE_INLINE_TEMPLATE U64 XXH_readLE64(const void* ptr, XXH_endianess endian) { return XXH_readLE64_align(ptr, endian, XXH_unaligned); } static U64 XXH_readBE64(const void* ptr) { return XXH_CPU_LITTLE_ENDIAN ? XXH_swap64(XXH_read64(ptr)) : XXH_read64(ptr); } /* ************************************* * Macros ***************************************/ #define XXH_STATIC_ASSERT(c) { enum { XXH_static_assert = 1/(int)(!!(c)) }; } /* use only *after* variable declarations */ /* ************************************* * Constants ***************************************/ static const U32 PRIME32_1 = 2654435761U; static const U32 PRIME32_2 = 2246822519U; static const U32 PRIME32_3 = 3266489917U; static const U32 PRIME32_4 = 668265263U; static const U32 PRIME32_5 = 374761393U; static const U64 PRIME64_1 = 11400714785074694791ULL; static const U64 PRIME64_2 = 14029467366897019727ULL; static const U64 PRIME64_3 = 1609587929392839161ULL; static const U64 PRIME64_4 = 9650029242287828579ULL; static const U64 PRIME64_5 = 2870177450012600261ULL; XXH_PUBLIC_API unsigned XXH_versionNumber (void) { return XXH_VERSION_NUMBER; } /* ************************** * Utils ****************************/ XXH_PUBLIC_API void XXH32_copyState(XXH32_state_t* restrict dstState, const XXH32_state_t* restrict srcState) { memcpy(dstState, srcState, sizeof(*dstState)); } XXH_PUBLIC_API void XXH64_copyState(XXH64_state_t* restrict dstState, const XXH64_state_t* restrict srcState) { memcpy(dstState, srcState, sizeof(*dstState)); } /* *************************** * Simple Hash Functions *****************************/ static U32 XXH32_round(U32 seed, U32 input) { seed += input * PRIME32_2; seed = XXH_rotl32(seed, 13); seed *= PRIME32_1; return seed; } FORCE_INLINE_TEMPLATE U32 XXH32_endian_align(const void* input, size_t len, U32 seed, XXH_endianess endian, XXH_alignment align) { const BYTE* p = (const BYTE*)input; const BYTE* bEnd = p + len; U32 h32; #define XXH_get32bits(p) XXH_readLE32_align(p, endian, align) #ifdef XXH_ACCEPT_NULL_INPUT_POINTER if (p==NULL) { len=0; bEnd=p=(const BYTE*)(size_t)16; } #endif if (len>=16) { const BYTE* const limit = bEnd - 16; U32 v1 = seed + PRIME32_1 + PRIME32_2; U32 v2 = seed + PRIME32_2; U32 v3 = seed + 0; U32 v4 = seed - PRIME32_1; do { v1 = XXH32_round(v1, XXH_get32bits(p)); p+=4; v2 = XXH32_round(v2, XXH_get32bits(p)); p+=4; v3 = XXH32_round(v3, XXH_get32bits(p)); p+=4; v4 = XXH32_round(v4, XXH_get32bits(p)); p+=4; } while (p<=limit); h32 = XXH_rotl32(v1, 1) + XXH_rotl32(v2, 7) + XXH_rotl32(v3, 12) + XXH_rotl32(v4, 18); } else { h32 = seed + PRIME32_5; } h32 += (U32) len; while (p+4<=bEnd) { h32 += XXH_get32bits(p) * PRIME32_3; h32 = XXH_rotl32(h32, 17) * PRIME32_4 ; p+=4; } while (p> 15; h32 *= PRIME32_2; h32 ^= h32 >> 13; h32 *= PRIME32_3; h32 ^= h32 >> 16; return h32; } XXH_PUBLIC_API unsigned int XXH32 (const void* input, size_t len, unsigned int seed) { #if 0 /* Simple version, good for code maintenance, but unfortunately slow for small inputs */ XXH32_CREATESTATE_STATIC(state); XXH32_reset(state, seed); XXH32_update(state, input, len); return XXH32_digest(state); #else XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN; if 
(XXH_FORCE_ALIGN_CHECK) { if ((((size_t)input) & 3) == 0) { /* Input is 4-bytes aligned, leverage the speed benefit */ if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT) return XXH32_endian_align(input, len, seed, XXH_littleEndian, XXH_aligned); else return XXH32_endian_align(input, len, seed, XXH_bigEndian, XXH_aligned); } } if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT) return XXH32_endian_align(input, len, seed, XXH_littleEndian, XXH_unaligned); else return XXH32_endian_align(input, len, seed, XXH_bigEndian, XXH_unaligned); #endif } static U64 XXH64_round(U64 acc, U64 input) { acc += input * PRIME64_2; acc = XXH_rotl64(acc, 31); acc *= PRIME64_1; return acc; } static U64 XXH64_mergeRound(U64 acc, U64 val) { val = XXH64_round(0, val); acc ^= val; acc = acc * PRIME64_1 + PRIME64_4; return acc; } FORCE_INLINE_TEMPLATE U64 XXH64_endian_align(const void* input, size_t len, U64 seed, XXH_endianess endian, XXH_alignment align) { const BYTE* p = (const BYTE*)input; const BYTE* const bEnd = p + len; U64 h64; #define XXH_get64bits(p) XXH_readLE64_align(p, endian, align) #ifdef XXH_ACCEPT_NULL_INPUT_POINTER if (p==NULL) { len=0; bEnd=p=(const BYTE*)(size_t)32; } #endif if (len>=32) { const BYTE* const limit = bEnd - 32; U64 v1 = seed + PRIME64_1 + PRIME64_2; U64 v2 = seed + PRIME64_2; U64 v3 = seed + 0; U64 v4 = seed - PRIME64_1; do { v1 = XXH64_round(v1, XXH_get64bits(p)); p+=8; v2 = XXH64_round(v2, XXH_get64bits(p)); p+=8; v3 = XXH64_round(v3, XXH_get64bits(p)); p+=8; v4 = XXH64_round(v4, XXH_get64bits(p)); p+=8; } while (p<=limit); h64 = XXH_rotl64(v1, 1) + XXH_rotl64(v2, 7) + XXH_rotl64(v3, 12) + XXH_rotl64(v4, 18); h64 = XXH64_mergeRound(h64, v1); h64 = XXH64_mergeRound(h64, v2); h64 = XXH64_mergeRound(h64, v3); h64 = XXH64_mergeRound(h64, v4); } else { h64 = seed + PRIME64_5; } h64 += (U64) len; while (p+8<=bEnd) { U64 const k1 = XXH64_round(0, XXH_get64bits(p)); h64 ^= k1; h64 = XXH_rotl64(h64,27) * PRIME64_1 + PRIME64_4; p+=8; } if (p+4<=bEnd) { h64 ^= (U64)(XXH_get32bits(p)) * PRIME64_1; h64 = XXH_rotl64(h64, 23) * PRIME64_2 + PRIME64_3; p+=4; } while (p> 33; h64 *= PRIME64_2; h64 ^= h64 >> 29; h64 *= PRIME64_3; h64 ^= h64 >> 32; return h64; } XXH_PUBLIC_API unsigned long long XXH64 (const void* input, size_t len, unsigned long long seed) { #if 0 /* Simple version, good for code maintenance, but unfortunately slow for small inputs */ XXH64_CREATESTATE_STATIC(state); XXH64_reset(state, seed); XXH64_update(state, input, len); return XXH64_digest(state); #else XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN; if (XXH_FORCE_ALIGN_CHECK) { if ((((size_t)input) & 7)==0) { /* Input is aligned, let's leverage the speed advantage */ if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT) return XXH64_endian_align(input, len, seed, XXH_littleEndian, XXH_aligned); else return XXH64_endian_align(input, len, seed, XXH_bigEndian, XXH_aligned); } } if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT) return XXH64_endian_align(input, len, seed, XXH_littleEndian, XXH_unaligned); else return XXH64_endian_align(input, len, seed, XXH_bigEndian, XXH_unaligned); #endif } /* ************************************************** * Advanced Hash Functions ****************************************************/ XXH_PUBLIC_API XXH32_state_t* XXH32_createState(void) { return (XXH32_state_t*)XXH_malloc(sizeof(XXH32_state_t)); } XXH_PUBLIC_API XXH_errorcode XXH32_freeState(XXH32_state_t* statePtr) { XXH_free(statePtr); return XXH_OK; } 
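/* A minimal streaming-API usage sketch (illustrative only; excluded from the
 * build by the #if 0 guard, and the helper name is hypothetical). Feeding the
 * input through several XXH32_update() calls produces the same digest as the
 * one-shot XXH32() defined above. */
#if 0
static unsigned example_xxh32_streaming(const void* data, size_t len, unsigned seed)
{
    XXH32_state_t* const state = XXH32_createState();
    unsigned h;
    if (state == NULL) return 0;            /* allocation failure */
    XXH32_reset(state, seed);               /* start a new hash with the given seed */
    XXH32_update(state, data, len / 2);     /* data may be fed in arbitrary pieces */
    XXH32_update(state, (const char*)data + len / 2, len - len / 2);
    h = XXH32_digest(state);                /* equals XXH32(data, len, seed) */
    XXH32_freeState(state);
    return h;
}
#endif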
XXH_PUBLIC_API XXH64_state_t* XXH64_createState(void) { return (XXH64_state_t*)XXH_malloc(sizeof(XXH64_state_t)); } XXH_PUBLIC_API XXH_errorcode XXH64_freeState(XXH64_state_t* statePtr) { XXH_free(statePtr); return XXH_OK; } /*** Hash feed ***/ XXH_PUBLIC_API XXH_errorcode XXH32_reset(XXH32_state_t* statePtr, unsigned int seed) { XXH32_state_t state; /* using a local state to memcpy() in order to avoid strict-aliasing warnings */ memset(&state, 0, sizeof(state)-4); /* do not write into reserved, for future removal */ state.v1 = seed + PRIME32_1 + PRIME32_2; state.v2 = seed + PRIME32_2; state.v3 = seed + 0; state.v4 = seed - PRIME32_1; memcpy(statePtr, &state, sizeof(state)); return XXH_OK; } XXH_PUBLIC_API XXH_errorcode XXH64_reset(XXH64_state_t* statePtr, unsigned long long seed) { XXH64_state_t state; /* using a local state to memcpy() in order to avoid strict-aliasing warnings */ memset(&state, 0, sizeof(state)-8); /* do not write into reserved, for future removal */ state.v1 = seed + PRIME64_1 + PRIME64_2; state.v2 = seed + PRIME64_2; state.v3 = seed + 0; state.v4 = seed - PRIME64_1; memcpy(statePtr, &state, sizeof(state)); return XXH_OK; } FORCE_INLINE_TEMPLATE XXH_errorcode XXH32_update_endian (XXH32_state_t* state, const void* input, size_t len, XXH_endianess endian) { const BYTE* p = (const BYTE*)input; const BYTE* const bEnd = p + len; #ifdef XXH_ACCEPT_NULL_INPUT_POINTER if (input==NULL) return XXH_ERROR; #endif state->total_len_32 += (unsigned)len; state->large_len |= (len>=16) | (state->total_len_32>=16); if (state->memsize + len < 16) { /* fill in tmp buffer */ XXH_memcpy((BYTE*)(state->mem32) + state->memsize, input, len); state->memsize += (unsigned)len; return XXH_OK; } if (state->memsize) { /* some data left from previous update */ XXH_memcpy((BYTE*)(state->mem32) + state->memsize, input, 16-state->memsize); { const U32* p32 = state->mem32; state->v1 = XXH32_round(state->v1, XXH_readLE32(p32, endian)); p32++; state->v2 = XXH32_round(state->v2, XXH_readLE32(p32, endian)); p32++; state->v3 = XXH32_round(state->v3, XXH_readLE32(p32, endian)); p32++; state->v4 = XXH32_round(state->v4, XXH_readLE32(p32, endian)); p32++; } p += 16-state->memsize; state->memsize = 0; } if (p <= bEnd-16) { const BYTE* const limit = bEnd - 16; U32 v1 = state->v1; U32 v2 = state->v2; U32 v3 = state->v3; U32 v4 = state->v4; do { v1 = XXH32_round(v1, XXH_readLE32(p, endian)); p+=4; v2 = XXH32_round(v2, XXH_readLE32(p, endian)); p+=4; v3 = XXH32_round(v3, XXH_readLE32(p, endian)); p+=4; v4 = XXH32_round(v4, XXH_readLE32(p, endian)); p+=4; } while (p<=limit); state->v1 = v1; state->v2 = v2; state->v3 = v3; state->v4 = v4; } if (p < bEnd) { XXH_memcpy(state->mem32, p, (size_t)(bEnd-p)); state->memsize = (unsigned)(bEnd-p); } return XXH_OK; } XXH_PUBLIC_API XXH_errorcode XXH32_update (XXH32_state_t* state_in, const void* input, size_t len) { XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN; if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT) return XXH32_update_endian(state_in, input, len, XXH_littleEndian); else return XXH32_update_endian(state_in, input, len, XXH_bigEndian); } FORCE_INLINE_TEMPLATE U32 XXH32_digest_endian (const XXH32_state_t* state, XXH_endianess endian) { const BYTE * p = (const BYTE*)state->mem32; const BYTE* const bEnd = (const BYTE*)(state->mem32) + state->memsize; U32 h32; if (state->large_len) { h32 = XXH_rotl32(state->v1, 1) + XXH_rotl32(state->v2, 7) + XXH_rotl32(state->v3, 12) + XXH_rotl32(state->v4, 18); } else { h32 = state->v3 /* == seed 
*/ + PRIME32_5; } h32 += state->total_len_32; while (p+4<=bEnd) { h32 += XXH_readLE32(p, endian) * PRIME32_3; h32 = XXH_rotl32(h32, 17) * PRIME32_4; p+=4; } while (p> 15; h32 *= PRIME32_2; h32 ^= h32 >> 13; h32 *= PRIME32_3; h32 ^= h32 >> 16; return h32; } XXH_PUBLIC_API unsigned int XXH32_digest (const XXH32_state_t* state_in) { XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN; if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT) return XXH32_digest_endian(state_in, XXH_littleEndian); else return XXH32_digest_endian(state_in, XXH_bigEndian); } /* **** XXH64 **** */ FORCE_INLINE_TEMPLATE XXH_errorcode XXH64_update_endian (XXH64_state_t* state, const void* input, size_t len, XXH_endianess endian) { const BYTE* p = (const BYTE*)input; const BYTE* const bEnd = p + len; #ifdef XXH_ACCEPT_NULL_INPUT_POINTER if (input==NULL) return XXH_ERROR; #endif state->total_len += len; if (state->memsize + len < 32) { /* fill in tmp buffer */ if (input != NULL) { XXH_memcpy(((BYTE*)state->mem64) + state->memsize, input, len); } state->memsize += (U32)len; return XXH_OK; } if (state->memsize) { /* tmp buffer is full */ XXH_memcpy(((BYTE*)state->mem64) + state->memsize, input, 32-state->memsize); state->v1 = XXH64_round(state->v1, XXH_readLE64(state->mem64+0, endian)); state->v2 = XXH64_round(state->v2, XXH_readLE64(state->mem64+1, endian)); state->v3 = XXH64_round(state->v3, XXH_readLE64(state->mem64+2, endian)); state->v4 = XXH64_round(state->v4, XXH_readLE64(state->mem64+3, endian)); p += 32-state->memsize; state->memsize = 0; } if (p+32 <= bEnd) { const BYTE* const limit = bEnd - 32; U64 v1 = state->v1; U64 v2 = state->v2; U64 v3 = state->v3; U64 v4 = state->v4; do { v1 = XXH64_round(v1, XXH_readLE64(p, endian)); p+=8; v2 = XXH64_round(v2, XXH_readLE64(p, endian)); p+=8; v3 = XXH64_round(v3, XXH_readLE64(p, endian)); p+=8; v4 = XXH64_round(v4, XXH_readLE64(p, endian)); p+=8; } while (p<=limit); state->v1 = v1; state->v2 = v2; state->v3 = v3; state->v4 = v4; } if (p < bEnd) { XXH_memcpy(state->mem64, p, (size_t)(bEnd-p)); state->memsize = (unsigned)(bEnd-p); } return XXH_OK; } XXH_PUBLIC_API XXH_errorcode XXH64_update (XXH64_state_t* state_in, const void* input, size_t len) { XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN; if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT) return XXH64_update_endian(state_in, input, len, XXH_littleEndian); else return XXH64_update_endian(state_in, input, len, XXH_bigEndian); } FORCE_INLINE_TEMPLATE U64 XXH64_digest_endian (const XXH64_state_t* state, XXH_endianess endian) { const BYTE * p = (const BYTE*)state->mem64; const BYTE* const bEnd = (const BYTE*)state->mem64 + state->memsize; U64 h64; if (state->total_len >= 32) { U64 const v1 = state->v1; U64 const v2 = state->v2; U64 const v3 = state->v3; U64 const v4 = state->v4; h64 = XXH_rotl64(v1, 1) + XXH_rotl64(v2, 7) + XXH_rotl64(v3, 12) + XXH_rotl64(v4, 18); h64 = XXH64_mergeRound(h64, v1); h64 = XXH64_mergeRound(h64, v2); h64 = XXH64_mergeRound(h64, v3); h64 = XXH64_mergeRound(h64, v4); } else { h64 = state->v3 + PRIME64_5; } h64 += (U64) state->total_len; while (p+8<=bEnd) { U64 const k1 = XXH64_round(0, XXH_readLE64(p, endian)); h64 ^= k1; h64 = XXH_rotl64(h64,27) * PRIME64_1 + PRIME64_4; p+=8; } if (p+4<=bEnd) { h64 ^= (U64)(XXH_readLE32(p, endian)) * PRIME64_1; h64 = XXH_rotl64(h64, 23) * PRIME64_2 + PRIME64_3; p+=4; } while (p> 33; h64 *= PRIME64_2; h64 ^= h64 >> 29; h64 *= PRIME64_3; h64 ^= h64 >> 32; return h64; } XXH_PUBLIC_API unsigned 
long long XXH64_digest (const XXH64_state_t* state_in) { XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN; if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT) return XXH64_digest_endian(state_in, XXH_littleEndian); else return XXH64_digest_endian(state_in, XXH_bigEndian); } /* ************************** * Canonical representation ****************************/ /*! Default XXH result types are basic unsigned 32 and 64 bits. * The canonical representation follows human-readable write convention, aka big-endian (large digits first). * These functions allow transformation of hash result into and from its canonical format. * This way, hash values can be written into a file or buffer, and remain comparable across different systems and programs. */ XXH_PUBLIC_API void XXH32_canonicalFromHash(XXH32_canonical_t* dst, XXH32_hash_t hash) { XXH_STATIC_ASSERT(sizeof(XXH32_canonical_t) == sizeof(XXH32_hash_t)); if (XXH_CPU_LITTLE_ENDIAN) hash = XXH_swap32(hash); memcpy(dst, &hash, sizeof(*dst)); } XXH_PUBLIC_API void XXH64_canonicalFromHash(XXH64_canonical_t* dst, XXH64_hash_t hash) { XXH_STATIC_ASSERT(sizeof(XXH64_canonical_t) == sizeof(XXH64_hash_t)); if (XXH_CPU_LITTLE_ENDIAN) hash = XXH_swap64(hash); memcpy(dst, &hash, sizeof(*dst)); } XXH_PUBLIC_API XXH32_hash_t XXH32_hashFromCanonical(const XXH32_canonical_t* src) { return XXH_readBE32(src); } XXH_PUBLIC_API XXH64_hash_t XXH64_hashFromCanonical(const XXH64_canonical_t* src) { return XXH_readBE64(src); } borgbackup-1.1.15/src/borg/algorithms/zstd/lib/common/fse_decompress.c0000644000175000017500000002343113771325506025770 0ustar useruser00000000000000/* ****************************************************************** * FSE : Finite State Entropy decoder * Copyright (c) 2013-2020, Yann Collet, Facebook, Inc. * * You can contact the author at : * - FSE source repository : https://github.com/Cyan4973/FiniteStateEntropy * - Public forum : https://groups.google.com/forum/#!forum/lz4c * * This source code is licensed under both the BSD-style license (found in the * LICENSE file in the root directory of this source tree) and the GPLv2 (found * in the COPYING file in the root directory of this source tree). * You may select, at your option, one of the above-listed licenses. 
****************************************************************** */ /* ************************************************************** * Includes ****************************************************************/ #include /* malloc, free, qsort */ #include /* memcpy, memset */ #include "bitstream.h" #include "compiler.h" #define FSE_STATIC_LINKING_ONLY #include "fse.h" #include "error_private.h" /* ************************************************************** * Error Management ****************************************************************/ #define FSE_isError ERR_isError #define FSE_STATIC_ASSERT(c) DEBUG_STATIC_ASSERT(c) /* use only *after* variable declarations */ /* ************************************************************** * Templates ****************************************************************/ /* designed to be included for type-specific functions (template emulation in C) Objective is to write these functions only once, for improved maintenance */ /* safety checks */ #ifndef FSE_FUNCTION_EXTENSION # error "FSE_FUNCTION_EXTENSION must be defined" #endif #ifndef FSE_FUNCTION_TYPE # error "FSE_FUNCTION_TYPE must be defined" #endif /* Function names */ #define FSE_CAT(X,Y) X##Y #define FSE_FUNCTION_NAME(X,Y) FSE_CAT(X,Y) #define FSE_TYPE_NAME(X,Y) FSE_CAT(X,Y) /* Function templates */ FSE_DTable* FSE_createDTable (unsigned tableLog) { if (tableLog > FSE_TABLELOG_ABSOLUTE_MAX) tableLog = FSE_TABLELOG_ABSOLUTE_MAX; return (FSE_DTable*)malloc( FSE_DTABLE_SIZE_U32(tableLog) * sizeof (U32) ); } void FSE_freeDTable (FSE_DTable* dt) { free(dt); } size_t FSE_buildDTable(FSE_DTable* dt, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog) { void* const tdPtr = dt+1; /* because *dt is unsigned, 32-bits aligned on 32-bits */ FSE_DECODE_TYPE* const tableDecode = (FSE_DECODE_TYPE*) (tdPtr); U16 symbolNext[FSE_MAX_SYMBOL_VALUE+1]; U32 const maxSV1 = maxSymbolValue + 1; U32 const tableSize = 1 << tableLog; U32 highThreshold = tableSize-1; /* Sanity Checks */ if (maxSymbolValue > FSE_MAX_SYMBOL_VALUE) return ERROR(maxSymbolValue_tooLarge); if (tableLog > FSE_MAX_TABLELOG) return ERROR(tableLog_tooLarge); /* Init, lay down lowprob symbols */ { FSE_DTableHeader DTableH; DTableH.tableLog = (U16)tableLog; DTableH.fastMode = 1; { S16 const largeLimit= (S16)(1 << (tableLog-1)); U32 s; for (s=0; s= largeLimit) DTableH.fastMode=0; symbolNext[s] = normalizedCounter[s]; } } } memcpy(dt, &DTableH, sizeof(DTableH)); } /* Spread symbols */ { U32 const tableMask = tableSize-1; U32 const step = FSE_TABLESTEP(tableSize); U32 s, position = 0; for (s=0; s highThreshold) position = (position + step) & tableMask; /* lowprob area */ } } if (position!=0) return ERROR(GENERIC); /* position must reach all cells once, otherwise normalizedCounter is incorrect */ } /* Build Decoding table */ { U32 u; for (u=0; utableLog = 0; DTableH->fastMode = 0; cell->newState = 0; cell->symbol = symbolValue; cell->nbBits = 0; return 0; } size_t FSE_buildDTable_raw (FSE_DTable* dt, unsigned nbBits) { void* ptr = dt; FSE_DTableHeader* const DTableH = (FSE_DTableHeader*)ptr; void* dPtr = dt + 1; FSE_decode_t* const dinfo = (FSE_decode_t*)dPtr; const unsigned tableSize = 1 << nbBits; const unsigned tableMask = tableSize - 1; const unsigned maxSV1 = tableMask+1; unsigned s; /* Sanity checks */ if (nbBits < 1) return ERROR(GENERIC); /* min size */ /* Build Decoding Table */ DTableH->tableLog = (U16)nbBits; DTableH->fastMode = 1; for (s=0; s sizeof(bitD.bitContainer)*8) /* This test must be static */ 
BIT_reloadDStream(&bitD); op[1] = FSE_GETSYMBOL(&state2); if (FSE_MAX_TABLELOG*4+7 > sizeof(bitD.bitContainer)*8) /* This test must be static */ { if (BIT_reloadDStream(&bitD) > BIT_DStream_unfinished) { op+=2; break; } } op[2] = FSE_GETSYMBOL(&state1); if (FSE_MAX_TABLELOG*2+7 > sizeof(bitD.bitContainer)*8) /* This test must be static */ BIT_reloadDStream(&bitD); op[3] = FSE_GETSYMBOL(&state2); } /* tail */ /* note : BIT_reloadDStream(&bitD) >= FSE_DStream_partiallyFilled; Ends at exactly BIT_DStream_completed */ while (1) { if (op>(omax-2)) return ERROR(dstSize_tooSmall); *op++ = FSE_GETSYMBOL(&state1); if (BIT_reloadDStream(&bitD)==BIT_DStream_overflow) { *op++ = FSE_GETSYMBOL(&state2); break; } if (op>(omax-2)) return ERROR(dstSize_tooSmall); *op++ = FSE_GETSYMBOL(&state2); if (BIT_reloadDStream(&bitD)==BIT_DStream_overflow) { *op++ = FSE_GETSYMBOL(&state1); break; } } return op-ostart; } size_t FSE_decompress_usingDTable(void* dst, size_t originalSize, const void* cSrc, size_t cSrcSize, const FSE_DTable* dt) { const void* ptr = dt; const FSE_DTableHeader* DTableH = (const FSE_DTableHeader*)ptr; const U32 fastMode = DTableH->fastMode; /* select fast mode (static) */ if (fastMode) return FSE_decompress_usingDTable_generic(dst, originalSize, cSrc, cSrcSize, dt, 1); return FSE_decompress_usingDTable_generic(dst, originalSize, cSrc, cSrcSize, dt, 0); } size_t FSE_decompress_wksp(void* dst, size_t dstCapacity, const void* cSrc, size_t cSrcSize, FSE_DTable* workSpace, unsigned maxLog) { const BYTE* const istart = (const BYTE*)cSrc; const BYTE* ip = istart; short counting[FSE_MAX_SYMBOL_VALUE+1]; unsigned tableLog; unsigned maxSymbolValue = FSE_MAX_SYMBOL_VALUE; /* normal FSE decoding mode */ size_t const NCountLength = FSE_readNCount (counting, &maxSymbolValue, &tableLog, istart, cSrcSize); if (FSE_isError(NCountLength)) return NCountLength; /* if (NCountLength >= cSrcSize) return ERROR(srcSize_wrong); */ /* too small input size; supposed to be already checked in NCountLength, only remaining case : NCountLength==cSrcSize */ if (tableLog > maxLog) return ERROR(tableLog_tooLarge); ip += NCountLength; cSrcSize -= NCountLength; CHECK_F( FSE_buildDTable (workSpace, counting, maxSymbolValue, tableLog) ); return FSE_decompress_usingDTable (dst, dstCapacity, ip, cSrcSize, workSpace); /* always return, even if it is an error code */ } typedef FSE_DTable DTable_max_t[FSE_DTABLE_SIZE_U32(FSE_MAX_TABLELOG)]; size_t FSE_decompress(void* dst, size_t dstCapacity, const void* cSrc, size_t cSrcSize) { DTable_max_t dt; /* Static analyzer seems unable to understand this table will be properly initialized later */ return FSE_decompress_wksp(dst, dstCapacity, cSrc, cSrcSize, dt, FSE_MAX_TABLELOG); } #endif /* FSE_COMMONDEFS_ONLY */ borgbackup-1.1.15/src/borg/algorithms/zstd/lib/common/zstd_common.c0000644000175000017500000000515613771325506025327 0ustar useruser00000000000000/* * Copyright (c) 2016-2020, Yann Collet, Facebook, Inc. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the * LICENSE file in the root directory of this source tree) and the GPLv2 (found * in the COPYING file in the root directory of this source tree). * You may select, at your option, one of the above-listed licenses. 
*/ /*-************************************* * Dependencies ***************************************/ #include /* malloc, calloc, free */ #include /* memset */ #include "error_private.h" #include "zstd_internal.h" /*-**************************************** * Version ******************************************/ unsigned ZSTD_versionNumber(void) { return ZSTD_VERSION_NUMBER; } const char* ZSTD_versionString(void) { return ZSTD_VERSION_STRING; } /*-**************************************** * ZSTD Error Management ******************************************/ #undef ZSTD_isError /* defined within zstd_internal.h */ /*! ZSTD_isError() : * tells if a return value is an error code * symbol is required for external callers */ unsigned ZSTD_isError(size_t code) { return ERR_isError(code); } /*! ZSTD_getErrorName() : * provides error code string from function result (useful for debugging) */ const char* ZSTD_getErrorName(size_t code) { return ERR_getErrorName(code); } /*! ZSTD_getError() : * convert a `size_t` function result into a proper ZSTD_errorCode enum */ ZSTD_ErrorCode ZSTD_getErrorCode(size_t code) { return ERR_getErrorCode(code); } /*! ZSTD_getErrorString() : * provides error code string from enum */ const char* ZSTD_getErrorString(ZSTD_ErrorCode code) { return ERR_getErrorString(code); } /*=************************************************************** * Custom allocator ****************************************************************/ void* ZSTD_malloc(size_t size, ZSTD_customMem customMem) { if (customMem.customAlloc) return customMem.customAlloc(customMem.opaque, size); return malloc(size); } void* ZSTD_calloc(size_t size, ZSTD_customMem customMem) { if (customMem.customAlloc) { /* calloc implemented as malloc+memset; * not as efficient as calloc, but next best guess for custom malloc */ void* const ptr = customMem.customAlloc(customMem.opaque, size); memset(ptr, 0, size); return ptr; } return calloc(1, size); } void ZSTD_free(void* ptr, ZSTD_customMem customMem) { if (ptr!=NULL) { if (customMem.customFree) customMem.customFree(customMem.opaque, ptr); else free(ptr); } } borgbackup-1.1.15/src/borg/algorithms/zstd/lib/common/huf.h0000644000175000017500000004526213771325506023564 0ustar useruser00000000000000/* ****************************************************************** * huff0 huffman codec, * part of Finite State Entropy library * Copyright (c) 2013-2020, Yann Collet, Facebook, Inc. * * You can contact the author at : * - Source repository : https://github.com/Cyan4973/FiniteStateEntropy * * This source code is licensed under both the BSD-style license (found in the * LICENSE file in the root directory of this source tree) and the GPLv2 (found * in the COPYING file in the root directory of this source tree). * You may select, at your option, one of the above-listed licenses. ****************************************************************** */ #if defined (__cplusplus) extern "C" { #endif #ifndef HUF_H_298734234 #define HUF_H_298734234 /* *** Dependencies *** */ #include /* size_t */ /* *** library symbols visibility *** */ /* Note : when linking with -fvisibility=hidden on gcc, or by default on Visual, * HUF symbols remain "private" (internal symbols for library only). 
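/*
 * Illustration added for this document (not part of upstream zstd): a minimal
 * sketch of how the ZSTD_customMem hooks exercised by ZSTD_malloc()/ZSTD_free()
 * above are wired up. The helper names (counting_alloc, counting_free,
 * custom_alloc_demo) and the byte-counting logic are hypothetical; the field
 * order {customAlloc, customFree, opaque} is assumed to match the usage above,
 * and ZSTD_STATIC_LINKING_ONLY is assumed to expose the type (as pool.h does).
 * A real application would normally hand such a struct to the *_advanced()
 * creation functions rather than call these internal helpers directly.
 */
#include <stdio.h>
#include <stdlib.h>
#define ZSTD_STATIC_LINKING_ONLY   /* exposes ZSTD_customMem */
#include "zstd.h"

static void* counting_alloc(void* opaque, size_t size)
{
    *(size_t*)opaque += size;               /* track how many bytes were requested */
    return malloc(size);
}

static void counting_free(void* opaque, void* address)
{
    (void)opaque;
    free(address);
}

static void custom_alloc_demo(void)
{
    size_t requested = 0;
    ZSTD_customMem const cmem = { counting_alloc, counting_free, &requested };

    void* const p = ZSTD_malloc(100, cmem);  /* dispatched to counting_alloc() */
    ZSTD_free(p, cmem);                      /* dispatched to counting_free() */
    printf("custom allocator served %zu bytes\n", requested);
}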
* Set macro FSE_DLL_EXPORT to 1 if you want HUF symbols visible on DLL interface */ #if defined(FSE_DLL_EXPORT) && (FSE_DLL_EXPORT==1) && defined(__GNUC__) && (__GNUC__ >= 4) # define HUF_PUBLIC_API __attribute__ ((visibility ("default"))) #elif defined(FSE_DLL_EXPORT) && (FSE_DLL_EXPORT==1) /* Visual expected */ # define HUF_PUBLIC_API __declspec(dllexport) #elif defined(FSE_DLL_IMPORT) && (FSE_DLL_IMPORT==1) # define HUF_PUBLIC_API __declspec(dllimport) /* not required, just to generate faster code (saves a function pointer load from IAT and an indirect jump) */ #else # define HUF_PUBLIC_API #endif /* ========================== */ /* *** simple functions *** */ /* ========================== */ /** HUF_compress() : * Compress content from buffer 'src', of size 'srcSize', into buffer 'dst'. * 'dst' buffer must be already allocated. * Compression runs faster if `dstCapacity` >= HUF_compressBound(srcSize). * `srcSize` must be <= `HUF_BLOCKSIZE_MAX` == 128 KB. * @return : size of compressed data (<= `dstCapacity`). * Special values : if return == 0, srcData is not compressible => Nothing is stored within dst !!! * if HUF_isError(return), compression failed (more details using HUF_getErrorName()) */ HUF_PUBLIC_API size_t HUF_compress(void* dst, size_t dstCapacity, const void* src, size_t srcSize); /** HUF_decompress() : * Decompress HUF data from buffer 'cSrc', of size 'cSrcSize', * into already allocated buffer 'dst', of minimum size 'dstSize'. * `originalSize` : **must** be the ***exact*** size of original (uncompressed) data. * Note : in contrast with FSE, HUF_decompress can regenerate * RLE (cSrcSize==1) and uncompressed (cSrcSize==dstSize) data, * because it knows size to regenerate (originalSize). * @return : size of regenerated data (== originalSize), * or an error code, which can be tested using HUF_isError() */ HUF_PUBLIC_API size_t HUF_decompress(void* dst, size_t originalSize, const void* cSrc, size_t cSrcSize); /* *** Tool functions *** */ #define HUF_BLOCKSIZE_MAX (128 * 1024) /**< maximum input size for a single block compressed with HUF_compress */ HUF_PUBLIC_API size_t HUF_compressBound(size_t size); /**< maximum compressed size (worst case) */ /* Error Management */ HUF_PUBLIC_API unsigned HUF_isError(size_t code); /**< tells if a return value is an error code */ HUF_PUBLIC_API const char* HUF_getErrorName(size_t code); /**< provides error code string (useful for debugging) */ /* *** Advanced function *** */ /** HUF_compress2() : * Same as HUF_compress(), but offers control over `maxSymbolValue` and `tableLog`. * `maxSymbolValue` must be <= HUF_SYMBOLVALUE_MAX . * `tableLog` must be `<= HUF_TABLELOG_MAX` . */ HUF_PUBLIC_API size_t HUF_compress2 (void* dst, size_t dstCapacity, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog); /** HUF_compress4X_wksp() : * Same as HUF_compress2(), but uses externally allocated `workSpace`. * `workspace` must have minimum alignment of 4, and be at least as large as HUF_WORKSPACE_SIZE */ #define HUF_WORKSPACE_SIZE ((6 << 10) + 256) #define HUF_WORKSPACE_SIZE_U32 (HUF_WORKSPACE_SIZE / sizeof(U32)) HUF_PUBLIC_API size_t HUF_compress4X_wksp (void* dst, size_t dstCapacity, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog, void* workSpace, size_t wkspSize); #endif /* HUF_H_298734234 */ /* ****************************************************************** * WARNING !! 
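/*
 * Illustration added for this document (not part of upstream huf.h): a round trip
 * through the simple HUF API declared above. The function name and buffer layout
 * are hypothetical; the caller supplies a scratch buffer of at least
 * HUF_compressBound(srcSize) bytes and a regeneration buffer of srcSize bytes,
 * and must remember srcSize, since HUF_decompress() needs the exact original size.
 */
#include <stdio.h>
#include <string.h>
#include "huf.h"

static int huf_roundtrip_demo(const void* src, size_t srcSize,     /* srcSize <= HUF_BLOCKSIZE_MAX */
                              void* scratch, size_t scratchSize,   /* >= HUF_compressBound(srcSize) */
                              void* regen)                         /* >= srcSize bytes */
{
    size_t const cSize = HUF_compress(scratch, scratchSize, src, srcSize);
    if (HUF_isError(cSize)) { printf("compress: %s\n", HUF_getErrorName(cSize)); return -1; }
    if (cSize == 0) return 1;   /* not compressible: the caller must store src verbatim */

    {   size_t const dSize = HUF_decompress(regen, srcSize, scratch, cSize);
        if (HUF_isError(dSize)) { printf("decompress: %s\n", HUF_getErrorName(dSize)); return -1; }
        return memcmp(src, regen, srcSize) != 0 ? -1 : 0;   /* 0 on a successful round trip */
    }
}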
* The following section contains advanced and experimental definitions * which shall never be used in the context of a dynamic library, * because they are not guaranteed to remain stable in the future. * Only consider them in association with static linking. * *****************************************************************/ #if defined(HUF_STATIC_LINKING_ONLY) && !defined(HUF_H_HUF_STATIC_LINKING_ONLY) #define HUF_H_HUF_STATIC_LINKING_ONLY /* *** Dependencies *** */ #include "mem.h" /* U32 */ /* *** Constants *** */ #define HUF_TABLELOG_MAX 12 /* max runtime value of tableLog (due to static allocation); can be modified up to HUF_ABSOLUTEMAX_TABLELOG */ #define HUF_TABLELOG_DEFAULT 11 /* default tableLog value when none specified */ #define HUF_SYMBOLVALUE_MAX 255 #define HUF_TABLELOG_ABSOLUTEMAX 15 /* absolute limit of HUF_MAX_TABLELOG. Beyond that value, code does not work */ #if (HUF_TABLELOG_MAX > HUF_TABLELOG_ABSOLUTEMAX) # error "HUF_TABLELOG_MAX is too large !" #endif /* **************************************** * Static allocation ******************************************/ /* HUF buffer bounds */ #define HUF_CTABLEBOUND 129 #define HUF_BLOCKBOUND(size) (size + (size>>8) + 8) /* only true when incompressible is pre-filtered with fast heuristic */ #define HUF_COMPRESSBOUND(size) (HUF_CTABLEBOUND + HUF_BLOCKBOUND(size)) /* Macro version, useful for static allocation */ /* static allocation of HUF's Compression Table */ #define HUF_CTABLE_SIZE_U32(maxSymbolValue) ((maxSymbolValue)+1) /* Use tables of U32, for proper alignment */ #define HUF_CTABLE_SIZE(maxSymbolValue) (HUF_CTABLE_SIZE_U32(maxSymbolValue) * sizeof(U32)) #define HUF_CREATE_STATIC_CTABLE(name, maxSymbolValue) \ U32 name##hb[HUF_CTABLE_SIZE_U32(maxSymbolValue)]; \ void* name##hv = &(name##hb); \ HUF_CElt* name = (HUF_CElt*)(name##hv) /* no final ; */ /* static allocation of HUF's DTable */ typedef U32 HUF_DTable; #define HUF_DTABLE_SIZE(maxTableLog) (1 + (1<<(maxTableLog))) #define HUF_CREATE_STATIC_DTABLEX1(DTable, maxTableLog) \ HUF_DTable DTable[HUF_DTABLE_SIZE((maxTableLog)-1)] = { ((U32)((maxTableLog)-1) * 0x01000001) } #define HUF_CREATE_STATIC_DTABLEX2(DTable, maxTableLog) \ HUF_DTable DTable[HUF_DTABLE_SIZE(maxTableLog)] = { ((U32)(maxTableLog) * 0x01000001) } /* **************************************** * Advanced decompression functions ******************************************/ size_t HUF_decompress4X1 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /**< single-symbol decoder */ #ifndef HUF_FORCE_DECOMPRESS_X1 size_t HUF_decompress4X2 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /**< double-symbols decoder */ #endif size_t HUF_decompress4X_DCtx (HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /**< decodes RLE and uncompressed */ size_t HUF_decompress4X_hufOnly(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /**< considers RLE and uncompressed as errors */ size_t HUF_decompress4X_hufOnly_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize); /**< considers RLE and uncompressed as errors */ size_t HUF_decompress4X1_DCtx(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /**< single-symbol decoder */ size_t HUF_decompress4X1_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize); /**< single-symbol decoder */ #ifndef HUF_FORCE_DECOMPRESS_X1 size_t 
HUF_decompress4X2_DCtx(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /**< double-symbols decoder */ size_t HUF_decompress4X2_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize); /**< double-symbols decoder */ #endif /* **************************************** * HUF detailed API * ****************************************/ /*! HUF_compress() does the following: * 1. count symbol occurrence from source[] into table count[] using FSE_count() (exposed within "fse.h") * 2. (optional) refine tableLog using HUF_optimalTableLog() * 3. build Huffman table from count using HUF_buildCTable() * 4. save Huffman table to memory buffer using HUF_writeCTable() * 5. encode the data stream using HUF_compress4X_usingCTable() * * The following API allows targeting specific sub-functions for advanced tasks. * For example, it's possible to compress several blocks using the same 'CTable', * or to save and regenerate 'CTable' using external methods. */ unsigned HUF_optimalTableLog(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue); typedef struct HUF_CElt_s HUF_CElt; /* incomplete type */ size_t HUF_buildCTable (HUF_CElt* CTable, const unsigned* count, unsigned maxSymbolValue, unsigned maxNbBits); /* @return : maxNbBits; CTable and count can overlap. In which case, CTable will overwrite count content */ size_t HUF_writeCTable (void* dst, size_t maxDstSize, const HUF_CElt* CTable, unsigned maxSymbolValue, unsigned huffLog); size_t HUF_compress4X_usingCTable(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable); size_t HUF_estimateCompressedSize(const HUF_CElt* CTable, const unsigned* count, unsigned maxSymbolValue); int HUF_validateCTable(const HUF_CElt* CTable, const unsigned* count, unsigned maxSymbolValue); typedef enum { HUF_repeat_none, /**< Cannot use the previous table */ HUF_repeat_check, /**< Can use the previous table but it must be checked. Note : The previous table must have been constructed by HUF_compress{1, 4}X_repeat */ HUF_repeat_valid /**< Can use the previous table and it is assumed to be valid */ } HUF_repeat; /** HUF_compress4X_repeat() : * Same as HUF_compress4X_wksp(), but considers using hufTable if *repeat != HUF_repeat_none. * If it uses hufTable it does not modify hufTable or repeat. * If it doesn't, it sets *repeat = HUF_repeat_none, and it sets hufTable to the table used. * If preferRepeat then the old table will always be used if valid. */ size_t HUF_compress4X_repeat(void* dst, size_t dstSize, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog, void* workSpace, size_t wkspSize, /**< `workSpace` must be aligned on 4-bytes boundaries, `wkspSize` must be >= HUF_WORKSPACE_SIZE */ HUF_CElt* hufTable, HUF_repeat* repeat, int preferRepeat, int bmi2); /** HUF_buildCTable_wksp() : * Same as HUF_buildCTable(), but using externally allocated scratch buffer. * `workSpace` must be aligned on 4-bytes boundaries, and its size must be >= HUF_CTABLE_WORKSPACE_SIZE. */ #define HUF_CTABLE_WORKSPACE_SIZE_U32 (2*HUF_SYMBOLVALUE_MAX +1 +1) #define HUF_CTABLE_WORKSPACE_SIZE (HUF_CTABLE_WORKSPACE_SIZE_U32 * sizeof(unsigned)) size_t HUF_buildCTable_wksp (HUF_CElt* tree, const unsigned* count, U32 maxSymbolValue, U32 maxNbBits, void* workSpace, size_t wkspSize); /*! HUF_readStats() : * Read compact Huffman tree, saved by HUF_writeCTable(). * `huffWeight` is destination buffer. * @return : size read from `src` , or an error Code . 
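/*
 * Illustration added for this document (not part of upstream huf.h): a sketch of
 * the five-step pipeline described above, compressing one block with an explicit
 * CTable. The byte-counting loop merely stands in for FSE_count()/the hist.h
 * counters so the sketch stays self-contained, the function name is hypothetical,
 * and HUF_STATIC_LINKING_ONLY is assumed so that HUF_CREATE_STATIC_CTABLE,
 * HUF_TABLELOG_DEFAULT and HUF_SYMBOLVALUE_MAX are visible.
 */
#define HUF_STATIC_LINKING_ONLY
#include "huf.h"

static size_t huf_compress_with_ctable(void* dst, size_t dstCapacity,
                                       const void* src, size_t srcSize)
{
    const unsigned char* const bytes = (const unsigned char*)src;
    unsigned count[HUF_SYMBOLVALUE_MAX + 1] = { 0 };
    unsigned maxSymbolValue = HUF_SYMBOLVALUE_MAX;
    unsigned tableLog;
    unsigned char* const op = (unsigned char*)dst;
    size_t i, maxNbBits, hSize, cSize;
    HUF_CREATE_STATIC_CTABLE(ctable, HUF_SYMBOLVALUE_MAX);

    /* 1. count symbol occurrences (stand-in for FSE_count()) */
    for (i = 0; i < srcSize; i++) count[bytes[i]]++;
    while (maxSymbolValue && count[maxSymbolValue] == 0) maxSymbolValue--;

    /* 2. pick a table log, 3. build the Huffman table */
    tableLog = HUF_optimalTableLog(HUF_TABLELOG_DEFAULT, srcSize, maxSymbolValue);
    maxNbBits = HUF_buildCTable(ctable, count, maxSymbolValue, tableLog);
    if (HUF_isError(maxNbBits)) return maxNbBits;

    /* 4. serialize the table, 5. encode the payload with it */
    hSize = HUF_writeCTable(op, dstCapacity, ctable, maxSymbolValue, (unsigned)maxNbBits);
    if (HUF_isError(hSize)) return hSize;
    cSize = HUF_compress4X_usingCTable(op + hSize, dstCapacity - hSize, src, srcSize, ctable);
    if (HUF_isError(cSize) || cSize == 0) return cSize;
    return hSize + cSize;
}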
* Note : Needed by HUF_readCTable() and HUF_readDTableXn() . */ size_t HUF_readStats(BYTE* huffWeight, size_t hwSize, U32* rankStats, U32* nbSymbolsPtr, U32* tableLogPtr, const void* src, size_t srcSize); /** HUF_readCTable() : * Loading a CTable saved with HUF_writeCTable() */ size_t HUF_readCTable (HUF_CElt* CTable, unsigned* maxSymbolValuePtr, const void* src, size_t srcSize, unsigned *hasZeroWeights); /** HUF_getNbBits() : * Read nbBits from CTable symbolTable, for symbol `symbolValue` presumed <= HUF_SYMBOLVALUE_MAX * Note 1 : is not inlined, as HUF_CElt definition is private * Note 2 : const void* used, so that it can provide a statically allocated table as argument (which uses type U32) */ U32 HUF_getNbBits(const void* symbolTable, U32 symbolValue); /* * HUF_decompress() does the following: * 1. select the decompression algorithm (X1, X2) based on pre-computed heuristics * 2. build Huffman table from save, using HUF_readDTableX?() * 3. decode 1 or 4 segments in parallel using HUF_decompress?X?_usingDTable() */ /** HUF_selectDecoder() : * Tells which decoder is likely to decode faster, * based on a set of pre-computed metrics. * @return : 0==HUF_decompress4X1, 1==HUF_decompress4X2 . * Assumption : 0 < dstSize <= 128 KB */ U32 HUF_selectDecoder (size_t dstSize, size_t cSrcSize); /** * The minimum workspace size for the `workSpace` used in * HUF_readDTableX1_wksp() and HUF_readDTableX2_wksp(). * * The space used depends on HUF_TABLELOG_MAX, ranging from ~1500 bytes when * HUF_TABLE_LOG_MAX=12 to ~1850 bytes when HUF_TABLE_LOG_MAX=15. * Buffer overflow errors may potentially occur if code modifications result in * a required workspace size greater than that specified in the following * macro. */ #define HUF_DECOMPRESS_WORKSPACE_SIZE (2 << 10) #define HUF_DECOMPRESS_WORKSPACE_SIZE_U32 (HUF_DECOMPRESS_WORKSPACE_SIZE / sizeof(U32)) #ifndef HUF_FORCE_DECOMPRESS_X2 size_t HUF_readDTableX1 (HUF_DTable* DTable, const void* src, size_t srcSize); size_t HUF_readDTableX1_wksp (HUF_DTable* DTable, const void* src, size_t srcSize, void* workSpace, size_t wkspSize); #endif #ifndef HUF_FORCE_DECOMPRESS_X1 size_t HUF_readDTableX2 (HUF_DTable* DTable, const void* src, size_t srcSize); size_t HUF_readDTableX2_wksp (HUF_DTable* DTable, const void* src, size_t srcSize, void* workSpace, size_t wkspSize); #endif size_t HUF_decompress4X_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable); #ifndef HUF_FORCE_DECOMPRESS_X2 size_t HUF_decompress4X1_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable); #endif #ifndef HUF_FORCE_DECOMPRESS_X1 size_t HUF_decompress4X2_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable); #endif /* ====================== */ /* single stream variants */ /* ====================== */ size_t HUF_compress1X (void* dst, size_t dstSize, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog); size_t HUF_compress1X_wksp (void* dst, size_t dstSize, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog, void* workSpace, size_t wkspSize); /**< `workSpace` must be a table of at least HUF_WORKSPACE_SIZE_U32 unsigned */ size_t HUF_compress1X_usingCTable(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable); /** HUF_compress1X_repeat() : * Same as HUF_compress1X_wksp(), but considers using hufTable if *repeat != HUF_repeat_none. 
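/*
 * Illustration added for this document (not part of upstream huf.h): the three
 * decompression steps listed above, done by hand. HUF_selectDecoder() would
 * normally choose between the X1 and X2 decoders for speed; this sketch always
 * builds the double-symbol table, which can decode any stream, and therefore
 * assumes HUF_FORCE_DECOMPRESS_X1 is not defined and HUF_STATIC_LINKING_ONLY is
 * set so the static-table macros are available. The function name is
 * hypothetical; dstSize must be the exact regenerated size.
 */
#define HUF_STATIC_LINKING_ONLY
#include "huf.h"

static size_t huf_decompress_block(void* dst, size_t dstSize,
                                   const void* cSrc, size_t cSrcSize)
{
    HUF_CREATE_STATIC_DTABLEX2(dtable, HUF_TABLELOG_MAX);
    U32 workSpace[HUF_DECOMPRESS_WORKSPACE_SIZE_U32];

    /* step 2: rebuild the decoding table from the serialized Huffman header */
    size_t const hSize = HUF_readDTableX2_wksp(dtable, cSrc, cSrcSize,
                                               workSpace, sizeof(workSpace));
    if (HUF_isError(hSize)) return hSize;

    /* step 3: decode the four interleaved segments with the table just built */
    return HUF_decompress4X_usingDTable(dst, dstSize,
                                        (const char*)cSrc + hSize, cSrcSize - hSize,
                                        dtable);
}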
* If it uses hufTable it does not modify hufTable or repeat. * If it doesn't, it sets *repeat = HUF_repeat_none, and it sets hufTable to the table used. * If preferRepeat then the old table will always be used if valid. */ size_t HUF_compress1X_repeat(void* dst, size_t dstSize, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog, void* workSpace, size_t wkspSize, /**< `workSpace` must be aligned on 4-bytes boundaries, `wkspSize` must be >= HUF_WORKSPACE_SIZE */ HUF_CElt* hufTable, HUF_repeat* repeat, int preferRepeat, int bmi2); size_t HUF_decompress1X1 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /* single-symbol decoder */ #ifndef HUF_FORCE_DECOMPRESS_X1 size_t HUF_decompress1X2 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /* double-symbol decoder */ #endif size_t HUF_decompress1X_DCtx (HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); size_t HUF_decompress1X_DCtx_wksp (HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize); #ifndef HUF_FORCE_DECOMPRESS_X2 size_t HUF_decompress1X1_DCtx(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /**< single-symbol decoder */ size_t HUF_decompress1X1_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize); /**< single-symbol decoder */ #endif #ifndef HUF_FORCE_DECOMPRESS_X1 size_t HUF_decompress1X2_DCtx(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /**< double-symbols decoder */ size_t HUF_decompress1X2_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize); /**< double-symbols decoder */ #endif size_t HUF_decompress1X_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable); /**< automatic selection of sing or double symbol decoder, based on DTable */ #ifndef HUF_FORCE_DECOMPRESS_X2 size_t HUF_decompress1X1_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable); #endif #ifndef HUF_FORCE_DECOMPRESS_X1 size_t HUF_decompress1X2_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable); #endif /* BMI2 variants. * If the CPU has BMI2 support, pass bmi2=1, otherwise pass bmi2=0. */ size_t HUF_decompress1X_usingDTable_bmi2(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable, int bmi2); #ifndef HUF_FORCE_DECOMPRESS_X2 size_t HUF_decompress1X1_DCtx_wksp_bmi2(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize, int bmi2); #endif size_t HUF_decompress4X_usingDTable_bmi2(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable, int bmi2); size_t HUF_decompress4X_hufOnly_wksp_bmi2(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize, int bmi2); #endif /* HUF_STATIC_LINKING_ONLY */ #if defined (__cplusplus) } #endif borgbackup-1.1.15/src/borg/algorithms/zstd/lib/common/xxhash.h0000644000175000017500000002670613771325506024307 0ustar useruser00000000000000/* * xxHash - Extremely Fast Hash algorithm * Header File * Copyright (c) 2012-2020, Yann Collet, Facebook, Inc. 
* * You can contact the author at : * - xxHash source repository : https://github.com/Cyan4973/xxHash * * This source code is licensed under both the BSD-style license (found in the * LICENSE file in the root directory of this source tree) and the GPLv2 (found * in the COPYING file in the root directory of this source tree). * You may select, at your option, one of the above-listed licenses. */ /* Notice extracted from xxHash homepage : xxHash is an extremely fast Hash algorithm, running at RAM speed limits. It also successfully passes all tests from the SMHasher suite. Comparison (single thread, Windows Seven 32 bits, using SMHasher on a Core 2 Duo @3GHz) Name Speed Q.Score Author xxHash 5.4 GB/s 10 CrapWow 3.2 GB/s 2 Andrew MumurHash 3a 2.7 GB/s 10 Austin Appleby SpookyHash 2.0 GB/s 10 Bob Jenkins SBox 1.4 GB/s 9 Bret Mulvey Lookup3 1.2 GB/s 9 Bob Jenkins SuperFastHash 1.2 GB/s 1 Paul Hsieh CityHash64 1.05 GB/s 10 Pike & Alakuijala FNV 0.55 GB/s 5 Fowler, Noll, Vo CRC32 0.43 GB/s 9 MD5-32 0.33 GB/s 10 Ronald L. Rivest SHA1-32 0.28 GB/s 10 Q.Score is a measure of quality of the hash function. It depends on successfully passing SMHasher test set. 10 is a perfect score. A 64-bits version, named XXH64, is available since r35. It offers much better speed, but for 64-bits applications only. Name Speed on 64 bits Speed on 32 bits XXH64 13.8 GB/s 1.9 GB/s XXH32 6.8 GB/s 6.0 GB/s */ #if defined (__cplusplus) extern "C" { #endif #ifndef XXHASH_H_5627135585666179 #define XXHASH_H_5627135585666179 1 /* **************************** * Definitions ******************************/ #include /* size_t */ typedef enum { XXH_OK=0, XXH_ERROR } XXH_errorcode; /* **************************** * API modifier ******************************/ /** XXH_PRIVATE_API * This is useful if you want to include xxhash functions in `static` mode * in order to inline them, and remove their symbol from the public list. * Methodology : * #define XXH_PRIVATE_API * #include "xxhash.h" * `xxhash.c` is automatically included. * It's not useful to compile and link it as a separate module anymore. */ #ifdef XXH_PRIVATE_API # ifndef XXH_STATIC_LINKING_ONLY # define XXH_STATIC_LINKING_ONLY # endif # if defined(__GNUC__) # define XXH_PUBLIC_API static __inline __attribute__((unused)) # elif defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) # define XXH_PUBLIC_API static inline # elif defined(_MSC_VER) # define XXH_PUBLIC_API static __inline # else # define XXH_PUBLIC_API static /* this version may generate warnings for unused static functions; disable the relevant warning */ # endif #else # define XXH_PUBLIC_API /* do nothing */ #endif /* XXH_PRIVATE_API */ /*!XXH_NAMESPACE, aka Namespace Emulation : If you want to include _and expose_ xxHash functions from within your own library, but also want to avoid symbol collisions with another library which also includes xxHash, you can use XXH_NAMESPACE, to automatically prefix any public symbol from xxhash library with the value of XXH_NAMESPACE (so avoid to keep it NULL and avoid numeric values). Note that no change is required within the calling program as long as it includes `xxhash.h` : regular symbol name will be automatically translated by this header. 
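   For instance (an illustration added for this document, not part of the upstream
   header), a project embedding xxHash privately could compile xxhash.c and its own
   sources with a placeholder prefix such as MYLIB_:

       #define XXH_NAMESPACE MYLIB_
       #include "xxhash.h"

       XXH64_hash_t h = XXH64(buf, len, 0);    <- link-time symbol is MYLIB_XXH64

   The source keeps calling XXH64(); only the exported symbol name changes, so two
   libraries can each carry their own copy of xxHash without collisions.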
*/ #ifdef XXH_NAMESPACE # define XXH_CAT(A,B) A##B # define XXH_NAME2(A,B) XXH_CAT(A,B) # define XXH32 XXH_NAME2(XXH_NAMESPACE, XXH32) # define XXH64 XXH_NAME2(XXH_NAMESPACE, XXH64) # define XXH_versionNumber XXH_NAME2(XXH_NAMESPACE, XXH_versionNumber) # define XXH32_createState XXH_NAME2(XXH_NAMESPACE, XXH32_createState) # define XXH64_createState XXH_NAME2(XXH_NAMESPACE, XXH64_createState) # define XXH32_freeState XXH_NAME2(XXH_NAMESPACE, XXH32_freeState) # define XXH64_freeState XXH_NAME2(XXH_NAMESPACE, XXH64_freeState) # define XXH32_reset XXH_NAME2(XXH_NAMESPACE, XXH32_reset) # define XXH64_reset XXH_NAME2(XXH_NAMESPACE, XXH64_reset) # define XXH32_update XXH_NAME2(XXH_NAMESPACE, XXH32_update) # define XXH64_update XXH_NAME2(XXH_NAMESPACE, XXH64_update) # define XXH32_digest XXH_NAME2(XXH_NAMESPACE, XXH32_digest) # define XXH64_digest XXH_NAME2(XXH_NAMESPACE, XXH64_digest) # define XXH32_copyState XXH_NAME2(XXH_NAMESPACE, XXH32_copyState) # define XXH64_copyState XXH_NAME2(XXH_NAMESPACE, XXH64_copyState) # define XXH32_canonicalFromHash XXH_NAME2(XXH_NAMESPACE, XXH32_canonicalFromHash) # define XXH64_canonicalFromHash XXH_NAME2(XXH_NAMESPACE, XXH64_canonicalFromHash) # define XXH32_hashFromCanonical XXH_NAME2(XXH_NAMESPACE, XXH32_hashFromCanonical) # define XXH64_hashFromCanonical XXH_NAME2(XXH_NAMESPACE, XXH64_hashFromCanonical) #endif /* ************************************* * Version ***************************************/ #define XXH_VERSION_MAJOR 0 #define XXH_VERSION_MINOR 6 #define XXH_VERSION_RELEASE 2 #define XXH_VERSION_NUMBER (XXH_VERSION_MAJOR *100*100 + XXH_VERSION_MINOR *100 + XXH_VERSION_RELEASE) XXH_PUBLIC_API unsigned XXH_versionNumber (void); /* **************************** * Simple Hash Functions ******************************/ typedef unsigned int XXH32_hash_t; typedef unsigned long long XXH64_hash_t; XXH_PUBLIC_API XXH32_hash_t XXH32 (const void* input, size_t length, unsigned int seed); XXH_PUBLIC_API XXH64_hash_t XXH64 (const void* input, size_t length, unsigned long long seed); /*! XXH32() : Calculate the 32-bits hash of sequence "length" bytes stored at memory address "input". The memory between input & input+length must be valid (allocated and read-accessible). "seed" can be used to alter the result predictably. Speed on Core 2 Duo @ 3 GHz (single thread, SMHasher benchmark) : 5.4 GB/s XXH64() : Calculate the 64-bits hash of sequence of length "len" stored at memory address "input". "seed" can be used to alter the result predictably. This function runs 2x faster on 64-bits systems, but slower on 32-bits systems (see benchmark). */ /* **************************** * Streaming Hash Functions ******************************/ typedef struct XXH32_state_s XXH32_state_t; /* incomplete type */ typedef struct XXH64_state_s XXH64_state_t; /* incomplete type */ /*! 
State allocation, compatible with dynamic libraries */ XXH_PUBLIC_API XXH32_state_t* XXH32_createState(void); XXH_PUBLIC_API XXH_errorcode XXH32_freeState(XXH32_state_t* statePtr); XXH_PUBLIC_API XXH64_state_t* XXH64_createState(void); XXH_PUBLIC_API XXH_errorcode XXH64_freeState(XXH64_state_t* statePtr); /* hash streaming */ XXH_PUBLIC_API XXH_errorcode XXH32_reset (XXH32_state_t* statePtr, unsigned int seed); XXH_PUBLIC_API XXH_errorcode XXH32_update (XXH32_state_t* statePtr, const void* input, size_t length); XXH_PUBLIC_API XXH32_hash_t XXH32_digest (const XXH32_state_t* statePtr); XXH_PUBLIC_API XXH_errorcode XXH64_reset (XXH64_state_t* statePtr, unsigned long long seed); XXH_PUBLIC_API XXH_errorcode XXH64_update (XXH64_state_t* statePtr, const void* input, size_t length); XXH_PUBLIC_API XXH64_hash_t XXH64_digest (const XXH64_state_t* statePtr); /* These functions generate the xxHash of an input provided in multiple segments. Note that, for small input, they are slower than single-call functions, due to state management. For small input, prefer `XXH32()` and `XXH64()` . XXH state must first be allocated, using XXH*_createState() . Start a new hash by initializing state with a seed, using XXH*_reset(). Then, feed the hash state by calling XXH*_update() as many times as necessary. Obviously, input must be allocated and read accessible. The function returns an error code, with 0 meaning OK, and any other value meaning there is an error. Finally, a hash value can be produced anytime, by using XXH*_digest(). This function returns the nn-bits hash as an int or long long. It's still possible to continue inserting input into the hash state after a digest, and generate some new hashes later on, by calling again XXH*_digest(). When done, free XXH state space if it was allocated dynamically. */ /* ************************** * Utils ****************************/ #if !(defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L)) /* ! C99 */ # define restrict /* disable restrict */ #endif XXH_PUBLIC_API void XXH32_copyState(XXH32_state_t* restrict dst_state, const XXH32_state_t* restrict src_state); XXH_PUBLIC_API void XXH64_copyState(XXH64_state_t* restrict dst_state, const XXH64_state_t* restrict src_state); /* ************************** * Canonical representation ****************************/ /* Default result type for XXH functions are primitive unsigned 32 and 64 bits. * The canonical representation uses human-readable write convention, aka big-endian (large digits first). * These functions allow transformation of hash result into and from its canonical format. * This way, hash values can be written into a file / memory, and remain comparable on different systems and programs. */ typedef struct { unsigned char digest[4]; } XXH32_canonical_t; typedef struct { unsigned char digest[8]; } XXH64_canonical_t; XXH_PUBLIC_API void XXH32_canonicalFromHash(XXH32_canonical_t* dst, XXH32_hash_t hash); XXH_PUBLIC_API void XXH64_canonicalFromHash(XXH64_canonical_t* dst, XXH64_hash_t hash); XXH_PUBLIC_API XXH32_hash_t XXH32_hashFromCanonical(const XXH32_canonical_t* src); XXH_PUBLIC_API XXH64_hash_t XXH64_hashFromCanonical(const XXH64_canonical_t* src); #endif /* XXHASH_H_5627135585666179 */ /* ================================================================================================ This section contains definitions which are not guaranteed to remain stable. They may change in future versions, becoming incompatible with a different version of the library. They shall only be used with static linking. 
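/*
 * Illustration added for this document (not part of upstream xxHash): hashing an
 * input in several segments with the streaming API declared above, checking the
 * result against the one-shot XXH64(), and exporting it in canonical (big-endian)
 * form. The function name, chunk size and seed are arbitrary choices for the sketch.
 */
#include <assert.h>
#include "xxhash.h"

static void xxh64_streaming_demo(const void* data, size_t len)
{
    unsigned long long const seed = 0;
    XXH64_state_t* const state = XXH64_createState();   /* heap state, safe across DLL boundaries */
    const char* p = (const char*)data;
    size_t remaining = len;
    XXH64_hash_t streamed, oneshot;
    XXH64_canonical_t canonical;

    XXH64_reset(state, seed);                            /* start a new hash */
    while (remaining) {                                  /* feed arbitrary-sized segments */
        size_t const chunk = remaining > 4096 ? 4096 : remaining;
        XXH64_update(state, p, chunk);
        p += chunk; remaining -= chunk;
    }
    streamed = XXH64_digest(state);                      /* may be called at any point */
    XXH64_freeState(state);

    oneshot = XXH64(data, len, seed);                    /* single-call variant */
    assert(streamed == oneshot);

    XXH64_canonicalFromHash(&canonical, streamed);       /* byte-order independent representation */
    (void)canonical;                                     /* e.g. written to an index file */
}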
Never use these definitions in association with dynamic linking ! =================================================================================================== */ #if defined(XXH_STATIC_LINKING_ONLY) && !defined(XXH_STATIC_H_3543687687345) #define XXH_STATIC_H_3543687687345 /* These definitions are only meant to allow allocation of XXH state statically, on stack, or in a struct for example. Do not use members directly. */ struct XXH32_state_s { unsigned total_len_32; unsigned large_len; unsigned v1; unsigned v2; unsigned v3; unsigned v4; unsigned mem32[4]; /* buffer defined as U32 for alignment */ unsigned memsize; unsigned reserved; /* never read nor write, will be removed in a future version */ }; /* typedef'd to XXH32_state_t */ struct XXH64_state_s { unsigned long long total_len; unsigned long long v1; unsigned long long v2; unsigned long long v3; unsigned long long v4; unsigned long long mem64[4]; /* buffer defined as U64 for alignment */ unsigned memsize; unsigned reserved[2]; /* never read nor write, will be removed in a future version */ }; /* typedef'd to XXH64_state_t */ # ifdef XXH_PRIVATE_API # include "xxhash.c" /* include xxhash functions as `static`, for inlining */ # endif #endif /* XXH_STATIC_LINKING_ONLY && XXH_STATIC_H_3543687687345 */ #if defined (__cplusplus) } #endif borgbackup-1.1.15/src/borg/algorithms/zstd/lib/common/pool.h0000644000175000017500000000476013771325506023751 0ustar useruser00000000000000/* * Copyright (c) 2016-2020, Yann Collet, Facebook, Inc. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the * LICENSE file in the root directory of this source tree) and the GPLv2 (found * in the COPYING file in the root directory of this source tree). * You may select, at your option, one of the above-listed licenses. */ #ifndef POOL_H #define POOL_H #if defined (__cplusplus) extern "C" { #endif #include /* size_t */ #define ZSTD_STATIC_LINKING_ONLY /* ZSTD_customMem */ #include "../zstd.h" typedef struct POOL_ctx_s POOL_ctx; /*! POOL_create() : * Create a thread pool with at most `numThreads` threads. * `numThreads` must be at least 1. * The maximum number of queued jobs before blocking is `queueSize`. * @return : POOL_ctx pointer on success, else NULL. */ POOL_ctx* POOL_create(size_t numThreads, size_t queueSize); POOL_ctx* POOL_create_advanced(size_t numThreads, size_t queueSize, ZSTD_customMem customMem); /*! POOL_free() : * Free a thread pool returned by POOL_create(). */ void POOL_free(POOL_ctx* ctx); /*! POOL_resize() : * Expands or shrinks pool's number of threads. * This is more efficient than releasing + creating a new context, * since it tries to preserve and re-use existing threads. * `numThreads` must be at least 1. * @return : 0 when resize was successful, * !0 (typically 1) if there is an error. * note : only numThreads can be resized, queueSize remains unchanged. */ int POOL_resize(POOL_ctx* ctx, size_t numThreads); /*! POOL_sizeof() : * @return threadpool memory usage * note : compatible with NULL (returns 0 in this case) */ size_t POOL_sizeof(POOL_ctx* ctx); /*! POOL_function : * The function type that can be added to a thread pool. */ typedef void (*POOL_function)(void*); /*! POOL_add() : * Add the job `function(opaque)` to the thread pool. `ctx` must be valid. * Possibly blocks until there is room in the queue. * Note : The function may be executed asynchronously, * therefore, `opaque` must live until function has been completed. 
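/*
 * Illustration added for this document (not part of upstream zstd): a minimal use
 * of the thread pool declared in this header. The job function and payload are
 * hypothetical; the payload array outlives the jobs, as the POOL_add() note above
 * requires, and the sketch assumes POOL_free() waits for outstanding jobs to
 * finish (an implementation detail of pool.c).
 */
#include <stdio.h>
#include "pool.h"

static void square_job(void* opaque)                 /* matches POOL_function */
{
    int* const v = (int*)opaque;
    *v = (*v) * (*v);
}

static int pool_demo(void)
{
    int payload[8] = { 0, 1, 2, 3, 4, 5, 6, 7 };     /* must stay alive until the jobs complete */
    POOL_ctx* const pool = POOL_create(2 /* threads */, 4 /* queue slots */);
    size_t i;
    if (pool == NULL) return 1;

    for (i = 0; i < 8; i++)
        POOL_add(pool, square_job, &payload[i]);     /* may block while the queue is full */

    POOL_free(pool);
    for (i = 0; i < 8; i++) printf("%d ", payload[i]);
    printf("\n");
    return 0;
}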
*/ void POOL_add(POOL_ctx* ctx, POOL_function function, void* opaque); /*! POOL_tryAdd() : * Add the job `function(opaque)` to thread pool _if_ a worker is available. * Returns immediately even if not (does not block). * @return : 1 if successful, 0 if not. */ int POOL_tryAdd(POOL_ctx* ctx, POOL_function function, void* opaque); #if defined (__cplusplus) } #endif #endif borgbackup-1.1.15/src/borg/algorithms/zstd/lib/common/cpu.h0000644000175000017500000001057413771325506023567 0ustar useruser00000000000000/* * Copyright (c) 2018-2020, Facebook, Inc. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the * LICENSE file in the root directory of this source tree) and the GPLv2 (found * in the COPYING file in the root directory of this source tree). * You may select, at your option, one of the above-listed licenses. */ #ifndef ZSTD_COMMON_CPU_H #define ZSTD_COMMON_CPU_H /** * Implementation taken from folly/CpuId.h * https://github.com/facebook/folly/blob/master/folly/CpuId.h */ #include #include "mem.h" #ifdef _MSC_VER #include #endif typedef struct { U32 f1c; U32 f1d; U32 f7b; U32 f7c; } ZSTD_cpuid_t; MEM_STATIC ZSTD_cpuid_t ZSTD_cpuid(void) { U32 f1c = 0; U32 f1d = 0; U32 f7b = 0; U32 f7c = 0; #if defined(_MSC_VER) && (defined(_M_X64) || defined(_M_IX86)) int reg[4]; __cpuid((int*)reg, 0); { int const n = reg[0]; if (n >= 1) { __cpuid((int*)reg, 1); f1c = (U32)reg[2]; f1d = (U32)reg[3]; } if (n >= 7) { __cpuidex((int*)reg, 7, 0); f7b = (U32)reg[1]; f7c = (U32)reg[2]; } } #elif defined(__i386__) && defined(__PIC__) && !defined(__clang__) && defined(__GNUC__) /* The following block like the normal cpuid branch below, but gcc * reserves ebx for use of its pic register so we must specially * handle the save and restore to avoid clobbering the register */ U32 n; __asm__( "pushl %%ebx\n\t" "cpuid\n\t" "popl %%ebx\n\t" : "=a"(n) : "a"(0) : "ecx", "edx"); if (n >= 1) { U32 f1a; __asm__( "pushl %%ebx\n\t" "cpuid\n\t" "popl %%ebx\n\t" : "=a"(f1a), "=c"(f1c), "=d"(f1d) : "a"(1)); } if (n >= 7) { __asm__( "pushl %%ebx\n\t" "cpuid\n\t" "movl %%ebx, %%eax\n\t" "popl %%ebx" : "=a"(f7b), "=c"(f7c) : "a"(7), "c"(0) : "edx"); } #elif defined(__x86_64__) || defined(_M_X64) || defined(__i386__) U32 n; __asm__("cpuid" : "=a"(n) : "a"(0) : "ebx", "ecx", "edx"); if (n >= 1) { U32 f1a; __asm__("cpuid" : "=a"(f1a), "=c"(f1c), "=d"(f1d) : "a"(1) : "ebx"); } if (n >= 7) { U32 f7a; __asm__("cpuid" : "=a"(f7a), "=b"(f7b), "=c"(f7c) : "a"(7), "c"(0) : "edx"); } #endif { ZSTD_cpuid_t cpuid; cpuid.f1c = f1c; cpuid.f1d = f1d; cpuid.f7b = f7b; cpuid.f7c = f7c; return cpuid; } } #define X(name, r, bit) \ MEM_STATIC int ZSTD_cpuid_##name(ZSTD_cpuid_t const cpuid) { \ return ((cpuid.r) & (1U << bit)) != 0; \ } /* cpuid(1): Processor Info and Feature Bits. 
*/ #define C(name, bit) X(name, f1c, bit) C(sse3, 0) C(pclmuldq, 1) C(dtes64, 2) C(monitor, 3) C(dscpl, 4) C(vmx, 5) C(smx, 6) C(eist, 7) C(tm2, 8) C(ssse3, 9) C(cnxtid, 10) C(fma, 12) C(cx16, 13) C(xtpr, 14) C(pdcm, 15) C(pcid, 17) C(dca, 18) C(sse41, 19) C(sse42, 20) C(x2apic, 21) C(movbe, 22) C(popcnt, 23) C(tscdeadline, 24) C(aes, 25) C(xsave, 26) C(osxsave, 27) C(avx, 28) C(f16c, 29) C(rdrand, 30) #undef C #define D(name, bit) X(name, f1d, bit) D(fpu, 0) D(vme, 1) D(de, 2) D(pse, 3) D(tsc, 4) D(msr, 5) D(pae, 6) D(mce, 7) D(cx8, 8) D(apic, 9) D(sep, 11) D(mtrr, 12) D(pge, 13) D(mca, 14) D(cmov, 15) D(pat, 16) D(pse36, 17) D(psn, 18) D(clfsh, 19) D(ds, 21) D(acpi, 22) D(mmx, 23) D(fxsr, 24) D(sse, 25) D(sse2, 26) D(ss, 27) D(htt, 28) D(tm, 29) D(pbe, 31) #undef D /* cpuid(7): Extended Features. */ #define B(name, bit) X(name, f7b, bit) B(bmi1, 3) B(hle, 4) B(avx2, 5) B(smep, 7) B(bmi2, 8) B(erms, 9) B(invpcid, 10) B(rtm, 11) B(mpx, 14) B(avx512f, 16) B(avx512dq, 17) B(rdseed, 18) B(adx, 19) B(smap, 20) B(avx512ifma, 21) B(pcommit, 22) B(clflushopt, 23) B(clwb, 24) B(avx512pf, 26) B(avx512er, 27) B(avx512cd, 28) B(sha, 29) B(avx512bw, 30) B(avx512vl, 31) #undef B #define C(name, bit) X(name, f7c, bit) C(prefetchwt1, 0) C(avx512vbmi, 1) #undef C #undef X #endif /* ZSTD_COMMON_CPU_H */ borgbackup-1.1.15/src/borg/algorithms/zstd/lib/common/fse.h0000644000175000017500000007633713771325506023566 0ustar useruser00000000000000/* ****************************************************************** * FSE : Finite State Entropy codec * Public Prototypes declaration * Copyright (c) 2013-2020, Yann Collet, Facebook, Inc. * * You can contact the author at : * - Source repository : https://github.com/Cyan4973/FiniteStateEntropy * * This source code is licensed under both the BSD-style license (found in the * LICENSE file in the root directory of this source tree) and the GPLv2 (found * in the COPYING file in the root directory of this source tree). * You may select, at your option, one of the above-listed licenses. 
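/*
 * Illustration added for this document (not part of upstream zstd): how the
 * feature predicates generated above are typically consumed, for example to fill
 * the bmi2 parameters accepted by the HUF decoding entry points. The function
 * name is hypothetical.
 */
#include "cpu.h"

static int detect_bmi2(void)
{
    ZSTD_cpuid_t const cid = ZSTD_cpuid();   /* one CPUID pass; callers usually cache this */
    return ZSTD_cpuid_bmi2(cid);             /* 1 if BMI2 is available, else 0 */
}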
****************************************************************** */ #if defined (__cplusplus) extern "C" { #endif #ifndef FSE_H #define FSE_H /*-***************************************** * Dependencies ******************************************/ #include /* size_t, ptrdiff_t */ /*-***************************************** * FSE_PUBLIC_API : control library symbols visibility ******************************************/ #if defined(FSE_DLL_EXPORT) && (FSE_DLL_EXPORT==1) && defined(__GNUC__) && (__GNUC__ >= 4) # define FSE_PUBLIC_API __attribute__ ((visibility ("default"))) #elif defined(FSE_DLL_EXPORT) && (FSE_DLL_EXPORT==1) /* Visual expected */ # define FSE_PUBLIC_API __declspec(dllexport) #elif defined(FSE_DLL_IMPORT) && (FSE_DLL_IMPORT==1) # define FSE_PUBLIC_API __declspec(dllimport) /* It isn't required but allows to generate better code, saving a function pointer load from the IAT and an indirect jump.*/ #else # define FSE_PUBLIC_API #endif /*------ Version ------*/ #define FSE_VERSION_MAJOR 0 #define FSE_VERSION_MINOR 9 #define FSE_VERSION_RELEASE 0 #define FSE_LIB_VERSION FSE_VERSION_MAJOR.FSE_VERSION_MINOR.FSE_VERSION_RELEASE #define FSE_QUOTE(str) #str #define FSE_EXPAND_AND_QUOTE(str) FSE_QUOTE(str) #define FSE_VERSION_STRING FSE_EXPAND_AND_QUOTE(FSE_LIB_VERSION) #define FSE_VERSION_NUMBER (FSE_VERSION_MAJOR *100*100 + FSE_VERSION_MINOR *100 + FSE_VERSION_RELEASE) FSE_PUBLIC_API unsigned FSE_versionNumber(void); /**< library version number; to be used when checking dll version */ /*-**************************************** * FSE simple functions ******************************************/ /*! FSE_compress() : Compress content of buffer 'src', of size 'srcSize', into destination buffer 'dst'. 'dst' buffer must be already allocated. Compression runs faster is dstCapacity >= FSE_compressBound(srcSize). @return : size of compressed data (<= dstCapacity). Special values : if return == 0, srcData is not compressible => Nothing is stored within dst !!! if return == 1, srcData is a single byte symbol * srcSize times. Use RLE compression instead. if FSE_isError(return), compression failed (more details using FSE_getErrorName()) */ FSE_PUBLIC_API size_t FSE_compress(void* dst, size_t dstCapacity, const void* src, size_t srcSize); /*! FSE_decompress(): Decompress FSE data from buffer 'cSrc', of size 'cSrcSize', into already allocated destination buffer 'dst', of size 'dstCapacity'. @return : size of regenerated data (<= maxDstSize), or an error code, which can be tested using FSE_isError() . ** Important ** : FSE_decompress() does not decompress non-compressible nor RLE data !!! Why ? : making this distinction requires a header. Header management is intentionally delegated to the user layer, which can better manage special cases. */ FSE_PUBLIC_API size_t FSE_decompress(void* dst, size_t dstCapacity, const void* cSrc, size_t cSrcSize); /*-***************************************** * Tool functions ******************************************/ FSE_PUBLIC_API size_t FSE_compressBound(size_t size); /* maximum compressed size */ /* Error Management */ FSE_PUBLIC_API unsigned FSE_isError(size_t code); /* tells if a return value is an error code */ FSE_PUBLIC_API const char* FSE_getErrorName(size_t code); /* provides error code string (useful for debugging) */ /*-***************************************** * FSE advanced functions ******************************************/ /*! 
FSE_compress2() : Same as FSE_compress(), but allows the selection of 'maxSymbolValue' and 'tableLog' Both parameters can be defined as '0' to mean : use default value @return : size of compressed data Special values : if return == 0, srcData is not compressible => Nothing is stored within cSrc !!! if return == 1, srcData is a single byte symbol * srcSize times. Use RLE compression. if FSE_isError(return), it's an error code. */ FSE_PUBLIC_API size_t FSE_compress2 (void* dst, size_t dstSize, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog); /*-***************************************** * FSE detailed API ******************************************/ /*! FSE_compress() does the following: 1. count symbol occurrence from source[] into table count[] (see hist.h) 2. normalize counters so that sum(count[]) == Power_of_2 (2^tableLog) 3. save normalized counters to memory buffer using writeNCount() 4. build encoding table 'CTable' from normalized counters 5. encode the data stream using encoding table 'CTable' FSE_decompress() does the following: 1. read normalized counters with readNCount() 2. build decoding table 'DTable' from normalized counters 3. decode the data stream using decoding table 'DTable' The following API allows targeting specific sub-functions for advanced tasks. For example, it's possible to compress several blocks using the same 'CTable', or to save and provide normalized distribution using external method. */ /* *** COMPRESSION *** */ /*! FSE_optimalTableLog(): dynamically downsize 'tableLog' when conditions are met. It saves CPU time, by using smaller tables, while preserving or even improving compression ratio. @return : recommended tableLog (necessarily <= 'maxTableLog') */ FSE_PUBLIC_API unsigned FSE_optimalTableLog(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue); /*! FSE_normalizeCount(): normalize counts so that sum(count[]) == Power_of_2 (2^tableLog) 'normalizedCounter' is a table of short, of minimum size (maxSymbolValue+1). @return : tableLog, or an errorCode, which can be tested using FSE_isError() */ FSE_PUBLIC_API size_t FSE_normalizeCount(short* normalizedCounter, unsigned tableLog, const unsigned* count, size_t srcSize, unsigned maxSymbolValue); /*! FSE_NCountWriteBound(): Provides the maximum possible size of an FSE normalized table, given 'maxSymbolValue' and 'tableLog'. Typically useful for allocation purpose. */ FSE_PUBLIC_API size_t FSE_NCountWriteBound(unsigned maxSymbolValue, unsigned tableLog); /*! FSE_writeNCount(): Compactly save 'normalizedCounter' into 'buffer'. @return : size of the compressed table, or an errorCode, which can be tested using FSE_isError(). */ FSE_PUBLIC_API size_t FSE_writeNCount (void* buffer, size_t bufferSize, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog); /*! Constructor and Destructor of FSE_CTable. Note that FSE_CTable size depends on 'tableLog' and 'maxSymbolValue' */ typedef unsigned FSE_CTable; /* don't allocate that. It's only meant to be more restrictive than void* */ FSE_PUBLIC_API FSE_CTable* FSE_createCTable (unsigned maxSymbolValue, unsigned tableLog); FSE_PUBLIC_API void FSE_freeCTable (FSE_CTable* ct); /*! FSE_buildCTable(): Builds `ct`, which must be already allocated, using FSE_createCTable(). @return : 0, or an errorCode, which can be tested using FSE_isError() */ FSE_PUBLIC_API size_t FSE_buildCTable(FSE_CTable* ct, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog); /*! 
FSE_compress_usingCTable(): Compress `src` using `ct` into `dst` which must be already allocated. @return : size of compressed data (<= `dstCapacity`), or 0 if compressed data could not fit into `dst`, or an errorCode, which can be tested using FSE_isError() */ FSE_PUBLIC_API size_t FSE_compress_usingCTable (void* dst, size_t dstCapacity, const void* src, size_t srcSize, const FSE_CTable* ct); /*! Tutorial : ---------- The first step is to count all symbols. FSE_count() does this job very fast. Result will be saved into 'count', a table of unsigned int, which must be already allocated, and have 'maxSymbolValuePtr[0]+1' cells. 'src' is a table of bytes of size 'srcSize'. All values within 'src' MUST be <= maxSymbolValuePtr[0] maxSymbolValuePtr[0] will be updated, with its real value (necessarily <= original value) FSE_count() will return the number of occurrence of the most frequent symbol. This can be used to know if there is a single symbol within 'src', and to quickly evaluate its compressibility. If there is an error, the function will return an ErrorCode (which can be tested using FSE_isError()). The next step is to normalize the frequencies. FSE_normalizeCount() will ensure that sum of frequencies is == 2 ^'tableLog'. It also guarantees a minimum of 1 to any Symbol with frequency >= 1. You can use 'tableLog'==0 to mean "use default tableLog value". If you are unsure of which tableLog value to use, you can ask FSE_optimalTableLog(), which will provide the optimal valid tableLog given sourceSize, maxSymbolValue, and a user-defined maximum (0 means "default"). The result of FSE_normalizeCount() will be saved into a table, called 'normalizedCounter', which is a table of signed short. 'normalizedCounter' must be already allocated, and have at least 'maxSymbolValue+1' cells. The return value is tableLog if everything proceeded as expected. It is 0 if there is a single symbol within distribution. If there is an error (ex: invalid tableLog value), the function will return an ErrorCode (which can be tested using FSE_isError()). 'normalizedCounter' can be saved in a compact manner to a memory area using FSE_writeNCount(). 'buffer' must be already allocated. For guaranteed success, buffer size must be at least FSE_headerBound(). The result of the function is the number of bytes written into 'buffer'. If there is an error, the function will return an ErrorCode (which can be tested using FSE_isError(); ex : buffer size too small). 'normalizedCounter' can then be used to create the compression table 'CTable'. The space required by 'CTable' must be already allocated, using FSE_createCTable(). You can then use FSE_buildCTable() to fill 'CTable'. If there is an error, both functions will return an ErrorCode (which can be tested using FSE_isError()). 'CTable' can then be used to compress 'src', with FSE_compress_usingCTable(). Similar to FSE_count(), the convention is that 'src' is assumed to be a table of char of size 'srcSize' The function returns the size of compressed data (without header), necessarily <= `dstCapacity`. If it returns '0', compressed data could not fit into 'dst'. If there is an error, the function will return an ErrorCode (which can be tested using FSE_isError()). */ /* *** DECOMPRESSION *** */ /*! FSE_readNCount(): Read compactly saved 'normalizedCounter' from 'rBuffer'. @return : size read from 'rBuffer', or an errorCode, which can be tested using FSE_isError(). 
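/*
 * Illustration added for this document (not part of upstream fse.h): the detailed
 * compression path walked through in the tutorial above, for one block of bytes.
 * The counting loop stands in for FSE_count(), the function name is hypothetical,
 * and the return convention mirrors FSE_compress(): an error code, 0 when the data
 * should be stored verbatim, 1 when a single repeated symbol should be RLE-coded,
 * otherwise the number of bytes written (header + payload).
 */
#include "fse.h"

static size_t fse_compress_detailed(void* dst, size_t dstCapacity,
                                    const void* src, size_t srcSize)
{
    const unsigned char* const ip = (const unsigned char*)src;
    unsigned char* const op = (unsigned char*)dst;
    unsigned count[256] = { 0 };
    short norm[256];
    unsigned maxSymbolValue = 255;
    unsigned tableLog;
    FSE_CTable* ct;
    size_t i, nclen, clen;

    /* 1. count symbol occurrences (stand-in for FSE_count()) */
    for (i = 0; i < srcSize; i++) count[ip[i]]++;
    while (maxSymbolValue && count[maxSymbolValue] == 0) maxSymbolValue--;

    /* 2. normalize the counters so they sum to a power of two */
    tableLog = FSE_optimalTableLog(0 /* default */, srcSize, maxSymbolValue);
    {   size_t const r = FSE_normalizeCount(norm, tableLog, count, srcSize, maxSymbolValue);
        if (FSE_isError(r)) return r;
        if (r == 0) return 1;          /* single symbol: caller should RLE instead */
        tableLog = (unsigned)r;
    }

    /* 3. save the normalized counters as the block header */
    nclen = FSE_writeNCount(op, dstCapacity, norm, maxSymbolValue, tableLog);
    if (FSE_isError(nclen)) return nclen;

    /* 4. build the encoding table, 5. encode the payload with it */
    ct = FSE_createCTable(maxSymbolValue, tableLog);
    if (ct == NULL) return 0;
    clen = FSE_buildCTable(ct, norm, maxSymbolValue, tableLog);
    if (!FSE_isError(clen))
        clen = FSE_compress_usingCTable(op + nclen, dstCapacity - nclen, src, srcSize, ct);
    FSE_freeCTable(ct);
    if (FSE_isError(clen) || clen == 0) return clen;
    return nclen + clen;
}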
maxSymbolValuePtr[0] and tableLogPtr[0] will also be updated with their respective values */ FSE_PUBLIC_API size_t FSE_readNCount (short* normalizedCounter, unsigned* maxSymbolValuePtr, unsigned* tableLogPtr, const void* rBuffer, size_t rBuffSize); /*! Constructor and Destructor of FSE_DTable. Note that its size depends on 'tableLog' */ typedef unsigned FSE_DTable; /* don't allocate that. It's just a way to be more restrictive than void* */ FSE_PUBLIC_API FSE_DTable* FSE_createDTable(unsigned tableLog); FSE_PUBLIC_API void FSE_freeDTable(FSE_DTable* dt); /*! FSE_buildDTable(): Builds 'dt', which must be already allocated, using FSE_createDTable(). return : 0, or an errorCode, which can be tested using FSE_isError() */ FSE_PUBLIC_API size_t FSE_buildDTable (FSE_DTable* dt, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog); /*! FSE_decompress_usingDTable(): Decompress compressed source `cSrc` of size `cSrcSize` using `dt` into `dst` which must be already allocated. @return : size of regenerated data (necessarily <= `dstCapacity`), or an errorCode, which can be tested using FSE_isError() */ FSE_PUBLIC_API size_t FSE_decompress_usingDTable(void* dst, size_t dstCapacity, const void* cSrc, size_t cSrcSize, const FSE_DTable* dt); /*! Tutorial : ---------- (Note : these functions only decompress FSE-compressed blocks. If block is uncompressed, use memcpy() instead If block is a single repeated byte, use memset() instead ) The first step is to obtain the normalized frequencies of symbols. This can be performed by FSE_readNCount() if it was saved using FSE_writeNCount(). 'normalizedCounter' must be already allocated, and have at least 'maxSymbolValuePtr[0]+1' cells of signed short. In practice, that means it's necessary to know 'maxSymbolValue' beforehand, or size the table to handle worst case situations (typically 256). FSE_readNCount() will provide 'tableLog' and 'maxSymbolValue'. The result of FSE_readNCount() is the number of bytes read from 'rBuffer'. Note that 'rBufferSize' must be at least 4 bytes, even if useful information is less than that. If there is an error, the function will return an error code, which can be tested using FSE_isError(). The next step is to build the decompression tables 'FSE_DTable' from 'normalizedCounter'. This is performed by the function FSE_buildDTable(). The space required by 'FSE_DTable' must be already allocated using FSE_createDTable(). If there is an error, the function will return an error code, which can be tested using FSE_isError(). `FSE_DTable` can then be used to decompress `cSrc`, with FSE_decompress_usingDTable(). `cSrcSize` must be strictly correct, otherwise decompression will fail. FSE_decompress_usingDTable() result will tell how many bytes were regenerated (<=`dstCapacity`). If there is an error, the function will return an error code, which can be tested using FSE_isError(). 
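Put together, one possible shape for that sequence, assuming `header`/`headerSize` hold the serialized counters and `cSrc`/`cSrcSize` the FSE-compressed payload; written as if inside a helper returning size_t :

    short norm[256];                            // worst-case sizing (maxSymbolValue <= 255)
    unsigned maxSymbolValue = 255;              // updated by FSE_readNCount()
    unsigned tableLog = 0;                      // updated by FSE_readNCount()
    size_t const hSize = FSE_readNCount(norm, &maxSymbolValue, &tableLog, header, headerSize);
    if (FSE_isError(hSize)) return hSize;
    FSE_DTable* const dt = FSE_createDTable(tableLog);
    size_t const buildRes = FSE_buildDTable(dt, norm, maxSymbolValue, tableLog);
    if (FSE_isError(buildRes)) { FSE_freeDTable(dt); return buildRes; }
    size_t const dSize = FSE_decompress_usingDTable(dst, dstCapacity, cSrc, cSrcSize, dt);
    FSE_freeDTable(dt);
    return dSize;       // regenerated size, or an error code, to be tested with FSE_isError()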
(ex: dst buffer too small) */ #endif /* FSE_H */ #if defined(FSE_STATIC_LINKING_ONLY) && !defined(FSE_H_FSE_STATIC_LINKING_ONLY) #define FSE_H_FSE_STATIC_LINKING_ONLY /* *** Dependency *** */ #include "bitstream.h" /* ***************************************** * Static allocation *******************************************/ /* FSE buffer bounds */ #define FSE_NCOUNTBOUND 512 #define FSE_BLOCKBOUND(size) (size + (size>>7) + 4 /* fse states */ + sizeof(size_t) /* bitContainer */) #define FSE_COMPRESSBOUND(size) (FSE_NCOUNTBOUND + FSE_BLOCKBOUND(size)) /* Macro version, useful for static allocation */ /* It is possible to statically allocate FSE CTable/DTable as a table of FSE_CTable/FSE_DTable using below macros */ #define FSE_CTABLE_SIZE_U32(maxTableLog, maxSymbolValue) (1 + (1<<(maxTableLog-1)) + ((maxSymbolValue+1)*2)) #define FSE_DTABLE_SIZE_U32(maxTableLog) (1 + (1< 12) ? (1 << (maxTableLog - 2)) : 1024) ) size_t FSE_compress_wksp (void* dst, size_t dstSize, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog, void* workSpace, size_t wkspSize); size_t FSE_buildCTable_raw (FSE_CTable* ct, unsigned nbBits); /**< build a fake FSE_CTable, designed for a flat distribution, where each symbol uses nbBits */ size_t FSE_buildCTable_rle (FSE_CTable* ct, unsigned char symbolValue); /**< build a fake FSE_CTable, designed to compress always the same symbolValue */ /* FSE_buildCTable_wksp() : * Same as FSE_buildCTable(), but using an externally allocated scratch buffer (`workSpace`). * `wkspSize` must be >= `(1<= BIT_DStream_completed When it's done, verify decompression is fully completed, by checking both DStream and the relevant states. Checking if DStream has reached its end is performed by : BIT_endOfDStream(&DStream); Check also the states. There might be some symbols left there, if some high probability ones (>50%) are possible. FSE_endOfDState(&DState); */ /* ***************************************** * FSE unsafe API *******************************************/ static unsigned char FSE_decodeSymbolFast(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD); /* faster, but works only if nbBits is always >= 1 (otherwise, result will be corrupted) */ /* ***************************************** * Implementation of inlined functions *******************************************/ typedef struct { int deltaFindState; U32 deltaNbBits; } FSE_symbolCompressionTransform; /* total 8 bytes */ MEM_STATIC void FSE_initCState(FSE_CState_t* statePtr, const FSE_CTable* ct) { const void* ptr = ct; const U16* u16ptr = (const U16*) ptr; const U32 tableLog = MEM_read16(ptr); statePtr->value = (ptrdiff_t)1<stateTable = u16ptr+2; statePtr->symbolTT = ct + 1 + (tableLog ? (1<<(tableLog-1)) : 1); statePtr->stateLog = tableLog; } /*! 
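For orientation, one possible use of this state interface together with the bitstream writer, assuming `dst`, `dstCapacity`, `src`, `srcSize` (>= 1) and a prepared `ct`; flushing after every symbol is shown only for simplicity :

    BIT_CStream_t bitC;
    FSE_CState_t state;
    const unsigned char* const istart = (const unsigned char*)src;
    size_t n = srcSize;
    if (FSE_isError(BIT_initCStream(&bitC, dst, dstCapacity))) return 0;   // dst too small
    FSE_initCState2(&state, ct, istart[--n]);          // last symbol is encoded first : the stream is read backward
    while (n > 0) {
        FSE_encodeSymbol(&bitC, &state, istart[--n]);
        BIT_flushBits(&bitC);                          // real code flushes less often, based on accumulator headroom
    }
    FSE_flushCState(&bitC, &state);
    return BIT_closeCStream(&bitC);                    // compressed size in bytes, or 0 if it did not fit
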
FSE_initCState2() : * Same as FSE_initCState(), but the first symbol to include (which will be the last to be read) * uses the smallest state value possible, saving the cost of this symbol */ MEM_STATIC void FSE_initCState2(FSE_CState_t* statePtr, const FSE_CTable* ct, U32 symbol) { FSE_initCState(statePtr, ct); { const FSE_symbolCompressionTransform symbolTT = ((const FSE_symbolCompressionTransform*)(statePtr->symbolTT))[symbol]; const U16* stateTable = (const U16*)(statePtr->stateTable); U32 nbBitsOut = (U32)((symbolTT.deltaNbBits + (1<<15)) >> 16); statePtr->value = (nbBitsOut << 16) - symbolTT.deltaNbBits; statePtr->value = stateTable[(statePtr->value >> nbBitsOut) + symbolTT.deltaFindState]; } } MEM_STATIC void FSE_encodeSymbol(BIT_CStream_t* bitC, FSE_CState_t* statePtr, unsigned symbol) { FSE_symbolCompressionTransform const symbolTT = ((const FSE_symbolCompressionTransform*)(statePtr->symbolTT))[symbol]; const U16* const stateTable = (const U16*)(statePtr->stateTable); U32 const nbBitsOut = (U32)((statePtr->value + symbolTT.deltaNbBits) >> 16); BIT_addBits(bitC, statePtr->value, nbBitsOut); statePtr->value = stateTable[ (statePtr->value >> nbBitsOut) + symbolTT.deltaFindState]; } MEM_STATIC void FSE_flushCState(BIT_CStream_t* bitC, const FSE_CState_t* statePtr) { BIT_addBits(bitC, statePtr->value, statePtr->stateLog); BIT_flushBits(bitC); } /* FSE_getMaxNbBits() : * Approximate maximum cost of a symbol, in bits. * Fractional get rounded up (i.e : a symbol with a normalized frequency of 3 gives the same result as a frequency of 2) * note 1 : assume symbolValue is valid (<= maxSymbolValue) * note 2 : if freq[symbolValue]==0, @return a fake cost of tableLog+1 bits */ MEM_STATIC U32 FSE_getMaxNbBits(const void* symbolTTPtr, U32 symbolValue) { const FSE_symbolCompressionTransform* symbolTT = (const FSE_symbolCompressionTransform*) symbolTTPtr; return (symbolTT[symbolValue].deltaNbBits + ((1<<16)-1)) >> 16; } /* FSE_bitCost() : * Approximate symbol cost, as fractional value, using fixed-point format (accuracyLog fractional bits) * note 1 : assume symbolValue is valid (<= maxSymbolValue) * note 2 : if freq[symbolValue]==0, @return a fake cost of tableLog+1 bits */ MEM_STATIC U32 FSE_bitCost(const void* symbolTTPtr, U32 tableLog, U32 symbolValue, U32 accuracyLog) { const FSE_symbolCompressionTransform* symbolTT = (const FSE_symbolCompressionTransform*) symbolTTPtr; U32 const minNbBits = symbolTT[symbolValue].deltaNbBits >> 16; U32 const threshold = (minNbBits+1) << 16; assert(tableLog < 16); assert(accuracyLog < 31-tableLog); /* ensure enough room for renormalization double shift */ { U32 const tableSize = 1 << tableLog; U32 const deltaFromThreshold = threshold - (symbolTT[symbolValue].deltaNbBits + tableSize); U32 const normalizedDeltaFromThreshold = (deltaFromThreshold << accuracyLog) >> tableLog; /* linear interpolation (very approximate) */ U32 const bitMultiplier = 1 << accuracyLog; assert(symbolTT[symbolValue].deltaNbBits + tableSize <= threshold); assert(normalizedDeltaFromThreshold <= bitMultiplier); return (minNbBits+1)*bitMultiplier - normalizedDeltaFromThreshold; } } /* ====== Decompression ====== */ typedef struct { U16 tableLog; U16 fastMode; } FSE_DTableHeader; /* sizeof U32 */ typedef struct { unsigned short newState; unsigned char symbol; unsigned char nbBits; } FSE_decode_t; /* size == U32 */ MEM_STATIC void FSE_initDState(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD, const FSE_DTable* dt) { const void* ptr = dt; const FSE_DTableHeader* const DTableH = (const 
FSE_DTableHeader*)ptr; DStatePtr->state = BIT_readBits(bitD, DTableH->tableLog); BIT_reloadDStream(bitD); DStatePtr->table = dt + 1; } MEM_STATIC BYTE FSE_peekSymbol(const FSE_DState_t* DStatePtr) { FSE_decode_t const DInfo = ((const FSE_decode_t*)(DStatePtr->table))[DStatePtr->state]; return DInfo.symbol; } MEM_STATIC void FSE_updateState(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD) { FSE_decode_t const DInfo = ((const FSE_decode_t*)(DStatePtr->table))[DStatePtr->state]; U32 const nbBits = DInfo.nbBits; size_t const lowBits = BIT_readBits(bitD, nbBits); DStatePtr->state = DInfo.newState + lowBits; } MEM_STATIC BYTE FSE_decodeSymbol(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD) { FSE_decode_t const DInfo = ((const FSE_decode_t*)(DStatePtr->table))[DStatePtr->state]; U32 const nbBits = DInfo.nbBits; BYTE const symbol = DInfo.symbol; size_t const lowBits = BIT_readBits(bitD, nbBits); DStatePtr->state = DInfo.newState + lowBits; return symbol; } /*! FSE_decodeSymbolFast() : unsafe, only works if no symbol has a probability > 50% */ MEM_STATIC BYTE FSE_decodeSymbolFast(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD) { FSE_decode_t const DInfo = ((const FSE_decode_t*)(DStatePtr->table))[DStatePtr->state]; U32 const nbBits = DInfo.nbBits; BYTE const symbol = DInfo.symbol; size_t const lowBits = BIT_readBitsFast(bitD, nbBits); DStatePtr->state = DInfo.newState + lowBits; return symbol; } MEM_STATIC unsigned FSE_endOfDState(const FSE_DState_t* DStatePtr) { return DStatePtr->state == 0; } #ifndef FSE_COMMONDEFS_ONLY /* ************************************************************** * Tuning parameters ****************************************************************/ /*!MEMORY_USAGE : * Memory usage formula : N->2^N Bytes (examples : 10 -> 1KB; 12 -> 4KB ; 16 -> 64KB; 20 -> 1MB; etc.) * Increasing memory usage improves compression ratio * Reduced memory usage can improve speed, due to cache effect * Recommended max value is 14, for 16KB, which nicely fits into Intel x86 L1 cache */ #ifndef FSE_MAX_MEMORY_USAGE # define FSE_MAX_MEMORY_USAGE 14 #endif #ifndef FSE_DEFAULT_MEMORY_USAGE # define FSE_DEFAULT_MEMORY_USAGE 13 #endif /*!FSE_MAX_SYMBOL_VALUE : * Maximum symbol value authorized. * Required for proper stack allocation */ #ifndef FSE_MAX_SYMBOL_VALUE # define FSE_MAX_SYMBOL_VALUE 255 #endif /* ************************************************************** * template functions type & suffix ****************************************************************/ #define FSE_FUNCTION_TYPE BYTE #define FSE_FUNCTION_EXTENSION #define FSE_DECODE_TYPE FSE_decode_t #endif /* !FSE_COMMONDEFS_ONLY */ /* *************************************************************** * Constants *****************************************************************/ #define FSE_MAX_TABLELOG (FSE_MAX_MEMORY_USAGE-2) #define FSE_MAX_TABLESIZE (1U< FSE_TABLELOG_ABSOLUTE_MAX # error "FSE_MAX_TABLELOG > FSE_TABLELOG_ABSOLUTE_MAX is not supported" #endif #define FSE_TABLESTEP(tableSize) ((tableSize>>1) + (tableSize>>3) + 3) #endif /* FSE_STATIC_LINKING_ONLY */ #if defined (__cplusplus) } #endif borgbackup-1.1.15/src/borg/algorithms/zstd/lib/common/error_private.h0000644000175000017500000000461213771325506025657 0ustar useruser00000000000000/* * Copyright (c) 2016-2020, Yann Collet, Facebook, Inc. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the * LICENSE file in the root directory of this source tree) and the GPLv2 (found * in the COPYING file in the root directory of this source tree). 
* You may select, at your option, one of the above-listed licenses. */ /* Note : this module is expected to remain private, do not expose it */ #ifndef ERROR_H_MODULE #define ERROR_H_MODULE #if defined (__cplusplus) extern "C" { #endif /* **************************************** * Dependencies ******************************************/ #include /* size_t */ #include "zstd_errors.h" /* enum list */ /* **************************************** * Compiler-specific ******************************************/ #if defined(__GNUC__) # define ERR_STATIC static __attribute__((unused)) #elif defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) # define ERR_STATIC static inline #elif defined(_MSC_VER) # define ERR_STATIC static __inline #else # define ERR_STATIC static /* this version may generate warnings for unused static functions; disable the relevant warning */ #endif /*-**************************************** * Customization (error_public.h) ******************************************/ typedef ZSTD_ErrorCode ERR_enum; #define PREFIX(name) ZSTD_error_##name /*-**************************************** * Error codes handling ******************************************/ #undef ERROR /* already defined on Visual Studio */ #define ERROR(name) ZSTD_ERROR(name) #define ZSTD_ERROR(name) ((size_t)-PREFIX(name)) ERR_STATIC unsigned ERR_isError(size_t code) { return (code > ERROR(maxCode)); } ERR_STATIC ERR_enum ERR_getErrorCode(size_t code) { if (!ERR_isError(code)) return (ERR_enum)0; return (ERR_enum) (0-code); } /* check and forward error code */ #define CHECK_V_F(e, f) size_t const e = f; if (ERR_isError(e)) return e #define CHECK_F(f) { CHECK_V_F(_var_err__, f); } /*-**************************************** * Error Strings ******************************************/ const char* ERR_getErrorString(ERR_enum code); /* error_private.c */ ERR_STATIC const char* ERR_getErrorName(size_t code) { return ERR_getErrorString(ERR_getErrorCode(code)); } #if defined (__cplusplus) } #endif #endif /* ERROR_H_MODULE */ borgbackup-1.1.15/src/borg/algorithms/zstd/lib/common/entropy_common.c0000644000175000017500000002055113771325506026037 0ustar useruser00000000000000/* ****************************************************************** * Common functions of New Generation Entropy library * Copyright (c) 2016-2020, Yann Collet, Facebook, Inc. * * You can contact the author at : * - FSE+HUF source repository : https://github.com/Cyan4973/FiniteStateEntropy * - Public forum : https://groups.google.com/forum/#!forum/lz4c * * This source code is licensed under both the BSD-style license (found in the * LICENSE file in the root directory of this source tree) and the GPLv2 (found * in the COPYING file in the root directory of this source tree). * You may select, at your option, one of the above-listed licenses. 
****************************************************************** */ /* ************************************* * Dependencies ***************************************/ #include "mem.h" #include "error_private.h" /* ERR_*, ERROR */ #define FSE_STATIC_LINKING_ONLY /* FSE_MIN_TABLELOG */ #include "fse.h" #define HUF_STATIC_LINKING_ONLY /* HUF_TABLELOG_ABSOLUTEMAX */ #include "huf.h" /*=== Version ===*/ unsigned FSE_versionNumber(void) { return FSE_VERSION_NUMBER; } /*=== Error Management ===*/ unsigned FSE_isError(size_t code) { return ERR_isError(code); } const char* FSE_getErrorName(size_t code) { return ERR_getErrorName(code); } unsigned HUF_isError(size_t code) { return ERR_isError(code); } const char* HUF_getErrorName(size_t code) { return ERR_getErrorName(code); } /*-************************************************************** * FSE NCount encoding-decoding ****************************************************************/ size_t FSE_readNCount (short* normalizedCounter, unsigned* maxSVPtr, unsigned* tableLogPtr, const void* headerBuffer, size_t hbSize) { const BYTE* const istart = (const BYTE*) headerBuffer; const BYTE* const iend = istart + hbSize; const BYTE* ip = istart; int nbBits; int remaining; int threshold; U32 bitStream; int bitCount; unsigned charnum = 0; int previous0 = 0; if (hbSize < 4) { /* This function only works when hbSize >= 4 */ char buffer[4]; memset(buffer, 0, sizeof(buffer)); memcpy(buffer, headerBuffer, hbSize); { size_t const countSize = FSE_readNCount(normalizedCounter, maxSVPtr, tableLogPtr, buffer, sizeof(buffer)); if (FSE_isError(countSize)) return countSize; if (countSize > hbSize) return ERROR(corruption_detected); return countSize; } } assert(hbSize >= 4); /* init */ memset(normalizedCounter, 0, (*maxSVPtr+1) * sizeof(normalizedCounter[0])); /* all symbols not present in NCount have a frequency of 0 */ bitStream = MEM_readLE32(ip); nbBits = (bitStream & 0xF) + FSE_MIN_TABLELOG; /* extract tableLog */ if (nbBits > FSE_TABLELOG_ABSOLUTE_MAX) return ERROR(tableLog_tooLarge); bitStream >>= 4; bitCount = 4; *tableLogPtr = nbBits; remaining = (1<1) & (charnum<=*maxSVPtr)) { if (previous0) { unsigned n0 = charnum; while ((bitStream & 0xFFFF) == 0xFFFF) { n0 += 24; if (ip < iend-5) { ip += 2; bitStream = MEM_readLE32(ip) >> bitCount; } else { bitStream >>= 16; bitCount += 16; } } while ((bitStream & 3) == 3) { n0 += 3; bitStream >>= 2; bitCount += 2; } n0 += bitStream & 3; bitCount += 2; if (n0 > *maxSVPtr) return ERROR(maxSymbolValue_tooSmall); while (charnum < n0) normalizedCounter[charnum++] = 0; if ((ip <= iend-7) || (ip + (bitCount>>3) <= iend-4)) { assert((bitCount >> 3) <= 3); /* For first condition to work */ ip += bitCount>>3; bitCount &= 7; bitStream = MEM_readLE32(ip) >> bitCount; } else { bitStream >>= 2; } } { int const max = (2*threshold-1) - remaining; int count; if ((bitStream & (threshold-1)) < (U32)max) { count = bitStream & (threshold-1); bitCount += nbBits-1; } else { count = bitStream & (2*threshold-1); if (count >= threshold) count -= max; bitCount += nbBits; } count--; /* extra accuracy */ remaining -= count < 0 ? 
-count : count; /* -1 means +1 */ normalizedCounter[charnum++] = (short)count; previous0 = !count; while (remaining < threshold) { nbBits--; threshold >>= 1; } if ((ip <= iend-7) || (ip + (bitCount>>3) <= iend-4)) { ip += bitCount>>3; bitCount &= 7; } else { bitCount -= (int)(8 * (iend - 4 - ip)); ip = iend - 4; } bitStream = MEM_readLE32(ip) >> (bitCount & 31); } } /* while ((remaining>1) & (charnum<=*maxSVPtr)) */ if (remaining != 1) return ERROR(corruption_detected); if (bitCount > 32) return ERROR(corruption_detected); *maxSVPtr = charnum-1; ip += (bitCount+7)>>3; return ip-istart; } /*! HUF_readStats() : Read compact Huffman tree, saved by HUF_writeCTable(). `huffWeight` is destination buffer. `rankStats` is assumed to be a table of at least HUF_TABLELOG_MAX U32. @return : size read from `src` , or an error Code . Note : Needed by HUF_readCTable() and HUF_readDTableX?() . */ size_t HUF_readStats(BYTE* huffWeight, size_t hwSize, U32* rankStats, U32* nbSymbolsPtr, U32* tableLogPtr, const void* src, size_t srcSize) { U32 weightTotal; const BYTE* ip = (const BYTE*) src; size_t iSize; size_t oSize; if (!srcSize) return ERROR(srcSize_wrong); iSize = ip[0]; /* memset(huffWeight, 0, hwSize); *//* is not necessary, even though some analyzer complain ... */ if (iSize >= 128) { /* special header */ oSize = iSize - 127; iSize = ((oSize+1)/2); if (iSize+1 > srcSize) return ERROR(srcSize_wrong); if (oSize >= hwSize) return ERROR(corruption_detected); ip += 1; { U32 n; for (n=0; n> 4; huffWeight[n+1] = ip[n/2] & 15; } } } else { /* header compressed with FSE (normal case) */ FSE_DTable fseWorkspace[FSE_DTABLE_SIZE_U32(6)]; /* 6 is max possible tableLog for HUF header (maybe even 5, to be tested) */ if (iSize+1 > srcSize) return ERROR(srcSize_wrong); oSize = FSE_decompress_wksp(huffWeight, hwSize-1, ip+1, iSize, fseWorkspace, 6); /* max (hwSize-1) values decoded, as last one is implied */ if (FSE_isError(oSize)) return oSize; } /* collect weight stats */ memset(rankStats, 0, (HUF_TABLELOG_MAX + 1) * sizeof(U32)); weightTotal = 0; { U32 n; for (n=0; n= HUF_TABLELOG_MAX) return ERROR(corruption_detected); rankStats[huffWeight[n]]++; weightTotal += (1 << huffWeight[n]) >> 1; } } if (weightTotal == 0) return ERROR(corruption_detected); /* get last non-null symbol weight (implied, total must be 2^n) */ { U32 const tableLog = BIT_highbit32(weightTotal) + 1; if (tableLog > HUF_TABLELOG_MAX) return ERROR(corruption_detected); *tableLogPtr = tableLog; /* determine last weight */ { U32 const total = 1 << tableLog; U32 const rest = total - weightTotal; U32 const verif = 1 << BIT_highbit32(rest); U32 const lastWeight = BIT_highbit32(rest) + 1; if (verif != rest) return ERROR(corruption_detected); /* last value must be a clean power of 2 */ huffWeight[oSize] = (BYTE)lastWeight; rankStats[lastWeight]++; } } /* check tree construction validity */ if ((rankStats[1] < 2) || (rankStats[1] & 1)) return ERROR(corruption_detected); /* by construction : at least 2 elts of rank 1, must be even */ /* results */ *nbSymbolsPtr = (U32)(oSize+1); return iSize+1; } borgbackup-1.1.15/src/borg/algorithms/zstd/lib/common/bitstream.h0000644000175000017500000004271113771325506024770 0ustar useruser00000000000000/* ****************************************************************** * bitstream * Part of FSE library * Copyright (c) 2013-2020, Yann Collet, Facebook, Inc. 
* * You can contact the author at : * - Source repository : https://github.com/Cyan4973/FiniteStateEntropy * * This source code is licensed under both the BSD-style license (found in the * LICENSE file in the root directory of this source tree) and the GPLv2 (found * in the COPYING file in the root directory of this source tree). * You may select, at your option, one of the above-listed licenses. ****************************************************************** */ #ifndef BITSTREAM_H_MODULE #define BITSTREAM_H_MODULE #if defined (__cplusplus) extern "C" { #endif /* * This API consists of small unitary functions, which must be inlined for best performance. * Since link-time-optimization is not available for all compilers, * these functions are defined into a .h to be included. */ /*-**************************************** * Dependencies ******************************************/ #include "mem.h" /* unaligned access routines */ #include "compiler.h" /* UNLIKELY() */ #include "debug.h" /* assert(), DEBUGLOG(), RAWLOG() */ #include "error_private.h" /* error codes and messages */ /*========================================= * Target specific =========================================*/ #if defined(__BMI__) && defined(__GNUC__) # include /* support for bextr (experimental) */ #elif defined(__ICCARM__) # include #endif #define STREAM_ACCUMULATOR_MIN_32 25 #define STREAM_ACCUMULATOR_MIN_64 57 #define STREAM_ACCUMULATOR_MIN ((U32)(MEM_32bits() ? STREAM_ACCUMULATOR_MIN_32 : STREAM_ACCUMULATOR_MIN_64)) /*-****************************************** * bitStream encoding API (write forward) ********************************************/ /* bitStream can mix input from multiple sources. * A critical property of these streams is that they encode and decode in **reverse** direction. * So the first bit sequence you add will be the last to be read, like a LIFO stack. */ typedef struct { size_t bitContainer; unsigned bitPos; char* startPtr; char* ptr; char* endPtr; } BIT_CStream_t; MEM_STATIC size_t BIT_initCStream(BIT_CStream_t* bitC, void* dstBuffer, size_t dstCapacity); MEM_STATIC void BIT_addBits(BIT_CStream_t* bitC, size_t value, unsigned nbBits); MEM_STATIC void BIT_flushBits(BIT_CStream_t* bitC); MEM_STATIC size_t BIT_closeCStream(BIT_CStream_t* bitC); /* Start with initCStream, providing the size of buffer to write into. * bitStream will never write outside of this buffer. * `dstCapacity` must be >= sizeof(bitD->bitContainer), otherwise @return will be an error code. * * bits are first added to a local register. * Local register is size_t, hence 64-bits on 64-bits systems, or 32-bits on 32-bits systems. * Writing data into memory is an explicit operation, performed by the flushBits function. * Hence keep track how many bits are potentially stored into local register to avoid register overflow. * After a flushBits, a maximum of 7 bits might still be stored into local register. * * Avoid storing elements of more than 24 bits if you want compatibility with 32-bits bitstream readers. * * Last operation is to close the bitStream. * The function returns the final size of CStream in bytes. 
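For instance, a minimal write sequence could look as follows; `dstBuffer` and `dstCapacity` are caller-provided, and the fragment is written as if inside a helper returning size_t :

    BIT_CStream_t bitC;
    if (ERR_isError(BIT_initCStream(&bitC, dstBuffer, dstCapacity))) return 0;   // dstBuffer too small
    BIT_addBits(&bitC, 5, 3);          // queue the value 5, on 3 bits
    BIT_addBits(&bitC, 1, 1);          // queue one more bit
    BIT_flushBits(&bitC);              // commit the local register to dstBuffer
    return BIT_closeCStream(&bitC);    // total size in bytes, or 0 if dstBuffer was too small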
* If data couldn't fit into `dstBuffer`, it will return a 0 ( == not storable) */ /*-******************************************** * bitStream decoding API (read backward) **********************************************/ typedef struct { size_t bitContainer; unsigned bitsConsumed; const char* ptr; const char* start; const char* limitPtr; } BIT_DStream_t; typedef enum { BIT_DStream_unfinished = 0, BIT_DStream_endOfBuffer = 1, BIT_DStream_completed = 2, BIT_DStream_overflow = 3 } BIT_DStream_status; /* result of BIT_reloadDStream() */ /* 1,2,4,8 would be better for bitmap combinations, but slows down performance a bit ... :( */ MEM_STATIC size_t BIT_initDStream(BIT_DStream_t* bitD, const void* srcBuffer, size_t srcSize); MEM_STATIC size_t BIT_readBits(BIT_DStream_t* bitD, unsigned nbBits); MEM_STATIC BIT_DStream_status BIT_reloadDStream(BIT_DStream_t* bitD); MEM_STATIC unsigned BIT_endOfDStream(const BIT_DStream_t* bitD); /* Start by invoking BIT_initDStream(). * A chunk of the bitStream is then stored into a local register. * Local register size is 64-bits on 64-bits systems, 32-bits on 32-bits systems (size_t). * You can then retrieve bitFields stored into the local register, **in reverse order**. * Local register is explicitly reloaded from memory by the BIT_reloadDStream() method. * A reload guarantee a minimum of ((8*sizeof(bitD->bitContainer))-7) bits when its result is BIT_DStream_unfinished. * Otherwise, it can be less than that, so proceed accordingly. * Checking if DStream has reached its end can be performed with BIT_endOfDStream(). */ /*-**************************************** * unsafe API ******************************************/ MEM_STATIC void BIT_addBitsFast(BIT_CStream_t* bitC, size_t value, unsigned nbBits); /* faster, but works only if value is "clean", meaning all high bits above nbBits are 0 */ MEM_STATIC void BIT_flushBitsFast(BIT_CStream_t* bitC); /* unsafe version; does not check buffer overflow */ MEM_STATIC size_t BIT_readBitsFast(BIT_DStream_t* bitD, unsigned nbBits); /* faster, but works only if nbBits >= 1 */ /*-************************************************************** * Internal functions ****************************************************************/ MEM_STATIC unsigned BIT_highbit32 (U32 val) { assert(val != 0); { # if defined(_MSC_VER) /* Visual */ unsigned long r=0; return _BitScanReverse ( &r, val ) ? (unsigned)r : 0; # elif defined(__GNUC__) && (__GNUC__ >= 3) /* Use GCC Intrinsic */ return __builtin_clz (val) ^ 31; # elif defined(__ICCARM__) /* IAR Intrinsic */ return 31 - __CLZ(val); # else /* Software version */ static const unsigned DeBruijnClz[32] = { 0, 9, 1, 10, 13, 21, 2, 29, 11, 14, 16, 18, 22, 25, 3, 30, 8, 12, 20, 28, 15, 17, 24, 7, 19, 27, 23, 6, 26, 5, 4, 31 }; U32 v = val; v |= v >> 1; v |= v >> 2; v |= v >> 4; v |= v >> 8; v |= v >> 16; return DeBruijnClz[ (U32) (v * 0x07C4ACDDU) >> 27]; # endif } } /*===== Local Constants =====*/ static const unsigned BIT_mask[] = { 0, 1, 3, 7, 0xF, 0x1F, 0x3F, 0x7F, 0xFF, 0x1FF, 0x3FF, 0x7FF, 0xFFF, 0x1FFF, 0x3FFF, 0x7FFF, 0xFFFF, 0x1FFFF, 0x3FFFF, 0x7FFFF, 0xFFFFF, 0x1FFFFF, 0x3FFFFF, 0x7FFFFF, 0xFFFFFF, 0x1FFFFFF, 0x3FFFFFF, 0x7FFFFFF, 0xFFFFFFF, 0x1FFFFFFF, 0x3FFFFFFF, 0x7FFFFFFF}; /* up to 31 bits */ #define BIT_MASK_SIZE (sizeof(BIT_mask) / sizeof(BIT_mask[0])) /*-************************************************************** * bitStream encoding ****************************************************************/ /*! 
BIT_initCStream() : * `dstCapacity` must be > sizeof(size_t) * @return : 0 if success, * otherwise an error code (can be tested using ERR_isError()) */ MEM_STATIC size_t BIT_initCStream(BIT_CStream_t* bitC, void* startPtr, size_t dstCapacity) { bitC->bitContainer = 0; bitC->bitPos = 0; bitC->startPtr = (char*)startPtr; bitC->ptr = bitC->startPtr; bitC->endPtr = bitC->startPtr + dstCapacity - sizeof(bitC->bitContainer); if (dstCapacity <= sizeof(bitC->bitContainer)) return ERROR(dstSize_tooSmall); return 0; } /*! BIT_addBits() : * can add up to 31 bits into `bitC`. * Note : does not check for register overflow ! */ MEM_STATIC void BIT_addBits(BIT_CStream_t* bitC, size_t value, unsigned nbBits) { MEM_STATIC_ASSERT(BIT_MASK_SIZE == 32); assert(nbBits < BIT_MASK_SIZE); assert(nbBits + bitC->bitPos < sizeof(bitC->bitContainer) * 8); bitC->bitContainer |= (value & BIT_mask[nbBits]) << bitC->bitPos; bitC->bitPos += nbBits; } /*! BIT_addBitsFast() : * works only if `value` is _clean_, * meaning all high bits above nbBits are 0 */ MEM_STATIC void BIT_addBitsFast(BIT_CStream_t* bitC, size_t value, unsigned nbBits) { assert((value>>nbBits) == 0); assert(nbBits + bitC->bitPos < sizeof(bitC->bitContainer) * 8); bitC->bitContainer |= value << bitC->bitPos; bitC->bitPos += nbBits; } /*! BIT_flushBitsFast() : * assumption : bitContainer has not overflowed * unsafe version; does not check buffer overflow */ MEM_STATIC void BIT_flushBitsFast(BIT_CStream_t* bitC) { size_t const nbBytes = bitC->bitPos >> 3; assert(bitC->bitPos < sizeof(bitC->bitContainer) * 8); assert(bitC->ptr <= bitC->endPtr); MEM_writeLEST(bitC->ptr, bitC->bitContainer); bitC->ptr += nbBytes; bitC->bitPos &= 7; bitC->bitContainer >>= nbBytes*8; } /*! BIT_flushBits() : * assumption : bitContainer has not overflowed * safe version; check for buffer overflow, and prevents it. * note : does not signal buffer overflow. * overflow will be revealed later on using BIT_closeCStream() */ MEM_STATIC void BIT_flushBits(BIT_CStream_t* bitC) { size_t const nbBytes = bitC->bitPos >> 3; assert(bitC->bitPos < sizeof(bitC->bitContainer) * 8); assert(bitC->ptr <= bitC->endPtr); MEM_writeLEST(bitC->ptr, bitC->bitContainer); bitC->ptr += nbBytes; if (bitC->ptr > bitC->endPtr) bitC->ptr = bitC->endPtr; bitC->bitPos &= 7; bitC->bitContainer >>= nbBytes*8; } /*! BIT_closeCStream() : * @return : size of CStream, in bytes, * or 0 if it could not fit into dstBuffer */ MEM_STATIC size_t BIT_closeCStream(BIT_CStream_t* bitC) { BIT_addBitsFast(bitC, 1, 1); /* endMark */ BIT_flushBits(bitC); if (bitC->ptr >= bitC->endPtr) return 0; /* overflow detected */ return (bitC->ptr - bitC->startPtr) + (bitC->bitPos > 0); } /*-******************************************************** * bitStream decoding **********************************************************/ /*! BIT_initDStream() : * Initialize a BIT_DStream_t. * `bitD` : a pointer to an already allocated BIT_DStream_t structure. * `srcSize` must be the *exact* size of the bitStream, in bytes. 
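A symmetric read sequence for the small stream written in the sketch above, with the same assumptions :

    BIT_DStream_t bitD;
    size_t const initRes = BIT_initDStream(&bitD, dstBuffer, streamSize);   // streamSize as returned by BIT_closeCStream()
    if (ERR_isError(initRes)) return initRes;
    {   size_t const bit   = BIT_readBits(&bitD, 1);   // values come back in reverse order of writing
        size_t const value = BIT_readBits(&bitD, 3);   // so this is the 3-bit value 5 queued first
        (void)bit; (void)value;   }
    BIT_reloadDStream(&bitD);                          // refill the local register between batches of reads
    return BIT_endOfDStream(&bitD) ? 0 : ERROR(corruption_detected);        // all bits must have been consumed
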
* @return : size of stream (== srcSize), or an errorCode if a problem is detected */ MEM_STATIC size_t BIT_initDStream(BIT_DStream_t* bitD, const void* srcBuffer, size_t srcSize) { if (srcSize < 1) { memset(bitD, 0, sizeof(*bitD)); return ERROR(srcSize_wrong); } bitD->start = (const char*)srcBuffer; bitD->limitPtr = bitD->start + sizeof(bitD->bitContainer); if (srcSize >= sizeof(bitD->bitContainer)) { /* normal case */ bitD->ptr = (const char*)srcBuffer + srcSize - sizeof(bitD->bitContainer); bitD->bitContainer = MEM_readLEST(bitD->ptr); { BYTE const lastByte = ((const BYTE*)srcBuffer)[srcSize-1]; bitD->bitsConsumed = lastByte ? 8 - BIT_highbit32(lastByte) : 0; /* ensures bitsConsumed is always set */ if (lastByte == 0) return ERROR(GENERIC); /* endMark not present */ } } else { bitD->ptr = bitD->start; bitD->bitContainer = *(const BYTE*)(bitD->start); switch(srcSize) { case 7: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[6]) << (sizeof(bitD->bitContainer)*8 - 16); /* fall-through */ case 6: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[5]) << (sizeof(bitD->bitContainer)*8 - 24); /* fall-through */ case 5: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[4]) << (sizeof(bitD->bitContainer)*8 - 32); /* fall-through */ case 4: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[3]) << 24; /* fall-through */ case 3: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[2]) << 16; /* fall-through */ case 2: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[1]) << 8; /* fall-through */ default: break; } { BYTE const lastByte = ((const BYTE*)srcBuffer)[srcSize-1]; bitD->bitsConsumed = lastByte ? 8 - BIT_highbit32(lastByte) : 0; if (lastByte == 0) return ERROR(corruption_detected); /* endMark not present */ } bitD->bitsConsumed += (U32)(sizeof(bitD->bitContainer) - srcSize)*8; } return srcSize; } MEM_STATIC size_t BIT_getUpperBits(size_t bitContainer, U32 const start) { return bitContainer >> start; } MEM_STATIC size_t BIT_getMiddleBits(size_t bitContainer, U32 const start, U32 const nbBits) { U32 const regMask = sizeof(bitContainer)*8 - 1; /* if start > regMask, bitstream is corrupted, and result is undefined */ assert(nbBits < BIT_MASK_SIZE); return (bitContainer >> (start & regMask)) & BIT_mask[nbBits]; } MEM_STATIC size_t BIT_getLowerBits(size_t bitContainer, U32 const nbBits) { assert(nbBits < BIT_MASK_SIZE); return bitContainer & BIT_mask[nbBits]; } /*! BIT_lookBits() : * Provides next n bits from local register. * local register is not modified. * On 32-bits, maxNbBits==24. * On 64-bits, maxNbBits==56. * @return : value extracted */ MEM_STATIC size_t BIT_lookBits(const BIT_DStream_t* bitD, U32 nbBits) { /* arbitrate between double-shift and shift+mask */ #if 1 /* if bitD->bitsConsumed + nbBits > sizeof(bitD->bitContainer)*8, * bitstream is likely corrupted, and result is undefined */ return BIT_getMiddleBits(bitD->bitContainer, (sizeof(bitD->bitContainer)*8) - bitD->bitsConsumed - nbBits, nbBits); #else /* this code path is slower on my os-x laptop */ U32 const regMask = sizeof(bitD->bitContainer)*8 - 1; return ((bitD->bitContainer << (bitD->bitsConsumed & regMask)) >> 1) >> ((regMask-nbBits) & regMask); #endif } /*! 
BIT_lookBitsFast() : * unsafe version; only works if nbBits >= 1 */ MEM_STATIC size_t BIT_lookBitsFast(const BIT_DStream_t* bitD, U32 nbBits) { U32 const regMask = sizeof(bitD->bitContainer)*8 - 1; assert(nbBits >= 1); return (bitD->bitContainer << (bitD->bitsConsumed & regMask)) >> (((regMask+1)-nbBits) & regMask); } MEM_STATIC void BIT_skipBits(BIT_DStream_t* bitD, U32 nbBits) { bitD->bitsConsumed += nbBits; } /*! BIT_readBits() : * Read (consume) next n bits from local register and update. * Pay attention to not read more than nbBits contained into local register. * @return : extracted value. */ MEM_STATIC size_t BIT_readBits(BIT_DStream_t* bitD, unsigned nbBits) { size_t const value = BIT_lookBits(bitD, nbBits); BIT_skipBits(bitD, nbBits); return value; } /*! BIT_readBitsFast() : * unsafe version; only works only if nbBits >= 1 */ MEM_STATIC size_t BIT_readBitsFast(BIT_DStream_t* bitD, unsigned nbBits) { size_t const value = BIT_lookBitsFast(bitD, nbBits); assert(nbBits >= 1); BIT_skipBits(bitD, nbBits); return value; } /*! BIT_reloadDStreamFast() : * Similar to BIT_reloadDStream(), but with two differences: * 1. bitsConsumed <= sizeof(bitD->bitContainer)*8 must hold! * 2. Returns BIT_DStream_overflow when bitD->ptr < bitD->limitPtr, at this * point you must use BIT_reloadDStream() to reload. */ MEM_STATIC BIT_DStream_status BIT_reloadDStreamFast(BIT_DStream_t* bitD) { if (UNLIKELY(bitD->ptr < bitD->limitPtr)) return BIT_DStream_overflow; assert(bitD->bitsConsumed <= sizeof(bitD->bitContainer)*8); bitD->ptr -= bitD->bitsConsumed >> 3; bitD->bitsConsumed &= 7; bitD->bitContainer = MEM_readLEST(bitD->ptr); return BIT_DStream_unfinished; } /*! BIT_reloadDStream() : * Refill `bitD` from buffer previously set in BIT_initDStream() . * This function is safe, it guarantees it will not read beyond src buffer. * @return : status of `BIT_DStream_t` internal register. * when status == BIT_DStream_unfinished, internal register is filled with at least 25 or 57 bits */ MEM_STATIC BIT_DStream_status BIT_reloadDStream(BIT_DStream_t* bitD) { if (bitD->bitsConsumed > (sizeof(bitD->bitContainer)*8)) /* overflow detected, like end of stream */ return BIT_DStream_overflow; if (bitD->ptr >= bitD->limitPtr) { return BIT_reloadDStreamFast(bitD); } if (bitD->ptr == bitD->start) { if (bitD->bitsConsumed < sizeof(bitD->bitContainer)*8) return BIT_DStream_endOfBuffer; return BIT_DStream_completed; } /* start < ptr < limitPtr */ { U32 nbBytes = bitD->bitsConsumed >> 3; BIT_DStream_status result = BIT_DStream_unfinished; if (bitD->ptr - nbBytes < bitD->start) { nbBytes = (U32)(bitD->ptr - bitD->start); /* ptr > start */ result = BIT_DStream_endOfBuffer; } bitD->ptr -= nbBytes; bitD->bitsConsumed -= nbBytes*8; bitD->bitContainer = MEM_readLEST(bitD->ptr); /* reminder : srcSize > sizeof(bitD->bitContainer), otherwise bitD->ptr == bitD->start */ return result; } } /*! BIT_endOfDStream() : * @return : 1 if DStream has _exactly_ reached its end (all bits consumed). */ MEM_STATIC unsigned BIT_endOfDStream(const BIT_DStream_t* DStream) { return ((DStream->ptr == DStream->start) && (DStream->bitsConsumed == sizeof(DStream->bitContainer)*8)); } #if defined (__cplusplus) } #endif #endif /* BITSTREAM_H_MODULE */ borgbackup-1.1.15/src/borg/algorithms/zstd/lib/common/threading.h0000644000175000017500000001235313771325506024742 0ustar useruser00000000000000/** * Copyright (c) 2016 Tino Reichardt * All rights reserved. 
* * You can contact the author at: * - zstdmt source repository: https://github.com/mcmilk/zstdmt * * This source code is licensed under both the BSD-style license (found in the * LICENSE file in the root directory of this source tree) and the GPLv2 (found * in the COPYING file in the root directory of this source tree). * You may select, at your option, one of the above-listed licenses. */ #ifndef THREADING_H_938743 #define THREADING_H_938743 #include "debug.h" #if defined (__cplusplus) extern "C" { #endif #if defined(ZSTD_MULTITHREAD) && defined(_WIN32) /** * Windows minimalist Pthread Wrapper, based on : * http://www.cse.wustl.edu/~schmidt/win32-cv-1.html */ #ifdef WINVER # undef WINVER #endif #define WINVER 0x0600 #ifdef _WIN32_WINNT # undef _WIN32_WINNT #endif #define _WIN32_WINNT 0x0600 #ifndef WIN32_LEAN_AND_MEAN # define WIN32_LEAN_AND_MEAN #endif #undef ERROR /* reported already defined on VS 2015 (Rich Geldreich) */ #include #undef ERROR #define ERROR(name) ZSTD_ERROR(name) /* mutex */ #define ZSTD_pthread_mutex_t CRITICAL_SECTION #define ZSTD_pthread_mutex_init(a, b) ((void)(b), InitializeCriticalSection((a)), 0) #define ZSTD_pthread_mutex_destroy(a) DeleteCriticalSection((a)) #define ZSTD_pthread_mutex_lock(a) EnterCriticalSection((a)) #define ZSTD_pthread_mutex_unlock(a) LeaveCriticalSection((a)) /* condition variable */ #define ZSTD_pthread_cond_t CONDITION_VARIABLE #define ZSTD_pthread_cond_init(a, b) ((void)(b), InitializeConditionVariable((a)), 0) #define ZSTD_pthread_cond_destroy(a) ((void)(a)) #define ZSTD_pthread_cond_wait(a, b) SleepConditionVariableCS((a), (b), INFINITE) #define ZSTD_pthread_cond_signal(a) WakeConditionVariable((a)) #define ZSTD_pthread_cond_broadcast(a) WakeAllConditionVariable((a)) /* ZSTD_pthread_create() and ZSTD_pthread_join() */ typedef struct { HANDLE handle; void* (*start_routine)(void*); void* arg; } ZSTD_pthread_t; int ZSTD_pthread_create(ZSTD_pthread_t* thread, const void* unused, void* (*start_routine) (void*), void* arg); int ZSTD_pthread_join(ZSTD_pthread_t thread, void** value_ptr); /** * add here more wrappers as required */ #elif defined(ZSTD_MULTITHREAD) /* posix assumed ; need a better detection method */ /* === POSIX Systems === */ # include #if DEBUGLEVEL < 1 #define ZSTD_pthread_mutex_t pthread_mutex_t #define ZSTD_pthread_mutex_init(a, b) pthread_mutex_init((a), (b)) #define ZSTD_pthread_mutex_destroy(a) pthread_mutex_destroy((a)) #define ZSTD_pthread_mutex_lock(a) pthread_mutex_lock((a)) #define ZSTD_pthread_mutex_unlock(a) pthread_mutex_unlock((a)) #define ZSTD_pthread_cond_t pthread_cond_t #define ZSTD_pthread_cond_init(a, b) pthread_cond_init((a), (b)) #define ZSTD_pthread_cond_destroy(a) pthread_cond_destroy((a)) #define ZSTD_pthread_cond_wait(a, b) pthread_cond_wait((a), (b)) #define ZSTD_pthread_cond_signal(a) pthread_cond_signal((a)) #define ZSTD_pthread_cond_broadcast(a) pthread_cond_broadcast((a)) #define ZSTD_pthread_t pthread_t #define ZSTD_pthread_create(a, b, c, d) pthread_create((a), (b), (c), (d)) #define ZSTD_pthread_join(a, b) pthread_join((a),(b)) #else /* DEBUGLEVEL >= 1 */ /* Debug implementation of threading. * In this implementation we use pointers for mutexes and condition variables. * This way, if we forget to init/destroy them the program will crash or ASAN * will report leaks. 
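Whatever backend is selected, usage is expected to pair init and destroy, e.g. (sketch) :

    ZSTD_pthread_mutex_t mutex;
    if (ZSTD_pthread_mutex_init(&mutex, NULL)) return 1;   // every successful init needs a matching destroy
    ZSTD_pthread_mutex_lock(&mutex);
    // ... critical section ...
    ZSTD_pthread_mutex_unlock(&mutex);
    ZSTD_pthread_mutex_destroy(&mutex);
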
*/ #define ZSTD_pthread_mutex_t pthread_mutex_t* int ZSTD_pthread_mutex_init(ZSTD_pthread_mutex_t* mutex, pthread_mutexattr_t const* attr); int ZSTD_pthread_mutex_destroy(ZSTD_pthread_mutex_t* mutex); #define ZSTD_pthread_mutex_lock(a) pthread_mutex_lock(*(a)) #define ZSTD_pthread_mutex_unlock(a) pthread_mutex_unlock(*(a)) #define ZSTD_pthread_cond_t pthread_cond_t* int ZSTD_pthread_cond_init(ZSTD_pthread_cond_t* cond, pthread_condattr_t const* attr); int ZSTD_pthread_cond_destroy(ZSTD_pthread_cond_t* cond); #define ZSTD_pthread_cond_wait(a, b) pthread_cond_wait(*(a), *(b)) #define ZSTD_pthread_cond_signal(a) pthread_cond_signal(*(a)) #define ZSTD_pthread_cond_broadcast(a) pthread_cond_broadcast(*(a)) #define ZSTD_pthread_t pthread_t #define ZSTD_pthread_create(a, b, c, d) pthread_create((a), (b), (c), (d)) #define ZSTD_pthread_join(a, b) pthread_join((a),(b)) #endif #else /* ZSTD_MULTITHREAD not defined */ /* No multithreading support */ typedef int ZSTD_pthread_mutex_t; #define ZSTD_pthread_mutex_init(a, b) ((void)(a), (void)(b), 0) #define ZSTD_pthread_mutex_destroy(a) ((void)(a)) #define ZSTD_pthread_mutex_lock(a) ((void)(a)) #define ZSTD_pthread_mutex_unlock(a) ((void)(a)) typedef int ZSTD_pthread_cond_t; #define ZSTD_pthread_cond_init(a, b) ((void)(a), (void)(b), 0) #define ZSTD_pthread_cond_destroy(a) ((void)(a)) #define ZSTD_pthread_cond_wait(a, b) ((void)(a), (void)(b)) #define ZSTD_pthread_cond_signal(a) ((void)(a)) #define ZSTD_pthread_cond_broadcast(a) ((void)(a)) /* do not use ZSTD_pthread_t */ #endif /* ZSTD_MULTITHREAD */ #if defined (__cplusplus) } #endif #endif /* THREADING_H_938743 */ borgbackup-1.1.15/src/borg/algorithms/zstd/lib/legacy/0000755000175000017500000000000013771325773022602 5ustar useruser00000000000000borgbackup-1.1.15/src/borg/algorithms/zstd/lib/legacy/zstd_v07.h0000644000175000017500000002102213771325506024422 0ustar useruser00000000000000/* * Copyright (c) 2016-2020, Yann Collet, Facebook, Inc. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the * LICENSE file in the root directory of this source tree) and the GPLv2 (found * in the COPYING file in the root directory of this source tree). * You may select, at your option, one of the above-listed licenses. */ #ifndef ZSTDv07_H_235446 #define ZSTDv07_H_235446 #if defined (__cplusplus) extern "C" { #endif /*====== Dependency ======*/ #include /* size_t */ /*====== Export for Windows ======*/ /*! * ZSTDv07_DLL_EXPORT : * Enable exporting of functions when building a Windows DLL */ #if defined(_WIN32) && defined(ZSTDv07_DLL_EXPORT) && (ZSTDv07_DLL_EXPORT==1) # define ZSTDLIBv07_API __declspec(dllexport) #else # define ZSTDLIBv07_API #endif /* ************************************* * Simple API ***************************************/ /*! ZSTDv07_getDecompressedSize() : * @return : decompressed size if known, 0 otherwise. note 1 : if `0`, follow up with ZSTDv07_getFrameParams() to know precise failure cause. note 2 : decompressed size could be wrong or intentionally modified ! always ensure results fit within application's authorized limits */ unsigned long long ZSTDv07_getDecompressedSize(const void* src, size_t srcSize); /*! ZSTDv07_decompress() : `compressedSize` : must be _exact_ size of compressed input, otherwise decompression will fail. `dstCapacity` must be equal or larger than originalSize. 
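A typical call sequence might look like this; `src`/`srcSize` describe one complete compressed frame, `MY_MAX_OUTPUT` stands for an application-defined limit, and malloc/free come from stdlib.h (sketch only) :

    unsigned long long const rSize = ZSTDv07_getDecompressedSize(src, srcSize);
    if (rSize == 0 || rSize > MY_MAX_OUTPUT) return 0;      // size unknown or beyond authorized limits
    {   void* const dst = malloc((size_t)rSize);
        size_t const dSize = ZSTDv07_decompress(dst, (size_t)rSize, src, srcSize);
        if (ZSTDv07_isError(dSize)) { free(dst); return 0; }
        // dSize bytes of regenerated data are now available in dst
    }
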
@return : the number of bytes decompressed into `dst` (<= `dstCapacity`), or an errorCode if it fails (which can be tested using ZSTDv07_isError()) */ ZSTDLIBv07_API size_t ZSTDv07_decompress( void* dst, size_t dstCapacity, const void* src, size_t compressedSize); /** ZSTDv07_findFrameSizeInfoLegacy() : get the source length and decompressed bound of a ZSTD frame compliant with v0.7.x format srcSize : The size of the 'src' buffer, at least as large as the frame pointed to by 'src' cSize (output parameter) : the number of bytes that would be read to decompress this frame or an error code if it fails (which can be tested using ZSTDv01_isError()) dBound (output parameter) : an upper-bound for the decompressed size of the data in the frame or ZSTD_CONTENTSIZE_ERROR if an error occurs note : assumes `cSize` and `dBound` are _not_ NULL. */ void ZSTDv07_findFrameSizeInfoLegacy(const void *src, size_t srcSize, size_t* cSize, unsigned long long* dBound); /*====== Helper functions ======*/ ZSTDLIBv07_API unsigned ZSTDv07_isError(size_t code); /*!< tells if a `size_t` function result is an error code */ ZSTDLIBv07_API const char* ZSTDv07_getErrorName(size_t code); /*!< provides readable string from an error code */ /*-************************************* * Explicit memory management ***************************************/ /** Decompression context */ typedef struct ZSTDv07_DCtx_s ZSTDv07_DCtx; ZSTDLIBv07_API ZSTDv07_DCtx* ZSTDv07_createDCtx(void); ZSTDLIBv07_API size_t ZSTDv07_freeDCtx(ZSTDv07_DCtx* dctx); /*!< @return : errorCode */ /** ZSTDv07_decompressDCtx() : * Same as ZSTDv07_decompress(), requires an allocated ZSTDv07_DCtx (see ZSTDv07_createDCtx()) */ ZSTDLIBv07_API size_t ZSTDv07_decompressDCtx(ZSTDv07_DCtx* ctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize); /*-************************ * Simple dictionary API ***************************/ /*! ZSTDv07_decompress_usingDict() : * Decompression using a pre-defined Dictionary content (see dictBuilder). * Dictionary must be identical to the one used during compression. * Note : This function load the dictionary, resulting in a significant startup time */ ZSTDLIBv07_API size_t ZSTDv07_decompress_usingDict(ZSTDv07_DCtx* dctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize, const void* dict,size_t dictSize); /*-************************** * Advanced Dictionary API ****************************/ /*! ZSTDv07_createDDict() : * Create a digested dictionary, ready to start decompression operation without startup delay. * `dict` can be released after creation */ typedef struct ZSTDv07_DDict_s ZSTDv07_DDict; ZSTDLIBv07_API ZSTDv07_DDict* ZSTDv07_createDDict(const void* dict, size_t dictSize); ZSTDLIBv07_API size_t ZSTDv07_freeDDict(ZSTDv07_DDict* ddict); /*! ZSTDv07_decompress_usingDDict() : * Decompression using a pre-digested Dictionary * Faster startup than ZSTDv07_decompress_usingDict(), recommended when same dictionary is used multiple times. 
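For example (sketch; `dictBuffer`/`dictSize` and the `srcN`/`dstN` buffers are assumed to be provided by the application) :

    ZSTDv07_DDict* const ddict = ZSTDv07_createDDict(dictBuffer, dictSize);   // digest the dictionary once
    ZSTDv07_DCtx*  const dctx  = ZSTDv07_createDCtx();
    size_t const r1 = ZSTDv07_decompress_usingDDict(dctx, dst1, dst1Capacity, src1, src1Size, ddict);
    size_t const r2 = ZSTDv07_decompress_usingDDict(dctx, dst2, dst2Capacity, src2, src2Size, ddict);
    // check r1/r2 with ZSTDv07_isError() before using the outputs
    ZSTDv07_freeDCtx(dctx);
    ZSTDv07_freeDDict(ddict);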
*/ ZSTDLIBv07_API size_t ZSTDv07_decompress_usingDDict(ZSTDv07_DCtx* dctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize, const ZSTDv07_DDict* ddict); typedef struct { unsigned long long frameContentSize; unsigned windowSize; unsigned dictID; unsigned checksumFlag; } ZSTDv07_frameParams; ZSTDLIBv07_API size_t ZSTDv07_getFrameParams(ZSTDv07_frameParams* fparamsPtr, const void* src, size_t srcSize); /**< doesn't consume input */ /* ************************************* * Streaming functions ***************************************/ typedef struct ZBUFFv07_DCtx_s ZBUFFv07_DCtx; ZSTDLIBv07_API ZBUFFv07_DCtx* ZBUFFv07_createDCtx(void); ZSTDLIBv07_API size_t ZBUFFv07_freeDCtx(ZBUFFv07_DCtx* dctx); ZSTDLIBv07_API size_t ZBUFFv07_decompressInit(ZBUFFv07_DCtx* dctx); ZSTDLIBv07_API size_t ZBUFFv07_decompressInitDictionary(ZBUFFv07_DCtx* dctx, const void* dict, size_t dictSize); ZSTDLIBv07_API size_t ZBUFFv07_decompressContinue(ZBUFFv07_DCtx* dctx, void* dst, size_t* dstCapacityPtr, const void* src, size_t* srcSizePtr); /*-*************************************************************************** * Streaming decompression howto * * A ZBUFFv07_DCtx object is required to track streaming operations. * Use ZBUFFv07_createDCtx() and ZBUFFv07_freeDCtx() to create/release resources. * Use ZBUFFv07_decompressInit() to start a new decompression operation, * or ZBUFFv07_decompressInitDictionary() if decompression requires a dictionary. * Note that ZBUFFv07_DCtx objects can be re-init multiple times. * * Use ZBUFFv07_decompressContinue() repetitively to consume your input. * *srcSizePtr and *dstCapacityPtr can be any size. * The function will report how many bytes were read or written by modifying *srcSizePtr and *dstCapacityPtr. * Note that it may not consume the entire input, in which case it's up to the caller to present remaining input again. * The content of `dst` will be overwritten (up to *dstCapacityPtr) at each function call, so save its content if it matters, or change `dst`. * @return : a hint to preferred nb of bytes to use as input for next function call (it's only a hint, to help latency), * or 0 when a frame is completely decoded, * or an error code, which can be tested using ZBUFFv07_isError(). * * Hint : recommended buffer sizes (not compulsory) : ZBUFFv07_recommendedDInSize() and ZBUFFv07_recommendedDOutSize() * output : ZBUFFv07_recommendedDOutSize== 128 KB block size is the internal unit, it ensures it's always possible to write a full block when decoded. * input : ZBUFFv07_recommendedDInSize == 128KB + 3; * just follow indications from ZBUFFv07_decompressContinue() to minimize latency. It should always be <= 128 KB + 3 . * *******************************************************************************/ /* ************************************* * Tool functions ***************************************/ ZSTDLIBv07_API unsigned ZBUFFv07_isError(size_t errorCode); ZSTDLIBv07_API const char* ZBUFFv07_getErrorName(size_t errorCode); /** Functions below provide recommended buffer sizes for Compression or Decompression operations. 
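The streaming loop described above could be arranged roughly like this; `inPtr` (a byte pointer) and `inAvail` track the caller's input, `outBuff`/`outCapacity` its output buffer, ideally sized with the helpers below (sketch, refill of input omitted) :

    ZBUFFv07_DCtx* const zbd = ZBUFFv07_createDCtx();
    ZBUFFv07_decompressInit(zbd);
    {   size_t hint = 1;
        while (hint != 0) {                               // 0 means a frame is completely decoded
            size_t srcUsed = inAvail;                     // in: available input ; out: bytes actually read
            size_t dstUsed = outCapacity;                 // in: output capacity ; out: bytes actually written
            hint = ZBUFFv07_decompressContinue(zbd, outBuff, &dstUsed, inPtr, &srcUsed);
            if (ZBUFFv07_isError(hint)) break;            // report with ZBUFFv07_getErrorName(hint)
            // consume dstUsed bytes from outBuff here, then re-present any unread input
            inPtr += srcUsed; inAvail -= srcUsed;         // refill inPtr/inAvail from the source when exhausted
        }
    }
    ZBUFFv07_freeDCtx(zbd);
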
* These sizes are just hints, they tend to offer better latency */ ZSTDLIBv07_API size_t ZBUFFv07_recommendedDInSize(void); ZSTDLIBv07_API size_t ZBUFFv07_recommendedDOutSize(void); /*-************************************* * Constants ***************************************/ #define ZSTDv07_MAGICNUMBER 0xFD2FB527 /* v0.7 */ #if defined (__cplusplus) } #endif #endif /* ZSTDv07_H_235446 */ borgbackup-1.1.15/src/borg/algorithms/zstd/lib/legacy/zstd_v05.c0000644000175000017500000046154313771325506024433 0ustar useruser00000000000000/* * Copyright (c) 2016-2020, Yann Collet, Facebook, Inc. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the * LICENSE file in the root directory of this source tree) and the GPLv2 (found * in the COPYING file in the root directory of this source tree). * You may select, at your option, one of the above-listed licenses. */ /*- Dependencies -*/ #include "zstd_v05.h" #include "../common/error_private.h" /* ****************************************************************** mem.h low-level memory access routines Copyright (C) 2013-2015, Yann Collet. BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
You can contact the author at : - FSEv05 source repository : https://github.com/Cyan4973/FiniteStateEntropy - Public forum : https://groups.google.com/forum/#!forum/lz4c ****************************************************************** */ #ifndef MEM_H_MODULE #define MEM_H_MODULE #if defined (__cplusplus) extern "C" { #endif /*-**************************************** * Dependencies ******************************************/ #include /* size_t, ptrdiff_t */ #include /* memcpy */ /*-**************************************** * Compiler specifics ******************************************/ #if defined(__GNUC__) # define MEM_STATIC static __attribute__((unused)) #elif defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) # define MEM_STATIC static inline #elif defined(_MSC_VER) # define MEM_STATIC static __inline #else # define MEM_STATIC static /* this version may generate warnings for unused static functions; disable the relevant warning */ #endif /*-************************************************************** * Basic Types *****************************************************************/ #if defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) # include typedef uint8_t BYTE; typedef uint16_t U16; typedef int16_t S16; typedef uint32_t U32; typedef int32_t S32; typedef uint64_t U64; typedef int64_t S64; #else typedef unsigned char BYTE; typedef unsigned short U16; typedef signed short S16; typedef unsigned int U32; typedef signed int S32; typedef unsigned long long U64; typedef signed long long S64; #endif /*-************************************************************** * Memory I/O *****************************************************************/ /* MEM_FORCE_MEMORY_ACCESS : * By default, access to unaligned memory is controlled by `memcpy()`, which is safe and portable. * Unfortunately, on some target/compiler combinations, the generated assembly is sub-optimal. * The below switch allow to select different access method for improved performance. * Method 0 (default) : use `memcpy()`. Safe and portable. * Method 1 : `__packed` statement. It depends on compiler extension (ie, not portable). * This method is safe if your compiler supports it, and *generally* as fast or faster than `memcpy`. * Method 2 : direct access. This method is portable but violate C standard. * It can generate buggy code on targets depending on alignment. * In some circumstances, it's the only known way to get the most performance (ie GCC + ARMv6) * See http://fastcompression.blogspot.fr/2015/08/accessing-unaligned-memory.html for details. 
* Prefer these methods in priority order (0 > 1 > 2) */ #ifndef MEM_FORCE_MEMORY_ACCESS /* can be defined externally, on command line for example */ # if defined(__GNUC__) && ( defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) || defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6Z__) || defined(__ARM_ARCH_6ZK__) || defined(__ARM_ARCH_6T2__) ) # define MEM_FORCE_MEMORY_ACCESS 2 # elif (defined(__INTEL_COMPILER) && !defined(WIN32)) || \ (defined(__GNUC__) && ( defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__) || defined(__ARM_ARCH_7R__) || defined(__ARM_ARCH_7M__) || defined(__ARM_ARCH_7S__) )) # define MEM_FORCE_MEMORY_ACCESS 1 # endif #endif MEM_STATIC unsigned MEM_32bits(void) { return sizeof(void*)==4; } MEM_STATIC unsigned MEM_64bits(void) { return sizeof(void*)==8; } MEM_STATIC unsigned MEM_isLittleEndian(void) { const union { U32 u; BYTE c[4]; } one = { 1 }; /* don't use static : performance detrimental */ return one.c[0]; } #if defined(MEM_FORCE_MEMORY_ACCESS) && (MEM_FORCE_MEMORY_ACCESS==2) /* violates C standard, by lying on structure alignment. Only use if no other choice to achieve best performance on target platform */ MEM_STATIC U16 MEM_read16(const void* memPtr) { return *(const U16*) memPtr; } MEM_STATIC U32 MEM_read32(const void* memPtr) { return *(const U32*) memPtr; } MEM_STATIC U64 MEM_read64(const void* memPtr) { return *(const U64*) memPtr; } MEM_STATIC void MEM_write16(void* memPtr, U16 value) { *(U16*)memPtr = value; } MEM_STATIC void MEM_write32(void* memPtr, U32 value) { *(U32*)memPtr = value; } MEM_STATIC void MEM_write64(void* memPtr, U64 value) { *(U64*)memPtr = value; } #elif defined(MEM_FORCE_MEMORY_ACCESS) && (MEM_FORCE_MEMORY_ACCESS==1) /* __pack instructions are safer, but compiler specific, hence potentially problematic for some compilers */ /* currently only defined for gcc and icc */ typedef union { U16 u16; U32 u32; U64 u64; size_t st; } __attribute__((packed)) unalign; MEM_STATIC U16 MEM_read16(const void* ptr) { return ((const unalign*)ptr)->u16; } MEM_STATIC U32 MEM_read32(const void* ptr) { return ((const unalign*)ptr)->u32; } MEM_STATIC U64 MEM_read64(const void* ptr) { return ((const unalign*)ptr)->u64; } MEM_STATIC void MEM_write16(void* memPtr, U16 value) { ((unalign*)memPtr)->u16 = value; } MEM_STATIC void MEM_write32(void* memPtr, U32 value) { ((unalign*)memPtr)->u32 = value; } MEM_STATIC void MEM_write64(void* memPtr, U64 value) { ((unalign*)memPtr)->u64 = value; } #else /* default method, safe and standard. 
can sometimes prove slower */ MEM_STATIC U16 MEM_read16(const void* memPtr) { U16 val; memcpy(&val, memPtr, sizeof(val)); return val; } MEM_STATIC U32 MEM_read32(const void* memPtr) { U32 val; memcpy(&val, memPtr, sizeof(val)); return val; } MEM_STATIC U64 MEM_read64(const void* memPtr) { U64 val; memcpy(&val, memPtr, sizeof(val)); return val; } MEM_STATIC void MEM_write16(void* memPtr, U16 value) { memcpy(memPtr, &value, sizeof(value)); } MEM_STATIC void MEM_write32(void* memPtr, U32 value) { memcpy(memPtr, &value, sizeof(value)); } MEM_STATIC void MEM_write64(void* memPtr, U64 value) { memcpy(memPtr, &value, sizeof(value)); } #endif /* MEM_FORCE_MEMORY_ACCESS */ MEM_STATIC U16 MEM_readLE16(const void* memPtr) { if (MEM_isLittleEndian()) return MEM_read16(memPtr); else { const BYTE* p = (const BYTE*)memPtr; return (U16)(p[0] + (p[1]<<8)); } } MEM_STATIC void MEM_writeLE16(void* memPtr, U16 val) { if (MEM_isLittleEndian()) { MEM_write16(memPtr, val); } else { BYTE* p = (BYTE*)memPtr; p[0] = (BYTE)val; p[1] = (BYTE)(val>>8); } } MEM_STATIC U32 MEM_readLE32(const void* memPtr) { if (MEM_isLittleEndian()) return MEM_read32(memPtr); else { const BYTE* p = (const BYTE*)memPtr; return (U32)((U32)p[0] + ((U32)p[1]<<8) + ((U32)p[2]<<16) + ((U32)p[3]<<24)); } } MEM_STATIC U64 MEM_readLE64(const void* memPtr) { if (MEM_isLittleEndian()) return MEM_read64(memPtr); else { const BYTE* p = (const BYTE*)memPtr; return (U64)((U64)p[0] + ((U64)p[1]<<8) + ((U64)p[2]<<16) + ((U64)p[3]<<24) + ((U64)p[4]<<32) + ((U64)p[5]<<40) + ((U64)p[6]<<48) + ((U64)p[7]<<56)); } } MEM_STATIC size_t MEM_readLEST(const void* memPtr) { if (MEM_32bits()) return (size_t)MEM_readLE32(memPtr); else return (size_t)MEM_readLE64(memPtr); } #if defined (__cplusplus) } #endif #endif /* MEM_H_MODULE */ /* zstd - standard compression library Header File for static linking only Copyright (C) 2014-2016, Yann Collet. BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. You can contact the author at : - zstd homepage : http://www.zstd.net */ #ifndef ZSTD_STATIC_H #define ZSTD_STATIC_H /* The prototypes defined within this file are considered experimental. * They should not be used in the context DLL as they may change in the future. 
* Prefer static linking if you need them, to control breaking version changes issues. */ #if defined (__cplusplus) extern "C" { #endif /*-************************************* * Types ***************************************/ #define ZSTDv05_WINDOWLOG_ABSOLUTEMIN 11 /*-************************************* * Advanced functions ***************************************/ /*- Advanced Decompression functions -*/ /*! ZSTDv05_decompress_usingPreparedDCtx() : * Same as ZSTDv05_decompress_usingDict, but using a reference context `preparedDCtx`, where dictionary has been loaded. * It avoids reloading the dictionary each time. * `preparedDCtx` must have been properly initialized using ZSTDv05_decompressBegin_usingDict(). * Requires 2 contexts : 1 for reference, which will not be modified, and 1 to run the decompression operation */ size_t ZSTDv05_decompress_usingPreparedDCtx( ZSTDv05_DCtx* dctx, const ZSTDv05_DCtx* preparedDCtx, void* dst, size_t dstCapacity, const void* src, size_t srcSize); /* ************************************** * Streaming functions (direct mode) ****************************************/ size_t ZSTDv05_decompressBegin(ZSTDv05_DCtx* dctx); /* Streaming decompression, direct mode (bufferless) A ZSTDv05_DCtx object is required to track streaming operations. Use ZSTDv05_createDCtx() / ZSTDv05_freeDCtx() to manage it. A ZSTDv05_DCtx object can be re-used multiple times. First typical operation is to retrieve frame parameters, using ZSTDv05_getFrameParams(). This operation is independent, and just needs enough input data to properly decode the frame header. Objective is to retrieve *params.windowlog, to know minimum amount of memory required during decoding. Result : 0 when successful, it means the ZSTDv05_parameters structure has been filled. >0 : means there is not enough data into src. Provides the expected size to successfully decode header. errorCode, which can be tested using ZSTDv05_isError() Start decompression, with ZSTDv05_decompressBegin() or ZSTDv05_decompressBegin_usingDict() Alternatively, you can copy a prepared context, using ZSTDv05_copyDCtx() Then use ZSTDv05_nextSrcSizeToDecompress() and ZSTDv05_decompressContinue() alternatively. ZSTDv05_nextSrcSizeToDecompress() tells how much bytes to provide as 'srcSize' to ZSTDv05_decompressContinue(). ZSTDv05_decompressContinue() requires this exact amount of bytes, or it will fail. ZSTDv05_decompressContinue() needs previous data blocks during decompression, up to (1 << windowlog). They should preferably be located contiguously, prior to current block. Alternatively, a round buffer is also possible. @result of ZSTDv05_decompressContinue() is the number of bytes regenerated within 'dst'. It can be zero, which is not an error; it just means ZSTDv05_decompressContinue() has decoded some header. A frame is fully decoded when ZSTDv05_nextSrcSizeToDecompress() returns zero. Context can then be reset to start a new decompression. */ /* ************************************** * Block functions ****************************************/ /*! Block functions produce and decode raw zstd blocks, without frame metadata. User will have to take in charge required information to regenerate data, such as block sizes. 
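
    For the bufferless streaming mode documented above, a typical driver loop looks like the
    following illustrative sketch (the prototypes of ZSTDv05_nextSrcSizeToDecompress(),
    ZSTDv05_decompressContinue() and ZSTDv05_isError() are assumed from the v0.5 API and are not
    reproduced in this excerpt; readExactly() stands for a hypothetical caller-side input helper) :

        ZSTDv05_decompressBegin(dctx);
        while ((toRead = ZSTDv05_nextSrcSizeToDecompress(dctx)) != 0) {
            readExactly(in, buf, toRead);                   // feed exactly the requested byte count
            regenerated = ZSTDv05_decompressContinue(dctx, op, (size_t)(oend - op), buf, toRead);
            if (ZSTDv05_isError(regenerated)) break;        // error codes are testable as usual
            op += regenerated;                              // may be 0 while headers are being decoded
        }
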
A few rules to respect : - Uncompressed block size must be <= 128 KB - Compressing or decompressing requires a context structure + Use ZSTDv05_createCCtx() and ZSTDv05_createDCtx() - It is necessary to init context before starting + compression : ZSTDv05_compressBegin() + decompression : ZSTDv05_decompressBegin() + variants _usingDict() are also allowed + copyCCtx() and copyDCtx() work too - When a block is considered not compressible enough, ZSTDv05_compressBlock() result will be zero. In which case, nothing is produced into `dst`. + User must test for such outcome and deal directly with uncompressed data + ZSTDv05_decompressBlock() doesn't accept uncompressed data as input !! */ size_t ZSTDv05_decompressBlock(ZSTDv05_DCtx* dctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize); #if defined (__cplusplus) } #endif #endif /* ZSTDv05_STATIC_H */ /* zstd_internal - common functions to include Header File for include Copyright (C) 2014-2016, Yann Collet. BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. You can contact the author at : - zstd source repository : https://github.com/Cyan4973/zstd */ #ifndef ZSTD_CCOMMON_H_MODULE #define ZSTD_CCOMMON_H_MODULE /*-************************************* * Common macros ***************************************/ #define MIN(a,b) ((a)<(b) ? (a) : (b)) #define MAX(a,b) ((a)>(b) ? 
(a) : (b)) /*-************************************* * Common constants ***************************************/ #define ZSTDv05_DICT_MAGIC 0xEC30A435 #define KB *(1 <<10) #define MB *(1 <<20) #define GB *(1U<<30) #define BLOCKSIZE (128 KB) /* define, for static allocation */ static const size_t ZSTDv05_blockHeaderSize = 3; static const size_t ZSTDv05_frameHeaderSize_min = 5; #define ZSTDv05_frameHeaderSize_max 5 /* define, for static allocation */ #define BITv057 128 #define BITv056 64 #define BITv055 32 #define BITv054 16 #define BITv051 2 #define BITv050 1 #define IS_HUFv05 0 #define IS_PCH 1 #define IS_RAW 2 #define IS_RLE 3 #define MINMATCH 4 #define REPCODE_STARTVALUE 1 #define Litbits 8 #define MLbits 7 #define LLbits 6 #define Offbits 5 #define MaxLit ((1< /* size_t, ptrdiff_t */ /*-**************************************** * FSEv05 simple functions ******************************************/ size_t FSEv05_decompress(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize); /*! FSEv05_decompress(): Decompress FSEv05 data from buffer 'cSrc', of size 'cSrcSize', into already allocated destination buffer 'dst', of size 'maxDstSize'. return : size of regenerated data (<= maxDstSize) or an error code, which can be tested using FSEv05_isError() ** Important ** : FSEv05_decompress() doesn't decompress non-compressible nor RLE data !!! Why ? : making this distinction requires a header. Header management is intentionally delegated to the user layer, which can better manage special cases. */ /* ***************************************** * Tool functions ******************************************/ /* Error Management */ unsigned FSEv05_isError(size_t code); /* tells if a return value is an error code */ const char* FSEv05_getErrorName(size_t code); /* provides error code string (useful for debugging) */ /* ***************************************** * FSEv05 detailed API ******************************************/ /* *** DECOMPRESSION *** */ /*! FSEv05_readNCount(): Read compactly saved 'normalizedCounter' from 'rBuffer'. return : size read from 'rBuffer' or an errorCode, which can be tested using FSEv05_isError() maxSymbolValuePtr[0] and tableLogPtr[0] will also be updated with their respective values */ size_t FSEv05_readNCount (short* normalizedCounter, unsigned* maxSymbolValuePtr, unsigned* tableLogPtr, const void* rBuffer, size_t rBuffSize); /*! Constructor and Destructor of type FSEv05_DTable Note that its size depends on 'tableLog' */ typedef unsigned FSEv05_DTable; /* don't allocate that. It's just a way to be more restrictive than void* */ FSEv05_DTable* FSEv05_createDTable(unsigned tableLog); void FSEv05_freeDTable(FSEv05_DTable* dt); /*! FSEv05_buildDTable(): Builds 'dt', which must be already allocated, using FSEv05_createDTable() @return : 0, or an errorCode, which can be tested using FSEv05_isError() */ size_t FSEv05_buildDTable (FSEv05_DTable* dt, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog); /*! FSEv05_decompress_usingDTable(): Decompress compressed source @cSrc of size @cSrcSize using `dt` into `dst` which must be already allocated. 
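    Typical decoding sequence with this detailed API, mirroring what FSEv05_decompress() does
    internally (illustrative sketch based only on the declarations above; error checks via
    FSEv05_isError() are omitted for brevity, and the 256-entry buffer simply assumes a byte
    alphabet) :

        short    norm[256];
        unsigned maxSymbol = 255, tableLog;
        size_t   hSize = FSEv05_readNCount(norm, &maxSymbol, &tableLog, cSrc, cSrcSize);
        FSEv05_DTable* dt = FSEv05_createDTable(tableLog);
        FSEv05_buildDTable(dt, norm, maxSymbol, tableLog);
        size_t   dSize = FSEv05_decompress_usingDTable(dst, dstCapacity,
                                                       (const char*)cSrc + hSize, cSrcSize - hSize, dt);
        FSEv05_freeDTable(dt);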
@return : size of regenerated data (necessarily <= @dstCapacity) or an errorCode, which can be tested using FSEv05_isError() */ size_t FSEv05_decompress_usingDTable(void* dst, size_t dstCapacity, const void* cSrc, size_t cSrcSize, const FSEv05_DTable* dt); #if defined (__cplusplus) } #endif #endif /* FSEv05_H */ /* ****************************************************************** bitstream Part of FSEv05 library header file (to include) Copyright (C) 2013-2016, Yann Collet. BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. You can contact the author at : - Source repository : https://github.com/Cyan4973/FiniteStateEntropy ****************************************************************** */ #ifndef BITv05STREAM_H_MODULE #define BITv05STREAM_H_MODULE #if defined (__cplusplus) extern "C" { #endif /* * This API consists of small unitary functions, which highly benefit from being inlined. * Since link-time-optimization is not available for all compilers, * these functions are defined into a .h to be included. */ /*-******************************************** * bitStream decoding API (read backward) **********************************************/ typedef struct { size_t bitContainer; unsigned bitsConsumed; const char* ptr; const char* start; } BITv05_DStream_t; typedef enum { BITv05_DStream_unfinished = 0, BITv05_DStream_endOfBuffer = 1, BITv05_DStream_completed = 2, BITv05_DStream_overflow = 3 } BITv05_DStream_status; /* result of BITv05_reloadDStream() */ /* 1,2,4,8 would be better for bitmap combinations, but slows down performance a bit ... 
:( */ MEM_STATIC size_t BITv05_initDStream(BITv05_DStream_t* bitD, const void* srcBuffer, size_t srcSize); MEM_STATIC size_t BITv05_readBits(BITv05_DStream_t* bitD, unsigned nbBits); MEM_STATIC BITv05_DStream_status BITv05_reloadDStream(BITv05_DStream_t* bitD); MEM_STATIC unsigned BITv05_endOfDStream(const BITv05_DStream_t* bitD); /*-**************************************** * unsafe API ******************************************/ MEM_STATIC size_t BITv05_readBitsFast(BITv05_DStream_t* bitD, unsigned nbBits); /* faster, but works only if nbBits >= 1 */ /*-************************************************************** * Helper functions ****************************************************************/ MEM_STATIC unsigned BITv05_highbit32 (U32 val) { # if defined(_MSC_VER) /* Visual */ unsigned long r=0; _BitScanReverse ( &r, val ); return (unsigned) r; # elif defined(__GNUC__) && (__GNUC__ >= 3) /* Use GCC Intrinsic */ return __builtin_clz (val) ^ 31; # else /* Software version */ static const unsigned DeBruijnClz[32] = { 0, 9, 1, 10, 13, 21, 2, 29, 11, 14, 16, 18, 22, 25, 3, 30, 8, 12, 20, 28, 15, 17, 24, 7, 19, 27, 23, 6, 26, 5, 4, 31 }; U32 v = val; unsigned r; v |= v >> 1; v |= v >> 2; v |= v >> 4; v |= v >> 8; v |= v >> 16; r = DeBruijnClz[ (U32) (v * 0x07C4ACDDU) >> 27]; return r; # endif } /*-******************************************************** * bitStream decoding **********************************************************/ /*!BITv05_initDStream * Initialize a BITv05_DStream_t. * @bitD : a pointer to an already allocated BITv05_DStream_t structure * @srcBuffer must point at the beginning of a bitStream * @srcSize must be the exact size of the bitStream * @result : size of stream (== srcSize) or an errorCode if a problem is detected */ MEM_STATIC size_t BITv05_initDStream(BITv05_DStream_t* bitD, const void* srcBuffer, size_t srcSize) { if (srcSize < 1) { memset(bitD, 0, sizeof(*bitD)); return ERROR(srcSize_wrong); } if (srcSize >= sizeof(size_t)) { /* normal case */ U32 contain32; bitD->start = (const char*)srcBuffer; bitD->ptr = (const char*)srcBuffer + srcSize - sizeof(size_t); bitD->bitContainer = MEM_readLEST(bitD->ptr); contain32 = ((const BYTE*)srcBuffer)[srcSize-1]; if (contain32 == 0) return ERROR(GENERIC); /* endMark not present */ bitD->bitsConsumed = 8 - BITv05_highbit32(contain32); } else { U32 contain32; bitD->start = (const char*)srcBuffer; bitD->ptr = bitD->start; bitD->bitContainer = *(const BYTE*)(bitD->start); switch(srcSize) { case 7: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[6]) << (sizeof(size_t)*8 - 16);/* fall-through */ case 6: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[5]) << (sizeof(size_t)*8 - 24);/* fall-through */ case 5: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[4]) << (sizeof(size_t)*8 - 32);/* fall-through */ case 4: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[3]) << 24; /* fall-through */ case 3: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[2]) << 16; /* fall-through */ case 2: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[1]) << 8; /* fall-through */ default: break; } contain32 = ((const BYTE*)srcBuffer)[srcSize-1]; if (contain32 == 0) return ERROR(GENERIC); /* endMark not present */ bitD->bitsConsumed = 8 - BITv05_highbit32(contain32); bitD->bitsConsumed += (U32)(sizeof(size_t) - srcSize)*8; } return srcSize; } MEM_STATIC size_t BITv05_lookBits(BITv05_DStream_t* bitD, U32 nbBits) { const U32 bitMask = sizeof(bitD->bitContainer)*8 - 1; return 
((bitD->bitContainer << (bitD->bitsConsumed & bitMask)) >> 1) >> ((bitMask-nbBits) & bitMask); } /*! BITv05_lookBitsFast : * unsafe version; only works only if nbBits >= 1 */ MEM_STATIC size_t BITv05_lookBitsFast(BITv05_DStream_t* bitD, U32 nbBits) { const U32 bitMask = sizeof(bitD->bitContainer)*8 - 1; return (bitD->bitContainer << (bitD->bitsConsumed & bitMask)) >> (((bitMask+1)-nbBits) & bitMask); } MEM_STATIC void BITv05_skipBits(BITv05_DStream_t* bitD, U32 nbBits) { bitD->bitsConsumed += nbBits; } MEM_STATIC size_t BITv05_readBits(BITv05_DStream_t* bitD, unsigned nbBits) { size_t value = BITv05_lookBits(bitD, nbBits); BITv05_skipBits(bitD, nbBits); return value; } /*!BITv05_readBitsFast : * unsafe version; only works only if nbBits >= 1 */ MEM_STATIC size_t BITv05_readBitsFast(BITv05_DStream_t* bitD, unsigned nbBits) { size_t value = BITv05_lookBitsFast(bitD, nbBits); BITv05_skipBits(bitD, nbBits); return value; } MEM_STATIC BITv05_DStream_status BITv05_reloadDStream(BITv05_DStream_t* bitD) { if (bitD->bitsConsumed > (sizeof(bitD->bitContainer)*8)) /* should never happen */ return BITv05_DStream_overflow; if (bitD->ptr >= bitD->start + sizeof(bitD->bitContainer)) { bitD->ptr -= bitD->bitsConsumed >> 3; bitD->bitsConsumed &= 7; bitD->bitContainer = MEM_readLEST(bitD->ptr); return BITv05_DStream_unfinished; } if (bitD->ptr == bitD->start) { if (bitD->bitsConsumed < sizeof(bitD->bitContainer)*8) return BITv05_DStream_endOfBuffer; return BITv05_DStream_completed; } { U32 nbBytes = bitD->bitsConsumed >> 3; BITv05_DStream_status result = BITv05_DStream_unfinished; if (bitD->ptr - nbBytes < bitD->start) { nbBytes = (U32)(bitD->ptr - bitD->start); /* ptr > start */ result = BITv05_DStream_endOfBuffer; } bitD->ptr -= nbBytes; bitD->bitsConsumed -= nbBytes*8; bitD->bitContainer = MEM_readLEST(bitD->ptr); /* reminder : srcSize > sizeof(bitD) */ return result; } } /*! BITv05_endOfDStream * @return Tells if DStream has reached its exact end */ MEM_STATIC unsigned BITv05_endOfDStream(const BITv05_DStream_t* DStream) { return ((DStream->ptr == DStream->start) && (DStream->bitsConsumed == sizeof(DStream->bitContainer)*8)); } #if defined (__cplusplus) } #endif #endif /* BITv05STREAM_H_MODULE */ /* ****************************************************************** FSEv05 : Finite State Entropy coder header file for static linking (only) Copyright (C) 2013-2015, Yann Collet BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. You can contact the author at : - Source repository : https://github.com/Cyan4973/FiniteStateEntropy - Public forum : https://groups.google.com/forum/#!forum/lz4c ****************************************************************** */ #ifndef FSEv05_STATIC_H #define FSEv05_STATIC_H #if defined (__cplusplus) extern "C" { #endif /* ***************************************** * Static allocation *******************************************/ /* It is possible to statically allocate FSEv05 CTable/DTable as a table of unsigned using below macros */ #define FSEv05_DTABLE_SIZE_U32(maxTableLog) (1 + (1<= 1 (otherwise, result will be corrupted) */ /* ***************************************** * Implementation of inlined functions *******************************************/ /* decompression */ typedef struct { U16 tableLog; U16 fastMode; } FSEv05_DTableHeader; /* sizeof U32 */ typedef struct { unsigned short newState; unsigned char symbol; unsigned char nbBits; } FSEv05_decode_t; /* size == U32 */ MEM_STATIC void FSEv05_initDState(FSEv05_DState_t* DStatePtr, BITv05_DStream_t* bitD, const FSEv05_DTable* dt) { const void* ptr = dt; const FSEv05_DTableHeader* const DTableH = (const FSEv05_DTableHeader*)ptr; DStatePtr->state = BITv05_readBits(bitD, DTableH->tableLog); BITv05_reloadDStream(bitD); DStatePtr->table = dt + 1; } MEM_STATIC BYTE FSEv05_peakSymbol(FSEv05_DState_t* DStatePtr) { const FSEv05_decode_t DInfo = ((const FSEv05_decode_t*)(DStatePtr->table))[DStatePtr->state]; return DInfo.symbol; } MEM_STATIC BYTE FSEv05_decodeSymbol(FSEv05_DState_t* DStatePtr, BITv05_DStream_t* bitD) { const FSEv05_decode_t DInfo = ((const FSEv05_decode_t*)(DStatePtr->table))[DStatePtr->state]; const U32 nbBits = DInfo.nbBits; BYTE symbol = DInfo.symbol; size_t lowBits = BITv05_readBits(bitD, nbBits); DStatePtr->state = DInfo.newState + lowBits; return symbol; } MEM_STATIC BYTE FSEv05_decodeSymbolFast(FSEv05_DState_t* DStatePtr, BITv05_DStream_t* bitD) { const FSEv05_decode_t DInfo = ((const FSEv05_decode_t*)(DStatePtr->table))[DStatePtr->state]; const U32 nbBits = DInfo.nbBits; BYTE symbol = DInfo.symbol; size_t lowBits = BITv05_readBitsFast(bitD, nbBits); DStatePtr->state = DInfo.newState + lowBits; return symbol; } MEM_STATIC unsigned FSEv05_endOfDState(const FSEv05_DState_t* DStatePtr) { return DStatePtr->state == 0; } #if defined (__cplusplus) } #endif #endif /* FSEv05_STATIC_H */ /* ****************************************************************** FSEv05 : Finite State Entropy coder Copyright (C) 2013-2015, Yann Collet. BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 
* Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. You can contact the author at : - FSEv05 source repository : https://github.com/Cyan4973/FiniteStateEntropy - Public forum : https://groups.google.com/forum/#!forum/lz4c ****************************************************************** */ #ifndef FSEv05_COMMONDEFS_ONLY /* ************************************************************** * Tuning parameters ****************************************************************/ /*!MEMORY_USAGE : * Memory usage formula : N->2^N Bytes (examples : 10 -> 1KB; 12 -> 4KB ; 16 -> 64KB; 20 -> 1MB; etc.) * Increasing memory usage improves compression ratio * Reduced memory usage can improve speed, due to cache effect * Recommended max value is 14, for 16KB, which nicely fits into Intel x86 L1 cache */ #define FSEv05_MAX_MEMORY_USAGE 14 #define FSEv05_DEFAULT_MEMORY_USAGE 13 /*!FSEv05_MAX_SYMBOL_VALUE : * Maximum symbol value authorized. 
* Required for proper stack allocation */ #define FSEv05_MAX_SYMBOL_VALUE 255 /* ************************************************************** * template functions type & suffix ****************************************************************/ #define FSEv05_FUNCTION_TYPE BYTE #define FSEv05_FUNCTION_EXTENSION #define FSEv05_DECODE_TYPE FSEv05_decode_t #endif /* !FSEv05_COMMONDEFS_ONLY */ /* ************************************************************** * Compiler specifics ****************************************************************/ #ifdef _MSC_VER /* Visual Studio */ # define FORCE_INLINE static __forceinline # include /* For Visual 2005 */ # pragma warning(disable : 4127) /* disable: C4127: conditional expression is constant */ # pragma warning(disable : 4214) /* disable: C4214: non-int bitfields */ #else # if defined (__cplusplus) || defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L /* C99 */ # ifdef __GNUC__ # define FORCE_INLINE static inline __attribute__((always_inline)) # else # define FORCE_INLINE static inline # endif # else # define FORCE_INLINE static # endif /* __STDC_VERSION__ */ #endif /* ************************************************************** * Includes ****************************************************************/ #include /* malloc, free, qsort */ #include /* memcpy, memset */ #include /* printf (debug) */ /* *************************************************************** * Constants *****************************************************************/ #define FSEv05_MAX_TABLELOG (FSEv05_MAX_MEMORY_USAGE-2) #define FSEv05_MAX_TABLESIZE (1U< FSEv05_TABLELOG_ABSOLUTE_MAX #error "FSEv05_MAX_TABLELOG > FSEv05_TABLELOG_ABSOLUTE_MAX is not supported" #endif /* ************************************************************** * Error Management ****************************************************************/ #define FSEv05_STATIC_ASSERT(c) { enum { FSEv05_static_assert = 1/(int)(!!(c)) }; } /* use only *after* variable declarations */ /* ************************************************************** * Complex types ****************************************************************/ typedef unsigned DTable_max_t[FSEv05_DTABLE_SIZE_U32(FSEv05_MAX_TABLELOG)]; /* ************************************************************** * Templates ****************************************************************/ /* designed to be included for type-specific functions (template emulation in C) Objective is to write these functions only once, for improved maintenance */ /* safety checks */ #ifndef FSEv05_FUNCTION_EXTENSION # error "FSEv05_FUNCTION_EXTENSION must be defined" #endif #ifndef FSEv05_FUNCTION_TYPE # error "FSEv05_FUNCTION_TYPE must be defined" #endif /* Function names */ #define FSEv05_CAT(X,Y) X##Y #define FSEv05_FUNCTION_NAME(X,Y) FSEv05_CAT(X,Y) #define FSEv05_TYPE_NAME(X,Y) FSEv05_CAT(X,Y) /* Function templates */ static U32 FSEv05_tableStep(U32 tableSize) { return (tableSize>>1) + (tableSize>>3) + 3; } FSEv05_DTable* FSEv05_createDTable (unsigned tableLog) { if (tableLog > FSEv05_TABLELOG_ABSOLUTE_MAX) tableLog = FSEv05_TABLELOG_ABSOLUTE_MAX; return (FSEv05_DTable*)malloc( FSEv05_DTABLE_SIZE_U32(tableLog) * sizeof (U32) ); } void FSEv05_freeDTable (FSEv05_DTable* dt) { free(dt); } size_t FSEv05_buildDTable(FSEv05_DTable* dt, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog) { FSEv05_DTableHeader DTableH; void* const tdPtr = dt+1; /* because dt is unsigned, 32-bits aligned on 32-bits */ FSEv05_DECODE_TYPE* const tableDecode = 
(FSEv05_DECODE_TYPE*) (tdPtr); const U32 tableSize = 1 << tableLog; const U32 tableMask = tableSize-1; const U32 step = FSEv05_tableStep(tableSize); U16 symbolNext[FSEv05_MAX_SYMBOL_VALUE+1]; U32 position = 0; U32 highThreshold = tableSize-1; const S16 largeLimit= (S16)(1 << (tableLog-1)); U32 noLarge = 1; U32 s; /* Sanity Checks */ if (maxSymbolValue > FSEv05_MAX_SYMBOL_VALUE) return ERROR(maxSymbolValue_tooLarge); if (tableLog > FSEv05_MAX_TABLELOG) return ERROR(tableLog_tooLarge); /* Init, lay down lowprob symbols */ memset(tableDecode, 0, sizeof(FSEv05_FUNCTION_TYPE) * (maxSymbolValue+1) ); /* useless init, but keep static analyzer happy, and we don't need to performance optimize legacy decoders */ DTableH.tableLog = (U16)tableLog; for (s=0; s<=maxSymbolValue; s++) { if (normalizedCounter[s]==-1) { tableDecode[highThreshold--].symbol = (FSEv05_FUNCTION_TYPE)s; symbolNext[s] = 1; } else { if (normalizedCounter[s] >= largeLimit) noLarge=0; symbolNext[s] = normalizedCounter[s]; } } /* Spread symbols */ for (s=0; s<=maxSymbolValue; s++) { int i; for (i=0; i highThreshold) position = (position + step) & tableMask; /* lowprob area */ } } if (position!=0) return ERROR(GENERIC); /* position must reach all cells once, otherwise normalizedCounter is incorrect */ /* Build Decoding table */ { U32 i; for (i=0; i FSEv05_TABLELOG_ABSOLUTE_MAX) return ERROR(tableLog_tooLarge); bitStream >>= 4; bitCount = 4; *tableLogPtr = nbBits; remaining = (1<1) && (charnum<=*maxSVPtr)) { if (previous0) { unsigned n0 = charnum; while ((bitStream & 0xFFFF) == 0xFFFF) { n0+=24; if (ip < iend-5) { ip+=2; bitStream = MEM_readLE32(ip) >> bitCount; } else { bitStream >>= 16; bitCount+=16; } } while ((bitStream & 3) == 3) { n0+=3; bitStream>>=2; bitCount+=2; } n0 += bitStream & 3; bitCount += 2; if (n0 > *maxSVPtr) return ERROR(maxSymbolValue_tooSmall); while (charnum < n0) normalizedCounter[charnum++] = 0; if ((ip <= iend-7) || (ip + (bitCount>>3) <= iend-4)) { ip += bitCount>>3; bitCount &= 7; bitStream = MEM_readLE32(ip) >> bitCount; } else bitStream >>= 2; } { const short max = (short)((2*threshold-1)-remaining); short count; if ((bitStream & (threshold-1)) < (U32)max) { count = (short)(bitStream & (threshold-1)); bitCount += nbBits-1; } else { count = (short)(bitStream & (2*threshold-1)); if (count >= threshold) count -= max; bitCount += nbBits; } count--; /* extra accuracy */ remaining -= FSEv05_abs(count); normalizedCounter[charnum++] = count; previous0 = !count; while (remaining < threshold) { nbBits--; threshold >>= 1; } if ((ip <= iend-7) || (ip + (bitCount>>3) <= iend-4)) { ip += bitCount>>3; bitCount &= 7; } else { bitCount -= (int)(8 * (iend - 4 - ip)); ip = iend - 4; } bitStream = MEM_readLE32(ip) >> (bitCount & 31); } } if (remaining != 1) return ERROR(GENERIC); *maxSVPtr = charnum-1; ip += (bitCount+7)>>3; if ((size_t)(ip-istart) > hbSize) return ERROR(srcSize_wrong); return ip-istart; } /*-******************************************************* * Decompression (Byte symbols) *********************************************************/ size_t FSEv05_buildDTable_rle (FSEv05_DTable* dt, BYTE symbolValue) { void* ptr = dt; FSEv05_DTableHeader* const DTableH = (FSEv05_DTableHeader*)ptr; void* dPtr = dt + 1; FSEv05_decode_t* const cell = (FSEv05_decode_t*)dPtr; DTableH->tableLog = 0; DTableH->fastMode = 0; cell->newState = 0; cell->symbol = symbolValue; cell->nbBits = 0; return 0; } size_t FSEv05_buildDTable_raw (FSEv05_DTable* dt, unsigned nbBits) { void* ptr = dt; FSEv05_DTableHeader* const DTableH = 
(FSEv05_DTableHeader*)ptr; void* dPtr = dt + 1; FSEv05_decode_t* const dinfo = (FSEv05_decode_t*)dPtr; const unsigned tableSize = 1 << nbBits; const unsigned tableMask = tableSize - 1; const unsigned maxSymbolValue = tableMask; unsigned s; /* Sanity checks */ if (nbBits < 1) return ERROR(GENERIC); /* min size */ /* Build Decoding Table */ DTableH->tableLog = (U16)nbBits; DTableH->fastMode = 1; for (s=0; s<=maxSymbolValue; s++) { dinfo[s].newState = 0; dinfo[s].symbol = (BYTE)s; dinfo[s].nbBits = (BYTE)nbBits; } return 0; } FORCE_INLINE size_t FSEv05_decompress_usingDTable_generic( void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const FSEv05_DTable* dt, const unsigned fast) { BYTE* const ostart = (BYTE*) dst; BYTE* op = ostart; BYTE* const omax = op + maxDstSize; BYTE* const olimit = omax-3; BITv05_DStream_t bitD; FSEv05_DState_t state1; FSEv05_DState_t state2; size_t errorCode; /* Init */ errorCode = BITv05_initDStream(&bitD, cSrc, cSrcSize); /* replaced last arg by maxCompressed Size */ if (FSEv05_isError(errorCode)) return errorCode; FSEv05_initDState(&state1, &bitD, dt); FSEv05_initDState(&state2, &bitD, dt); #define FSEv05_GETSYMBOL(statePtr) fast ? FSEv05_decodeSymbolFast(statePtr, &bitD) : FSEv05_decodeSymbol(statePtr, &bitD) /* 4 symbols per loop */ for ( ; (BITv05_reloadDStream(&bitD)==BITv05_DStream_unfinished) && (op sizeof(bitD.bitContainer)*8) /* This test must be static */ BITv05_reloadDStream(&bitD); op[1] = FSEv05_GETSYMBOL(&state2); if (FSEv05_MAX_TABLELOG*4+7 > sizeof(bitD.bitContainer)*8) /* This test must be static */ { if (BITv05_reloadDStream(&bitD) > BITv05_DStream_unfinished) { op+=2; break; } } op[2] = FSEv05_GETSYMBOL(&state1); if (FSEv05_MAX_TABLELOG*2+7 > sizeof(bitD.bitContainer)*8) /* This test must be static */ BITv05_reloadDStream(&bitD); op[3] = FSEv05_GETSYMBOL(&state2); } /* tail */ /* note : BITv05_reloadDStream(&bitD) >= FSEv05_DStream_partiallyFilled; Ends at exactly BITv05_DStream_completed */ while (1) { if ( (BITv05_reloadDStream(&bitD)>BITv05_DStream_completed) || (op==omax) || (BITv05_endOfDStream(&bitD) && (fast || FSEv05_endOfDState(&state1))) ) break; *op++ = FSEv05_GETSYMBOL(&state1); if ( (BITv05_reloadDStream(&bitD)>BITv05_DStream_completed) || (op==omax) || (BITv05_endOfDStream(&bitD) && (fast || FSEv05_endOfDState(&state2))) ) break; *op++ = FSEv05_GETSYMBOL(&state2); } /* end ? 
*/ if (BITv05_endOfDStream(&bitD) && FSEv05_endOfDState(&state1) && FSEv05_endOfDState(&state2)) return op-ostart; if (op==omax) return ERROR(dstSize_tooSmall); /* dst buffer is full, but cSrc unfinished */ return ERROR(corruption_detected); } size_t FSEv05_decompress_usingDTable(void* dst, size_t originalSize, const void* cSrc, size_t cSrcSize, const FSEv05_DTable* dt) { const void* ptr = dt; const FSEv05_DTableHeader* DTableH = (const FSEv05_DTableHeader*)ptr; const U32 fastMode = DTableH->fastMode; /* select fast mode (static) */ if (fastMode) return FSEv05_decompress_usingDTable_generic(dst, originalSize, cSrc, cSrcSize, dt, 1); return FSEv05_decompress_usingDTable_generic(dst, originalSize, cSrc, cSrcSize, dt, 0); } size_t FSEv05_decompress(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize) { const BYTE* const istart = (const BYTE*)cSrc; const BYTE* ip = istart; short counting[FSEv05_MAX_SYMBOL_VALUE+1]; DTable_max_t dt; /* Static analyzer seems unable to understand this table will be properly initialized later */ unsigned tableLog; unsigned maxSymbolValue = FSEv05_MAX_SYMBOL_VALUE; size_t errorCode; if (cSrcSize<2) return ERROR(srcSize_wrong); /* too small input size */ /* normal FSEv05 decoding mode */ errorCode = FSEv05_readNCount (counting, &maxSymbolValue, &tableLog, istart, cSrcSize); if (FSEv05_isError(errorCode)) return errorCode; if (errorCode >= cSrcSize) return ERROR(srcSize_wrong); /* too small input size */ ip += errorCode; cSrcSize -= errorCode; errorCode = FSEv05_buildDTable (dt, counting, maxSymbolValue, tableLog); if (FSEv05_isError(errorCode)) return errorCode; /* always return, even if it is an error code */ return FSEv05_decompress_usingDTable (dst, maxDstSize, ip, cSrcSize, dt); } #endif /* FSEv05_COMMONDEFS_ONLY */ /* ****************************************************************** Huff0 : Huffman coder, part of New Generation Entropy library header file Copyright (C) 2013-2016, Yann Collet. BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
You can contact the author at : - Source repository : https://github.com/Cyan4973/FiniteStateEntropy ****************************************************************** */ #ifndef HUFF0_H #define HUFF0_H #if defined (__cplusplus) extern "C" { #endif /* **************************************** * Huff0 simple functions ******************************************/ size_t HUFv05_decompress(void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /*! HUFv05_decompress(): Decompress Huff0 data from buffer 'cSrc', of size 'cSrcSize', into already allocated destination buffer 'dst', of size 'dstSize'. @dstSize : must be the **exact** size of original (uncompressed) data. Note : in contrast with FSEv05, HUFv05_decompress can regenerate RLE (cSrcSize==1) and uncompressed (cSrcSize==dstSize) data, because it knows size to regenerate. @return : size of regenerated data (== dstSize) or an error code, which can be tested using HUFv05_isError() */ /* **************************************** * Tool functions ******************************************/ /* Error Management */ unsigned HUFv05_isError(size_t code); /* tells if a return value is an error code */ const char* HUFv05_getErrorName(size_t code); /* provides error code string (useful for debugging) */ #if defined (__cplusplus) } #endif #endif /* HUF0_H */ /* ****************************************************************** Huff0 : Huffman codec, part of New Generation Entropy library header file, for static linking only Copyright (C) 2013-2016, Yann Collet BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
You can contact the author at : - Source repository : https://github.com/Cyan4973/FiniteStateEntropy ****************************************************************** */ #ifndef HUF0_STATIC_H #define HUF0_STATIC_H #if defined (__cplusplus) extern "C" { #endif /* **************************************** * Static allocation ******************************************/ /* static allocation of Huff0's DTable */ #define HUFv05_DTABLE_SIZE(maxTableLog) (1 + (1<= 199901L) /* C99 */) /* inline is defined */ #elif defined(_MSC_VER) # define inline __inline #else # define inline /* disable inline */ #endif #ifdef _MSC_VER /* Visual Studio */ # pragma warning(disable : 4127) /* disable: C4127: conditional expression is constant */ #endif /* ************************************************************** * Includes ****************************************************************/ #include /* malloc, free, qsort */ #include /* memcpy, memset */ #include /* printf (debug) */ /* ************************************************************** * Constants ****************************************************************/ #define HUFv05_ABSOLUTEMAX_TABLELOG 16 /* absolute limit of HUFv05_MAX_TABLELOG. Beyond that value, code does not work */ #define HUFv05_MAX_TABLELOG 12 /* max configured tableLog (for static allocation); can be modified up to HUFv05_ABSOLUTEMAX_TABLELOG */ #define HUFv05_DEFAULT_TABLELOG HUFv05_MAX_TABLELOG /* tableLog by default, when not specified */ #define HUFv05_MAX_SYMBOL_VALUE 255 #if (HUFv05_MAX_TABLELOG > HUFv05_ABSOLUTEMAX_TABLELOG) # error "HUFv05_MAX_TABLELOG is too large !" #endif /* ************************************************************** * Error Management ****************************************************************/ unsigned HUFv05_isError(size_t code) { return ERR_isError(code); } const char* HUFv05_getErrorName(size_t code) { return ERR_getErrorName(code); } #define HUFv05_STATIC_ASSERT(c) { enum { HUFv05_static_assert = 1/(int)(!!(c)) }; } /* use only *after* variable declarations */ /* ******************************************************* * Huff0 : Huffman block decompression *********************************************************/ typedef struct { BYTE byte; BYTE nbBits; } HUFv05_DEltX2; /* single-symbol decoding */ typedef struct { U16 sequence; BYTE nbBits; BYTE length; } HUFv05_DEltX4; /* double-symbols decoding */ typedef struct { BYTE symbol; BYTE weight; } sortedSymbol_t; /*! HUFv05_readStats Read compact Huffman tree, saved by HUFv05_writeCTable @huffWeight : destination buffer @return : size read from `src` */ static size_t HUFv05_readStats(BYTE* huffWeight, size_t hwSize, U32* rankStats, U32* nbSymbolsPtr, U32* tableLogPtr, const void* src, size_t srcSize) { U32 weightTotal; U32 tableLog; const BYTE* ip = (const BYTE*) src; size_t iSize; size_t oSize; U32 n; if (!srcSize) return ERROR(srcSize_wrong); iSize = ip[0]; /* memset(huffWeight, 0, hwSize); */ /* is not necessary, even though some analyzer complain ... 
*/ if (iSize >= 128) { /* special header */ if (iSize >= (242)) { /* RLE */ static int l[14] = { 1, 2, 3, 4, 7, 8, 15, 16, 31, 32, 63, 64, 127, 128 }; oSize = l[iSize-242]; memset(huffWeight, 1, hwSize); iSize = 0; } else { /* Incompressible */ oSize = iSize - 127; iSize = ((oSize+1)/2); if (iSize+1 > srcSize) return ERROR(srcSize_wrong); if (oSize >= hwSize) return ERROR(corruption_detected); ip += 1; for (n=0; n> 4; huffWeight[n+1] = ip[n/2] & 15; } } } else { /* header compressed with FSEv05 (normal case) */ if (iSize+1 > srcSize) return ERROR(srcSize_wrong); oSize = FSEv05_decompress(huffWeight, hwSize-1, ip+1, iSize); /* max (hwSize-1) values decoded, as last one is implied */ if (FSEv05_isError(oSize)) return oSize; } /* collect weight stats */ memset(rankStats, 0, (HUFv05_ABSOLUTEMAX_TABLELOG + 1) * sizeof(U32)); weightTotal = 0; for (n=0; n= HUFv05_ABSOLUTEMAX_TABLELOG) return ERROR(corruption_detected); rankStats[huffWeight[n]]++; weightTotal += (1 << huffWeight[n]) >> 1; } if (weightTotal == 0) return ERROR(corruption_detected); /* get last non-null symbol weight (implied, total must be 2^n) */ tableLog = BITv05_highbit32(weightTotal) + 1; if (tableLog > HUFv05_ABSOLUTEMAX_TABLELOG) return ERROR(corruption_detected); { /* determine last weight */ U32 total = 1 << tableLog; U32 rest = total - weightTotal; U32 verif = 1 << BITv05_highbit32(rest); U32 lastWeight = BITv05_highbit32(rest) + 1; if (verif != rest) return ERROR(corruption_detected); /* last value must be a clean power of 2 */ huffWeight[oSize] = (BYTE)lastWeight; rankStats[lastWeight]++; } /* check tree construction validity */ if ((rankStats[1] < 2) || (rankStats[1] & 1)) return ERROR(corruption_detected); /* by construction : at least 2 elts of rank 1, must be even */ /* results */ *nbSymbolsPtr = (U32)(oSize+1); *tableLogPtr = tableLog; return iSize+1; } /*-***************************/ /* single-symbol decoding */ /*-***************************/ size_t HUFv05_readDTableX2 (U16* DTable, const void* src, size_t srcSize) { BYTE huffWeight[HUFv05_MAX_SYMBOL_VALUE + 1]; U32 rankVal[HUFv05_ABSOLUTEMAX_TABLELOG + 1]; /* large enough for values from 0 to 16 */ U32 tableLog = 0; size_t iSize; U32 nbSymbols = 0; U32 n; U32 nextRankStart; void* const dtPtr = DTable + 1; HUFv05_DEltX2* const dt = (HUFv05_DEltX2*)dtPtr; HUFv05_STATIC_ASSERT(sizeof(HUFv05_DEltX2) == sizeof(U16)); /* if compilation fails here, assertion is false */ /* memset(huffWeight, 0, sizeof(huffWeight)); */ /* is not necessary, even though some analyzer complain ... 
*/ iSize = HUFv05_readStats(huffWeight, HUFv05_MAX_SYMBOL_VALUE + 1, rankVal, &nbSymbols, &tableLog, src, srcSize); if (HUFv05_isError(iSize)) return iSize; /* check result */ if (tableLog > DTable[0]) return ERROR(tableLog_tooLarge); /* DTable is too small */ DTable[0] = (U16)tableLog; /* maybe should separate sizeof allocated DTable, from used size of DTable, in case of re-use */ /* Prepare ranks */ nextRankStart = 0; for (n=1; n<=tableLog; n++) { U32 current = nextRankStart; nextRankStart += (rankVal[n] << (n-1)); rankVal[n] = current; } /* fill DTable */ for (n=0; n> 1; U32 i; HUFv05_DEltX2 D; D.byte = (BYTE)n; D.nbBits = (BYTE)(tableLog + 1 - w); for (i = rankVal[w]; i < rankVal[w] + length; i++) dt[i] = D; rankVal[w] += length; } return iSize; } static BYTE HUFv05_decodeSymbolX2(BITv05_DStream_t* Dstream, const HUFv05_DEltX2* dt, const U32 dtLog) { const size_t val = BITv05_lookBitsFast(Dstream, dtLog); /* note : dtLog >= 1 */ const BYTE c = dt[val].byte; BITv05_skipBits(Dstream, dt[val].nbBits); return c; } #define HUFv05_DECODE_SYMBOLX2_0(ptr, DStreamPtr) \ *ptr++ = HUFv05_decodeSymbolX2(DStreamPtr, dt, dtLog) #define HUFv05_DECODE_SYMBOLX2_1(ptr, DStreamPtr) \ if (MEM_64bits() || (HUFv05_MAX_TABLELOG<=12)) \ HUFv05_DECODE_SYMBOLX2_0(ptr, DStreamPtr) #define HUFv05_DECODE_SYMBOLX2_2(ptr, DStreamPtr) \ if (MEM_64bits()) \ HUFv05_DECODE_SYMBOLX2_0(ptr, DStreamPtr) static inline size_t HUFv05_decodeStreamX2(BYTE* p, BITv05_DStream_t* const bitDPtr, BYTE* const pEnd, const HUFv05_DEltX2* const dt, const U32 dtLog) { BYTE* const pStart = p; /* up to 4 symbols at a time */ while ((BITv05_reloadDStream(bitDPtr) == BITv05_DStream_unfinished) && (p <= pEnd-4)) { HUFv05_DECODE_SYMBOLX2_2(p, bitDPtr); HUFv05_DECODE_SYMBOLX2_1(p, bitDPtr); HUFv05_DECODE_SYMBOLX2_2(p, bitDPtr); HUFv05_DECODE_SYMBOLX2_0(p, bitDPtr); } /* closer to the end */ while ((BITv05_reloadDStream(bitDPtr) == BITv05_DStream_unfinished) && (p < pEnd)) HUFv05_DECODE_SYMBOLX2_0(p, bitDPtr); /* no more data to retrieve from bitstream, hence no need to reload */ while (p < pEnd) HUFv05_DECODE_SYMBOLX2_0(p, bitDPtr); return pEnd-pStart; } size_t HUFv05_decompress1X2_usingDTable( void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, const U16* DTable) { BYTE* op = (BYTE*)dst; BYTE* const oend = op + dstSize; const U32 dtLog = DTable[0]; const void* dtPtr = DTable; const HUFv05_DEltX2* const dt = ((const HUFv05_DEltX2*)dtPtr)+1; BITv05_DStream_t bitD; if (dstSize <= cSrcSize) return ERROR(dstSize_tooSmall); { size_t const errorCode = BITv05_initDStream(&bitD, cSrc, cSrcSize); if (HUFv05_isError(errorCode)) return errorCode; } HUFv05_decodeStreamX2(op, &bitD, oend, dt, dtLog); /* check */ if (!BITv05_endOfDStream(&bitD)) return ERROR(corruption_detected); return dstSize; } size_t HUFv05_decompress1X2 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize) { HUFv05_CREATE_STATIC_DTABLEX2(DTable, HUFv05_MAX_TABLELOG); const BYTE* ip = (const BYTE*) cSrc; size_t errorCode; errorCode = HUFv05_readDTableX2 (DTable, cSrc, cSrcSize); if (HUFv05_isError(errorCode)) return errorCode; if (errorCode >= cSrcSize) return ERROR(srcSize_wrong); ip += errorCode; cSrcSize -= errorCode; return HUFv05_decompress1X2_usingDTable (dst, dstSize, ip, cSrcSize, DTable); } size_t HUFv05_decompress4X2_usingDTable( void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, const U16* DTable) { /* Check */ if (cSrcSize < 10) return ERROR(corruption_detected); /* strict minimum : jump table + 1 byte per stream */ { const BYTE* const istart = 
(const BYTE*) cSrc; BYTE* const ostart = (BYTE*) dst; BYTE* const oend = ostart + dstSize; const void* const dtPtr = DTable; const HUFv05_DEltX2* const dt = ((const HUFv05_DEltX2*)dtPtr) +1; const U32 dtLog = DTable[0]; size_t errorCode; /* Init */ BITv05_DStream_t bitD1; BITv05_DStream_t bitD2; BITv05_DStream_t bitD3; BITv05_DStream_t bitD4; const size_t length1 = MEM_readLE16(istart); const size_t length2 = MEM_readLE16(istart+2); const size_t length3 = MEM_readLE16(istart+4); size_t length4; const BYTE* const istart1 = istart + 6; /* jumpTable */ const BYTE* const istart2 = istart1 + length1; const BYTE* const istart3 = istart2 + length2; const BYTE* const istart4 = istart3 + length3; const size_t segmentSize = (dstSize+3) / 4; BYTE* const opStart2 = ostart + segmentSize; BYTE* const opStart3 = opStart2 + segmentSize; BYTE* const opStart4 = opStart3 + segmentSize; BYTE* op1 = ostart; BYTE* op2 = opStart2; BYTE* op3 = opStart3; BYTE* op4 = opStart4; U32 endSignal; length4 = cSrcSize - (length1 + length2 + length3 + 6); if (length4 > cSrcSize) return ERROR(corruption_detected); /* overflow */ errorCode = BITv05_initDStream(&bitD1, istart1, length1); if (HUFv05_isError(errorCode)) return errorCode; errorCode = BITv05_initDStream(&bitD2, istart2, length2); if (HUFv05_isError(errorCode)) return errorCode; errorCode = BITv05_initDStream(&bitD3, istart3, length3); if (HUFv05_isError(errorCode)) return errorCode; errorCode = BITv05_initDStream(&bitD4, istart4, length4); if (HUFv05_isError(errorCode)) return errorCode; /* 16-32 symbols per loop (4-8 symbols per stream) */ endSignal = BITv05_reloadDStream(&bitD1) | BITv05_reloadDStream(&bitD2) | BITv05_reloadDStream(&bitD3) | BITv05_reloadDStream(&bitD4); for ( ; (endSignal==BITv05_DStream_unfinished) && (op4<(oend-7)) ; ) { HUFv05_DECODE_SYMBOLX2_2(op1, &bitD1); HUFv05_DECODE_SYMBOLX2_2(op2, &bitD2); HUFv05_DECODE_SYMBOLX2_2(op3, &bitD3); HUFv05_DECODE_SYMBOLX2_2(op4, &bitD4); HUFv05_DECODE_SYMBOLX2_1(op1, &bitD1); HUFv05_DECODE_SYMBOLX2_1(op2, &bitD2); HUFv05_DECODE_SYMBOLX2_1(op3, &bitD3); HUFv05_DECODE_SYMBOLX2_1(op4, &bitD4); HUFv05_DECODE_SYMBOLX2_2(op1, &bitD1); HUFv05_DECODE_SYMBOLX2_2(op2, &bitD2); HUFv05_DECODE_SYMBOLX2_2(op3, &bitD3); HUFv05_DECODE_SYMBOLX2_2(op4, &bitD4); HUFv05_DECODE_SYMBOLX2_0(op1, &bitD1); HUFv05_DECODE_SYMBOLX2_0(op2, &bitD2); HUFv05_DECODE_SYMBOLX2_0(op3, &bitD3); HUFv05_DECODE_SYMBOLX2_0(op4, &bitD4); endSignal = BITv05_reloadDStream(&bitD1) | BITv05_reloadDStream(&bitD2) | BITv05_reloadDStream(&bitD3) | BITv05_reloadDStream(&bitD4); } /* check corruption */ if (op1 > opStart2) return ERROR(corruption_detected); if (op2 > opStart3) return ERROR(corruption_detected); if (op3 > opStart4) return ERROR(corruption_detected); /* note : op4 supposed already verified within main loop */ /* finish bitStreams one by one */ HUFv05_decodeStreamX2(op1, &bitD1, opStart2, dt, dtLog); HUFv05_decodeStreamX2(op2, &bitD2, opStart3, dt, dtLog); HUFv05_decodeStreamX2(op3, &bitD3, opStart4, dt, dtLog); HUFv05_decodeStreamX2(op4, &bitD4, oend, dt, dtLog); /* check */ endSignal = BITv05_endOfDStream(&bitD1) & BITv05_endOfDStream(&bitD2) & BITv05_endOfDStream(&bitD3) & BITv05_endOfDStream(&bitD4); if (!endSignal) return ERROR(corruption_detected); /* decoded size */ return dstSize; } } size_t HUFv05_decompress4X2 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize) { HUFv05_CREATE_STATIC_DTABLEX2(DTable, HUFv05_MAX_TABLELOG); const BYTE* ip = (const BYTE*) cSrc; size_t errorCode; errorCode = HUFv05_readDTableX2 (DTable, cSrc, 
cSrcSize); if (HUFv05_isError(errorCode)) return errorCode; if (errorCode >= cSrcSize) return ERROR(srcSize_wrong); ip += errorCode; cSrcSize -= errorCode; return HUFv05_decompress4X2_usingDTable (dst, dstSize, ip, cSrcSize, DTable); } /* *************************/ /* double-symbols decoding */ /* *************************/ static void HUFv05_fillDTableX4Level2(HUFv05_DEltX4* DTable, U32 sizeLog, const U32 consumed, const U32* rankValOrigin, const int minWeight, const sortedSymbol_t* sortedSymbols, const U32 sortedListSize, U32 nbBitsBaseline, U16 baseSeq) { HUFv05_DEltX4 DElt; U32 rankVal[HUFv05_ABSOLUTEMAX_TABLELOG + 1]; U32 s; /* get pre-calculated rankVal */ memcpy(rankVal, rankValOrigin, sizeof(rankVal)); /* fill skipped values */ if (minWeight>1) { U32 i, skipSize = rankVal[minWeight]; MEM_writeLE16(&(DElt.sequence), baseSeq); DElt.nbBits = (BYTE)(consumed); DElt.length = 1; for (i = 0; i < skipSize; i++) DTable[i] = DElt; } /* fill DTable */ for (s=0; s= 1 */ rankVal[weight] += length; } } typedef U32 rankVal_t[HUFv05_ABSOLUTEMAX_TABLELOG][HUFv05_ABSOLUTEMAX_TABLELOG + 1]; static void HUFv05_fillDTableX4(HUFv05_DEltX4* DTable, const U32 targetLog, const sortedSymbol_t* sortedList, const U32 sortedListSize, const U32* rankStart, rankVal_t rankValOrigin, const U32 maxWeight, const U32 nbBitsBaseline) { U32 rankVal[HUFv05_ABSOLUTEMAX_TABLELOG + 1]; const int scaleLog = nbBitsBaseline - targetLog; /* note : targetLog >= srcLog, hence scaleLog <= 1 */ const U32 minBits = nbBitsBaseline - maxWeight; U32 s; memcpy(rankVal, rankValOrigin, sizeof(rankVal)); /* fill DTable */ for (s=0; s= minBits) { /* enough room for a second symbol */ U32 sortedRank; int minWeight = nbBits + scaleLog; if (minWeight < 1) minWeight = 1; sortedRank = rankStart[minWeight]; HUFv05_fillDTableX4Level2(DTable+start, targetLog-nbBits, nbBits, rankValOrigin[nbBits], minWeight, sortedList+sortedRank, sortedListSize-sortedRank, nbBitsBaseline, symbol); } else { U32 i; const U32 end = start + length; HUFv05_DEltX4 DElt; MEM_writeLE16(&(DElt.sequence), symbol); DElt.nbBits = (BYTE)(nbBits); DElt.length = 1; for (i = start; i < end; i++) DTable[i] = DElt; } rankVal[weight] += length; } } size_t HUFv05_readDTableX4 (unsigned* DTable, const void* src, size_t srcSize) { BYTE weightList[HUFv05_MAX_SYMBOL_VALUE + 1]; sortedSymbol_t sortedSymbol[HUFv05_MAX_SYMBOL_VALUE + 1]; U32 rankStats[HUFv05_ABSOLUTEMAX_TABLELOG + 1] = { 0 }; U32 rankStart0[HUFv05_ABSOLUTEMAX_TABLELOG + 2] = { 0 }; U32* const rankStart = rankStart0+1; rankVal_t rankVal; U32 tableLog, maxW, sizeOfSort, nbSymbols; const U32 memLog = DTable[0]; size_t iSize; void* dtPtr = DTable; HUFv05_DEltX4* const dt = ((HUFv05_DEltX4*)dtPtr) + 1; HUFv05_STATIC_ASSERT(sizeof(HUFv05_DEltX4) == sizeof(unsigned)); /* if compilation fails here, assertion is false */ if (memLog > HUFv05_ABSOLUTEMAX_TABLELOG) return ERROR(tableLog_tooLarge); /* memset(weightList, 0, sizeof(weightList)); */ /* is not necessary, even though some analyzer complain ... 
*/ iSize = HUFv05_readStats(weightList, HUFv05_MAX_SYMBOL_VALUE + 1, rankStats, &nbSymbols, &tableLog, src, srcSize); if (HUFv05_isError(iSize)) return iSize; /* check result */ if (tableLog > memLog) return ERROR(tableLog_tooLarge); /* DTable can't fit code depth */ /* find maxWeight */ for (maxW = tableLog; rankStats[maxW]==0; maxW--) {} /* necessarily finds a solution before 0 */ /* Get start index of each weight */ { U32 w, nextRankStart = 0; for (w=1; w<=maxW; w++) { U32 current = nextRankStart; nextRankStart += rankStats[w]; rankStart[w] = current; } rankStart[0] = nextRankStart; /* put all 0w symbols at the end of sorted list*/ sizeOfSort = nextRankStart; } /* sort symbols by weight */ { U32 s; for (s=0; s> consumed; } } } HUFv05_fillDTableX4(dt, memLog, sortedSymbol, sizeOfSort, rankStart0, rankVal, maxW, tableLog+1); return iSize; } static U32 HUFv05_decodeSymbolX4(void* op, BITv05_DStream_t* DStream, const HUFv05_DEltX4* dt, const U32 dtLog) { const size_t val = BITv05_lookBitsFast(DStream, dtLog); /* note : dtLog >= 1 */ memcpy(op, dt+val, 2); BITv05_skipBits(DStream, dt[val].nbBits); return dt[val].length; } static U32 HUFv05_decodeLastSymbolX4(void* op, BITv05_DStream_t* DStream, const HUFv05_DEltX4* dt, const U32 dtLog) { const size_t val = BITv05_lookBitsFast(DStream, dtLog); /* note : dtLog >= 1 */ memcpy(op, dt+val, 1); if (dt[val].length==1) BITv05_skipBits(DStream, dt[val].nbBits); else { if (DStream->bitsConsumed < (sizeof(DStream->bitContainer)*8)) { BITv05_skipBits(DStream, dt[val].nbBits); if (DStream->bitsConsumed > (sizeof(DStream->bitContainer)*8)) DStream->bitsConsumed = (sizeof(DStream->bitContainer)*8); /* ugly hack; works only because it's the last symbol. Note : can't easily extract nbBits from just this symbol */ } } return 1; } #define HUFv05_DECODE_SYMBOLX4_0(ptr, DStreamPtr) \ ptr += HUFv05_decodeSymbolX4(ptr, DStreamPtr, dt, dtLog) #define HUFv05_DECODE_SYMBOLX4_1(ptr, DStreamPtr) \ if (MEM_64bits() || (HUFv05_MAX_TABLELOG<=12)) \ ptr += HUFv05_decodeSymbolX4(ptr, DStreamPtr, dt, dtLog) #define HUFv05_DECODE_SYMBOLX4_2(ptr, DStreamPtr) \ if (MEM_64bits()) \ ptr += HUFv05_decodeSymbolX4(ptr, DStreamPtr, dt, dtLog) static inline size_t HUFv05_decodeStreamX4(BYTE* p, BITv05_DStream_t* bitDPtr, BYTE* const pEnd, const HUFv05_DEltX4* const dt, const U32 dtLog) { BYTE* const pStart = p; /* up to 8 symbols at a time */ while ((BITv05_reloadDStream(bitDPtr) == BITv05_DStream_unfinished) && (p < pEnd-7)) { HUFv05_DECODE_SYMBOLX4_2(p, bitDPtr); HUFv05_DECODE_SYMBOLX4_1(p, bitDPtr); HUFv05_DECODE_SYMBOLX4_2(p, bitDPtr); HUFv05_DECODE_SYMBOLX4_0(p, bitDPtr); } /* closer to the end */ while ((BITv05_reloadDStream(bitDPtr) == BITv05_DStream_unfinished) && (p <= pEnd-2)) HUFv05_DECODE_SYMBOLX4_0(p, bitDPtr); while (p <= pEnd-2) HUFv05_DECODE_SYMBOLX4_0(p, bitDPtr); /* no need to reload : reached the end of DStream */ if (p < pEnd) p += HUFv05_decodeLastSymbolX4(p, bitDPtr, dt, dtLog); return p-pStart; } size_t HUFv05_decompress1X4_usingDTable( void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, const unsigned* DTable) { const BYTE* const istart = (const BYTE*) cSrc; BYTE* const ostart = (BYTE*) dst; BYTE* const oend = ostart + dstSize; const U32 dtLog = DTable[0]; const void* const dtPtr = DTable; const HUFv05_DEltX4* const dt = ((const HUFv05_DEltX4*)dtPtr) +1; size_t errorCode; /* Init */ BITv05_DStream_t bitD; errorCode = BITv05_initDStream(&bitD, istart, cSrcSize); if (HUFv05_isError(errorCode)) return errorCode; /* finish bitStreams one by one */ 
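/* Added note (illustrative, not part of the original source): each
   HUFv05_DEltX4 entry packs a little-endian 16-bit 'sequence' holding one or
   two decoded symbols, the total number of bits consumed ('nbBits'), and a
   'length' field (1 or 2) giving the output advance. HUFv05_decodeSymbolX4()
   above therefore copies 2 bytes unconditionally and returns dt[val].length,
   so a single table lookup can emit two symbols at once. */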
HUFv05_decodeStreamX4(ostart, &bitD, oend, dt, dtLog); /* check */ if (!BITv05_endOfDStream(&bitD)) return ERROR(corruption_detected); /* decoded size */ return dstSize; } size_t HUFv05_decompress1X4 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize) { HUFv05_CREATE_STATIC_DTABLEX4(DTable, HUFv05_MAX_TABLELOG); const BYTE* ip = (const BYTE*) cSrc; size_t hSize = HUFv05_readDTableX4 (DTable, cSrc, cSrcSize); if (HUFv05_isError(hSize)) return hSize; if (hSize >= cSrcSize) return ERROR(srcSize_wrong); ip += hSize; cSrcSize -= hSize; return HUFv05_decompress1X4_usingDTable (dst, dstSize, ip, cSrcSize, DTable); } size_t HUFv05_decompress4X4_usingDTable( void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, const unsigned* DTable) { if (cSrcSize < 10) return ERROR(corruption_detected); /* strict minimum : jump table + 1 byte per stream */ { const BYTE* const istart = (const BYTE*) cSrc; BYTE* const ostart = (BYTE*) dst; BYTE* const oend = ostart + dstSize; const void* const dtPtr = DTable; const HUFv05_DEltX4* const dt = ((const HUFv05_DEltX4*)dtPtr) +1; const U32 dtLog = DTable[0]; size_t errorCode; /* Init */ BITv05_DStream_t bitD1; BITv05_DStream_t bitD2; BITv05_DStream_t bitD3; BITv05_DStream_t bitD4; const size_t length1 = MEM_readLE16(istart); const size_t length2 = MEM_readLE16(istart+2); const size_t length3 = MEM_readLE16(istart+4); size_t length4; const BYTE* const istart1 = istart + 6; /* jumpTable */ const BYTE* const istart2 = istart1 + length1; const BYTE* const istart3 = istart2 + length2; const BYTE* const istart4 = istart3 + length3; const size_t segmentSize = (dstSize+3) / 4; BYTE* const opStart2 = ostart + segmentSize; BYTE* const opStart3 = opStart2 + segmentSize; BYTE* const opStart4 = opStart3 + segmentSize; BYTE* op1 = ostart; BYTE* op2 = opStart2; BYTE* op3 = opStart3; BYTE* op4 = opStart4; U32 endSignal; length4 = cSrcSize - (length1 + length2 + length3 + 6); if (length4 > cSrcSize) return ERROR(corruption_detected); /* overflow */ errorCode = BITv05_initDStream(&bitD1, istart1, length1); if (HUFv05_isError(errorCode)) return errorCode; errorCode = BITv05_initDStream(&bitD2, istart2, length2); if (HUFv05_isError(errorCode)) return errorCode; errorCode = BITv05_initDStream(&bitD3, istart3, length3); if (HUFv05_isError(errorCode)) return errorCode; errorCode = BITv05_initDStream(&bitD4, istart4, length4); if (HUFv05_isError(errorCode)) return errorCode; /* 16-32 symbols per loop (4-8 symbols per stream) */ endSignal = BITv05_reloadDStream(&bitD1) | BITv05_reloadDStream(&bitD2) | BITv05_reloadDStream(&bitD3) | BITv05_reloadDStream(&bitD4); for ( ; (endSignal==BITv05_DStream_unfinished) && (op4<(oend-7)) ; ) { HUFv05_DECODE_SYMBOLX4_2(op1, &bitD1); HUFv05_DECODE_SYMBOLX4_2(op2, &bitD2); HUFv05_DECODE_SYMBOLX4_2(op3, &bitD3); HUFv05_DECODE_SYMBOLX4_2(op4, &bitD4); HUFv05_DECODE_SYMBOLX4_1(op1, &bitD1); HUFv05_DECODE_SYMBOLX4_1(op2, &bitD2); HUFv05_DECODE_SYMBOLX4_1(op3, &bitD3); HUFv05_DECODE_SYMBOLX4_1(op4, &bitD4); HUFv05_DECODE_SYMBOLX4_2(op1, &bitD1); HUFv05_DECODE_SYMBOLX4_2(op2, &bitD2); HUFv05_DECODE_SYMBOLX4_2(op3, &bitD3); HUFv05_DECODE_SYMBOLX4_2(op4, &bitD4); HUFv05_DECODE_SYMBOLX4_0(op1, &bitD1); HUFv05_DECODE_SYMBOLX4_0(op2, &bitD2); HUFv05_DECODE_SYMBOLX4_0(op3, &bitD3); HUFv05_DECODE_SYMBOLX4_0(op4, &bitD4); endSignal = BITv05_reloadDStream(&bitD1) | BITv05_reloadDStream(&bitD2) | BITv05_reloadDStream(&bitD3) | BITv05_reloadDStream(&bitD4); } /* check corruption */ if (op1 > opStart2) return ERROR(corruption_detected); if (op2 > opStart3) 
return ERROR(corruption_detected); if (op3 > opStart4) return ERROR(corruption_detected); /* note : op4 supposed already verified within main loop */ /* finish bitStreams one by one */ HUFv05_decodeStreamX4(op1, &bitD1, opStart2, dt, dtLog); HUFv05_decodeStreamX4(op2, &bitD2, opStart3, dt, dtLog); HUFv05_decodeStreamX4(op3, &bitD3, opStart4, dt, dtLog); HUFv05_decodeStreamX4(op4, &bitD4, oend, dt, dtLog); /* check */ endSignal = BITv05_endOfDStream(&bitD1) & BITv05_endOfDStream(&bitD2) & BITv05_endOfDStream(&bitD3) & BITv05_endOfDStream(&bitD4); if (!endSignal) return ERROR(corruption_detected); /* decoded size */ return dstSize; } } size_t HUFv05_decompress4X4 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize) { HUFv05_CREATE_STATIC_DTABLEX4(DTable, HUFv05_MAX_TABLELOG); const BYTE* ip = (const BYTE*) cSrc; size_t hSize = HUFv05_readDTableX4 (DTable, cSrc, cSrcSize); if (HUFv05_isError(hSize)) return hSize; if (hSize >= cSrcSize) return ERROR(srcSize_wrong); ip += hSize; cSrcSize -= hSize; return HUFv05_decompress4X4_usingDTable (dst, dstSize, ip, cSrcSize, DTable); } /* ********************************/ /* Generic decompression selector */ /* ********************************/ typedef struct { U32 tableTime; U32 decode256Time; } algo_time_t; static const algo_time_t algoTime[16 /* Quantization */][3 /* single, double, quad */] = { /* single, double, quad */ {{0,0}, {1,1}, {2,2}}, /* Q==0 : impossible */ {{0,0}, {1,1}, {2,2}}, /* Q==1 : impossible */ {{ 38,130}, {1313, 74}, {2151, 38}}, /* Q == 2 : 12-18% */ {{ 448,128}, {1353, 74}, {2238, 41}}, /* Q == 3 : 18-25% */ {{ 556,128}, {1353, 74}, {2238, 47}}, /* Q == 4 : 25-32% */ {{ 714,128}, {1418, 74}, {2436, 53}}, /* Q == 5 : 32-38% */ {{ 883,128}, {1437, 74}, {2464, 61}}, /* Q == 6 : 38-44% */ {{ 897,128}, {1515, 75}, {2622, 68}}, /* Q == 7 : 44-50% */ {{ 926,128}, {1613, 75}, {2730, 75}}, /* Q == 8 : 50-56% */ {{ 947,128}, {1729, 77}, {3359, 77}}, /* Q == 9 : 56-62% */ {{1107,128}, {2083, 81}, {4006, 84}}, /* Q ==10 : 62-69% */ {{1177,128}, {2379, 87}, {4785, 88}}, /* Q ==11 : 69-75% */ {{1242,128}, {2415, 93}, {5155, 84}}, /* Q ==12 : 75-81% */ {{1349,128}, {2644,106}, {5260,106}}, /* Q ==13 : 81-87% */ {{1455,128}, {2422,124}, {4174,124}}, /* Q ==14 : 87-93% */ {{ 722,128}, {1891,145}, {1936,146}}, /* Q ==15 : 93-99% */ }; typedef size_t (*decompressionAlgo)(void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); size_t HUFv05_decompress (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize) { static const decompressionAlgo decompress[3] = { HUFv05_decompress4X2, HUFv05_decompress4X4, NULL }; /* estimate decompression time */ U32 Q; const U32 D256 = (U32)(dstSize >> 8); U32 Dtime[3]; U32 algoNb = 0; int n; /* validation checks */ if (dstSize == 0) return ERROR(dstSize_tooSmall); if (cSrcSize >= dstSize) return ERROR(corruption_detected); /* invalid, or not compressed, but not compressed already dealt with */ if (cSrcSize == 1) { memset(dst, *(const BYTE*)cSrc, dstSize); return dstSize; } /* RLE */ /* decoder timing evaluation */ Q = (U32)(cSrcSize * 16 / dstSize); /* Q < 16 since dstSize > cSrcSize */ for (n=0; n<3; n++) Dtime[n] = algoTime[Q][n].tableTime + (algoTime[Q][n].decode256Time * D256); Dtime[1] += Dtime[1] >> 4; Dtime[2] += Dtime[2] >> 3; /* advantage to algorithms using less memory, for cache eviction */ if (Dtime[1] < Dtime[0]) algoNb = 1; return decompress[algoNb](dst, dstSize, cSrc, cSrcSize); /* return HUFv05_decompress4X2(dst, dstSize, cSrc, cSrcSize); */ /* multi-streams single-symbol 
decoding */ /* return HUFv05_decompress4X4(dst, dstSize, cSrc, cSrcSize); */ /* multi-streams double-symbols decoding */ /* return HUFv05_decompress4X6(dst, dstSize, cSrc, cSrcSize); */ /* multi-streams quad-symbols decoding */ } /* zstd - standard compression library Copyright (C) 2014-2016, Yann Collet. BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. You can contact the author at : - zstd source repository : https://github.com/Cyan4973/zstd */ /* *************************************************************** * Tuning parameters *****************************************************************/ /*! * HEAPMODE : * Select how default decompression function ZSTDv05_decompress() will allocate memory, * in memory stack (0), or in memory heap (1, requires malloc()) */ #ifndef ZSTDv05_HEAPMODE # define ZSTDv05_HEAPMODE 1 #endif /*-******************************************************* * Dependencies *********************************************************/ #include /* calloc */ #include /* memcpy, memmove */ #include /* debug only : printf */ /*-******************************************************* * Compiler specifics *********************************************************/ #ifdef _MSC_VER /* Visual Studio */ # include /* For Visual 2005 */ # pragma warning(disable : 4127) /* disable: C4127: conditional expression is constant */ # pragma warning(disable : 4324) /* disable: C4324: padded structure */ #endif /*-************************************* * Local types ***************************************/ typedef struct { blockType_t blockType; U32 origSize; } blockProperties_t; /* ******************************************************* * Memory operations **********************************************************/ static void ZSTDv05_copy4(void* dst, const void* src) { memcpy(dst, src, 4); } /* ************************************* * Error Management ***************************************/ /*! ZSTDv05_isError() : * tells if a return value is an error code */ unsigned ZSTDv05_isError(size_t code) { return ERR_isError(code); } /*! 
ZSTDv05_getErrorName() : * provides error code string (useful for debugging) */ const char* ZSTDv05_getErrorName(size_t code) { return ERR_getErrorName(code); } /* ************************************************************* * Context management ***************************************************************/ typedef enum { ZSTDv05ds_getFrameHeaderSize, ZSTDv05ds_decodeFrameHeader, ZSTDv05ds_decodeBlockHeader, ZSTDv05ds_decompressBlock } ZSTDv05_dStage; struct ZSTDv05_DCtx_s { FSEv05_DTable LLTable[FSEv05_DTABLE_SIZE_U32(LLFSEv05Log)]; FSEv05_DTable OffTable[FSEv05_DTABLE_SIZE_U32(OffFSEv05Log)]; FSEv05_DTable MLTable[FSEv05_DTABLE_SIZE_U32(MLFSEv05Log)]; unsigned hufTableX4[HUFv05_DTABLE_SIZE(HufLog)]; const void* previousDstEnd; const void* base; const void* vBase; const void* dictEnd; size_t expected; size_t headerSize; ZSTDv05_parameters params; blockType_t bType; /* used in ZSTDv05_decompressContinue(), to transfer blockType between header decoding and block decoding stages */ ZSTDv05_dStage stage; U32 flagStaticTables; const BYTE* litPtr; size_t litSize; BYTE litBuffer[BLOCKSIZE + WILDCOPY_OVERLENGTH]; BYTE headerBuffer[ZSTDv05_frameHeaderSize_max]; }; /* typedef'd to ZSTDv05_DCtx within "zstd_static.h" */ size_t ZSTDv05_sizeofDCtx (void); /* Hidden declaration */ size_t ZSTDv05_sizeofDCtx (void) { return sizeof(ZSTDv05_DCtx); } size_t ZSTDv05_decompressBegin(ZSTDv05_DCtx* dctx) { dctx->expected = ZSTDv05_frameHeaderSize_min; dctx->stage = ZSTDv05ds_getFrameHeaderSize; dctx->previousDstEnd = NULL; dctx->base = NULL; dctx->vBase = NULL; dctx->dictEnd = NULL; dctx->hufTableX4[0] = HufLog; dctx->flagStaticTables = 0; return 0; } ZSTDv05_DCtx* ZSTDv05_createDCtx(void) { ZSTDv05_DCtx* dctx = (ZSTDv05_DCtx*)malloc(sizeof(ZSTDv05_DCtx)); if (dctx==NULL) return NULL; ZSTDv05_decompressBegin(dctx); return dctx; } size_t ZSTDv05_freeDCtx(ZSTDv05_DCtx* dctx) { free(dctx); return 0; /* reserved as a potential error code in the future */ } void ZSTDv05_copyDCtx(ZSTDv05_DCtx* dstDCtx, const ZSTDv05_DCtx* srcDCtx) { memcpy(dstDCtx, srcDCtx, sizeof(ZSTDv05_DCtx) - (BLOCKSIZE+WILDCOPY_OVERLENGTH + ZSTDv05_frameHeaderSize_max)); /* no need to copy workspace */ } /* ************************************************************* * Decompression section ***************************************************************/ /* Frame format description Frame Header - [ Block Header - Block ] - Frame End 1) Frame Header - 4 bytes - Magic Number : ZSTDv05_MAGICNUMBER (defined within zstd_internal.h) - 1 byte - Window Descriptor 2) Block Header - 3 bytes, starting with a 2-bits descriptor Uncompressed, Compressed, Frame End, unused 3) Block See Block Format Description 4) Frame End - 3 bytes, compatible with Block Header */ /* Block format description Block = Literal Section - Sequences Section Prerequisite : size of (compressed) block, maximum size of regenerated data 1) Literal Section 1.1) Header : 1-5 bytes flags: 2 bits 00 compressed by Huff0 01 unused 10 is Raw (uncompressed) 11 is Rle Note : using 01 => Huff0 with precomputed table ? Note : delta map ? => compressed ? 
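   Worked example (added note, not part of the original description; it assumes
   IS_RAW == 2, as used by this file's literal block types): a raw literal run
   of 300 bytes takes the 12-bit form described in 1.1.2 below,
   byte0 = (IS_RAW<<6) + (2<<4) + (300>>8) = 0xA1 and byte1 = 300 & 255 = 0x2C;
   ZSTDv05_decodeLiteralsBlock() then recovers litSize = ((0xA1 & 15) << 8) + 0x2C = 300.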
1.1.1) Huff0-compressed literal block : 3-5 bytes srcSize < 1 KB => 3 bytes (2-2-10-10) => single stream srcSize < 1 KB => 3 bytes (2-2-10-10) srcSize < 16KB => 4 bytes (2-2-14-14) else => 5 bytes (2-2-18-18) big endian convention 1.1.2) Raw (uncompressed) literal block header : 1-3 bytes size : 5 bits: (IS_RAW<<6) + (0<<4) + size 12 bits: (IS_RAW<<6) + (2<<4) + (size>>8) size&255 20 bits: (IS_RAW<<6) + (3<<4) + (size>>16) size>>8&255 size&255 1.1.3) Rle (repeated single byte) literal block header : 1-3 bytes size : 5 bits: (IS_RLE<<6) + (0<<4) + size 12 bits: (IS_RLE<<6) + (2<<4) + (size>>8) size&255 20 bits: (IS_RLE<<6) + (3<<4) + (size>>16) size>>8&255 size&255 1.1.4) Huff0-compressed literal block, using precomputed CTables : 3-5 bytes srcSize < 1 KB => 3 bytes (2-2-10-10) => single stream srcSize < 1 KB => 3 bytes (2-2-10-10) srcSize < 16KB => 4 bytes (2-2-14-14) else => 5 bytes (2-2-18-18) big endian convention 1- CTable available (stored into workspace ?) 2- Small input (fast heuristic ? Full comparison ? depend on clevel ?) 1.2) Literal block content 1.2.1) Huff0 block, using sizes from header See Huff0 format 1.2.2) Huff0 block, using prepared table 1.2.3) Raw content 1.2.4) single byte 2) Sequences section TO DO */ /** ZSTDv05_decodeFrameHeader_Part1() : * decode the 1st part of the Frame Header, which tells Frame Header size. * srcSize must be == ZSTDv05_frameHeaderSize_min. * @return : the full size of the Frame Header */ static size_t ZSTDv05_decodeFrameHeader_Part1(ZSTDv05_DCtx* zc, const void* src, size_t srcSize) { U32 magicNumber; if (srcSize != ZSTDv05_frameHeaderSize_min) return ERROR(srcSize_wrong); magicNumber = MEM_readLE32(src); if (magicNumber != ZSTDv05_MAGICNUMBER) return ERROR(prefix_unknown); zc->headerSize = ZSTDv05_frameHeaderSize_min; return zc->headerSize; } size_t ZSTDv05_getFrameParams(ZSTDv05_parameters* params, const void* src, size_t srcSize) { U32 magicNumber; if (srcSize < ZSTDv05_frameHeaderSize_min) return ZSTDv05_frameHeaderSize_max; magicNumber = MEM_readLE32(src); if (magicNumber != ZSTDv05_MAGICNUMBER) return ERROR(prefix_unknown); memset(params, 0, sizeof(*params)); params->windowLog = (((const BYTE*)src)[4] & 15) + ZSTDv05_WINDOWLOG_ABSOLUTEMIN; if ((((const BYTE*)src)[4] >> 4) != 0) return ERROR(frameParameter_unsupported); /* reserved bits */ return 0; } /** ZSTDv05_decodeFrameHeader_Part2() : * decode the full Frame Header. * srcSize must be the size provided by ZSTDv05_decodeFrameHeader_Part1(). * @return : 0, or an error code, which can be tested using ZSTDv05_isError() */ static size_t ZSTDv05_decodeFrameHeader_Part2(ZSTDv05_DCtx* zc, const void* src, size_t srcSize) { size_t result; if (srcSize != zc->headerSize) return ERROR(srcSize_wrong); result = ZSTDv05_getFrameParams(&(zc->params), src, srcSize); if ((MEM_32bits()) && (zc->params.windowLog > 25)) return ERROR(frameParameter_unsupported); return result; } static size_t ZSTDv05_getcBlockSize(const void* src, size_t srcSize, blockProperties_t* bpPtr) { const BYTE* const in = (const BYTE* const)src; BYTE headerFlags; U32 cSize; if (srcSize < 3) return ERROR(srcSize_wrong); headerFlags = *in; cSize = in[2] + (in[1]<<8) + ((in[0] & 7)<<16); bpPtr->blockType = (blockType_t)(headerFlags >> 6); bpPtr->origSize = (bpPtr->blockType == bt_rle) ? 
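/* Added worked example (illustrative, not part of the original source): with
   the 3-byte block header 0x01 0x23 0x45, the code above yields
   blockType = 0x01 >> 6 = 0 and
   cSize = 0x45 + (0x23 << 8) + ((0x01 & 7) << 16) = 0x12345. */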
cSize : 0; if (bpPtr->blockType == bt_end) return 0; if (bpPtr->blockType == bt_rle) return 1; return cSize; } static size_t ZSTDv05_copyRawBlock(void* dst, size_t maxDstSize, const void* src, size_t srcSize) { if (dst==NULL) return ERROR(dstSize_tooSmall); if (srcSize > maxDstSize) return ERROR(dstSize_tooSmall); memcpy(dst, src, srcSize); return srcSize; } /*! ZSTDv05_decodeLiteralsBlock() : @return : nb of bytes read from src (< srcSize ) */ static size_t ZSTDv05_decodeLiteralsBlock(ZSTDv05_DCtx* dctx, const void* src, size_t srcSize) /* note : srcSize < BLOCKSIZE */ { const BYTE* const istart = (const BYTE*) src; /* any compressed block with literals segment must be at least this size */ if (srcSize < MIN_CBLOCK_SIZE) return ERROR(corruption_detected); switch(istart[0]>> 6) { case IS_HUFv05: { size_t litSize, litCSize, singleStream=0; U32 lhSize = ((istart[0]) >> 4) & 3; if (srcSize < 5) return ERROR(corruption_detected); /* srcSize >= MIN_CBLOCK_SIZE == 3; here we need up to 5 for case 3 */ switch(lhSize) { case 0: case 1: default: /* note : default is impossible, since lhSize into [0..3] */ /* 2 - 2 - 10 - 10 */ lhSize=3; singleStream = istart[0] & 16; litSize = ((istart[0] & 15) << 6) + (istart[1] >> 2); litCSize = ((istart[1] & 3) << 8) + istart[2]; break; case 2: /* 2 - 2 - 14 - 14 */ lhSize=4; litSize = ((istart[0] & 15) << 10) + (istart[1] << 2) + (istart[2] >> 6); litCSize = ((istart[2] & 63) << 8) + istart[3]; break; case 3: /* 2 - 2 - 18 - 18 */ lhSize=5; litSize = ((istart[0] & 15) << 14) + (istart[1] << 6) + (istart[2] >> 2); litCSize = ((istart[2] & 3) << 16) + (istart[3] << 8) + istart[4]; break; } if (litSize > BLOCKSIZE) return ERROR(corruption_detected); if (litCSize + lhSize > srcSize) return ERROR(corruption_detected); if (HUFv05_isError(singleStream ? 
HUFv05_decompress1X2(dctx->litBuffer, litSize, istart+lhSize, litCSize) : HUFv05_decompress (dctx->litBuffer, litSize, istart+lhSize, litCSize) )) return ERROR(corruption_detected); dctx->litPtr = dctx->litBuffer; dctx->litSize = litSize; memset(dctx->litBuffer + dctx->litSize, 0, WILDCOPY_OVERLENGTH); return litCSize + lhSize; } case IS_PCH: { size_t errorCode; size_t litSize, litCSize; U32 lhSize = ((istart[0]) >> 4) & 3; if (lhSize != 1) /* only case supported for now : small litSize, single stream */ return ERROR(corruption_detected); if (!dctx->flagStaticTables) return ERROR(dictionary_corrupted); /* 2 - 2 - 10 - 10 */ lhSize=3; litSize = ((istart[0] & 15) << 6) + (istart[1] >> 2); litCSize = ((istart[1] & 3) << 8) + istart[2]; if (litCSize + lhSize > srcSize) return ERROR(corruption_detected); errorCode = HUFv05_decompress1X4_usingDTable(dctx->litBuffer, litSize, istart+lhSize, litCSize, dctx->hufTableX4); if (HUFv05_isError(errorCode)) return ERROR(corruption_detected); dctx->litPtr = dctx->litBuffer; dctx->litSize = litSize; memset(dctx->litBuffer + dctx->litSize, 0, WILDCOPY_OVERLENGTH); return litCSize + lhSize; } case IS_RAW: { size_t litSize; U32 lhSize = ((istart[0]) >> 4) & 3; switch(lhSize) { case 0: case 1: default: /* note : default is impossible, since lhSize into [0..3] */ lhSize=1; litSize = istart[0] & 31; break; case 2: litSize = ((istart[0] & 15) << 8) + istart[1]; break; case 3: litSize = ((istart[0] & 15) << 16) + (istart[1] << 8) + istart[2]; break; } if (lhSize+litSize+WILDCOPY_OVERLENGTH > srcSize) { /* risk reading beyond src buffer with wildcopy */ if (litSize+lhSize > srcSize) return ERROR(corruption_detected); memcpy(dctx->litBuffer, istart+lhSize, litSize); dctx->litPtr = dctx->litBuffer; dctx->litSize = litSize; memset(dctx->litBuffer + dctx->litSize, 0, WILDCOPY_OVERLENGTH); return lhSize+litSize; } /* direct reference into compressed stream */ dctx->litPtr = istart+lhSize; dctx->litSize = litSize; return lhSize+litSize; } case IS_RLE: { size_t litSize; U32 lhSize = ((istart[0]) >> 4) & 3; switch(lhSize) { case 0: case 1: default: /* note : default is impossible, since lhSize into [0..3] */ lhSize = 1; litSize = istart[0] & 31; break; case 2: litSize = ((istart[0] & 15) << 8) + istart[1]; break; case 3: litSize = ((istart[0] & 15) << 16) + (istart[1] << 8) + istart[2]; if (srcSize<4) return ERROR(corruption_detected); /* srcSize >= MIN_CBLOCK_SIZE == 3; here we need lhSize+1 = 4 */ break; } if (litSize > BLOCKSIZE) return ERROR(corruption_detected); memset(dctx->litBuffer, istart[lhSize], litSize + WILDCOPY_OVERLENGTH); dctx->litPtr = dctx->litBuffer; dctx->litSize = litSize; return lhSize+1; } default: return ERROR(corruption_detected); /* impossible */ } } static size_t ZSTDv05_decodeSeqHeaders(int* nbSeq, const BYTE** dumpsPtr, size_t* dumpsLengthPtr, FSEv05_DTable* DTableLL, FSEv05_DTable* DTableML, FSEv05_DTable* DTableOffb, const void* src, size_t srcSize, U32 flagStaticTable) { const BYTE* const istart = (const BYTE* const)src; const BYTE* ip = istart; const BYTE* const iend = istart + srcSize; U32 LLtype, Offtype, MLtype; unsigned LLlog, Offlog, MLlog; size_t dumpsLength; /* check */ if (srcSize < MIN_SEQUENCES_SIZE) return ERROR(srcSize_wrong); /* SeqHead */ *nbSeq = *ip++; if (*nbSeq==0) return 1; if (*nbSeq >= 128) { if (ip >= iend) return ERROR(srcSize_wrong); *nbSeq = ((nbSeq[0]-128)<<8) + *ip++; } if (ip >= iend) return ERROR(srcSize_wrong); LLtype = *ip >> 6; Offtype = (*ip >> 4) & 3; MLtype = (*ip >> 2) & 3; if (*ip & 2) { if (ip+3 > iend) 
return ERROR(srcSize_wrong); dumpsLength = ip[2]; dumpsLength += ip[1] << 8; ip += 3; } else { if (ip+2 > iend) return ERROR(srcSize_wrong); dumpsLength = ip[1]; dumpsLength += (ip[0] & 1) << 8; ip += 2; } *dumpsPtr = ip; ip += dumpsLength; *dumpsLengthPtr = dumpsLength; /* check */ if (ip > iend-3) return ERROR(srcSize_wrong); /* min : all 3 are "raw", hence no header, but at least xxLog bits per type */ /* sequences */ { S16 norm[MaxML+1]; /* assumption : MaxML >= MaxLL >= MaxOff */ size_t headerSize; /* Build DTables */ switch(LLtype) { case FSEv05_ENCODING_RLE : LLlog = 0; FSEv05_buildDTable_rle(DTableLL, *ip++); break; case FSEv05_ENCODING_RAW : LLlog = LLbits; FSEv05_buildDTable_raw(DTableLL, LLbits); break; case FSEv05_ENCODING_STATIC: if (!flagStaticTable) return ERROR(corruption_detected); break; case FSEv05_ENCODING_DYNAMIC : default : /* impossible */ { unsigned max = MaxLL; headerSize = FSEv05_readNCount(norm, &max, &LLlog, ip, iend-ip); if (FSEv05_isError(headerSize)) return ERROR(GENERIC); if (LLlog > LLFSEv05Log) return ERROR(corruption_detected); ip += headerSize; FSEv05_buildDTable(DTableLL, norm, max, LLlog); } } switch(Offtype) { case FSEv05_ENCODING_RLE : Offlog = 0; if (ip > iend-2) return ERROR(srcSize_wrong); /* min : "raw", hence no header, but at least xxLog bits */ FSEv05_buildDTable_rle(DTableOffb, *ip++ & MaxOff); /* if *ip > MaxOff, data is corrupted */ break; case FSEv05_ENCODING_RAW : Offlog = Offbits; FSEv05_buildDTable_raw(DTableOffb, Offbits); break; case FSEv05_ENCODING_STATIC: if (!flagStaticTable) return ERROR(corruption_detected); break; case FSEv05_ENCODING_DYNAMIC : default : /* impossible */ { unsigned max = MaxOff; headerSize = FSEv05_readNCount(norm, &max, &Offlog, ip, iend-ip); if (FSEv05_isError(headerSize)) return ERROR(GENERIC); if (Offlog > OffFSEv05Log) return ERROR(corruption_detected); ip += headerSize; FSEv05_buildDTable(DTableOffb, norm, max, Offlog); } } switch(MLtype) { case FSEv05_ENCODING_RLE : MLlog = 0; if (ip > iend-2) return ERROR(srcSize_wrong); /* min : "raw", hence no header, but at least xxLog bits */ FSEv05_buildDTable_rle(DTableML, *ip++); break; case FSEv05_ENCODING_RAW : MLlog = MLbits; FSEv05_buildDTable_raw(DTableML, MLbits); break; case FSEv05_ENCODING_STATIC: if (!flagStaticTable) return ERROR(corruption_detected); break; case FSEv05_ENCODING_DYNAMIC : default : /* impossible */ { unsigned max = MaxML; headerSize = FSEv05_readNCount(norm, &max, &MLlog, ip, iend-ip); if (FSEv05_isError(headerSize)) return ERROR(GENERIC); if (MLlog > MLFSEv05Log) return ERROR(corruption_detected); ip += headerSize; FSEv05_buildDTable(DTableML, norm, max, MLlog); } } } return ip-istart; } typedef struct { size_t litLength; size_t matchLength; size_t offset; } seq_t; typedef struct { BITv05_DStream_t DStream; FSEv05_DState_t stateLL; FSEv05_DState_t stateOffb; FSEv05_DState_t stateML; size_t prevOffset; const BYTE* dumps; const BYTE* dumpsEnd; } seqState_t; static void ZSTDv05_decodeSequence(seq_t* seq, seqState_t* seqState) { size_t litLength; size_t prevOffset; size_t offset; size_t matchLength; const BYTE* dumps = seqState->dumps; const BYTE* const de = seqState->dumpsEnd; /* Literal length */ litLength = FSEv05_peakSymbol(&(seqState->stateLL)); prevOffset = litLength ? 
seq->offset : seqState->prevOffset; if (litLength == MaxLL) { const U32 add = *dumps++; if (add < 255) litLength += add; else if (dumps + 2 <= de) { litLength = MEM_readLE16(dumps); dumps += 2; if ((litLength & 1) && dumps < de) { litLength += *dumps << 16; dumps += 1; } litLength>>=1; } if (dumps >= de) { dumps = de-1; } /* late correction, to avoid read overflow (data is now corrupted anyway) */ } /* Offset */ { static const U32 offsetPrefix[MaxOff+1] = { 1 /*fake*/, 1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024, 2048, 4096, 8192, 16384, 32768, 65536, 131072, 262144, 524288, 1048576, 2097152, 4194304, 8388608, 16777216, 33554432, /*fake*/ 1, 1, 1, 1, 1 }; U32 offsetCode = FSEv05_peakSymbol(&(seqState->stateOffb)); /* <= maxOff, by table construction */ U32 nbBits = offsetCode - 1; if (offsetCode==0) nbBits = 0; /* cmove */ offset = offsetPrefix[offsetCode] + BITv05_readBits(&(seqState->DStream), nbBits); if (MEM_32bits()) BITv05_reloadDStream(&(seqState->DStream)); if (offsetCode==0) offset = prevOffset; /* repcode, cmove */ if (offsetCode | !litLength) seqState->prevOffset = seq->offset; /* cmove */ FSEv05_decodeSymbol(&(seqState->stateOffb), &(seqState->DStream)); /* update */ } /* Literal length update */ FSEv05_decodeSymbol(&(seqState->stateLL), &(seqState->DStream)); /* update */ if (MEM_32bits()) BITv05_reloadDStream(&(seqState->DStream)); /* MatchLength */ matchLength = FSEv05_decodeSymbol(&(seqState->stateML), &(seqState->DStream)); if (matchLength == MaxML) { const U32 add = dumps>= 1; } if (dumps >= de) { dumps = de-1; } /* late correction, to avoid read overflow (data is now corrupted anyway) */ } matchLength += MINMATCH; /* save result */ seq->litLength = litLength; seq->offset = offset; seq->matchLength = matchLength; seqState->dumps = dumps; #if 0 /* debug */ { static U64 totalDecoded = 0; printf("pos %6u : %3u literals & match %3u bytes at distance %6u \n", (U32)(totalDecoded), (U32)litLength, (U32)matchLength, (U32)offset); totalDecoded += litLength + matchLength; } #endif } static size_t ZSTDv05_execSequence(BYTE* op, BYTE* const oend, seq_t sequence, const BYTE** litPtr, const BYTE* const litLimit, const BYTE* const base, const BYTE* const vBase, const BYTE* const dictEnd) { static const int dec32table[] = { 0, 1, 2, 1, 4, 4, 4, 4 }; /* added */ static const int dec64table[] = { 8, 8, 8, 7, 8, 9,10,11 }; /* subtracted */ BYTE* const oLitEnd = op + sequence.litLength; const size_t sequenceLength = sequence.litLength + sequence.matchLength; BYTE* const oMatchEnd = op + sequenceLength; /* risk : address space overflow (32-bits) */ BYTE* const oend_8 = oend-8; const BYTE* const litEnd = *litPtr + sequence.litLength; const BYTE* match = oLitEnd - sequence.offset; /* check */ if (oLitEnd > oend_8) return ERROR(dstSize_tooSmall); /* last match must start at a minimum distance of 8 from oend */ if (oMatchEnd > oend) return ERROR(dstSize_tooSmall); /* overwrite beyond dst buffer */ if (litEnd > litLimit) return ERROR(corruption_detected); /* risk read beyond lit buffer */ /* copy Literals */ ZSTDv05_wildcopy(op, *litPtr, sequence.litLength); /* note : oLitEnd <= oend-8 : no risk of overwrite beyond oend */ op = oLitEnd; *litPtr = litEnd; /* update for next sequence */ /* copy Match */ if (sequence.offset > (size_t)(oLitEnd - base)) { /* offset beyond prefix */ if (sequence.offset > (size_t)(oLitEnd - vBase)) return ERROR(corruption_detected); match = dictEnd - (base-match); if (match + sequence.matchLength <= dictEnd) { memmove(oLitEnd, match, sequence.matchLength); return 
sequenceLength; } /* span extDict & currentPrefixSegment */ { size_t length1 = dictEnd - match; memmove(oLitEnd, match, length1); op = oLitEnd + length1; sequence.matchLength -= length1; match = base; if (op > oend_8 || sequence.matchLength < MINMATCH) { while (op < oMatchEnd) *op++ = *match++; return sequenceLength; } } } /* Requirement: op <= oend_8 */ /* match within prefix */ if (sequence.offset < 8) { /* close range match, overlap */ const int sub2 = dec64table[sequence.offset]; op[0] = match[0]; op[1] = match[1]; op[2] = match[2]; op[3] = match[3]; match += dec32table[sequence.offset]; ZSTDv05_copy4(op+4, match); match -= sub2; } else { ZSTDv05_copy8(op, match); } op += 8; match += 8; if (oMatchEnd > oend-(16-MINMATCH)) { if (op < oend_8) { ZSTDv05_wildcopy(op, match, oend_8 - op); match += oend_8 - op; op = oend_8; } while (op < oMatchEnd) *op++ = *match++; } else { ZSTDv05_wildcopy(op, match, (ptrdiff_t)sequence.matchLength-8); /* works even if matchLength < 8 */ } return sequenceLength; } static size_t ZSTDv05_decompressSequences( ZSTDv05_DCtx* dctx, void* dst, size_t maxDstSize, const void* seqStart, size_t seqSize) { const BYTE* ip = (const BYTE*)seqStart; const BYTE* const iend = ip + seqSize; BYTE* const ostart = (BYTE* const)dst; BYTE* op = ostart; BYTE* const oend = ostart + maxDstSize; size_t errorCode, dumpsLength=0; const BYTE* litPtr = dctx->litPtr; const BYTE* const litEnd = litPtr + dctx->litSize; int nbSeq=0; const BYTE* dumps = NULL; unsigned* DTableLL = dctx->LLTable; unsigned* DTableML = dctx->MLTable; unsigned* DTableOffb = dctx->OffTable; const BYTE* const base = (const BYTE*) (dctx->base); const BYTE* const vBase = (const BYTE*) (dctx->vBase); const BYTE* const dictEnd = (const BYTE*) (dctx->dictEnd); /* Build Decoding Tables */ errorCode = ZSTDv05_decodeSeqHeaders(&nbSeq, &dumps, &dumpsLength, DTableLL, DTableML, DTableOffb, ip, seqSize, dctx->flagStaticTables); if (ZSTDv05_isError(errorCode)) return errorCode; ip += errorCode; /* Regen sequences */ if (nbSeq) { seq_t sequence; seqState_t seqState; memset(&sequence, 0, sizeof(sequence)); sequence.offset = REPCODE_STARTVALUE; seqState.dumps = dumps; seqState.dumpsEnd = dumps + dumpsLength; seqState.prevOffset = REPCODE_STARTVALUE; errorCode = BITv05_initDStream(&(seqState.DStream), ip, iend-ip); if (ERR_isError(errorCode)) return ERROR(corruption_detected); FSEv05_initDState(&(seqState.stateLL), &(seqState.DStream), DTableLL); FSEv05_initDState(&(seqState.stateOffb), &(seqState.DStream), DTableOffb); FSEv05_initDState(&(seqState.stateML), &(seqState.DStream), DTableML); for ( ; (BITv05_reloadDStream(&(seqState.DStream)) <= BITv05_DStream_completed) && nbSeq ; ) { size_t oneSeqSize; nbSeq--; ZSTDv05_decodeSequence(&sequence, &seqState); oneSeqSize = ZSTDv05_execSequence(op, oend, sequence, &litPtr, litEnd, base, vBase, dictEnd); if (ZSTDv05_isError(oneSeqSize)) return oneSeqSize; op += oneSeqSize; } /* check if reached exact end */ if (nbSeq) return ERROR(corruption_detected); } /* last literal segment */ { size_t lastLLSize = litEnd - litPtr; if (litPtr > litEnd) return ERROR(corruption_detected); /* too many literals already used */ if (op+lastLLSize > oend) return ERROR(dstSize_tooSmall); if (lastLLSize > 0) { memcpy(op, litPtr, lastLLSize); op += lastLLSize; } } return op-ostart; } static void ZSTDv05_checkContinuity(ZSTDv05_DCtx* dctx, const void* dst) { if (dst != dctx->previousDstEnd) { /* not contiguous */ dctx->dictEnd = dctx->previousDstEnd; dctx->vBase = (const char*)dst - ((const 
char*)(dctx->previousDstEnd) - (const char*)(dctx->base)); dctx->base = dst; dctx->previousDstEnd = dst; } } static size_t ZSTDv05_decompressBlock_internal(ZSTDv05_DCtx* dctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize) { /* blockType == blockCompressed */ const BYTE* ip = (const BYTE*)src; size_t litCSize; if (srcSize >= BLOCKSIZE) return ERROR(srcSize_wrong); /* Decode literals sub-block */ litCSize = ZSTDv05_decodeLiteralsBlock(dctx, src, srcSize); if (ZSTDv05_isError(litCSize)) return litCSize; ip += litCSize; srcSize -= litCSize; return ZSTDv05_decompressSequences(dctx, dst, dstCapacity, ip, srcSize); } size_t ZSTDv05_decompressBlock(ZSTDv05_DCtx* dctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize) { ZSTDv05_checkContinuity(dctx, dst); return ZSTDv05_decompressBlock_internal(dctx, dst, dstCapacity, src, srcSize); } /*! ZSTDv05_decompress_continueDCtx * dctx must have been properly initialized */ static size_t ZSTDv05_decompress_continueDCtx(ZSTDv05_DCtx* dctx, void* dst, size_t maxDstSize, const void* src, size_t srcSize) { const BYTE* ip = (const BYTE*)src; const BYTE* iend = ip + srcSize; BYTE* const ostart = (BYTE* const)dst; BYTE* op = ostart; BYTE* const oend = ostart + maxDstSize; size_t remainingSize = srcSize; blockProperties_t blockProperties; memset(&blockProperties, 0, sizeof(blockProperties)); /* Frame Header */ { size_t frameHeaderSize; if (srcSize < ZSTDv05_frameHeaderSize_min+ZSTDv05_blockHeaderSize) return ERROR(srcSize_wrong); frameHeaderSize = ZSTDv05_decodeFrameHeader_Part1(dctx, src, ZSTDv05_frameHeaderSize_min); if (ZSTDv05_isError(frameHeaderSize)) return frameHeaderSize; if (srcSize < frameHeaderSize+ZSTDv05_blockHeaderSize) return ERROR(srcSize_wrong); ip += frameHeaderSize; remainingSize -= frameHeaderSize; frameHeaderSize = ZSTDv05_decodeFrameHeader_Part2(dctx, src, frameHeaderSize); if (ZSTDv05_isError(frameHeaderSize)) return frameHeaderSize; } /* Loop on each block */ while (1) { size_t decodedSize=0; size_t cBlockSize = ZSTDv05_getcBlockSize(ip, iend-ip, &blockProperties); if (ZSTDv05_isError(cBlockSize)) return cBlockSize; ip += ZSTDv05_blockHeaderSize; remainingSize -= ZSTDv05_blockHeaderSize; if (cBlockSize > remainingSize) return ERROR(srcSize_wrong); switch(blockProperties.blockType) { case bt_compressed: decodedSize = ZSTDv05_decompressBlock_internal(dctx, op, oend-op, ip, cBlockSize); break; case bt_raw : decodedSize = ZSTDv05_copyRawBlock(op, oend-op, ip, cBlockSize); break; case bt_rle : return ERROR(GENERIC); /* not yet supported */ break; case bt_end : /* end of frame */ if (remainingSize) return ERROR(srcSize_wrong); break; default: return ERROR(GENERIC); /* impossible */ } if (cBlockSize == 0) break; /* bt_end */ if (ZSTDv05_isError(decodedSize)) return decodedSize; op += decodedSize; ip += cBlockSize; remainingSize -= cBlockSize; } return op-ostart; } size_t ZSTDv05_decompress_usingPreparedDCtx(ZSTDv05_DCtx* dctx, const ZSTDv05_DCtx* refDCtx, void* dst, size_t maxDstSize, const void* src, size_t srcSize) { ZSTDv05_copyDCtx(dctx, refDCtx); ZSTDv05_checkContinuity(dctx, dst); return ZSTDv05_decompress_continueDCtx(dctx, dst, maxDstSize, src, srcSize); } size_t ZSTDv05_decompress_usingDict(ZSTDv05_DCtx* dctx, void* dst, size_t maxDstSize, const void* src, size_t srcSize, const void* dict, size_t dictSize) { ZSTDv05_decompressBegin_usingDict(dctx, dict, dictSize); ZSTDv05_checkContinuity(dctx, dst); return ZSTDv05_decompress_continueDCtx(dctx, dst, maxDstSize, src, srcSize); } size_t 
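/* Added usage sketch (illustrative only; dst, dstCapacity, src and srcSize are
   assumed caller-provided buffers and sizes, not defined here):
       size_t r = ZSTDv05_decompress(dst, dstCapacity, src, srcSize);
       if (ZSTDv05_isError(r)) { report ZSTDv05_getErrorName(r); }
       else { the first r bytes of dst hold the regenerated data; }
   ZSTDv05_decompress() below uses its own DCtx (heap- or stack-allocated,
   depending on ZSTDv05_HEAPMODE); ZSTDv05_decompressDCtx() does the same work
   with a caller-managed context. */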
ZSTDv05_decompressDCtx(ZSTDv05_DCtx* dctx, void* dst, size_t maxDstSize, const void* src, size_t srcSize) { return ZSTDv05_decompress_usingDict(dctx, dst, maxDstSize, src, srcSize, NULL, 0); } size_t ZSTDv05_decompress(void* dst, size_t maxDstSize, const void* src, size_t srcSize) { #if defined(ZSTDv05_HEAPMODE) && (ZSTDv05_HEAPMODE==1) size_t regenSize; ZSTDv05_DCtx* dctx = ZSTDv05_createDCtx(); if (dctx==NULL) return ERROR(memory_allocation); regenSize = ZSTDv05_decompressDCtx(dctx, dst, maxDstSize, src, srcSize); ZSTDv05_freeDCtx(dctx); return regenSize; #else ZSTDv05_DCtx dctx; return ZSTDv05_decompressDCtx(&dctx, dst, maxDstSize, src, srcSize); #endif } /* ZSTD_errorFrameSizeInfoLegacy() : assumes `cSize` and `dBound` are _not_ NULL */ static void ZSTD_errorFrameSizeInfoLegacy(size_t* cSize, unsigned long long* dBound, size_t ret) { *cSize = ret; *dBound = ZSTD_CONTENTSIZE_ERROR; } void ZSTDv05_findFrameSizeInfoLegacy(const void *src, size_t srcSize, size_t* cSize, unsigned long long* dBound) { const BYTE* ip = (const BYTE*)src; size_t remainingSize = srcSize; size_t nbBlocks = 0; blockProperties_t blockProperties; /* Frame Header */ if (srcSize < ZSTDv05_frameHeaderSize_min) { ZSTD_errorFrameSizeInfoLegacy(cSize, dBound, ERROR(srcSize_wrong)); return; } if (MEM_readLE32(src) != ZSTDv05_MAGICNUMBER) { ZSTD_errorFrameSizeInfoLegacy(cSize, dBound, ERROR(prefix_unknown)); return; } ip += ZSTDv05_frameHeaderSize_min; remainingSize -= ZSTDv05_frameHeaderSize_min; /* Loop on each block */ while (1) { size_t cBlockSize = ZSTDv05_getcBlockSize(ip, remainingSize, &blockProperties); if (ZSTDv05_isError(cBlockSize)) { ZSTD_errorFrameSizeInfoLegacy(cSize, dBound, cBlockSize); return; } ip += ZSTDv05_blockHeaderSize; remainingSize -= ZSTDv05_blockHeaderSize; if (cBlockSize > remainingSize) { ZSTD_errorFrameSizeInfoLegacy(cSize, dBound, ERROR(srcSize_wrong)); return; } if (cBlockSize == 0) break; /* bt_end */ ip += cBlockSize; remainingSize -= cBlockSize; nbBlocks++; } *cSize = ip - (const BYTE*)src; *dBound = nbBlocks * BLOCKSIZE; } /* ****************************** * Streaming Decompression API ********************************/ size_t ZSTDv05_nextSrcSizeToDecompress(ZSTDv05_DCtx* dctx) { return dctx->expected; } size_t ZSTDv05_decompressContinue(ZSTDv05_DCtx* dctx, void* dst, size_t maxDstSize, const void* src, size_t srcSize) { /* Sanity check */ if (srcSize != dctx->expected) return ERROR(srcSize_wrong); ZSTDv05_checkContinuity(dctx, dst); /* Decompress : frame header; part 1 */ switch (dctx->stage) { case ZSTDv05ds_getFrameHeaderSize : /* get frame header size */ if (srcSize != ZSTDv05_frameHeaderSize_min) return ERROR(srcSize_wrong); /* impossible */ dctx->headerSize = ZSTDv05_decodeFrameHeader_Part1(dctx, src, ZSTDv05_frameHeaderSize_min); if (ZSTDv05_isError(dctx->headerSize)) return dctx->headerSize; memcpy(dctx->headerBuffer, src, ZSTDv05_frameHeaderSize_min); if (dctx->headerSize > ZSTDv05_frameHeaderSize_min) return ERROR(GENERIC); /* should never happen */ dctx->expected = 0; /* not necessary to copy more */ /* fallthrough */ case ZSTDv05ds_decodeFrameHeader: /* get frame header */ { size_t const result = ZSTDv05_decodeFrameHeader_Part2(dctx, dctx->headerBuffer, dctx->headerSize); if (ZSTDv05_isError(result)) return result; dctx->expected = ZSTDv05_blockHeaderSize; dctx->stage = ZSTDv05ds_decodeBlockHeader; return 0; } case ZSTDv05ds_decodeBlockHeader: { /* Decode block header */ blockProperties_t bp; size_t blockSize = ZSTDv05_getcBlockSize(src, ZSTDv05_blockHeaderSize, &bp); if 
(ZSTDv05_isError(blockSize)) return blockSize; if (bp.blockType == bt_end) { dctx->expected = 0; dctx->stage = ZSTDv05ds_getFrameHeaderSize; } else { dctx->expected = blockSize; dctx->bType = bp.blockType; dctx->stage = ZSTDv05ds_decompressBlock; } return 0; } case ZSTDv05ds_decompressBlock: { /* Decompress : block content */ size_t rSize; switch(dctx->bType) { case bt_compressed: rSize = ZSTDv05_decompressBlock_internal(dctx, dst, maxDstSize, src, srcSize); break; case bt_raw : rSize = ZSTDv05_copyRawBlock(dst, maxDstSize, src, srcSize); break; case bt_rle : return ERROR(GENERIC); /* not yet handled */ break; case bt_end : /* should never happen (filtered at phase 1) */ rSize = 0; break; default: return ERROR(GENERIC); /* impossible */ } dctx->stage = ZSTDv05ds_decodeBlockHeader; dctx->expected = ZSTDv05_blockHeaderSize; dctx->previousDstEnd = (char*)dst + rSize; return rSize; } default: return ERROR(GENERIC); /* impossible */ } } static void ZSTDv05_refDictContent(ZSTDv05_DCtx* dctx, const void* dict, size_t dictSize) { dctx->dictEnd = dctx->previousDstEnd; dctx->vBase = (const char*)dict - ((const char*)(dctx->previousDstEnd) - (const char*)(dctx->base)); dctx->base = dict; dctx->previousDstEnd = (const char*)dict + dictSize; } static size_t ZSTDv05_loadEntropy(ZSTDv05_DCtx* dctx, const void* dict, size_t dictSize) { size_t hSize, offcodeHeaderSize, matchlengthHeaderSize, errorCode, litlengthHeaderSize; short offcodeNCount[MaxOff+1]; unsigned offcodeMaxValue=MaxOff, offcodeLog; short matchlengthNCount[MaxML+1]; unsigned matchlengthMaxValue = MaxML, matchlengthLog; short litlengthNCount[MaxLL+1]; unsigned litlengthMaxValue = MaxLL, litlengthLog; hSize = HUFv05_readDTableX4(dctx->hufTableX4, dict, dictSize); if (HUFv05_isError(hSize)) return ERROR(dictionary_corrupted); dict = (const char*)dict + hSize; dictSize -= hSize; offcodeHeaderSize = FSEv05_readNCount(offcodeNCount, &offcodeMaxValue, &offcodeLog, dict, dictSize); if (FSEv05_isError(offcodeHeaderSize)) return ERROR(dictionary_corrupted); if (offcodeLog > OffFSEv05Log) return ERROR(dictionary_corrupted); errorCode = FSEv05_buildDTable(dctx->OffTable, offcodeNCount, offcodeMaxValue, offcodeLog); if (FSEv05_isError(errorCode)) return ERROR(dictionary_corrupted); dict = (const char*)dict + offcodeHeaderSize; dictSize -= offcodeHeaderSize; matchlengthHeaderSize = FSEv05_readNCount(matchlengthNCount, &matchlengthMaxValue, &matchlengthLog, dict, dictSize); if (FSEv05_isError(matchlengthHeaderSize)) return ERROR(dictionary_corrupted); if (matchlengthLog > MLFSEv05Log) return ERROR(dictionary_corrupted); errorCode = FSEv05_buildDTable(dctx->MLTable, matchlengthNCount, matchlengthMaxValue, matchlengthLog); if (FSEv05_isError(errorCode)) return ERROR(dictionary_corrupted); dict = (const char*)dict + matchlengthHeaderSize; dictSize -= matchlengthHeaderSize; litlengthHeaderSize = FSEv05_readNCount(litlengthNCount, &litlengthMaxValue, &litlengthLog, dict, dictSize); if (litlengthLog > LLFSEv05Log) return ERROR(dictionary_corrupted); if (FSEv05_isError(litlengthHeaderSize)) return ERROR(dictionary_corrupted); errorCode = FSEv05_buildDTable(dctx->LLTable, litlengthNCount, litlengthMaxValue, litlengthLog); if (FSEv05_isError(errorCode)) return ERROR(dictionary_corrupted); dctx->flagStaticTables = 1; return hSize + offcodeHeaderSize + matchlengthHeaderSize + litlengthHeaderSize; } static size_t ZSTDv05_decompress_insertDictionary(ZSTDv05_DCtx* dctx, const void* dict, size_t dictSize) { size_t eSize; U32 magic = MEM_readLE32(dict); if (magic != 
ZSTDv05_DICT_MAGIC) { /* pure content mode */ ZSTDv05_refDictContent(dctx, dict, dictSize); return 0; } /* load entropy tables */ dict = (const char*)dict + 4; dictSize -= 4; eSize = ZSTDv05_loadEntropy(dctx, dict, dictSize); if (ZSTDv05_isError(eSize)) return ERROR(dictionary_corrupted); /* reference dictionary content */ dict = (const char*)dict + eSize; dictSize -= eSize; ZSTDv05_refDictContent(dctx, dict, dictSize); return 0; } size_t ZSTDv05_decompressBegin_usingDict(ZSTDv05_DCtx* dctx, const void* dict, size_t dictSize) { size_t errorCode; errorCode = ZSTDv05_decompressBegin(dctx); if (ZSTDv05_isError(errorCode)) return errorCode; if (dict && dictSize) { errorCode = ZSTDv05_decompress_insertDictionary(dctx, dict, dictSize); if (ZSTDv05_isError(errorCode)) return ERROR(dictionary_corrupted); } return 0; } /* Buffered version of Zstd compression library Copyright (C) 2015-2016, Yann Collet. BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. You can contact the author at : - zstd source repository : https://github.com/Cyan4973/zstd - ztsd public forum : https://groups.google.com/forum/#!forum/lz4c */ /* The objects defined into this file should be considered experimental. * They are not labelled stable, as their prototype may change in the future. * You can use them for tests, provide feedback, or if you can endure risk of future changes. */ /* ************************************* * Constants ***************************************/ static size_t ZBUFFv05_blockHeaderSize = 3; /* *** Compression *** */ static size_t ZBUFFv05_limitCopy(void* dst, size_t maxDstSize, const void* src, size_t srcSize) { size_t length = MIN(maxDstSize, srcSize); if (length > 0) { memcpy(dst, src, length); } return length; } /** ************************************************ * Streaming decompression * * A ZBUFFv05_DCtx object is required to track streaming operation. * Use ZBUFFv05_createDCtx() and ZBUFFv05_freeDCtx() to create/release resources. * Use ZBUFFv05_decompressInit() to start a new decompression operation. * ZBUFFv05_DCtx objects can be reused multiple times. * * Use ZBUFFv05_decompressContinue() repetitively to consume your input. * *srcSizePtr and *maxDstSizePtr can be any size. 
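* A minimal call sequence might look like this (illustrative sketch, added
* for clarity; in, inAvail, out and outCap are assumptions, not part of the
* original interface description) :
*     ZBUFFv05_DCtx* zbd = ZBUFFv05_createDCtx();
*     ZBUFFv05_decompressInit(zbd);
*     while (inAvail) {
*         size_t srcSize = inAvail, dstSize = outCap;
*         size_t hint = ZBUFFv05_decompressContinue(zbd, out, &dstSize, in, &srcSize);
*         if (ZBUFFv05_isError(hint)) break;     (report ZBUFFv05_getErrorName(hint))
*         in += srcSize; inAvail -= srcSize;     (input actually consumed)
*         flush dstSize bytes from out;          (output actually produced)
*         if (hint == 0) break;                  (frame completely decoded)
*     }
*     ZBUFFv05_freeDCtx(zbd);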
* The function will report how many bytes were read or written by modifying *srcSizePtr and *maxDstSizePtr. * Note that it may not consume the entire input, in which case it's up to the caller to call again the function with remaining input. * The content of dst will be overwritten (up to *maxDstSizePtr) at each function call, so save its content if it matters or change dst . * return : a hint to preferred nb of bytes to use as input for next function call (it's only a hint, to improve latency) * or 0 when a frame is completely decoded * or an error code, which can be tested using ZBUFFv05_isError(). * * Hint : recommended buffer sizes (not compulsory) * output : 128 KB block size is the internal unit, it ensures it's always possible to write a full block when it's decoded. * input : just follow indications from ZBUFFv05_decompressContinue() to minimize latency. It should always be <= 128 KB + 3 . * **************************************************/ typedef enum { ZBUFFv05ds_init, ZBUFFv05ds_readHeader, ZBUFFv05ds_loadHeader, ZBUFFv05ds_decodeHeader, ZBUFFv05ds_read, ZBUFFv05ds_load, ZBUFFv05ds_flush } ZBUFFv05_dStage; /* *** Resource management *** */ #define ZSTDv05_frameHeaderSize_max 5 /* too magical, should come from reference */ struct ZBUFFv05_DCtx_s { ZSTDv05_DCtx* zc; ZSTDv05_parameters params; char* inBuff; size_t inBuffSize; size_t inPos; char* outBuff; size_t outBuffSize; size_t outStart; size_t outEnd; size_t hPos; ZBUFFv05_dStage stage; unsigned char headerBuffer[ZSTDv05_frameHeaderSize_max]; }; /* typedef'd to ZBUFFv05_DCtx within "zstd_buffered.h" */ ZBUFFv05_DCtx* ZBUFFv05_createDCtx(void) { ZBUFFv05_DCtx* zbc = (ZBUFFv05_DCtx*)malloc(sizeof(ZBUFFv05_DCtx)); if (zbc==NULL) return NULL; memset(zbc, 0, sizeof(*zbc)); zbc->zc = ZSTDv05_createDCtx(); zbc->stage = ZBUFFv05ds_init; return zbc; } size_t ZBUFFv05_freeDCtx(ZBUFFv05_DCtx* zbc) { if (zbc==NULL) return 0; /* support free on null */ ZSTDv05_freeDCtx(zbc->zc); free(zbc->inBuff); free(zbc->outBuff); free(zbc); return 0; } /* *** Initialization *** */ size_t ZBUFFv05_decompressInitDictionary(ZBUFFv05_DCtx* zbc, const void* dict, size_t dictSize) { zbc->stage = ZBUFFv05ds_readHeader; zbc->hPos = zbc->inPos = zbc->outStart = zbc->outEnd = 0; return ZSTDv05_decompressBegin_usingDict(zbc->zc, dict, dictSize); } size_t ZBUFFv05_decompressInit(ZBUFFv05_DCtx* zbc) { return ZBUFFv05_decompressInitDictionary(zbc, NULL, 0); } /* *** Decompression *** */ size_t ZBUFFv05_decompressContinue(ZBUFFv05_DCtx* zbc, void* dst, size_t* maxDstSizePtr, const void* src, size_t* srcSizePtr) { const char* const istart = (const char*)src; const char* ip = istart; const char* const iend = istart + *srcSizePtr; char* const ostart = (char*)dst; char* op = ostart; char* const oend = ostart + *maxDstSizePtr; U32 notDone = 1; while (notDone) { switch(zbc->stage) { case ZBUFFv05ds_init : return ERROR(init_missing); case ZBUFFv05ds_readHeader : /* read header from src */ { size_t headerSize = ZSTDv05_getFrameParams(&(zbc->params), src, *srcSizePtr); if (ZSTDv05_isError(headerSize)) return headerSize; if (headerSize) { /* not enough input to decode header : tell how many bytes would be necessary */ memcpy(zbc->headerBuffer+zbc->hPos, src, *srcSizePtr); zbc->hPos += *srcSizePtr; *maxDstSizePtr = 0; zbc->stage = ZBUFFv05ds_loadHeader; return headerSize - zbc->hPos; } zbc->stage = ZBUFFv05ds_decodeHeader; break; } /* fall-through */ case ZBUFFv05ds_loadHeader: /* complete header from src */ { size_t headerSize = ZBUFFv05_limitCopy( zbc->headerBuffer + 
zbc->hPos, ZSTDv05_frameHeaderSize_max - zbc->hPos, src, *srcSizePtr); zbc->hPos += headerSize; ip += headerSize; headerSize = ZSTDv05_getFrameParams(&(zbc->params), zbc->headerBuffer, zbc->hPos); if (ZSTDv05_isError(headerSize)) return headerSize; if (headerSize) { /* not enough input to decode header : tell how many bytes would be necessary */ *maxDstSizePtr = 0; return headerSize - zbc->hPos; } /* zbc->stage = ZBUFFv05ds_decodeHeader; break; */ /* useless : stage follows */ } /* fall-through */ case ZBUFFv05ds_decodeHeader: /* apply header to create / resize buffers */ { size_t neededOutSize = (size_t)1 << zbc->params.windowLog; size_t neededInSize = BLOCKSIZE; /* a block is never > BLOCKSIZE */ if (zbc->inBuffSize < neededInSize) { free(zbc->inBuff); zbc->inBuffSize = neededInSize; zbc->inBuff = (char*)malloc(neededInSize); if (zbc->inBuff == NULL) return ERROR(memory_allocation); } if (zbc->outBuffSize < neededOutSize) { free(zbc->outBuff); zbc->outBuffSize = neededOutSize; zbc->outBuff = (char*)malloc(neededOutSize); if (zbc->outBuff == NULL) return ERROR(memory_allocation); } } if (zbc->hPos) { /* some data already loaded into headerBuffer : transfer into inBuff */ memcpy(zbc->inBuff, zbc->headerBuffer, zbc->hPos); zbc->inPos = zbc->hPos; zbc->hPos = 0; zbc->stage = ZBUFFv05ds_load; break; } zbc->stage = ZBUFFv05ds_read; /* fall-through */ case ZBUFFv05ds_read: { size_t neededInSize = ZSTDv05_nextSrcSizeToDecompress(zbc->zc); if (neededInSize==0) { /* end of frame */ zbc->stage = ZBUFFv05ds_init; notDone = 0; break; } if ((size_t)(iend-ip) >= neededInSize) { /* directly decode from src */ size_t decodedSize = ZSTDv05_decompressContinue(zbc->zc, zbc->outBuff + zbc->outStart, zbc->outBuffSize - zbc->outStart, ip, neededInSize); if (ZSTDv05_isError(decodedSize)) return decodedSize; ip += neededInSize; if (!decodedSize) break; /* this was just a header */ zbc->outEnd = zbc->outStart + decodedSize; zbc->stage = ZBUFFv05ds_flush; break; } if (ip==iend) { notDone = 0; break; } /* no more input */ zbc->stage = ZBUFFv05ds_load; } /* fall-through */ case ZBUFFv05ds_load: { size_t neededInSize = ZSTDv05_nextSrcSizeToDecompress(zbc->zc); size_t toLoad = neededInSize - zbc->inPos; /* should always be <= remaining space within inBuff */ size_t loadedSize; if (toLoad > zbc->inBuffSize - zbc->inPos) return ERROR(corruption_detected); /* should never happen */ loadedSize = ZBUFFv05_limitCopy(zbc->inBuff + zbc->inPos, toLoad, ip, iend-ip); ip += loadedSize; zbc->inPos += loadedSize; if (loadedSize < toLoad) { notDone = 0; break; } /* not enough input, wait for more */ { size_t decodedSize = ZSTDv05_decompressContinue(zbc->zc, zbc->outBuff + zbc->outStart, zbc->outBuffSize - zbc->outStart, zbc->inBuff, neededInSize); if (ZSTDv05_isError(decodedSize)) return decodedSize; zbc->inPos = 0; /* input is consumed */ if (!decodedSize) { zbc->stage = ZBUFFv05ds_read; break; } /* this was just a header */ zbc->outEnd = zbc->outStart + decodedSize; zbc->stage = ZBUFFv05ds_flush; /* break; */ /* ZBUFFv05ds_flush follows */ } } /* fall-through */ case ZBUFFv05ds_flush: { size_t toFlushSize = zbc->outEnd - zbc->outStart; size_t flushedSize = ZBUFFv05_limitCopy(op, oend-op, zbc->outBuff + zbc->outStart, toFlushSize); op += flushedSize; zbc->outStart += flushedSize; if (flushedSize == toFlushSize) { zbc->stage = ZBUFFv05ds_read; if (zbc->outStart + BLOCKSIZE > zbc->outBuffSize) zbc->outStart = zbc->outEnd = 0; break; } /* cannot flush everything */ notDone = 0; break; } default: return ERROR(GENERIC); /* impossible 
*/ } } *srcSizePtr = ip-istart; *maxDstSizePtr = op-ostart; { size_t nextSrcSizeHint = ZSTDv05_nextSrcSizeToDecompress(zbc->zc); if (nextSrcSizeHint > ZBUFFv05_blockHeaderSize) nextSrcSizeHint+= ZBUFFv05_blockHeaderSize; /* get next block header too */ nextSrcSizeHint -= zbc->inPos; /* already loaded*/ return nextSrcSizeHint; } } /* ************************************* * Tool functions ***************************************/ unsigned ZBUFFv05_isError(size_t errorCode) { return ERR_isError(errorCode); } const char* ZBUFFv05_getErrorName(size_t errorCode) { return ERR_getErrorName(errorCode); } size_t ZBUFFv05_recommendedDInSize(void) { return BLOCKSIZE + ZBUFFv05_blockHeaderSize /* block header size*/ ; } size_t ZBUFFv05_recommendedDOutSize(void) { return BLOCKSIZE; } borgbackup-1.1.15/src/borg/algorithms/zstd/lib/legacy/zstd_v03.c0000644000175000017500000033707613771325506024434 0ustar useruser00000000000000/* * Copyright (c) 2016-2020, Yann Collet, Facebook, Inc. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the * LICENSE file in the root directory of this source tree) and the GPLv2 (found * in the COPYING file in the root directory of this source tree). * You may select, at your option, one of the above-listed licenses. */ #include <stddef.h> /* size_t, ptrdiff_t */ #include "zstd_v03.h" #include "../common/error_private.h" /****************************************** * Compiler-specific ******************************************/ #if defined(_MSC_VER) /* Visual Studio */ # include <stdlib.h> /* _byteswap_ulong */ # include <intrin.h> /* _byteswap_* */ #endif /* ****************************************************************** mem.h low-level memory access routines Copyright (C) 2013-2015, Yann Collet. BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
You can contact the author at : - FSE source repository : https://github.com/Cyan4973/FiniteStateEntropy - Public forum : https://groups.google.com/forum/#!forum/lz4c ****************************************************************** */ #ifndef MEM_H_MODULE #define MEM_H_MODULE #if defined (__cplusplus) extern "C" { #endif /****************************************** * Includes ******************************************/ #include <stddef.h> /* size_t, ptrdiff_t */ #include <string.h> /* memcpy */ /****************************************** * Compiler-specific ******************************************/ #if defined(__GNUC__) # define MEM_STATIC static __attribute__((unused)) #elif defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) # define MEM_STATIC static inline #elif defined(_MSC_VER) # define MEM_STATIC static __inline #else # define MEM_STATIC static /* this version may generate warnings for unused static functions; disable the relevant warning */ #endif /**************************************************************** * Basic Types *****************************************************************/ #if defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) # include <stdint.h> typedef uint8_t BYTE; typedef uint16_t U16; typedef int16_t S16; typedef uint32_t U32; typedef int32_t S32; typedef uint64_t U64; typedef int64_t S64; #else typedef unsigned char BYTE; typedef unsigned short U16; typedef signed short S16; typedef unsigned int U32; typedef signed int S32; typedef unsigned long long U64; typedef signed long long S64; #endif /**************************************************************** * Memory I/O *****************************************************************/ /* MEM_FORCE_MEMORY_ACCESS * By default, access to unaligned memory is controlled by `memcpy()`, which is safe and portable. * Unfortunately, on some target/compiler combinations, the generated assembly is sub-optimal. * The below switch allow to select different access method for improved performance. * Method 0 (default) : use `memcpy()`. Safe and portable. * Method 1 : `__packed` statement. It depends on compiler extension (ie, not portable). * This method is safe if your compiler supports it, and *generally* as fast or faster than `memcpy`. * Method 2 : direct access. This method is portable but violate C standard. * It can generate buggy code on targets generating assembly depending on alignment. * But in some circumstances, it's the only known way to get the most performance (ie GCC + ARMv6) * See http://fastcompression.blogspot.fr/2015/08/accessing-unaligned-memory.html for details. 
* Prefer these methods in priority order (0 > 1 > 2) */ #ifndef MEM_FORCE_MEMORY_ACCESS /* can be defined externally, on command line for example */ # if defined(__GNUC__) && ( defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) || defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6Z__) || defined(__ARM_ARCH_6ZK__) || defined(__ARM_ARCH_6T2__) ) # define MEM_FORCE_MEMORY_ACCESS 2 # elif (defined(__INTEL_COMPILER) && !defined(WIN32)) || \ (defined(__GNUC__) && ( defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__) || defined(__ARM_ARCH_7R__) || defined(__ARM_ARCH_7M__) || defined(__ARM_ARCH_7S__) )) # define MEM_FORCE_MEMORY_ACCESS 1 # endif #endif MEM_STATIC unsigned MEM_32bits(void) { return sizeof(void*)==4; } MEM_STATIC unsigned MEM_64bits(void) { return sizeof(void*)==8; } MEM_STATIC unsigned MEM_isLittleEndian(void) { const union { U32 u; BYTE c[4]; } one = { 1 }; /* don't use static : performance detrimental */ return one.c[0]; } #if defined(MEM_FORCE_MEMORY_ACCESS) && (MEM_FORCE_MEMORY_ACCESS==2) /* violates C standard on structure alignment. Only use if no other choice to achieve best performance on target platform */ MEM_STATIC U16 MEM_read16(const void* memPtr) { return *(const U16*) memPtr; } MEM_STATIC U32 MEM_read32(const void* memPtr) { return *(const U32*) memPtr; } MEM_STATIC U64 MEM_read64(const void* memPtr) { return *(const U64*) memPtr; } MEM_STATIC void MEM_write16(void* memPtr, U16 value) { *(U16*)memPtr = value; } #elif defined(MEM_FORCE_MEMORY_ACCESS) && (MEM_FORCE_MEMORY_ACCESS==1) /* __pack instructions are safer, but compiler specific, hence potentially problematic for some compilers */ /* currently only defined for gcc and icc */ typedef union { U16 u16; U32 u32; U64 u64; } __attribute__((packed)) unalign; MEM_STATIC U16 MEM_read16(const void* ptr) { return ((const unalign*)ptr)->u16; } MEM_STATIC U32 MEM_read32(const void* ptr) { return ((const unalign*)ptr)->u32; } MEM_STATIC U64 MEM_read64(const void* ptr) { return ((const unalign*)ptr)->u64; } MEM_STATIC void MEM_write16(void* memPtr, U16 value) { ((unalign*)memPtr)->u16 = value; } #else /* default method, safe and standard. 
can sometimes prove slower */ MEM_STATIC U16 MEM_read16(const void* memPtr) { U16 val; memcpy(&val, memPtr, sizeof(val)); return val; } MEM_STATIC U32 MEM_read32(const void* memPtr) { U32 val; memcpy(&val, memPtr, sizeof(val)); return val; } MEM_STATIC U64 MEM_read64(const void* memPtr) { U64 val; memcpy(&val, memPtr, sizeof(val)); return val; } MEM_STATIC void MEM_write16(void* memPtr, U16 value) { memcpy(memPtr, &value, sizeof(value)); } #endif /* MEM_FORCE_MEMORY_ACCESS */ MEM_STATIC U16 MEM_readLE16(const void* memPtr) { if (MEM_isLittleEndian()) return MEM_read16(memPtr); else { const BYTE* p = (const BYTE*)memPtr; return (U16)(p[0] + (p[1]<<8)); } } MEM_STATIC void MEM_writeLE16(void* memPtr, U16 val) { if (MEM_isLittleEndian()) { MEM_write16(memPtr, val); } else { BYTE* p = (BYTE*)memPtr; p[0] = (BYTE)val; p[1] = (BYTE)(val>>8); } } MEM_STATIC U32 MEM_readLE24(const void* memPtr) { return MEM_readLE16(memPtr) + (((const BYTE*)memPtr)[2] << 16); } MEM_STATIC U32 MEM_readLE32(const void* memPtr) { if (MEM_isLittleEndian()) return MEM_read32(memPtr); else { const BYTE* p = (const BYTE*)memPtr; return (U32)((U32)p[0] + ((U32)p[1]<<8) + ((U32)p[2]<<16) + ((U32)p[3]<<24)); } } MEM_STATIC U64 MEM_readLE64(const void* memPtr) { if (MEM_isLittleEndian()) return MEM_read64(memPtr); else { const BYTE* p = (const BYTE*)memPtr; return (U64)((U64)p[0] + ((U64)p[1]<<8) + ((U64)p[2]<<16) + ((U64)p[3]<<24) + ((U64)p[4]<<32) + ((U64)p[5]<<40) + ((U64)p[6]<<48) + ((U64)p[7]<<56)); } } MEM_STATIC size_t MEM_readLEST(const void* memPtr) { if (MEM_32bits()) return (size_t)MEM_readLE32(memPtr); else return (size_t)MEM_readLE64(memPtr); } #if defined (__cplusplus) } #endif #endif /* MEM_H_MODULE */ /* ****************************************************************** bitstream Part of NewGen Entropy library header file (to include) Copyright (C) 2013-2015, Yann Collet. BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
You can contact the author at : - Source repository : https://github.com/Cyan4973/FiniteStateEntropy - Public forum : https://groups.google.com/forum/#!forum/lz4c ****************************************************************** */ #ifndef BITSTREAM_H_MODULE #define BITSTREAM_H_MODULE #if defined (__cplusplus) extern "C" { #endif /* * This API consists of small unitary functions, which highly benefit from being inlined. * Since link-time-optimization is not available for all compilers, * these functions are defined into a .h to be included. */ /********************************************** * bitStream decompression API (read backward) **********************************************/ typedef struct { size_t bitContainer; unsigned bitsConsumed; const char* ptr; const char* start; } BIT_DStream_t; typedef enum { BIT_DStream_unfinished = 0, BIT_DStream_endOfBuffer = 1, BIT_DStream_completed = 2, BIT_DStream_overflow = 3 } BIT_DStream_status; /* result of BIT_reloadDStream() */ /* 1,2,4,8 would be better for bitmap combinations, but slows down performance a bit ... :( */ MEM_STATIC size_t BIT_initDStream(BIT_DStream_t* bitD, const void* srcBuffer, size_t srcSize); MEM_STATIC size_t BIT_readBits(BIT_DStream_t* bitD, unsigned nbBits); MEM_STATIC BIT_DStream_status BIT_reloadDStream(BIT_DStream_t* bitD); MEM_STATIC unsigned BIT_endOfDStream(const BIT_DStream_t* bitD); /****************************************** * unsafe API ******************************************/ MEM_STATIC size_t BIT_readBitsFast(BIT_DStream_t* bitD, unsigned nbBits); /* faster, but works only if nbBits >= 1 */ /**************************************************************** * Helper functions ****************************************************************/ MEM_STATIC unsigned BIT_highbit32 (U32 val) { # if defined(_MSC_VER) /* Visual */ unsigned long r=0; _BitScanReverse ( &r, val ); return (unsigned) r; # elif defined(__GNUC__) && (__GNUC__ >= 3) /* Use GCC Intrinsic */ return __builtin_clz (val) ^ 31; # else /* Software version */ static const unsigned DeBruijnClz[32] = { 0, 9, 1, 10, 13, 21, 2, 29, 11, 14, 16, 18, 22, 25, 3, 30, 8, 12, 20, 28, 15, 17, 24, 7, 19, 27, 23, 6, 26, 5, 4, 31 }; U32 v = val; unsigned r; v |= v >> 1; v |= v >> 2; v |= v >> 4; v |= v >> 8; v |= v >> 16; r = DeBruijnClz[ (U32) (v * 0x07C4ACDDU) >> 27]; return r; # endif } /********************************************************** * bitStream decoding **********************************************************/ /*!BIT_initDStream * Initialize a BIT_DStream_t. 
* @bitD : a pointer to an already allocated BIT_DStream_t structure * @srcBuffer must point at the beginning of a bitStream * @srcSize must be the exact size of the bitStream * @result : size of stream (== srcSize) or an errorCode if a problem is detected */ MEM_STATIC size_t BIT_initDStream(BIT_DStream_t* bitD, const void* srcBuffer, size_t srcSize) { if (srcSize < 1) { memset(bitD, 0, sizeof(*bitD)); return ERROR(srcSize_wrong); } if (srcSize >= sizeof(size_t)) /* normal case */ { U32 contain32; bitD->start = (const char*)srcBuffer; bitD->ptr = (const char*)srcBuffer + srcSize - sizeof(size_t); bitD->bitContainer = MEM_readLEST(bitD->ptr); contain32 = ((const BYTE*)srcBuffer)[srcSize-1]; if (contain32 == 0) return ERROR(GENERIC); /* endMark not present */ bitD->bitsConsumed = 8 - BIT_highbit32(contain32); } else { U32 contain32; bitD->start = (const char*)srcBuffer; bitD->ptr = bitD->start; bitD->bitContainer = *(const BYTE*)(bitD->start); switch(srcSize) { case 7: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[6]) << (sizeof(size_t)*8 - 16); /* fallthrough */ case 6: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[5]) << (sizeof(size_t)*8 - 24); /* fallthrough */ case 5: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[4]) << (sizeof(size_t)*8 - 32); /* fallthrough */ case 4: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[3]) << 24; /* fallthrough */ case 3: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[2]) << 16; /* fallthrough */ case 2: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[1]) << 8; /* fallthrough */ default:; } contain32 = ((const BYTE*)srcBuffer)[srcSize-1]; if (contain32 == 0) return ERROR(GENERIC); /* endMark not present */ bitD->bitsConsumed = 8 - BIT_highbit32(contain32); bitD->bitsConsumed += (U32)(sizeof(size_t) - srcSize)*8; } return srcSize; } MEM_STATIC size_t BIT_lookBits(BIT_DStream_t* bitD, U32 nbBits) { const U32 bitMask = sizeof(bitD->bitContainer)*8 - 1; return ((bitD->bitContainer << (bitD->bitsConsumed & bitMask)) >> 1) >> ((bitMask-nbBits) & bitMask); } /*! 
BIT_lookBitsFast : * unsafe version; only works only if nbBits >= 1 */ MEM_STATIC size_t BIT_lookBitsFast(BIT_DStream_t* bitD, U32 nbBits) { const U32 bitMask = sizeof(bitD->bitContainer)*8 - 1; return (bitD->bitContainer << (bitD->bitsConsumed & bitMask)) >> (((bitMask+1)-nbBits) & bitMask); } MEM_STATIC void BIT_skipBits(BIT_DStream_t* bitD, U32 nbBits) { bitD->bitsConsumed += nbBits; } MEM_STATIC size_t BIT_readBits(BIT_DStream_t* bitD, U32 nbBits) { size_t value = BIT_lookBits(bitD, nbBits); BIT_skipBits(bitD, nbBits); return value; } /*!BIT_readBitsFast : * unsafe version; only works only if nbBits >= 1 */ MEM_STATIC size_t BIT_readBitsFast(BIT_DStream_t* bitD, U32 nbBits) { size_t value = BIT_lookBitsFast(bitD, nbBits); BIT_skipBits(bitD, nbBits); return value; } MEM_STATIC BIT_DStream_status BIT_reloadDStream(BIT_DStream_t* bitD) { if (bitD->bitsConsumed > (sizeof(bitD->bitContainer)*8)) /* should never happen */ return BIT_DStream_overflow; if (bitD->ptr >= bitD->start + sizeof(bitD->bitContainer)) { bitD->ptr -= bitD->bitsConsumed >> 3; bitD->bitsConsumed &= 7; bitD->bitContainer = MEM_readLEST(bitD->ptr); return BIT_DStream_unfinished; } if (bitD->ptr == bitD->start) { if (bitD->bitsConsumed < sizeof(bitD->bitContainer)*8) return BIT_DStream_endOfBuffer; return BIT_DStream_completed; } { U32 nbBytes = bitD->bitsConsumed >> 3; BIT_DStream_status result = BIT_DStream_unfinished; if (bitD->ptr - nbBytes < bitD->start) { nbBytes = (U32)(bitD->ptr - bitD->start); /* ptr > start */ result = BIT_DStream_endOfBuffer; } bitD->ptr -= nbBytes; bitD->bitsConsumed -= nbBytes*8; bitD->bitContainer = MEM_readLEST(bitD->ptr); /* reminder : srcSize > sizeof(bitD) */ return result; } } /*! BIT_endOfDStream * @return Tells if DStream has reached its exact end */ MEM_STATIC unsigned BIT_endOfDStream(const BIT_DStream_t* DStream) { return ((DStream->ptr == DStream->start) && (DStream->bitsConsumed == sizeof(DStream->bitContainer)*8)); } #if defined (__cplusplus) } #endif #endif /* BITSTREAM_H_MODULE */ /* ****************************************************************** Error codes and messages Copyright (C) 2013-2015, Yann Collet BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
You can contact the author at : - Source repository : https://github.com/Cyan4973/FiniteStateEntropy - Public forum : https://groups.google.com/forum/#!forum/lz4c ****************************************************************** */ #ifndef ERROR_H_MODULE #define ERROR_H_MODULE #if defined (__cplusplus) extern "C" { #endif /****************************************** * Compiler-specific ******************************************/ #if defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) # define ERR_STATIC static inline #elif defined(_MSC_VER) # define ERR_STATIC static __inline #elif defined(__GNUC__) # define ERR_STATIC static __attribute__((unused)) #else # define ERR_STATIC static /* this version may generate warnings for unused static functions; disable the relevant warning */ #endif /****************************************** * Error Management ******************************************/ #define PREFIX(name) ZSTD_error_##name #define ERROR(name) (size_t)-PREFIX(name) #define ERROR_LIST(ITEM) \ ITEM(PREFIX(No_Error)) ITEM(PREFIX(GENERIC)) \ ITEM(PREFIX(dstSize_tooSmall)) ITEM(PREFIX(srcSize_wrong)) \ ITEM(PREFIX(prefix_unknown)) ITEM(PREFIX(corruption_detected)) \ ITEM(PREFIX(tableLog_tooLarge)) ITEM(PREFIX(maxSymbolValue_tooLarge)) ITEM(PREFIX(maxSymbolValue_tooSmall)) \ ITEM(PREFIX(maxCode)) #define ERROR_GENERATE_ENUM(ENUM) ENUM, typedef enum { ERROR_LIST(ERROR_GENERATE_ENUM) } ERR_codes; /* enum is exposed, to detect & handle specific errors; compare function result to -enum value */ #define ERROR_CONVERTTOSTRING(STRING) #STRING, #define ERROR_GENERATE_STRING(EXPR) ERROR_CONVERTTOSTRING(EXPR) static const char* ERR_strings[] = { ERROR_LIST(ERROR_GENERATE_STRING) }; ERR_STATIC unsigned ERR_isError(size_t code) { return (code > ERROR(maxCode)); } ERR_STATIC const char* ERR_getErrorName(size_t code) { static const char* codeError = "Unspecified error code"; if (ERR_isError(code)) return ERR_strings[-(int)(code)]; return codeError; } #if defined (__cplusplus) } #endif #endif /* ERROR_H_MODULE */ /* Constructor and Destructor of type FSE_CTable Note that its size depends on 'tableLog' and 'maxSymbolValue' */ typedef unsigned FSE_CTable; /* don't allocate that. It's just a way to be more restrictive than void* */ typedef unsigned FSE_DTable; /* don't allocate that. It's just a way to be more restrictive than void* */ /* ****************************************************************** FSE : Finite State Entropy coder header file for static linking (only) Copyright (C) 2013-2015, Yann Collet BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. You can contact the author at : - Source repository : https://github.com/Cyan4973/FiniteStateEntropy - Public forum : https://groups.google.com/forum/#!forum/lz4c ****************************************************************** */ #if defined (__cplusplus) extern "C" { #endif /****************************************** * Static allocation ******************************************/ /* FSE buffer bounds */ #define FSE_NCOUNTBOUND 512 #define FSE_BLOCKBOUND(size) (size + (size>>7)) #define FSE_COMPRESSBOUND(size) (FSE_NCOUNTBOUND + FSE_BLOCKBOUND(size)) /* Macro version, useful for static allocation */ /* You can statically allocate FSE CTable/DTable as a table of unsigned using below macro */ #define FSE_CTABLE_SIZE_U32(maxTableLog, maxSymbolValue) (1 + (1<<(maxTableLog-1)) + ((maxSymbolValue+1)*2)) #define FSE_DTABLE_SIZE_U32(maxTableLog) (1 + (1<= 1 (otherwise, result will be corrupted) */ /****************************************** * Implementation of inline functions ******************************************/ /* decompression */ typedef struct { U16 tableLog; U16 fastMode; } FSE_DTableHeader; /* sizeof U32 */ typedef struct { unsigned short newState; unsigned char symbol; unsigned char nbBits; } FSE_decode_t; /* size == U32 */ MEM_STATIC void FSE_initDState(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD, const FSE_DTable* dt) { FSE_DTableHeader DTableH; memcpy(&DTableH, dt, sizeof(DTableH)); DStatePtr->state = BIT_readBits(bitD, DTableH.tableLog); BIT_reloadDStream(bitD); DStatePtr->table = dt + 1; } MEM_STATIC BYTE FSE_decodeSymbol(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD) { const FSE_decode_t DInfo = ((const FSE_decode_t*)(DStatePtr->table))[DStatePtr->state]; const U32 nbBits = DInfo.nbBits; BYTE symbol = DInfo.symbol; size_t lowBits = BIT_readBits(bitD, nbBits); DStatePtr->state = DInfo.newState + lowBits; return symbol; } MEM_STATIC BYTE FSE_decodeSymbolFast(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD) { const FSE_decode_t DInfo = ((const FSE_decode_t*)(DStatePtr->table))[DStatePtr->state]; const U32 nbBits = DInfo.nbBits; BYTE symbol = DInfo.symbol; size_t lowBits = BIT_readBitsFast(bitD, nbBits); DStatePtr->state = DInfo.newState + lowBits; return symbol; } MEM_STATIC unsigned FSE_endOfDState(const FSE_DState_t* DStatePtr) { return DStatePtr->state == 0; } #if defined (__cplusplus) } #endif /* ****************************************************************** Huff0 : Huffman coder, part of New Generation Entropy library header file for static linking (only) Copyright (C) 2013-2015, Yann Collet BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 
* Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. You can contact the author at : - Source repository : https://github.com/Cyan4973/FiniteStateEntropy - Public forum : https://groups.google.com/forum/#!forum/lz4c ****************************************************************** */ #if defined (__cplusplus) extern "C" { #endif /****************************************** * Static allocation macros ******************************************/ /* Huff0 buffer bounds */ #define HUF_CTABLEBOUND 129 #define HUF_BLOCKBOUND(size) (size + (size>>8) + 8) /* only true if incompressible pre-filtered with fast heuristic */ #define HUF_COMPRESSBOUND(size) (HUF_CTABLEBOUND + HUF_BLOCKBOUND(size)) /* Macro version, useful for static allocation */ /* static allocation of Huff0's DTable */ #define HUF_DTABLE_SIZE(maxTableLog) (1 + (1< /* size_t */ /* ************************************* * Version ***************************************/ #define ZSTD_VERSION_MAJOR 0 /* for breaking interface changes */ #define ZSTD_VERSION_MINOR 2 /* for new (non-breaking) interface capabilities */ #define ZSTD_VERSION_RELEASE 2 /* for tweaks, bug-fixes, or development */ #define ZSTD_VERSION_NUMBER (ZSTD_VERSION_MAJOR *100*100 + ZSTD_VERSION_MINOR *100 + ZSTD_VERSION_RELEASE) /* ************************************* * Advanced functions ***************************************/ typedef struct ZSTD_CCtx_s ZSTD_CCtx; /* incomplete type */ #if defined (__cplusplus) } #endif /* zstd - standard compression library Header File for static linking only Copyright (C) 2014-2015, Yann Collet. BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. You can contact the author at : - zstd source repository : https://github.com/Cyan4973/zstd - ztsd public forum : https://groups.google.com/forum/#!forum/lz4c */ /* The objects defined into this file should be considered experimental. * They are not labelled stable, as their prototype may change in the future. * You can use them for tests, provide feedback, or if you can endure risk of future changes. */ #if defined (__cplusplus) extern "C" { #endif /* ************************************* * Streaming functions ***************************************/ typedef struct ZSTD_DCtx_s ZSTD_DCtx; /* Use above functions alternatively. ZSTD_nextSrcSizeToDecompress() tells how much bytes to provide as 'srcSize' to ZSTD_decompressContinue(). ZSTD_decompressContinue() will use previous data blocks to improve compression if they are located prior to current block. Result is the number of bytes regenerated within 'dst'. It can be zero, which is not an error; it just means ZSTD_decompressContinue() has decoded some header. */ /* ************************************* * Prefix - version detection ***************************************/ #define ZSTD_magicNumber 0xFD2FB523 /* v0.3 */ #if defined (__cplusplus) } #endif /* ****************************************************************** FSE : Finite State Entropy coder Copyright (C) 2013-2015, Yann Collet. BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
You can contact the author at : - FSE source repository : https://github.com/Cyan4973/FiniteStateEntropy - Public forum : https://groups.google.com/forum/#!forum/lz4c ****************************************************************** */ #ifndef FSE_COMMONDEFS_ONLY /**************************************************************** * Tuning parameters ****************************************************************/ /* MEMORY_USAGE : * Memory usage formula : N->2^N Bytes (examples : 10 -> 1KB; 12 -> 4KB ; 16 -> 64KB; 20 -> 1MB; etc.) * Increasing memory usage improves compression ratio * Reduced memory usage can improve speed, due to cache effect * Recommended max value is 14, for 16KB, which nicely fits into Intel x86 L1 cache */ #define FSE_MAX_MEMORY_USAGE 14 #define FSE_DEFAULT_MEMORY_USAGE 13 /* FSE_MAX_SYMBOL_VALUE : * Maximum symbol value authorized. * Required for proper stack allocation */ #define FSE_MAX_SYMBOL_VALUE 255 /**************************************************************** * template functions type & suffix ****************************************************************/ #define FSE_FUNCTION_TYPE BYTE #define FSE_FUNCTION_EXTENSION /**************************************************************** * Byte symbol type ****************************************************************/ #endif /* !FSE_COMMONDEFS_ONLY */ /**************************************************************** * Compiler specifics ****************************************************************/ #ifdef _MSC_VER /* Visual Studio */ # define FORCE_INLINE static __forceinline # include /* For Visual 2005 */ # pragma warning(disable : 4127) /* disable: C4127: conditional expression is constant */ # pragma warning(disable : 4214) /* disable: C4214: non-int bitfields */ #else # if defined (__cplusplus) || defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L /* C99 */ # ifdef __GNUC__ # define FORCE_INLINE static inline __attribute__((always_inline)) # else # define FORCE_INLINE static inline # endif # else # define FORCE_INLINE static # endif /* __STDC_VERSION__ */ #endif /**************************************************************** * Includes ****************************************************************/ #include /* malloc, free, qsort */ #include /* memcpy, memset */ #include /* printf (debug) */ /**************************************************************** * Constants *****************************************************************/ #define FSE_MAX_TABLELOG (FSE_MAX_MEMORY_USAGE-2) #define FSE_MAX_TABLESIZE (1U< FSE_TABLELOG_ABSOLUTE_MAX #error "FSE_MAX_TABLELOG > FSE_TABLELOG_ABSOLUTE_MAX is not supported" #endif /**************************************************************** * Error Management ****************************************************************/ #define FSE_STATIC_ASSERT(c) { enum { FSE_static_assert = 1/(int)(!!(c)) }; } /* use only *after* variable declarations */ /**************************************************************** * Complex types ****************************************************************/ typedef U32 DTable_max_t[FSE_DTABLE_SIZE_U32(FSE_MAX_TABLELOG)]; /**************************************************************** * Templates ****************************************************************/ /* designed to be included for type-specific functions (template emulation in C) Objective is to write these functions only once, for improved maintenance */ /* safety checks */ #ifndef FSE_FUNCTION_EXTENSION # error "FSE_FUNCTION_EXTENSION must be defined" 
#endif #ifndef FSE_FUNCTION_TYPE # error "FSE_FUNCTION_TYPE must be defined" #endif /* Function names */ #define FSE_CAT(X,Y) X##Y #define FSE_FUNCTION_NAME(X,Y) FSE_CAT(X,Y) #define FSE_TYPE_NAME(X,Y) FSE_CAT(X,Y) /* Function templates */ #define FSE_DECODE_TYPE FSE_decode_t static U32 FSE_tableStep(U32 tableSize) { return (tableSize>>1) + (tableSize>>3) + 3; } static size_t FSE_buildDTable (FSE_DTable* dt, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog) { void* ptr = dt+1; FSE_DTableHeader DTableH; FSE_DECODE_TYPE* const tableDecode = (FSE_DECODE_TYPE*)ptr; const U32 tableSize = 1 << tableLog; const U32 tableMask = tableSize-1; const U32 step = FSE_tableStep(tableSize); U16 symbolNext[FSE_MAX_SYMBOL_VALUE+1]; U32 position = 0; U32 highThreshold = tableSize-1; const S16 largeLimit= (S16)(1 << (tableLog-1)); U32 noLarge = 1; U32 s; /* Sanity Checks */ if (maxSymbolValue > FSE_MAX_SYMBOL_VALUE) return ERROR(maxSymbolValue_tooLarge); if (tableLog > FSE_MAX_TABLELOG) return ERROR(tableLog_tooLarge); /* Init, lay down lowprob symbols */ DTableH.tableLog = (U16)tableLog; for (s=0; s<=maxSymbolValue; s++) { if (normalizedCounter[s]==-1) { tableDecode[highThreshold--].symbol = (FSE_FUNCTION_TYPE)s; symbolNext[s] = 1; } else { if (normalizedCounter[s] >= largeLimit) noLarge=0; symbolNext[s] = normalizedCounter[s]; } } /* Spread symbols */ for (s=0; s<=maxSymbolValue; s++) { int i; for (i=0; i highThreshold) position = (position + step) & tableMask; /* lowprob area */ } } if (position!=0) return ERROR(GENERIC); /* position must reach all cells once, otherwise normalizedCounter is incorrect */ /* Build Decoding table */ { U32 i; for (i=0; i FSE_TABLELOG_ABSOLUTE_MAX) return ERROR(tableLog_tooLarge); bitStream >>= 4; bitCount = 4; *tableLogPtr = nbBits; remaining = (1<1) && (charnum<=*maxSVPtr)) { if (previous0) { unsigned n0 = charnum; while ((bitStream & 0xFFFF) == 0xFFFF) { n0+=24; if (ip < iend-5) { ip+=2; bitStream = MEM_readLE32(ip) >> bitCount; } else { bitStream >>= 16; bitCount+=16; } } while ((bitStream & 3) == 3) { n0+=3; bitStream>>=2; bitCount+=2; } n0 += bitStream & 3; bitCount += 2; if (n0 > *maxSVPtr) return ERROR(maxSymbolValue_tooSmall); while (charnum < n0) normalizedCounter[charnum++] = 0; if ((ip <= iend-7) || (ip + (bitCount>>3) <= iend-4)) { ip += bitCount>>3; bitCount &= 7; bitStream = MEM_readLE32(ip) >> bitCount; } else bitStream >>= 2; } { const short max = (short)((2*threshold-1)-remaining); short count; if ((bitStream & (threshold-1)) < (U32)max) { count = (short)(bitStream & (threshold-1)); bitCount += nbBits-1; } else { count = (short)(bitStream & (2*threshold-1)); if (count >= threshold) count -= max; bitCount += nbBits; } count--; /* extra accuracy */ remaining -= FSE_abs(count); normalizedCounter[charnum++] = count; previous0 = !count; while (remaining < threshold) { nbBits--; threshold >>= 1; } { if ((ip <= iend-7) || (ip + (bitCount>>3) <= iend-4)) { ip += bitCount>>3; bitCount &= 7; } else { bitCount -= (int)(8 * (iend - 4 - ip)); ip = iend - 4; } bitStream = MEM_readLE32(ip) >> (bitCount & 31); } } } if (remaining != 1) return ERROR(GENERIC); *maxSVPtr = charnum-1; ip += (bitCount+7)>>3; if ((size_t)(ip-istart) > hbSize) return ERROR(srcSize_wrong); return ip-istart; } /********************************************************* * Decompression (Byte symbols) *********************************************************/ static size_t FSE_buildDTable_rle (FSE_DTable* dt, BYTE symbolValue) { void* ptr = dt; FSE_DTableHeader* const DTableH = 
(FSE_DTableHeader*)ptr; FSE_decode_t* const cell = (FSE_decode_t*)(ptr) + 1; DTableH->tableLog = 0; DTableH->fastMode = 0; cell->newState = 0; cell->symbol = symbolValue; cell->nbBits = 0; return 0; } static size_t FSE_buildDTable_raw (FSE_DTable* dt, unsigned nbBits) { void* ptr = dt; FSE_DTableHeader* const DTableH = (FSE_DTableHeader*)ptr; FSE_decode_t* const dinfo = (FSE_decode_t*)(ptr) + 1; const unsigned tableSize = 1 << nbBits; const unsigned tableMask = tableSize - 1; const unsigned maxSymbolValue = tableMask; unsigned s; /* Sanity checks */ if (nbBits < 1) return ERROR(GENERIC); /* min size */ /* Build Decoding Table */ DTableH->tableLog = (U16)nbBits; DTableH->fastMode = 1; for (s=0; s<=maxSymbolValue; s++) { dinfo[s].newState = 0; dinfo[s].symbol = (BYTE)s; dinfo[s].nbBits = (BYTE)nbBits; } return 0; } FORCE_INLINE size_t FSE_decompress_usingDTable_generic( void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const FSE_DTable* dt, const unsigned fast) { BYTE* const ostart = (BYTE*) dst; BYTE* op = ostart; BYTE* const omax = op + maxDstSize; BYTE* const olimit = omax-3; BIT_DStream_t bitD; FSE_DState_t state1; FSE_DState_t state2; size_t errorCode; /* Init */ errorCode = BIT_initDStream(&bitD, cSrc, cSrcSize); /* replaced last arg by maxCompressed Size */ if (FSE_isError(errorCode)) return errorCode; FSE_initDState(&state1, &bitD, dt); FSE_initDState(&state2, &bitD, dt); #define FSE_GETSYMBOL(statePtr) fast ? FSE_decodeSymbolFast(statePtr, &bitD) : FSE_decodeSymbol(statePtr, &bitD) /* 4 symbols per loop */ for ( ; (BIT_reloadDStream(&bitD)==BIT_DStream_unfinished) && (op sizeof(bitD.bitContainer)*8) /* This test must be static */ BIT_reloadDStream(&bitD); op[1] = FSE_GETSYMBOL(&state2); if (FSE_MAX_TABLELOG*4+7 > sizeof(bitD.bitContainer)*8) /* This test must be static */ { if (BIT_reloadDStream(&bitD) > BIT_DStream_unfinished) { op+=2; break; } } op[2] = FSE_GETSYMBOL(&state1); if (FSE_MAX_TABLELOG*2+7 > sizeof(bitD.bitContainer)*8) /* This test must be static */ BIT_reloadDStream(&bitD); op[3] = FSE_GETSYMBOL(&state2); } /* tail */ /* note : BIT_reloadDStream(&bitD) >= FSE_DStream_partiallyFilled; Ends at exactly BIT_DStream_completed */ while (1) { if ( (BIT_reloadDStream(&bitD)>BIT_DStream_completed) || (op==omax) || (BIT_endOfDStream(&bitD) && (fast || FSE_endOfDState(&state1))) ) break; *op++ = FSE_GETSYMBOL(&state1); if ( (BIT_reloadDStream(&bitD)>BIT_DStream_completed) || (op==omax) || (BIT_endOfDStream(&bitD) && (fast || FSE_endOfDState(&state2))) ) break; *op++ = FSE_GETSYMBOL(&state2); } /* end ? 
*/ if (BIT_endOfDStream(&bitD) && FSE_endOfDState(&state1) && FSE_endOfDState(&state2)) return op-ostart; if (op==omax) return ERROR(dstSize_tooSmall); /* dst buffer is full, but cSrc unfinished */ return ERROR(corruption_detected); } static size_t FSE_decompress_usingDTable(void* dst, size_t originalSize, const void* cSrc, size_t cSrcSize, const FSE_DTable* dt) { FSE_DTableHeader DTableH; memcpy(&DTableH, dt, sizeof(DTableH)); /* select fast mode (static) */ if (DTableH.fastMode) return FSE_decompress_usingDTable_generic(dst, originalSize, cSrc, cSrcSize, dt, 1); return FSE_decompress_usingDTable_generic(dst, originalSize, cSrc, cSrcSize, dt, 0); } static size_t FSE_decompress(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize) { const BYTE* const istart = (const BYTE*)cSrc; const BYTE* ip = istart; short counting[FSE_MAX_SYMBOL_VALUE+1]; DTable_max_t dt; /* Static analyzer seems unable to understand this table will be properly initialized later */ unsigned tableLog; unsigned maxSymbolValue = FSE_MAX_SYMBOL_VALUE; size_t errorCode; if (cSrcSize<2) return ERROR(srcSize_wrong); /* too small input size */ /* normal FSE decoding mode */ errorCode = FSE_readNCount (counting, &maxSymbolValue, &tableLog, istart, cSrcSize); if (FSE_isError(errorCode)) return errorCode; if (errorCode >= cSrcSize) return ERROR(srcSize_wrong); /* too small input size */ ip += errorCode; cSrcSize -= errorCode; errorCode = FSE_buildDTable (dt, counting, maxSymbolValue, tableLog); if (FSE_isError(errorCode)) return errorCode; /* always return, even if it is an error code */ return FSE_decompress_usingDTable (dst, maxDstSize, ip, cSrcSize, dt); } #endif /* FSE_COMMONDEFS_ONLY */ /* ****************************************************************** Huff0 : Huffman coder, part of New Generation Entropy library Copyright (C) 2013-2015, Yann Collet. BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
You can contact the author at : - FSE+Huff0 source repository : https://github.com/Cyan4973/FiniteStateEntropy - Public forum : https://groups.google.com/forum/#!forum/lz4c ****************************************************************** */ /**************************************************************** * Compiler specifics ****************************************************************/ #if defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) /* inline is defined */ #elif defined(_MSC_VER) # pragma warning(disable : 4127) /* disable: C4127: conditional expression is constant */ # define inline __inline #else # define inline /* disable inline */ #endif /**************************************************************** * Includes ****************************************************************/ #include /* malloc, free, qsort */ #include /* memcpy, memset */ #include /* printf (debug) */ /**************************************************************** * Error Management ****************************************************************/ #define HUF_STATIC_ASSERT(c) { enum { HUF_static_assert = 1/(int)(!!(c)) }; } /* use only *after* variable declarations */ /****************************************** * Helper functions ******************************************/ static unsigned HUF_isError(size_t code) { return ERR_isError(code); } #define HUF_ABSOLUTEMAX_TABLELOG 16 /* absolute limit of HUF_MAX_TABLELOG. Beyond that value, code does not work */ #define HUF_MAX_TABLELOG 12 /* max configured tableLog (for static allocation); can be modified up to HUF_ABSOLUTEMAX_TABLELOG */ #define HUF_DEFAULT_TABLELOG HUF_MAX_TABLELOG /* tableLog by default, when not specified */ #define HUF_MAX_SYMBOL_VALUE 255 #if (HUF_MAX_TABLELOG > HUF_ABSOLUTEMAX_TABLELOG) # error "HUF_MAX_TABLELOG is too large !" #endif /********************************************************* * Huff0 : Huffman block decompression *********************************************************/ typedef struct { BYTE byte; BYTE nbBits; } HUF_DEltX2; /* single-symbol decoding */ typedef struct { U16 sequence; BYTE nbBits; BYTE length; } HUF_DEltX4; /* double-symbols decoding */ typedef struct { BYTE symbol; BYTE weight; } sortedSymbol_t; /*! HUF_readStats Read compact Huffman tree, saved by HUF_writeCTable @huffWeight : destination buffer @return : size read from `src` */ static size_t HUF_readStats(BYTE* huffWeight, size_t hwSize, U32* rankStats, U32* nbSymbolsPtr, U32* tableLogPtr, const void* src, size_t srcSize) { U32 weightTotal; U32 tableLog; const BYTE* ip = (const BYTE*) src; size_t iSize; size_t oSize; U32 n; if (!srcSize) return ERROR(srcSize_wrong); iSize = ip[0]; //memset(huffWeight, 0, hwSize); /* is not necessary, even though some analyzer complain ... 
*/ if (iSize >= 128) /* special header */ { if (iSize >= (242)) /* RLE */ { static int l[14] = { 1, 2, 3, 4, 7, 8, 15, 16, 31, 32, 63, 64, 127, 128 }; oSize = l[iSize-242]; memset(huffWeight, 1, hwSize); iSize = 0; } else /* Incompressible */ { oSize = iSize - 127; iSize = ((oSize+1)/2); if (iSize+1 > srcSize) return ERROR(srcSize_wrong); if (oSize >= hwSize) return ERROR(corruption_detected); ip += 1; for (n=0; n> 4; huffWeight[n+1] = ip[n/2] & 15; } } } else /* header compressed with FSE (normal case) */ { if (iSize+1 > srcSize) return ERROR(srcSize_wrong); oSize = FSE_decompress(huffWeight, hwSize-1, ip+1, iSize); /* max (hwSize-1) values decoded, as last one is implied */ if (FSE_isError(oSize)) return oSize; } /* collect weight stats */ memset(rankStats, 0, (HUF_ABSOLUTEMAX_TABLELOG + 1) * sizeof(U32)); weightTotal = 0; for (n=0; n= HUF_ABSOLUTEMAX_TABLELOG) return ERROR(corruption_detected); rankStats[huffWeight[n]]++; weightTotal += (1 << huffWeight[n]) >> 1; } if (weightTotal == 0) return ERROR(corruption_detected); /* get last non-null symbol weight (implied, total must be 2^n) */ tableLog = BIT_highbit32(weightTotal) + 1; if (tableLog > HUF_ABSOLUTEMAX_TABLELOG) return ERROR(corruption_detected); { U32 total = 1 << tableLog; U32 rest = total - weightTotal; U32 verif = 1 << BIT_highbit32(rest); U32 lastWeight = BIT_highbit32(rest) + 1; if (verif != rest) return ERROR(corruption_detected); /* last value must be a clean power of 2 */ huffWeight[oSize] = (BYTE)lastWeight; rankStats[lastWeight]++; } /* check tree construction validity */ if ((rankStats[1] < 2) || (rankStats[1] & 1)) return ERROR(corruption_detected); /* by construction : at least 2 elts of rank 1, must be even */ /* results */ *nbSymbolsPtr = (U32)(oSize+1); *tableLogPtr = tableLog; return iSize+1; } /**************************/ /* single-symbol decoding */ /**************************/ static size_t HUF_readDTableX2 (U16* DTable, const void* src, size_t srcSize) { BYTE huffWeight[HUF_MAX_SYMBOL_VALUE + 1]; U32 rankVal[HUF_ABSOLUTEMAX_TABLELOG + 1]; /* large enough for values from 0 to 16 */ U32 tableLog = 0; const BYTE* ip = (const BYTE*) src; size_t iSize = ip[0]; U32 nbSymbols = 0; U32 n; U32 nextRankStart; void* ptr = DTable+1; HUF_DEltX2* const dt = (HUF_DEltX2*)(ptr); HUF_STATIC_ASSERT(sizeof(HUF_DEltX2) == sizeof(U16)); /* if compilation fails here, assertion is false */ //memset(huffWeight, 0, sizeof(huffWeight)); /* is not necessary, even though some analyzer complain ... 
*/ iSize = HUF_readStats(huffWeight, HUF_MAX_SYMBOL_VALUE + 1, rankVal, &nbSymbols, &tableLog, src, srcSize); if (HUF_isError(iSize)) return iSize; /* check result */ if (tableLog > DTable[0]) return ERROR(tableLog_tooLarge); /* DTable is too small */ DTable[0] = (U16)tableLog; /* maybe should separate sizeof DTable, as allocated, from used size of DTable, in case of DTable re-use */ /* Prepare ranks */ nextRankStart = 0; for (n=1; n<=tableLog; n++) { U32 current = nextRankStart; nextRankStart += (rankVal[n] << (n-1)); rankVal[n] = current; } /* fill DTable */ for (n=0; n> 1; U32 i; HUF_DEltX2 D; D.byte = (BYTE)n; D.nbBits = (BYTE)(tableLog + 1 - w); for (i = rankVal[w]; i < rankVal[w] + length; i++) dt[i] = D; rankVal[w] += length; } return iSize; } static BYTE HUF_decodeSymbolX2(BIT_DStream_t* Dstream, const HUF_DEltX2* dt, const U32 dtLog) { const size_t val = BIT_lookBitsFast(Dstream, dtLog); /* note : dtLog >= 1 */ const BYTE c = dt[val].byte; BIT_skipBits(Dstream, dt[val].nbBits); return c; } #define HUF_DECODE_SYMBOLX2_0(ptr, DStreamPtr) \ *ptr++ = HUF_decodeSymbolX2(DStreamPtr, dt, dtLog) #define HUF_DECODE_SYMBOLX2_1(ptr, DStreamPtr) \ if (MEM_64bits() || (HUF_MAX_TABLELOG<=12)) \ HUF_DECODE_SYMBOLX2_0(ptr, DStreamPtr) #define HUF_DECODE_SYMBOLX2_2(ptr, DStreamPtr) \ if (MEM_64bits()) \ HUF_DECODE_SYMBOLX2_0(ptr, DStreamPtr) static inline size_t HUF_decodeStreamX2(BYTE* p, BIT_DStream_t* const bitDPtr, BYTE* const pEnd, const HUF_DEltX2* const dt, const U32 dtLog) { BYTE* const pStart = p; /* up to 4 symbols at a time */ while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) && (p <= pEnd-4)) { HUF_DECODE_SYMBOLX2_2(p, bitDPtr); HUF_DECODE_SYMBOLX2_1(p, bitDPtr); HUF_DECODE_SYMBOLX2_2(p, bitDPtr); HUF_DECODE_SYMBOLX2_0(p, bitDPtr); } /* closer to the end */ while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) && (p < pEnd)) HUF_DECODE_SYMBOLX2_0(p, bitDPtr); /* no more data to retrieve from bitstream, hence no need to reload */ while (p < pEnd) HUF_DECODE_SYMBOLX2_0(p, bitDPtr); return pEnd-pStart; } static size_t HUF_decompress4X2_usingDTable( void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, const U16* DTable) { if (cSrcSize < 10) return ERROR(corruption_detected); /* strict minimum : jump table + 1 byte per stream */ { const BYTE* const istart = (const BYTE*) cSrc; BYTE* const ostart = (BYTE*) dst; BYTE* const oend = ostart + dstSize; const void* ptr = DTable; const HUF_DEltX2* const dt = ((const HUF_DEltX2*)ptr) +1; const U32 dtLog = DTable[0]; size_t errorCode; /* Init */ BIT_DStream_t bitD1; BIT_DStream_t bitD2; BIT_DStream_t bitD3; BIT_DStream_t bitD4; const size_t length1 = MEM_readLE16(istart); const size_t length2 = MEM_readLE16(istart+2); const size_t length3 = MEM_readLE16(istart+4); size_t length4; const BYTE* const istart1 = istart + 6; /* jumpTable */ const BYTE* const istart2 = istart1 + length1; const BYTE* const istart3 = istart2 + length2; const BYTE* const istart4 = istart3 + length3; const size_t segmentSize = (dstSize+3) / 4; BYTE* const opStart2 = ostart + segmentSize; BYTE* const opStart3 = opStart2 + segmentSize; BYTE* const opStart4 = opStart3 + segmentSize; BYTE* op1 = ostart; BYTE* op2 = opStart2; BYTE* op3 = opStart3; BYTE* op4 = opStart4; U32 endSignal; length4 = cSrcSize - (length1 + length2 + length3 + 6); if (length4 > cSrcSize) return ERROR(corruption_detected); /* overflow */ errorCode = BIT_initDStream(&bitD1, istart1, length1); if (HUF_isError(errorCode)) return errorCode; errorCode = BIT_initDStream(&bitD2, istart2, 
length2); if (HUF_isError(errorCode)) return errorCode; errorCode = BIT_initDStream(&bitD3, istart3, length3); if (HUF_isError(errorCode)) return errorCode; errorCode = BIT_initDStream(&bitD4, istart4, length4); if (HUF_isError(errorCode)) return errorCode; /* 16-32 symbols per loop (4-8 symbols per stream) */ endSignal = BIT_reloadDStream(&bitD1) | BIT_reloadDStream(&bitD2) | BIT_reloadDStream(&bitD3) | BIT_reloadDStream(&bitD4); for ( ; (endSignal==BIT_DStream_unfinished) && (op4<(oend-7)) ; ) { HUF_DECODE_SYMBOLX2_2(op1, &bitD1); HUF_DECODE_SYMBOLX2_2(op2, &bitD2); HUF_DECODE_SYMBOLX2_2(op3, &bitD3); HUF_DECODE_SYMBOLX2_2(op4, &bitD4); HUF_DECODE_SYMBOLX2_1(op1, &bitD1); HUF_DECODE_SYMBOLX2_1(op2, &bitD2); HUF_DECODE_SYMBOLX2_1(op3, &bitD3); HUF_DECODE_SYMBOLX2_1(op4, &bitD4); HUF_DECODE_SYMBOLX2_2(op1, &bitD1); HUF_DECODE_SYMBOLX2_2(op2, &bitD2); HUF_DECODE_SYMBOLX2_2(op3, &bitD3); HUF_DECODE_SYMBOLX2_2(op4, &bitD4); HUF_DECODE_SYMBOLX2_0(op1, &bitD1); HUF_DECODE_SYMBOLX2_0(op2, &bitD2); HUF_DECODE_SYMBOLX2_0(op3, &bitD3); HUF_DECODE_SYMBOLX2_0(op4, &bitD4); endSignal = BIT_reloadDStream(&bitD1) | BIT_reloadDStream(&bitD2) | BIT_reloadDStream(&bitD3) | BIT_reloadDStream(&bitD4); } /* check corruption */ if (op1 > opStart2) return ERROR(corruption_detected); if (op2 > opStart3) return ERROR(corruption_detected); if (op3 > opStart4) return ERROR(corruption_detected); /* note : op4 supposed already verified within main loop */ /* finish bitStreams one by one */ HUF_decodeStreamX2(op1, &bitD1, opStart2, dt, dtLog); HUF_decodeStreamX2(op2, &bitD2, opStart3, dt, dtLog); HUF_decodeStreamX2(op3, &bitD3, opStart4, dt, dtLog); HUF_decodeStreamX2(op4, &bitD4, oend, dt, dtLog); /* check */ endSignal = BIT_endOfDStream(&bitD1) & BIT_endOfDStream(&bitD2) & BIT_endOfDStream(&bitD3) & BIT_endOfDStream(&bitD4); if (!endSignal) return ERROR(corruption_detected); /* decoded size */ return dstSize; } } static size_t HUF_decompress4X2 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize) { HUF_CREATE_STATIC_DTABLEX2(DTable, HUF_MAX_TABLELOG); const BYTE* ip = (const BYTE*) cSrc; size_t errorCode; errorCode = HUF_readDTableX2 (DTable, cSrc, cSrcSize); if (HUF_isError(errorCode)) return errorCode; if (errorCode >= cSrcSize) return ERROR(srcSize_wrong); ip += errorCode; cSrcSize -= errorCode; return HUF_decompress4X2_usingDTable (dst, dstSize, ip, cSrcSize, DTable); } /***************************/ /* double-symbols decoding */ /***************************/ static void HUF_fillDTableX4Level2(HUF_DEltX4* DTable, U32 sizeLog, const U32 consumed, const U32* rankValOrigin, const int minWeight, const sortedSymbol_t* sortedSymbols, const U32 sortedListSize, U32 nbBitsBaseline, U16 baseSeq) { HUF_DEltX4 DElt; U32 rankVal[HUF_ABSOLUTEMAX_TABLELOG + 1]; U32 s; /* get pre-calculated rankVal */ memcpy(rankVal, rankValOrigin, sizeof(rankVal)); /* fill skipped values */ if (minWeight>1) { U32 i, skipSize = rankVal[minWeight]; MEM_writeLE16(&(DElt.sequence), baseSeq); DElt.nbBits = (BYTE)(consumed); DElt.length = 1; for (i = 0; i < skipSize; i++) DTable[i] = DElt; } /* fill DTable */ for (s=0; s= 1 */ rankVal[weight] += length; } } typedef U32 rankVal_t[HUF_ABSOLUTEMAX_TABLELOG][HUF_ABSOLUTEMAX_TABLELOG + 1]; static void HUF_fillDTableX4(HUF_DEltX4* DTable, const U32 targetLog, const sortedSymbol_t* sortedList, const U32 sortedListSize, const U32* rankStart, rankVal_t rankValOrigin, const U32 maxWeight, const U32 nbBitsBaseline) { U32 rankVal[HUF_ABSOLUTEMAX_TABLELOG + 1]; const int scaleLog = nbBitsBaseline - 
targetLog; /* note : targetLog >= srcLog, hence scaleLog <= 1 */ const U32 minBits = nbBitsBaseline - maxWeight; U32 s; memcpy(rankVal, rankValOrigin, sizeof(rankVal)); /* fill DTable */ for (s=0; s= minBits) /* enough room for a second symbol */ { U32 sortedRank; int minWeight = nbBits + scaleLog; if (minWeight < 1) minWeight = 1; sortedRank = rankStart[minWeight]; HUF_fillDTableX4Level2(DTable+start, targetLog-nbBits, nbBits, rankValOrigin[nbBits], minWeight, sortedList+sortedRank, sortedListSize-sortedRank, nbBitsBaseline, symbol); } else { U32 i; const U32 end = start + length; HUF_DEltX4 DElt; MEM_writeLE16(&(DElt.sequence), symbol); DElt.nbBits = (BYTE)(nbBits); DElt.length = 1; for (i = start; i < end; i++) DTable[i] = DElt; } rankVal[weight] += length; } } static size_t HUF_readDTableX4 (U32* DTable, const void* src, size_t srcSize) { BYTE weightList[HUF_MAX_SYMBOL_VALUE + 1]; sortedSymbol_t sortedSymbol[HUF_MAX_SYMBOL_VALUE + 1]; U32 rankStats[HUF_ABSOLUTEMAX_TABLELOG + 1] = { 0 }; U32 rankStart0[HUF_ABSOLUTEMAX_TABLELOG + 2] = { 0 }; U32* const rankStart = rankStart0+1; rankVal_t rankVal; U32 tableLog, maxW, sizeOfSort, nbSymbols; const U32 memLog = DTable[0]; const BYTE* ip = (const BYTE*) src; size_t iSize = ip[0]; void* ptr = DTable; HUF_DEltX4* const dt = ((HUF_DEltX4*)ptr) + 1; HUF_STATIC_ASSERT(sizeof(HUF_DEltX4) == sizeof(U32)); /* if compilation fails here, assertion is false */ if (memLog > HUF_ABSOLUTEMAX_TABLELOG) return ERROR(tableLog_tooLarge); //memset(weightList, 0, sizeof(weightList)); /* is not necessary, even though some analyzer complain ... */ iSize = HUF_readStats(weightList, HUF_MAX_SYMBOL_VALUE + 1, rankStats, &nbSymbols, &tableLog, src, srcSize); if (HUF_isError(iSize)) return iSize; /* check result */ if (tableLog > memLog) return ERROR(tableLog_tooLarge); /* DTable can't fit code depth */ /* find maxWeight */ for (maxW = tableLog; rankStats[maxW]==0; maxW--) { if (!maxW) return ERROR(GENERIC); } /* necessarily finds a solution before maxW==0 */ /* Get start index of each weight */ { U32 w, nextRankStart = 0; for (w=1; w<=maxW; w++) { U32 current = nextRankStart; nextRankStart += rankStats[w]; rankStart[w] = current; } rankStart[0] = nextRankStart; /* put all 0w symbols at the end of sorted list*/ sizeOfSort = nextRankStart; } /* sort symbols by weight */ { U32 s; for (s=0; s> consumed; } } } HUF_fillDTableX4(dt, memLog, sortedSymbol, sizeOfSort, rankStart0, rankVal, maxW, tableLog+1); return iSize; } static U32 HUF_decodeSymbolX4(void* op, BIT_DStream_t* DStream, const HUF_DEltX4* dt, const U32 dtLog) { const size_t val = BIT_lookBitsFast(DStream, dtLog); /* note : dtLog >= 1 */ memcpy(op, dt+val, 2); BIT_skipBits(DStream, dt[val].nbBits); return dt[val].length; } static U32 HUF_decodeLastSymbolX4(void* op, BIT_DStream_t* DStream, const HUF_DEltX4* dt, const U32 dtLog) { const size_t val = BIT_lookBitsFast(DStream, dtLog); /* note : dtLog >= 1 */ memcpy(op, dt+val, 1); if (dt[val].length==1) BIT_skipBits(DStream, dt[val].nbBits); else { if (DStream->bitsConsumed < (sizeof(DStream->bitContainer)*8)) { BIT_skipBits(DStream, dt[val].nbBits); if (DStream->bitsConsumed > (sizeof(DStream->bitContainer)*8)) DStream->bitsConsumed = (sizeof(DStream->bitContainer)*8); /* ugly hack; works only because it's the last symbol. 
Note : can't easily extract nbBits from just this symbol */ } } return 1; } #define HUF_DECODE_SYMBOLX4_0(ptr, DStreamPtr) \ ptr += HUF_decodeSymbolX4(ptr, DStreamPtr, dt, dtLog) #define HUF_DECODE_SYMBOLX4_1(ptr, DStreamPtr) \ if (MEM_64bits() || (HUF_MAX_TABLELOG<=12)) \ ptr += HUF_decodeSymbolX4(ptr, DStreamPtr, dt, dtLog) #define HUF_DECODE_SYMBOLX4_2(ptr, DStreamPtr) \ if (MEM_64bits()) \ ptr += HUF_decodeSymbolX4(ptr, DStreamPtr, dt, dtLog) static inline size_t HUF_decodeStreamX4(BYTE* p, BIT_DStream_t* bitDPtr, BYTE* const pEnd, const HUF_DEltX4* const dt, const U32 dtLog) { BYTE* const pStart = p; /* up to 8 symbols at a time */ while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) && (p < pEnd-7)) { HUF_DECODE_SYMBOLX4_2(p, bitDPtr); HUF_DECODE_SYMBOLX4_1(p, bitDPtr); HUF_DECODE_SYMBOLX4_2(p, bitDPtr); HUF_DECODE_SYMBOLX4_0(p, bitDPtr); } /* closer to the end */ while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) && (p <= pEnd-2)) HUF_DECODE_SYMBOLX4_0(p, bitDPtr); while (p <= pEnd-2) HUF_DECODE_SYMBOLX4_0(p, bitDPtr); /* no need to reload : reached the end of DStream */ if (p < pEnd) p += HUF_decodeLastSymbolX4(p, bitDPtr, dt, dtLog); return p-pStart; } static size_t HUF_decompress4X4_usingDTable( void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, const U32* DTable) { if (cSrcSize < 10) return ERROR(corruption_detected); /* strict minimum : jump table + 1 byte per stream */ { const BYTE* const istart = (const BYTE*) cSrc; BYTE* const ostart = (BYTE*) dst; BYTE* const oend = ostart + dstSize; const void* ptr = DTable; const HUF_DEltX4* const dt = ((const HUF_DEltX4*)ptr) +1; const U32 dtLog = DTable[0]; size_t errorCode; /* Init */ BIT_DStream_t bitD1; BIT_DStream_t bitD2; BIT_DStream_t bitD3; BIT_DStream_t bitD4; const size_t length1 = MEM_readLE16(istart); const size_t length2 = MEM_readLE16(istart+2); const size_t length3 = MEM_readLE16(istart+4); size_t length4; const BYTE* const istart1 = istart + 6; /* jumpTable */ const BYTE* const istart2 = istart1 + length1; const BYTE* const istart3 = istart2 + length2; const BYTE* const istart4 = istart3 + length3; const size_t segmentSize = (dstSize+3) / 4; BYTE* const opStart2 = ostart + segmentSize; BYTE* const opStart3 = opStart2 + segmentSize; BYTE* const opStart4 = opStart3 + segmentSize; BYTE* op1 = ostart; BYTE* op2 = opStart2; BYTE* op3 = opStart3; BYTE* op4 = opStart4; U32 endSignal; length4 = cSrcSize - (length1 + length2 + length3 + 6); if (length4 > cSrcSize) return ERROR(corruption_detected); /* overflow */ errorCode = BIT_initDStream(&bitD1, istart1, length1); if (HUF_isError(errorCode)) return errorCode; errorCode = BIT_initDStream(&bitD2, istart2, length2); if (HUF_isError(errorCode)) return errorCode; errorCode = BIT_initDStream(&bitD3, istart3, length3); if (HUF_isError(errorCode)) return errorCode; errorCode = BIT_initDStream(&bitD4, istart4, length4); if (HUF_isError(errorCode)) return errorCode; /* 16-32 symbols per loop (4-8 symbols per stream) */ endSignal = BIT_reloadDStream(&bitD1) | BIT_reloadDStream(&bitD2) | BIT_reloadDStream(&bitD3) | BIT_reloadDStream(&bitD4); for ( ; (endSignal==BIT_DStream_unfinished) && (op4<(oend-7)) ; ) { HUF_DECODE_SYMBOLX4_2(op1, &bitD1); HUF_DECODE_SYMBOLX4_2(op2, &bitD2); HUF_DECODE_SYMBOLX4_2(op3, &bitD3); HUF_DECODE_SYMBOLX4_2(op4, &bitD4); HUF_DECODE_SYMBOLX4_1(op1, &bitD1); HUF_DECODE_SYMBOLX4_1(op2, &bitD2); HUF_DECODE_SYMBOLX4_1(op3, &bitD3); HUF_DECODE_SYMBOLX4_1(op4, &bitD4); HUF_DECODE_SYMBOLX4_2(op1, &bitD1); HUF_DECODE_SYMBOLX4_2(op2, 
&bitD2); HUF_DECODE_SYMBOLX4_2(op3, &bitD3); HUF_DECODE_SYMBOLX4_2(op4, &bitD4); HUF_DECODE_SYMBOLX4_0(op1, &bitD1); HUF_DECODE_SYMBOLX4_0(op2, &bitD2); HUF_DECODE_SYMBOLX4_0(op3, &bitD3); HUF_DECODE_SYMBOLX4_0(op4, &bitD4); endSignal = BIT_reloadDStream(&bitD1) | BIT_reloadDStream(&bitD2) | BIT_reloadDStream(&bitD3) | BIT_reloadDStream(&bitD4); } /* check corruption */ if (op1 > opStart2) return ERROR(corruption_detected); if (op2 > opStart3) return ERROR(corruption_detected); if (op3 > opStart4) return ERROR(corruption_detected); /* note : op4 supposed already verified within main loop */ /* finish bitStreams one by one */ HUF_decodeStreamX4(op1, &bitD1, opStart2, dt, dtLog); HUF_decodeStreamX4(op2, &bitD2, opStart3, dt, dtLog); HUF_decodeStreamX4(op3, &bitD3, opStart4, dt, dtLog); HUF_decodeStreamX4(op4, &bitD4, oend, dt, dtLog); /* check */ endSignal = BIT_endOfDStream(&bitD1) & BIT_endOfDStream(&bitD2) & BIT_endOfDStream(&bitD3) & BIT_endOfDStream(&bitD4); if (!endSignal) return ERROR(corruption_detected); /* decoded size */ return dstSize; } } static size_t HUF_decompress4X4 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize) { HUF_CREATE_STATIC_DTABLEX4(DTable, HUF_MAX_TABLELOG); const BYTE* ip = (const BYTE*) cSrc; size_t hSize = HUF_readDTableX4 (DTable, cSrc, cSrcSize); if (HUF_isError(hSize)) return hSize; if (hSize >= cSrcSize) return ERROR(srcSize_wrong); ip += hSize; cSrcSize -= hSize; return HUF_decompress4X4_usingDTable (dst, dstSize, ip, cSrcSize, DTable); } /**********************************/ /* Generic decompression selector */ /**********************************/ typedef struct { U32 tableTime; U32 decode256Time; } algo_time_t; static const algo_time_t algoTime[16 /* Quantization */][3 /* single, double, quad */] = { /* single, double, quad */ {{0,0}, {1,1}, {2,2}}, /* Q==0 : impossible */ {{0,0}, {1,1}, {2,2}}, /* Q==1 : impossible */ {{ 38,130}, {1313, 74}, {2151, 38}}, /* Q == 2 : 12-18% */ {{ 448,128}, {1353, 74}, {2238, 41}}, /* Q == 3 : 18-25% */ {{ 556,128}, {1353, 74}, {2238, 47}}, /* Q == 4 : 25-32% */ {{ 714,128}, {1418, 74}, {2436, 53}}, /* Q == 5 : 32-38% */ {{ 883,128}, {1437, 74}, {2464, 61}}, /* Q == 6 : 38-44% */ {{ 897,128}, {1515, 75}, {2622, 68}}, /* Q == 7 : 44-50% */ {{ 926,128}, {1613, 75}, {2730, 75}}, /* Q == 8 : 50-56% */ {{ 947,128}, {1729, 77}, {3359, 77}}, /* Q == 9 : 56-62% */ {{1107,128}, {2083, 81}, {4006, 84}}, /* Q ==10 : 62-69% */ {{1177,128}, {2379, 87}, {4785, 88}}, /* Q ==11 : 69-75% */ {{1242,128}, {2415, 93}, {5155, 84}}, /* Q ==12 : 75-81% */ {{1349,128}, {2644,106}, {5260,106}}, /* Q ==13 : 81-87% */ {{1455,128}, {2422,124}, {4174,124}}, /* Q ==14 : 87-93% */ {{ 722,128}, {1891,145}, {1936,146}}, /* Q ==15 : 93-99% */ }; typedef size_t (*decompressionAlgo)(void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); static size_t HUF_decompress (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize) { static const decompressionAlgo decompress[3] = { HUF_decompress4X2, HUF_decompress4X4, NULL }; /* estimate decompression time */ U32 Q; const U32 D256 = (U32)(dstSize >> 8); U32 Dtime[3]; U32 algoNb = 0; int n; /* validation checks */ if (dstSize == 0) return ERROR(dstSize_tooSmall); if (cSrcSize > dstSize) return ERROR(corruption_detected); /* invalid */ if (cSrcSize == dstSize) { memcpy(dst, cSrc, dstSize); return dstSize; } /* not compressed */ if (cSrcSize == 1) { memset(dst, *(const BYTE*)cSrc, dstSize); return dstSize; } /* RLE */ /* decoder timing evaluation */ Q = (U32)(cSrcSize * 16 / dstSize); /* Q < 
16 since dstSize > cSrcSize */ for (n=0; n<3; n++) Dtime[n] = algoTime[Q][n].tableTime + (algoTime[Q][n].decode256Time * D256); Dtime[1] += Dtime[1] >> 4; Dtime[2] += Dtime[2] >> 3; /* advantage to algorithms using less memory, for cache eviction */ if (Dtime[1] < Dtime[0]) algoNb = 1; return decompress[algoNb](dst, dstSize, cSrc, cSrcSize); //return HUF_decompress4X2(dst, dstSize, cSrc, cSrcSize); /* multi-streams single-symbol decoding */ //return HUF_decompress4X4(dst, dstSize, cSrc, cSrcSize); /* multi-streams double-symbols decoding */ //return HUF_decompress4X6(dst, dstSize, cSrc, cSrcSize); /* multi-streams quad-symbols decoding */ } /* zstd - standard compression library Copyright (C) 2014-2015, Yann Collet. BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. You can contact the author at : - zstd source repository : https://github.com/Cyan4973/zstd - ztsd public forum : https://groups.google.com/forum/#!forum/lz4c */ /* *************************************************************** * Tuning parameters *****************************************************************/ /*! * MEMORY_USAGE : * Memory usage formula : N->2^N Bytes (examples : 10 -> 1KB; 12 -> 4KB ; 16 -> 64KB; 20 -> 1MB; etc.) * Increasing memory usage improves compression ratio * Reduced memory usage can improve speed, due to cache effect */ #define ZSTD_MEMORY_USAGE 17 /*! * HEAPMODE : * Select how default compression functions will allocate memory for their hash table, * in memory stack (0, fastest), or in memory heap (1, requires malloc()) * Note that compression context is fairly large, as a consequence heap memory is recommended. */ #ifndef ZSTD_HEAPMODE # define ZSTD_HEAPMODE 1 #endif /* ZSTD_HEAPMODE */ /*! 
* LEGACY_SUPPORT : * decompressor can decode older formats (starting from Zstd 0.1+) */ #ifndef ZSTD_LEGACY_SUPPORT # define ZSTD_LEGACY_SUPPORT 1 #endif /* ******************************************************* * Includes *********************************************************/ #include /* calloc */ #include /* memcpy, memmove */ #include /* debug : printf */ /* ******************************************************* * Compiler specifics *********************************************************/ #ifdef __AVX2__ # include /* AVX2 intrinsics */ #endif #ifdef _MSC_VER /* Visual Studio */ # include /* For Visual 2005 */ # pragma warning(disable : 4127) /* disable: C4127: conditional expression is constant */ # pragma warning(disable : 4324) /* disable: C4324: padded structure */ #else # define GCC_VERSION (__GNUC__ * 100 + __GNUC_MINOR__) #endif /* ******************************************************* * Constants *********************************************************/ #define HASH_LOG (ZSTD_MEMORY_USAGE - 2) #define HASH_TABLESIZE (1 << HASH_LOG) #define HASH_MASK (HASH_TABLESIZE - 1) #define KNUTH 2654435761 #define BIT7 128 #define BIT6 64 #define BIT5 32 #define BIT4 16 #define BIT1 2 #define BIT0 1 #define KB *(1 <<10) #define MB *(1 <<20) #define GB *(1U<<30) #define BLOCKSIZE (128 KB) /* define, for static allocation */ #define MIN_SEQUENCES_SIZE (2 /*seqNb*/ + 2 /*dumps*/ + 3 /*seqTables*/ + 1 /*bitStream*/) #define MIN_CBLOCK_SIZE (3 /*litCSize*/ + MIN_SEQUENCES_SIZE) #define IS_RAW BIT0 #define IS_RLE BIT1 #define WORKPLACESIZE (BLOCKSIZE*3) #define MINMATCH 4 #define MLbits 7 #define LLbits 6 #define Offbits 5 #define MaxML ((1<blockType = (blockType_t)(headerFlags >> 6); bpPtr->origSize = (bpPtr->blockType == bt_rle) ? cSize : 0; if (bpPtr->blockType == bt_end) return 0; if (bpPtr->blockType == bt_rle) return 1; return cSize; } static size_t ZSTD_copyUncompressedBlock(void* dst, size_t maxDstSize, const void* src, size_t srcSize) { if (srcSize > maxDstSize) return ERROR(dstSize_tooSmall); if (srcSize > 0) { memcpy(dst, src, srcSize); } return srcSize; } /** ZSTD_decompressLiterals @return : nb of bytes read from src, or an error code*/ static size_t ZSTD_decompressLiterals(void* dst, size_t* maxDstSizePtr, const void* src, size_t srcSize) { const BYTE* ip = (const BYTE*)src; const size_t litSize = (MEM_readLE32(src) & 0x1FFFFF) >> 2; /* no buffer issue : srcSize >= MIN_CBLOCK_SIZE */ const size_t litCSize = (MEM_readLE32(ip+2) & 0xFFFFFF) >> 5; /* no buffer issue : srcSize >= MIN_CBLOCK_SIZE */ if (litSize > *maxDstSizePtr) return ERROR(corruption_detected); if (litCSize + 5 > srcSize) return ERROR(corruption_detected); if (HUF_isError(HUF_decompress(dst, litSize, ip+5, litCSize))) return ERROR(corruption_detected); *maxDstSizePtr = litSize; return litCSize + 5; } /** ZSTD_decodeLiteralsBlock @return : nb of bytes read from src (< srcSize )*/ static size_t ZSTD_decodeLiteralsBlock(void* ctx, const void* src, size_t srcSize) { ZSTD_DCtx* dctx = (ZSTD_DCtx*)ctx; const BYTE* const istart = (const BYTE* const)src; /* any compressed block with literals segment must be at least this size */ if (srcSize < MIN_CBLOCK_SIZE) return ERROR(corruption_detected); switch(*istart & 3) { default: case 0: { size_t litSize = BLOCKSIZE; const size_t readSize = ZSTD_decompressLiterals(dctx->litBuffer, &litSize, src, srcSize); dctx->litPtr = dctx->litBuffer; dctx->litSize = litSize; memset(dctx->litBuffer + dctx->litSize, 0, 8); return readSize; /* works if it's an error too */ } case IS_RAW: { 
const size_t litSize = (MEM_readLE32(istart) & 0xFFFFFF) >> 2; /* no buffer issue : srcSize >= MIN_CBLOCK_SIZE */ if (litSize > srcSize-11) /* risk of reading too far with wildcopy */ { if (litSize > BLOCKSIZE) return ERROR(corruption_detected); if (litSize > srcSize-3) return ERROR(corruption_detected); memcpy(dctx->litBuffer, istart, litSize); dctx->litPtr = dctx->litBuffer; dctx->litSize = litSize; memset(dctx->litBuffer + dctx->litSize, 0, 8); return litSize+3; } /* direct reference into compressed stream */ dctx->litPtr = istart+3; dctx->litSize = litSize; return litSize+3; } case IS_RLE: { const size_t litSize = (MEM_readLE32(istart) & 0xFFFFFF) >> 2; /* no buffer issue : srcSize >= MIN_CBLOCK_SIZE */ if (litSize > BLOCKSIZE) return ERROR(corruption_detected); memset(dctx->litBuffer, istart[3], litSize + 8); dctx->litPtr = dctx->litBuffer; dctx->litSize = litSize; return 4; } } } static size_t ZSTD_decodeSeqHeaders(int* nbSeq, const BYTE** dumpsPtr, size_t* dumpsLengthPtr, FSE_DTable* DTableLL, FSE_DTable* DTableML, FSE_DTable* DTableOffb, const void* src, size_t srcSize) { const BYTE* const istart = (const BYTE* const)src; const BYTE* ip = istart; const BYTE* const iend = istart + srcSize; U32 LLtype, Offtype, MLtype; U32 LLlog, Offlog, MLlog; size_t dumpsLength; /* check */ if (srcSize < 5) return ERROR(srcSize_wrong); /* SeqHead */ *nbSeq = MEM_readLE16(ip); ip+=2; LLtype = *ip >> 6; Offtype = (*ip >> 4) & 3; MLtype = (*ip >> 2) & 3; if (*ip & 2) { dumpsLength = ip[2]; dumpsLength += ip[1] << 8; ip += 3; } else { dumpsLength = ip[1]; dumpsLength += (ip[0] & 1) << 8; ip += 2; } *dumpsPtr = ip; ip += dumpsLength; *dumpsLengthPtr = dumpsLength; /* check */ if (ip > iend-3) return ERROR(srcSize_wrong); /* min : all 3 are "raw", hence no header, but at least xxLog bits per type */ /* sequences */ { S16 norm[MaxML+1]; /* assumption : MaxML >= MaxLL and MaxOff */ size_t headerSize; /* Build DTables */ switch(LLtype) { case bt_rle : LLlog = 0; FSE_buildDTable_rle(DTableLL, *ip++); break; case bt_raw : LLlog = LLbits; FSE_buildDTable_raw(DTableLL, LLbits); break; default : { U32 max = MaxLL; headerSize = FSE_readNCount(norm, &max, &LLlog, ip, iend-ip); if (FSE_isError(headerSize)) return ERROR(GENERIC); if (LLlog > LLFSELog) return ERROR(corruption_detected); ip += headerSize; FSE_buildDTable(DTableLL, norm, max, LLlog); } } switch(Offtype) { case bt_rle : Offlog = 0; if (ip > iend-2) return ERROR(srcSize_wrong); /* min : "raw", hence no header, but at least xxLog bits */ FSE_buildDTable_rle(DTableOffb, *ip++ & MaxOff); /* if *ip > MaxOff, data is corrupted */ break; case bt_raw : Offlog = Offbits; FSE_buildDTable_raw(DTableOffb, Offbits); break; default : { U32 max = MaxOff; headerSize = FSE_readNCount(norm, &max, &Offlog, ip, iend-ip); if (FSE_isError(headerSize)) return ERROR(GENERIC); if (Offlog > OffFSELog) return ERROR(corruption_detected); ip += headerSize; FSE_buildDTable(DTableOffb, norm, max, Offlog); } } switch(MLtype) { case bt_rle : MLlog = 0; if (ip > iend-2) return ERROR(srcSize_wrong); /* min : "raw", hence no header, but at least xxLog bits */ FSE_buildDTable_rle(DTableML, *ip++); break; case bt_raw : MLlog = MLbits; FSE_buildDTable_raw(DTableML, MLbits); break; default : { U32 max = MaxML; headerSize = FSE_readNCount(norm, &max, &MLlog, ip, iend-ip); if (FSE_isError(headerSize)) return ERROR(GENERIC); if (MLlog > MLFSELog) return ERROR(corruption_detected); ip += headerSize; FSE_buildDTable(DTableML, norm, max, MLlog); } } } return ip-istart; } typedef struct { size_t 
litLength; size_t offset; size_t matchLength; } seq_t; typedef struct { BIT_DStream_t DStream; FSE_DState_t stateLL; FSE_DState_t stateOffb; FSE_DState_t stateML; size_t prevOffset; const BYTE* dumps; const BYTE* dumpsEnd; } seqState_t; static void ZSTD_decodeSequence(seq_t* seq, seqState_t* seqState) { size_t litLength; size_t prevOffset; size_t offset; size_t matchLength; const BYTE* dumps = seqState->dumps; const BYTE* const de = seqState->dumpsEnd; /* Literal length */ litLength = FSE_decodeSymbol(&(seqState->stateLL), &(seqState->DStream)); prevOffset = litLength ? seq->offset : seqState->prevOffset; seqState->prevOffset = seq->offset; if (litLength == MaxLL) { const U32 add = dumps= de) dumps = de-1; /* late correction, to avoid read overflow (data is now corrupted anyway) */ } /* Offset */ { static const size_t offsetPrefix[MaxOff+1] = { /* note : size_t faster than U32 */ 1 /*fake*/, 1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024, 2048, 4096, 8192, 16384, 32768, 65536, 131072, 262144, 524288, 1048576, 2097152, 4194304, 8388608, 16777216, 33554432, /*fake*/ 1, 1, 1, 1, 1 }; U32 offsetCode, nbBits; offsetCode = FSE_decodeSymbol(&(seqState->stateOffb), &(seqState->DStream)); /* <= maxOff, by table construction */ if (MEM_32bits()) BIT_reloadDStream(&(seqState->DStream)); nbBits = offsetCode - 1; if (offsetCode==0) nbBits = 0; /* cmove */ offset = offsetPrefix[offsetCode] + BIT_readBits(&(seqState->DStream), nbBits); if (MEM_32bits()) BIT_reloadDStream(&(seqState->DStream)); if (offsetCode==0) offset = prevOffset; /* cmove */ } /* MatchLength */ matchLength = FSE_decodeSymbol(&(seqState->stateML), &(seqState->DStream)); if (matchLength == MaxML) { const U32 add = dumps= de) dumps = de-1; /* late correction, to avoid read overflow (data is now corrupted anyway) */ } matchLength += MINMATCH; /* save result */ seq->litLength = litLength; seq->offset = offset; seq->matchLength = matchLength; seqState->dumps = dumps; } static size_t ZSTD_execSequence(BYTE* op, seq_t sequence, const BYTE** litPtr, const BYTE* const litLimit, BYTE* const base, BYTE* const oend) { static const int dec32table[] = {0, 1, 2, 1, 4, 4, 4, 4}; /* added */ static const int dec64table[] = {8, 8, 8, 7, 8, 9,10,11}; /* subtracted */ const BYTE* const ostart = op; BYTE* const oLitEnd = op + sequence.litLength; BYTE* const oMatchEnd = op + sequence.litLength + sequence.matchLength; /* risk : address space overflow (32-bits) */ BYTE* const oend_8 = oend-8; const BYTE* const litEnd = *litPtr + sequence.litLength; /* checks */ if (oLitEnd > oend_8) return ERROR(dstSize_tooSmall); /* last match must start at a minimum distance of 8 from oend */ if (oMatchEnd > oend) return ERROR(dstSize_tooSmall); /* overwrite beyond dst buffer */ if (litEnd > litLimit) return ERROR(corruption_detected); /* overRead beyond lit buffer */ /* copy Literals */ ZSTD_wildcopy(op, *litPtr, sequence.litLength); /* note : oLitEnd <= oend-8 : no risk of overwrite beyond oend */ op = oLitEnd; *litPtr = litEnd; /* update for next sequence */ /* copy Match */ { const BYTE* match = op - sequence.offset; /* check */ if (sequence.offset > (size_t)op) return ERROR(corruption_detected); /* address space overflow test (this test seems kept by clang optimizer) */ //if (match > op) return ERROR(corruption_detected); /* address space overflow test (is clang optimizer removing this test ?) 
*/ if (match < base) return ERROR(corruption_detected); /* close range match, overlap */ if (sequence.offset < 8) { const int dec64 = dec64table[sequence.offset]; op[0] = match[0]; op[1] = match[1]; op[2] = match[2]; op[3] = match[3]; match += dec32table[sequence.offset]; ZSTD_copy4(op+4, match); match -= dec64; } else { ZSTD_copy8(op, match); } op += 8; match += 8; if (oMatchEnd > oend-(16-MINMATCH)) { if (op < oend_8) { ZSTD_wildcopy(op, match, oend_8 - op); match += oend_8 - op; op = oend_8; } while (op < oMatchEnd) *op++ = *match++; } else { ZSTD_wildcopy(op, match, (ptrdiff_t)sequence.matchLength-8); /* works even if matchLength < 8 */ } } return oMatchEnd - ostart; } static size_t ZSTD_decompressSequences( void* ctx, void* dst, size_t maxDstSize, const void* seqStart, size_t seqSize) { ZSTD_DCtx* dctx = (ZSTD_DCtx*)ctx; const BYTE* ip = (const BYTE*)seqStart; const BYTE* const iend = ip + seqSize; BYTE* const ostart = (BYTE* const)dst; BYTE* op = ostart; BYTE* const oend = ostart + maxDstSize; size_t errorCode, dumpsLength; const BYTE* litPtr = dctx->litPtr; const BYTE* const litEnd = litPtr + dctx->litSize; int nbSeq; const BYTE* dumps; U32* DTableLL = dctx->LLTable; U32* DTableML = dctx->MLTable; U32* DTableOffb = dctx->OffTable; BYTE* const base = (BYTE*) (dctx->base); /* Build Decoding Tables */ errorCode = ZSTD_decodeSeqHeaders(&nbSeq, &dumps, &dumpsLength, DTableLL, DTableML, DTableOffb, ip, iend-ip); if (ZSTD_isError(errorCode)) return errorCode; ip += errorCode; /* Regen sequences */ { seq_t sequence; seqState_t seqState; memset(&sequence, 0, sizeof(sequence)); seqState.dumps = dumps; seqState.dumpsEnd = dumps + dumpsLength; seqState.prevOffset = sequence.offset = 4; errorCode = BIT_initDStream(&(seqState.DStream), ip, iend-ip); if (ERR_isError(errorCode)) return ERROR(corruption_detected); FSE_initDState(&(seqState.stateLL), &(seqState.DStream), DTableLL); FSE_initDState(&(seqState.stateOffb), &(seqState.DStream), DTableOffb); FSE_initDState(&(seqState.stateML), &(seqState.DStream), DTableML); for ( ; (BIT_reloadDStream(&(seqState.DStream)) <= BIT_DStream_completed) && (nbSeq>0) ; ) { size_t oneSeqSize; nbSeq--; ZSTD_decodeSequence(&sequence, &seqState); oneSeqSize = ZSTD_execSequence(op, sequence, &litPtr, litEnd, base, oend); if (ZSTD_isError(oneSeqSize)) return oneSeqSize; op += oneSeqSize; } /* check if reached exact end */ if ( !BIT_endOfDStream(&(seqState.DStream)) ) return ERROR(corruption_detected); /* requested too much : data is corrupted */ if (nbSeq<0) return ERROR(corruption_detected); /* requested too many sequences : data is corrupted */ /* last literal segment */ { size_t lastLLSize = litEnd - litPtr; if (litPtr > litEnd) return ERROR(corruption_detected); if (op+lastLLSize > oend) return ERROR(dstSize_tooSmall); if (lastLLSize > 0) { if (op != litPtr) memmove(op, litPtr, lastLLSize); op += lastLLSize; } } } return op-ostart; } static size_t ZSTD_decompressBlock( void* ctx, void* dst, size_t maxDstSize, const void* src, size_t srcSize) { /* blockType == blockCompressed */ const BYTE* ip = (const BYTE*)src; /* Decode literals sub-block */ size_t litCSize = ZSTD_decodeLiteralsBlock(ctx, src, srcSize); if (ZSTD_isError(litCSize)) return litCSize; ip += litCSize; srcSize -= litCSize; return ZSTD_decompressSequences(ctx, dst, maxDstSize, ip, srcSize); } static size_t ZSTD_decompressDCtx(void* ctx, void* dst, size_t maxDstSize, const void* src, size_t srcSize) { const BYTE* ip = (const BYTE*)src; const BYTE* iend = ip + srcSize; BYTE* const ostart = (BYTE* 
const)dst; BYTE* op = ostart; BYTE* const oend = ostart + maxDstSize; size_t remainingSize = srcSize; U32 magicNumber; blockProperties_t blockProperties; /* Frame Header */ if (srcSize < ZSTD_frameHeaderSize+ZSTD_blockHeaderSize) return ERROR(srcSize_wrong); magicNumber = MEM_readLE32(src); if (magicNumber != ZSTD_magicNumber) return ERROR(prefix_unknown); ip += ZSTD_frameHeaderSize; remainingSize -= ZSTD_frameHeaderSize; /* Loop on each block */ while (1) { size_t decodedSize=0; size_t cBlockSize = ZSTD_getcBlockSize(ip, iend-ip, &blockProperties); if (ZSTD_isError(cBlockSize)) return cBlockSize; ip += ZSTD_blockHeaderSize; remainingSize -= ZSTD_blockHeaderSize; if (cBlockSize > remainingSize) return ERROR(srcSize_wrong); switch(blockProperties.blockType) { case bt_compressed: decodedSize = ZSTD_decompressBlock(ctx, op, oend-op, ip, cBlockSize); break; case bt_raw : decodedSize = ZSTD_copyUncompressedBlock(op, oend-op, ip, cBlockSize); break; case bt_rle : return ERROR(GENERIC); /* not yet supported */ break; case bt_end : /* end of frame */ if (remainingSize) return ERROR(srcSize_wrong); break; default: return ERROR(GENERIC); /* impossible */ } if (cBlockSize == 0) break; /* bt_end */ if (ZSTD_isError(decodedSize)) return decodedSize; op += decodedSize; ip += cBlockSize; remainingSize -= cBlockSize; } return op-ostart; } static size_t ZSTD_decompress(void* dst, size_t maxDstSize, const void* src, size_t srcSize) { ZSTD_DCtx ctx; ctx.base = dst; return ZSTD_decompressDCtx(&ctx, dst, maxDstSize, src, srcSize); } /* ZSTD_errorFrameSizeInfoLegacy() : assumes `cSize` and `dBound` are _not_ NULL */ MEM_STATIC void ZSTD_errorFrameSizeInfoLegacy(size_t* cSize, unsigned long long* dBound, size_t ret) { *cSize = ret; *dBound = ZSTD_CONTENTSIZE_ERROR; } void ZSTDv03_findFrameSizeInfoLegacy(const void *src, size_t srcSize, size_t* cSize, unsigned long long* dBound) { const BYTE* ip = (const BYTE*)src; size_t remainingSize = srcSize; size_t nbBlocks = 0; U32 magicNumber; blockProperties_t blockProperties; /* Frame Header */ if (srcSize < ZSTD_frameHeaderSize+ZSTD_blockHeaderSize) { ZSTD_errorFrameSizeInfoLegacy(cSize, dBound, ERROR(srcSize_wrong)); return; } magicNumber = MEM_readLE32(src); if (magicNumber != ZSTD_magicNumber) { ZSTD_errorFrameSizeInfoLegacy(cSize, dBound, ERROR(prefix_unknown)); return; } ip += ZSTD_frameHeaderSize; remainingSize -= ZSTD_frameHeaderSize; /* Loop on each block */ while (1) { size_t cBlockSize = ZSTD_getcBlockSize(ip, remainingSize, &blockProperties); if (ZSTD_isError(cBlockSize)) { ZSTD_errorFrameSizeInfoLegacy(cSize, dBound, cBlockSize); return; } ip += ZSTD_blockHeaderSize; remainingSize -= ZSTD_blockHeaderSize; if (cBlockSize > remainingSize) { ZSTD_errorFrameSizeInfoLegacy(cSize, dBound, ERROR(srcSize_wrong)); return; } if (cBlockSize == 0) break; /* bt_end */ ip += cBlockSize; remainingSize -= cBlockSize; nbBlocks++; } *cSize = ip - (const BYTE*)src; *dBound = nbBlocks * BLOCKSIZE; } /******************************* * Streaming Decompression API *******************************/ static size_t ZSTD_resetDCtx(ZSTD_DCtx* dctx) { dctx->expected = ZSTD_frameHeaderSize; dctx->phase = 0; dctx->previousDstEnd = NULL; dctx->base = NULL; return 0; } static ZSTD_DCtx* ZSTD_createDCtx(void) { ZSTD_DCtx* dctx = (ZSTD_DCtx*)malloc(sizeof(ZSTD_DCtx)); if (dctx==NULL) return NULL; ZSTD_resetDCtx(dctx); return dctx; } static size_t ZSTD_freeDCtx(ZSTD_DCtx* dctx) { free(dctx); return 0; } static size_t ZSTD_nextSrcSizeToDecompress(ZSTD_DCtx* dctx) { return dctx->expected; } static 
size_t ZSTD_decompressContinue(ZSTD_DCtx* ctx, void* dst, size_t maxDstSize, const void* src, size_t srcSize) { /* Sanity check */ if (srcSize != ctx->expected) return ERROR(srcSize_wrong); if (dst != ctx->previousDstEnd) /* not contiguous */ ctx->base = dst; /* Decompress : frame header */ if (ctx->phase == 0) { /* Check frame magic header */ U32 magicNumber = MEM_readLE32(src); if (magicNumber != ZSTD_magicNumber) return ERROR(prefix_unknown); ctx->phase = 1; ctx->expected = ZSTD_blockHeaderSize; return 0; } /* Decompress : block header */ if (ctx->phase == 1) { blockProperties_t bp; size_t blockSize = ZSTD_getcBlockSize(src, ZSTD_blockHeaderSize, &bp); if (ZSTD_isError(blockSize)) return blockSize; if (bp.blockType == bt_end) { ctx->expected = 0; ctx->phase = 0; } else { ctx->expected = blockSize; ctx->bType = bp.blockType; ctx->phase = 2; } return 0; } /* Decompress : block content */ { size_t rSize; switch(ctx->bType) { case bt_compressed: rSize = ZSTD_decompressBlock(ctx, dst, maxDstSize, src, srcSize); break; case bt_raw : rSize = ZSTD_copyUncompressedBlock(dst, maxDstSize, src, srcSize); break; case bt_rle : return ERROR(GENERIC); /* not yet handled */ break; case bt_end : /* should never happen (filtered at phase 1) */ rSize = 0; break; default: return ERROR(GENERIC); } ctx->phase = 1; ctx->expected = ZSTD_blockHeaderSize; ctx->previousDstEnd = (void*)( ((char*)dst) + rSize); return rSize; } } /* wrapper layer */ unsigned ZSTDv03_isError(size_t code) { return ZSTD_isError(code); } size_t ZSTDv03_decompress( void* dst, size_t maxOriginalSize, const void* src, size_t compressedSize) { return ZSTD_decompress(dst, maxOriginalSize, src, compressedSize); } ZSTDv03_Dctx* ZSTDv03_createDCtx(void) { return (ZSTDv03_Dctx*)ZSTD_createDCtx(); } size_t ZSTDv03_freeDCtx(ZSTDv03_Dctx* dctx) { return ZSTD_freeDCtx((ZSTD_DCtx*)dctx); } size_t ZSTDv03_resetDCtx(ZSTDv03_Dctx* dctx) { return ZSTD_resetDCtx((ZSTD_DCtx*)dctx); } size_t ZSTDv03_nextSrcSizeToDecompress(ZSTDv03_Dctx* dctx) { return ZSTD_nextSrcSizeToDecompress((ZSTD_DCtx*)dctx); } size_t ZSTDv03_decompressContinue(ZSTDv03_Dctx* dctx, void* dst, size_t maxDstSize, const void* src, size_t srcSize) { return ZSTD_decompressContinue((ZSTD_DCtx*)dctx, dst, maxDstSize, src, srcSize); } borgbackup-1.1.15/src/borg/algorithms/zstd/lib/legacy/zstd_v01.h0000644000175000017500000000733413771325506024426 0ustar useruser00000000000000/* * Copyright (c) 2016-2020, Yann Collet, Facebook, Inc. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the * LICENSE file in the root directory of this source tree) and the GPLv2 (found * in the COPYING file in the root directory of this source tree). * You may select, at your option, one of the above-listed licenses. */ #ifndef ZSTD_V01_H_28739879432 #define ZSTD_V01_H_28739879432 #if defined (__cplusplus) extern "C" { #endif /* ************************************* * Includes ***************************************/ #include /* size_t */ /* ************************************* * Simple one-step function ***************************************/ /** ZSTDv01_decompress() : decompress ZSTD frames compliant with v0.1.x format compressedSize : is the exact source size maxOriginalSize : is the size of the 'dst' buffer, which must be already allocated. It must be equal or larger than originalSize, otherwise decompression will fail. 
return : the number of bytes decompressed into destination buffer (originalSize) or an errorCode if it fails (which can be tested using ZSTDv01_isError()) */ size_t ZSTDv01_decompress( void* dst, size_t maxOriginalSize, const void* src, size_t compressedSize); /** ZSTDv01_findFrameSizeInfoLegacy() : get the source length and decompressed bound of a ZSTD frame compliant with v0.1.x format srcSize : The size of the 'src' buffer, at least as large as the frame pointed to by 'src' cSize (output parameter) : the number of bytes that would be read to decompress this frame or an error code if it fails (which can be tested using ZSTDv01_isError()) dBound (output parameter) : an upper-bound for the decompressed size of the data in the frame or ZSTD_CONTENTSIZE_ERROR if an error occurs note : assumes `cSize` and `dBound` are _not_ NULL. */ void ZSTDv01_findFrameSizeInfoLegacy(const void *src, size_t srcSize, size_t* cSize, unsigned long long* dBound); /** ZSTDv01_isError() : tells if the result of ZSTDv01_decompress() is an error */ unsigned ZSTDv01_isError(size_t code); /* ************************************* * Advanced functions ***************************************/ typedef struct ZSTDv01_Dctx_s ZSTDv01_Dctx; ZSTDv01_Dctx* ZSTDv01_createDCtx(void); size_t ZSTDv01_freeDCtx(ZSTDv01_Dctx* dctx); size_t ZSTDv01_decompressDCtx(void* ctx, void* dst, size_t maxOriginalSize, const void* src, size_t compressedSize); /* ************************************* * Streaming functions ***************************************/ size_t ZSTDv01_resetDCtx(ZSTDv01_Dctx* dctx); size_t ZSTDv01_nextSrcSizeToDecompress(ZSTDv01_Dctx* dctx); size_t ZSTDv01_decompressContinue(ZSTDv01_Dctx* dctx, void* dst, size_t maxDstSize, const void* src, size_t srcSize); /** Use above functions alternatively. ZSTD_nextSrcSizeToDecompress() tells how much bytes to provide as 'srcSize' to ZSTD_decompressContinue(). ZSTD_decompressContinue() will use previous data blocks to improve compression if they are located prior to current block. Result is the number of bytes regenerated within 'dst'. It can be zero, which is not an error; it just means ZSTD_decompressContinue() has decoded some header. */ /* ************************************* * Prefix - version detection ***************************************/ #define ZSTDv01_magicNumber 0xFD2FB51E /* Big Endian version */ #define ZSTDv01_magicNumberLE 0x1EB52FFD /* Little Endian version */ #if defined (__cplusplus) } #endif #endif /* ZSTD_V01_H_28739879432 */ borgbackup-1.1.15/src/borg/algorithms/zstd/lib/legacy/zstd_v05.h0000644000175000017500000001656513771325506024440 0ustar useruser00000000000000/* * Copyright (c) 2016-2020, Yann Collet, Facebook, Inc. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the * LICENSE file in the root directory of this source tree) and the GPLv2 (found * in the COPYING file in the root directory of this source tree). * You may select, at your option, one of the above-listed licenses. */ #ifndef ZSTDv05_H #define ZSTDv05_H #if defined (__cplusplus) extern "C" { #endif /*-************************************* * Dependencies ***************************************/ #include /* size_t */ #include "../common/mem.h" /* U64, U32 */ /* ************************************* * Simple functions ***************************************/ /*! ZSTDv05_decompress() : `compressedSize` : is the _exact_ size of the compressed blob, otherwise decompression will fail. 
`dstCapacity` must be large enough, equal or larger than originalSize. @return : the number of bytes decompressed into `dst` (<= `dstCapacity`), or an errorCode if it fails (which can be tested using ZSTDv05_isError()) */ size_t ZSTDv05_decompress( void* dst, size_t dstCapacity, const void* src, size_t compressedSize); /** ZSTDv05_findFrameSizeInfoLegacy() : get the source length and decompressed bound of a ZSTD frame compliant with v0.5.x format srcSize : The size of the 'src' buffer, at least as large as the frame pointed to by 'src' cSize (output parameter) : the number of bytes that would be read to decompress this frame or an error code if it fails (which can be tested using ZSTDv01_isError()) dBound (output parameter) : an upper-bound for the decompressed size of the data in the frame or ZSTD_CONTENTSIZE_ERROR if an error occurs note : assumes `cSize` and `dBound` are _not_ NULL. */ void ZSTDv05_findFrameSizeInfoLegacy(const void *src, size_t srcSize, size_t* cSize, unsigned long long* dBound); /* ************************************* * Helper functions ***************************************/ /* Error Management */ unsigned ZSTDv05_isError(size_t code); /*!< tells if a `size_t` function result is an error code */ const char* ZSTDv05_getErrorName(size_t code); /*!< provides readable string for an error code */ /* ************************************* * Explicit memory management ***************************************/ /** Decompression context */ typedef struct ZSTDv05_DCtx_s ZSTDv05_DCtx; ZSTDv05_DCtx* ZSTDv05_createDCtx(void); size_t ZSTDv05_freeDCtx(ZSTDv05_DCtx* dctx); /*!< @return : errorCode */ /** ZSTDv05_decompressDCtx() : * Same as ZSTDv05_decompress(), but requires an already allocated ZSTDv05_DCtx (see ZSTDv05_createDCtx()) */ size_t ZSTDv05_decompressDCtx(ZSTDv05_DCtx* ctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize); /*-*********************** * Simple Dictionary API *************************/ /*! ZSTDv05_decompress_usingDict() : * Decompression using a pre-defined Dictionary content (see dictBuilder). * Dictionary must be identical to the one used during compression, otherwise regenerated data will be corrupted. 
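* Illustrative sketch (not part of the original header), assuming `dctx`, the source/destination
* buffers and the dictionary have already been prepared by the caller :
*     size_t const r = ZSTDv05_decompress_usingDict(dctx, dst, dstCapacity, src, srcSize, dict, dictSize);
*     if (ZSTDv05_isError(r)) ...   // report failure (corrupted data or wrong dictionary)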
* Note : dict can be NULL, in which case, it's equivalent to ZSTDv05_decompressDCtx() */ size_t ZSTDv05_decompress_usingDict(ZSTDv05_DCtx* dctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize, const void* dict,size_t dictSize); /*-************************ * Advanced Streaming API ***************************/ typedef enum { ZSTDv05_fast, ZSTDv05_greedy, ZSTDv05_lazy, ZSTDv05_lazy2, ZSTDv05_btlazy2, ZSTDv05_opt, ZSTDv05_btopt } ZSTDv05_strategy; typedef struct { U64 srcSize; U32 windowLog; /* the only useful information to retrieve */ U32 contentLog; U32 hashLog; U32 searchLog; U32 searchLength; U32 targetLength; ZSTDv05_strategy strategy; } ZSTDv05_parameters; size_t ZSTDv05_getFrameParams(ZSTDv05_parameters* params, const void* src, size_t srcSize); size_t ZSTDv05_decompressBegin_usingDict(ZSTDv05_DCtx* dctx, const void* dict, size_t dictSize); void ZSTDv05_copyDCtx(ZSTDv05_DCtx* dstDCtx, const ZSTDv05_DCtx* srcDCtx); size_t ZSTDv05_nextSrcSizeToDecompress(ZSTDv05_DCtx* dctx); size_t ZSTDv05_decompressContinue(ZSTDv05_DCtx* dctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize); /*-*********************** * ZBUFF API *************************/ typedef struct ZBUFFv05_DCtx_s ZBUFFv05_DCtx; ZBUFFv05_DCtx* ZBUFFv05_createDCtx(void); size_t ZBUFFv05_freeDCtx(ZBUFFv05_DCtx* dctx); size_t ZBUFFv05_decompressInit(ZBUFFv05_DCtx* dctx); size_t ZBUFFv05_decompressInitDictionary(ZBUFFv05_DCtx* dctx, const void* dict, size_t dictSize); size_t ZBUFFv05_decompressContinue(ZBUFFv05_DCtx* dctx, void* dst, size_t* dstCapacityPtr, const void* src, size_t* srcSizePtr); /*-*************************************************************************** * Streaming decompression * * A ZBUFFv05_DCtx object is required to track streaming operations. * Use ZBUFFv05_createDCtx() and ZBUFFv05_freeDCtx() to create/release resources. * Use ZBUFFv05_decompressInit() to start a new decompression operation, * or ZBUFFv05_decompressInitDictionary() if decompression requires a dictionary. * Note that ZBUFFv05_DCtx objects can be reused multiple times. * * Use ZBUFFv05_decompressContinue() repetitively to consume your input. * *srcSizePtr and *dstCapacityPtr can be any size. * The function will report how many bytes were read or written by modifying *srcSizePtr and *dstCapacityPtr. * Note that it may not consume the entire input, in which case it's up to the caller to present remaining input again. * The content of @dst will be overwritten (up to *dstCapacityPtr) at each function call, so save its content if it matters or change @dst. * @return : a hint to preferred nb of bytes to use as input for next function call (it's only a hint, to help latency) * or 0 when a frame is completely decoded * or an error code, which can be tested using ZBUFFv05_isError(). * * Hint : recommended buffer sizes (not compulsory) : ZBUFFv05_recommendedDInSize() / ZBUFFv05_recommendedDOutSize() * output : ZBUFFv05_recommendedDOutSize==128 KB block size is the internal unit, it ensures it's always possible to write a full block when decoded. * input : ZBUFFv05_recommendedDInSize==128Kb+3; just follow indications from ZBUFFv05_decompressContinue() to minimize latency. It should always be <= 128 KB + 3 . 
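*
* Illustrative sketch (not part of the original header) : a minimal decompression loop.
* `readInput()`, `writeOutput()`, `inBuff` and `outBuff` are hypothetical caller-side helpers,
* and the sketch assumes each call consumes the whole input chunk; a real loop must re-present
* any unconsumed input as described above.
*
*     ZBUFFv05_DCtx* const zbd = ZBUFFv05_createDCtx();
*     ZBUFFv05_decompressInit(zbd);
*     for (;;) {
*         size_t srcSize = readInput(inBuff, ZBUFFv05_recommendedDInSize());
*         size_t dstSize = ZBUFFv05_recommendedDOutSize();
*         size_t const hint = ZBUFFv05_decompressContinue(zbd, outBuff, &dstSize, inBuff, &srcSize);
*         if (ZBUFFv05_isError(hint)) break;   // corrupted or incomplete input
*         writeOutput(outBuff, dstSize);       // dstSize now holds the nb of bytes written into outBuff
*         if (hint == 0) break;                // a frame is completely decoded
*     }
*     ZBUFFv05_freeDCtx(zbd);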
* *******************************************************************************/ /* ************************************* * Tool functions ***************************************/ unsigned ZBUFFv05_isError(size_t errorCode); const char* ZBUFFv05_getErrorName(size_t errorCode); /** Functions below provide recommended buffer sizes for Compression or Decompression operations. * These sizes are just hints, and tend to offer better latency */ size_t ZBUFFv05_recommendedDInSize(void); size_t ZBUFFv05_recommendedDOutSize(void); /*-************************************* * Constants ***************************************/ #define ZSTDv05_MAGICNUMBER 0xFD2FB525 /* v0.5 */ #if defined (__cplusplus) } #endif #endif /* ZSTDv0505_H */ borgbackup-1.1.15/src/borg/algorithms/zstd/lib/legacy/zstd_v04.c0000644000175000017500000041025513771325506024424 0ustar useruser00000000000000/* * Copyright (c) 2016-2020, Yann Collet, Facebook, Inc. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the * LICENSE file in the root directory of this source tree) and the GPLv2 (found * in the COPYING file in the root directory of this source tree). * You may select, at your option, one of the above-listed licenses. */ /****************************************** * Includes ******************************************/ #include /* size_t, ptrdiff_t */ #include /* memcpy */ #include "zstd_v04.h" #include "../common/error_private.h" /* ****************************************************************** * mem.h *******************************************************************/ #ifndef MEM_H_MODULE #define MEM_H_MODULE #if defined (__cplusplus) extern "C" { #endif /****************************************** * Compiler-specific ******************************************/ #if defined(_MSC_VER) /* Visual Studio */ # include /* _byteswap_ulong */ # include /* _byteswap_* */ #endif #if defined(__GNUC__) # define MEM_STATIC static __attribute__((unused)) #elif defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) # define MEM_STATIC static inline #elif defined(_MSC_VER) # define MEM_STATIC static __inline #else # define MEM_STATIC static /* this version may generate warnings for unused static functions; disable the relevant warning */ #endif /**************************************************************** * Basic Types *****************************************************************/ #if defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) # include typedef uint8_t BYTE; typedef uint16_t U16; typedef int16_t S16; typedef uint32_t U32; typedef int32_t S32; typedef uint64_t U64; typedef int64_t S64; #else typedef unsigned char BYTE; typedef unsigned short U16; typedef signed short S16; typedef unsigned int U32; typedef signed int S32; typedef unsigned long long U64; typedef signed long long S64; #endif /*-************************************* * Debug ***************************************/ #include "debug.h" #ifndef assert # define assert(condition) ((void)0) #endif /**************************************************************** * Memory I/O *****************************************************************/ /* MEM_FORCE_MEMORY_ACCESS * By default, access to unaligned memory is controlled by `memcpy()`, which is safe and portable. * Unfortunately, on some target/compiler combinations, the generated assembly is sub-optimal. 
* The below switch allow to select different access method for improved performance. * Method 0 (default) : use `memcpy()`. Safe and portable. * Method 1 : `__packed` statement. It depends on compiler extension (ie, not portable). * This method is safe if your compiler supports it, and *generally* as fast or faster than `memcpy`. * Method 2 : direct access. This method is portable but violate C standard. * It can generate buggy code on targets generating assembly depending on alignment. * But in some circumstances, it's the only known way to get the most performance (ie GCC + ARMv6) * See http://fastcompression.blogspot.fr/2015/08/accessing-unaligned-memory.html for details. * Prefer these methods in priority order (0 > 1 > 2) */ #ifndef MEM_FORCE_MEMORY_ACCESS /* can be defined externally, on command line for example */ # if defined(__GNUC__) && ( defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) || defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6Z__) || defined(__ARM_ARCH_6ZK__) || defined(__ARM_ARCH_6T2__) ) # define MEM_FORCE_MEMORY_ACCESS 2 # elif (defined(__INTEL_COMPILER) && !defined(WIN32)) || \ (defined(__GNUC__) && ( defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__) || defined(__ARM_ARCH_7R__) || defined(__ARM_ARCH_7M__) || defined(__ARM_ARCH_7S__) )) # define MEM_FORCE_MEMORY_ACCESS 1 # endif #endif MEM_STATIC unsigned MEM_32bits(void) { return sizeof(void*)==4; } MEM_STATIC unsigned MEM_64bits(void) { return sizeof(void*)==8; } MEM_STATIC unsigned MEM_isLittleEndian(void) { const union { U32 u; BYTE c[4]; } one = { 1 }; /* don't use static : performance detrimental */ return one.c[0]; } #if defined(MEM_FORCE_MEMORY_ACCESS) && (MEM_FORCE_MEMORY_ACCESS==2) /* violates C standard on structure alignment. Only use if no other choice to achieve best performance on target platform */ MEM_STATIC U16 MEM_read16(const void* memPtr) { return *(const U16*) memPtr; } MEM_STATIC U32 MEM_read32(const void* memPtr) { return *(const U32*) memPtr; } MEM_STATIC U64 MEM_read64(const void* memPtr) { return *(const U64*) memPtr; } MEM_STATIC void MEM_write16(void* memPtr, U16 value) { *(U16*)memPtr = value; } #elif defined(MEM_FORCE_MEMORY_ACCESS) && (MEM_FORCE_MEMORY_ACCESS==1) /* __pack instructions are safer, but compiler specific, hence potentially problematic for some compilers */ /* currently only defined for gcc and icc */ typedef union { U16 u16; U32 u32; U64 u64; } __attribute__((packed)) unalign; MEM_STATIC U16 MEM_read16(const void* ptr) { return ((const unalign*)ptr)->u16; } MEM_STATIC U32 MEM_read32(const void* ptr) { return ((const unalign*)ptr)->u32; } MEM_STATIC U64 MEM_read64(const void* ptr) { return ((const unalign*)ptr)->u64; } MEM_STATIC void MEM_write16(void* memPtr, U16 value) { ((unalign*)memPtr)->u16 = value; } #else /* default method, safe and standard. 
can sometimes prove slower */ MEM_STATIC U16 MEM_read16(const void* memPtr) { U16 val; memcpy(&val, memPtr, sizeof(val)); return val; } MEM_STATIC U32 MEM_read32(const void* memPtr) { U32 val; memcpy(&val, memPtr, sizeof(val)); return val; } MEM_STATIC U64 MEM_read64(const void* memPtr) { U64 val; memcpy(&val, memPtr, sizeof(val)); return val; } MEM_STATIC void MEM_write16(void* memPtr, U16 value) { memcpy(memPtr, &value, sizeof(value)); } #endif /* MEM_FORCE_MEMORY_ACCESS */ MEM_STATIC U16 MEM_readLE16(const void* memPtr) { if (MEM_isLittleEndian()) return MEM_read16(memPtr); else { const BYTE* p = (const BYTE*)memPtr; return (U16)(p[0] + (p[1]<<8)); } } MEM_STATIC void MEM_writeLE16(void* memPtr, U16 val) { if (MEM_isLittleEndian()) { MEM_write16(memPtr, val); } else { BYTE* p = (BYTE*)memPtr; p[0] = (BYTE)val; p[1] = (BYTE)(val>>8); } } MEM_STATIC U32 MEM_readLE24(const void* memPtr) { return MEM_readLE16(memPtr) + (((const BYTE*)memPtr)[2] << 16); } MEM_STATIC U32 MEM_readLE32(const void* memPtr) { if (MEM_isLittleEndian()) return MEM_read32(memPtr); else { const BYTE* p = (const BYTE*)memPtr; return (U32)((U32)p[0] + ((U32)p[1]<<8) + ((U32)p[2]<<16) + ((U32)p[3]<<24)); } } MEM_STATIC U64 MEM_readLE64(const void* memPtr) { if (MEM_isLittleEndian()) return MEM_read64(memPtr); else { const BYTE* p = (const BYTE*)memPtr; return (U64)((U64)p[0] + ((U64)p[1]<<8) + ((U64)p[2]<<16) + ((U64)p[3]<<24) + ((U64)p[4]<<32) + ((U64)p[5]<<40) + ((U64)p[6]<<48) + ((U64)p[7]<<56)); } } MEM_STATIC size_t MEM_readLEST(const void* memPtr) { if (MEM_32bits()) return (size_t)MEM_readLE32(memPtr); else return (size_t)MEM_readLE64(memPtr); } #if defined (__cplusplus) } #endif #endif /* MEM_H_MODULE */ /* zstd - standard compression library Header File for static linking only */ #ifndef ZSTD_STATIC_H #define ZSTD_STATIC_H /* ************************************* * Types ***************************************/ #define ZSTD_WINDOWLOG_ABSOLUTEMIN 11 /** from faster to stronger */ typedef enum { ZSTD_fast, ZSTD_greedy, ZSTD_lazy, ZSTD_lazy2, ZSTD_btlazy2 } ZSTD_strategy; typedef struct { U64 srcSize; /* optional : tells how much bytes are present in the frame. Use 0 if not known. 
*/ U32 windowLog; /* largest match distance : larger == more compression, more memory needed during decompression */ U32 contentLog; /* full search segment : larger == more compression, slower, more memory (useless for fast) */ U32 hashLog; /* dispatch table : larger == more memory, faster */ U32 searchLog; /* nb of searches : larger == more compression, slower */ U32 searchLength; /* size of matches : larger == faster decompression, sometimes less compression */ ZSTD_strategy strategy; } ZSTD_parameters; typedef ZSTDv04_Dctx ZSTD_DCtx; /* ************************************* * Advanced functions ***************************************/ /** ZSTD_decompress_usingDict * Same as ZSTD_decompressDCtx, using a Dictionary content as prefix * Note : dict can be NULL, in which case, it's equivalent to ZSTD_decompressDCtx() */ static size_t ZSTD_decompress_usingDict(ZSTD_DCtx* ctx, void* dst, size_t maxDstSize, const void* src, size_t srcSize, const void* dict,size_t dictSize); /* ************************************** * Streaming functions (direct mode) ****************************************/ static size_t ZSTD_resetDCtx(ZSTD_DCtx* dctx); static size_t ZSTD_getFrameParams(ZSTD_parameters* params, const void* src, size_t srcSize); static void ZSTD_decompress_insertDictionary(ZSTD_DCtx* ctx, const void* src, size_t srcSize); static size_t ZSTD_nextSrcSizeToDecompress(ZSTD_DCtx* dctx); static size_t ZSTD_decompressContinue(ZSTD_DCtx* dctx, void* dst, size_t maxDstSize, const void* src, size_t srcSize); /** Streaming decompression, bufferless mode A ZSTD_DCtx object is required to track streaming operations. Use ZSTD_createDCtx() / ZSTD_freeDCtx() to manage it. A ZSTD_DCtx object can be re-used multiple times. Use ZSTD_resetDCtx() to return to fresh status. First operation is to retrieve frame parameters, using ZSTD_getFrameParams(). This function doesn't consume its input. It needs enough input data to properly decode the frame header. Objective is to retrieve *params.windowlog, to know minimum amount of memory required during decoding. Result : 0 when successful, it means the ZSTD_parameters structure has been filled. >0 : means there is not enough data into src. Provides the expected size to successfully decode header. errorCode, which can be tested using ZSTD_isError() (For example, if it's not a ZSTD header) Then, you can optionally insert a dictionary. This operation must mimic the compressor behavior, otherwise decompression will fail or be corrupted. Then it's possible to start decompression. Use ZSTD_nextSrcSizeToDecompress() and ZSTD_decompressContinue() alternatively. ZSTD_nextSrcSizeToDecompress() tells how much bytes to provide as 'srcSize' to ZSTD_decompressContinue(). ZSTD_decompressContinue() requires this exact amount of bytes, or it will fail. ZSTD_decompressContinue() needs previous data blocks during decompression, up to (1 << windowlog). They should preferably be located contiguously, prior to current block. Alternatively, a round buffer is also possible. @result of ZSTD_decompressContinue() is the number of bytes regenerated within 'dst'. It can be zero, which is not an error; it just means ZSTD_decompressContinue() has decoded some header. A frame is fully decoded when ZSTD_nextSrcSizeToDecompress() returns zero. Context can then be reset to start a new decompression. 
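
   Illustrative sketch (not part of the original header) : a minimal bufferless decompression loop.
   `dctx`, the input cursor `ip`/`iend` and the output cursor `op`/`oend` are assumed to be set up
   by the caller; dictionary insertion and most error handling are omitted.

       ZSTD_parameters params;
       size_t res = ZSTD_getFrameParams(&params, ip, iend-ip);
       if (res != 0) ...                        // error, or not enough data to decode the frame header
       ZSTD_resetDCtx(dctx);
       while (1) {
           size_t const toRead = ZSTD_nextSrcSizeToDecompress(dctx);
           if (toRead == 0) break;              // frame fully decoded
           { size_t const gen = ZSTD_decompressContinue(dctx, op, oend-op, ip, toRead);
             if (ZSTD_isError(gen)) ...         // corrupted data
             ip += toRead; op += gen; }
       }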
*/ #endif /* ZSTD_STATIC_H */ /* zstd_internal - common functions to include Header File for include */ #ifndef ZSTD_CCOMMON_H_MODULE #define ZSTD_CCOMMON_H_MODULE /* ************************************* * Common macros ***************************************/ #define MIN(a,b) ((a)<(b) ? (a) : (b)) #define MAX(a,b) ((a)>(b) ? (a) : (b)) /* ************************************* * Common constants ***************************************/ #define ZSTD_MAGICNUMBER 0xFD2FB524 /* v0.4 */ #define KB *(1 <<10) #define MB *(1 <<20) #define GB *(1U<<30) #define BLOCKSIZE (128 KB) /* define, for static allocation */ static const size_t ZSTD_blockHeaderSize = 3; static const size_t ZSTD_frameHeaderSize_min = 5; #define ZSTD_frameHeaderSize_max 5 /* define, for static allocation */ #define BIT7 128 #define BIT6 64 #define BIT5 32 #define BIT4 16 #define BIT1 2 #define BIT0 1 #define IS_RAW BIT0 #define IS_RLE BIT1 #define MINMATCH 4 #define REPCODE_STARTVALUE 4 #define MLbits 7 #define LLbits 6 #define Offbits 5 #define MaxML ((1< /* size_t, ptrdiff_t */ /* ***************************************** * FSE simple functions ******************************************/ static size_t FSE_decompress(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize); /*! FSE_decompress(): Decompress FSE data from buffer 'cSrc', of size 'cSrcSize', into already allocated destination buffer 'dst', of size 'maxDstSize'. return : size of regenerated data (<= maxDstSize) or an error code, which can be tested using FSE_isError() ** Important ** : FSE_decompress() doesn't decompress non-compressible nor RLE data !!! Why ? : making this distinction requires a header. Header management is intentionally delegated to the user layer, which can better manage special cases. */ /* ***************************************** * Tool functions ******************************************/ /* Error Management */ static unsigned FSE_isError(size_t code); /* tells if a return value is an error code */ /* ***************************************** * FSE detailed API ******************************************/ /*! FSE_compress() does the following: 1. count symbol occurrence from source[] into table count[] 2. normalize counters so that sum(count[]) == Power_of_2 (2^tableLog) 3. save normalized counters to memory buffer using writeNCount() 4. build encoding table 'CTable' from normalized counters 5. encode the data stream using encoding table 'CTable' FSE_decompress() does the following: 1. read normalized counters with readNCount() 2. build decoding table 'DTable' from normalized counters 3. decode the data stream using decoding table 'DTable' The following API allows targeting specific sub-functions for advanced tasks. For example, it's possible to compress several blocks using the same 'CTable', or to save and provide normalized distribution using external method. */ /* *** DECOMPRESSION *** */ /*! FSE_readNCount(): Read compactly saved 'normalizedCounter' from 'rBuffer'. return : size read from 'rBuffer' or an errorCode, which can be tested using FSE_isError() maxSymbolValuePtr[0] and tableLogPtr[0] will also be updated with their respective values */ static size_t FSE_readNCount (short* normalizedCounter, unsigned* maxSymbolValuePtr, unsigned* tableLogPtr, const void* rBuffer, size_t rBuffSize); /*! Constructor and Destructor of type FSE_DTable Note that its size depends on 'tableLog' */ typedef unsigned FSE_DTable; /* don't allocate that. It's just a way to be more restrictive than void* */ /*! 
FSE_buildDTable(): Builds 'dt', which must be already allocated, using FSE_createDTable() return : 0, or an errorCode, which can be tested using FSE_isError() */ static size_t FSE_buildDTable ( FSE_DTable* dt, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog); /*! FSE_decompress_usingDTable(): Decompress compressed source 'cSrc' of size 'cSrcSize' using 'dt' into 'dst' which must be already allocated. return : size of regenerated data (necessarily <= maxDstSize) or an errorCode, which can be tested using FSE_isError() */ static size_t FSE_decompress_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const FSE_DTable* dt); /*! Tutorial : ---------- (Note : these functions only decompress FSE-compressed blocks. If block is uncompressed, use memcpy() instead If block is a single repeated byte, use memset() instead ) The first step is to obtain the normalized frequencies of symbols. This can be performed by FSE_readNCount() if it was saved using FSE_writeNCount(). 'normalizedCounter' must be already allocated, and have at least 'maxSymbolValuePtr[0]+1' cells of signed short. In practice, that means it's necessary to know 'maxSymbolValue' beforehand, or size the table to handle worst case situations (typically 256). FSE_readNCount() will provide 'tableLog' and 'maxSymbolValue'. The result of FSE_readNCount() is the number of bytes read from 'rBuffer'. Note that 'rBufferSize' must be at least 4 bytes, even if useful information is less than that. If there is an error, the function will return an error code, which can be tested using FSE_isError(). The next step is to build the decompression tables 'FSE_DTable' from 'normalizedCounter'. This is performed by the function FSE_buildDTable(). The space required by 'FSE_DTable' must be already allocated using FSE_createDTable(). If there is an error, the function will return an error code, which can be tested using FSE_isError(). 'FSE_DTable' can then be used to decompress 'cSrc', with FSE_decompress_usingDTable(). 'cSrcSize' must be strictly correct, otherwise decompression will fail. FSE_decompress_usingDTable() result will tell how many bytes were regenerated (<=maxDstSize). If there is an error, the function will return an error code, which can be tested using FSE_isError(). (ex: dst buffer too small) */ #if defined (__cplusplus) } #endif #endif /* FSE_H */ /* ****************************************************************** bitstream Part of NewGen Entropy library header file (to include) Copyright (C) 2013-2015, Yann Collet. BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. You can contact the author at : - Source repository : https://github.com/Cyan4973/FiniteStateEntropy - Public forum : https://groups.google.com/forum/#!forum/lz4c ****************************************************************** */ #ifndef BITSTREAM_H_MODULE #define BITSTREAM_H_MODULE #if defined (__cplusplus) extern "C" { #endif /* * This API consists of small unitary functions, which highly benefit from being inlined. * Since link-time-optimization is not available for all compilers, * these functions are defined into a .h to be included. */ /********************************************** * bitStream decompression API (read backward) **********************************************/ typedef struct { size_t bitContainer; unsigned bitsConsumed; const char* ptr; const char* start; } BIT_DStream_t; typedef enum { BIT_DStream_unfinished = 0, BIT_DStream_endOfBuffer = 1, BIT_DStream_completed = 2, BIT_DStream_overflow = 3 } BIT_DStream_status; /* result of BIT_reloadDStream() */ /* 1,2,4,8 would be better for bitmap combinations, but slows down performance a bit ... :( */ MEM_STATIC size_t BIT_initDStream(BIT_DStream_t* bitD, const void* srcBuffer, size_t srcSize); MEM_STATIC size_t BIT_readBits(BIT_DStream_t* bitD, unsigned nbBits); MEM_STATIC BIT_DStream_status BIT_reloadDStream(BIT_DStream_t* bitD); MEM_STATIC unsigned BIT_endOfDStream(const BIT_DStream_t* bitD); /****************************************** * unsafe API ******************************************/ MEM_STATIC size_t BIT_readBitsFast(BIT_DStream_t* bitD, unsigned nbBits); /* faster, but works only if nbBits >= 1 */ /**************************************************************** * Helper functions ****************************************************************/ MEM_STATIC unsigned BIT_highbit32 (U32 val) { # if defined(_MSC_VER) /* Visual */ unsigned long r=0; _BitScanReverse ( &r, val ); return (unsigned) r; # elif defined(__GNUC__) && (__GNUC__ >= 3) /* Use GCC Intrinsic */ return __builtin_clz (val) ^ 31; # else /* Software version */ static const unsigned DeBruijnClz[32] = { 0, 9, 1, 10, 13, 21, 2, 29, 11, 14, 16, 18, 22, 25, 3, 30, 8, 12, 20, 28, 15, 17, 24, 7, 19, 27, 23, 6, 26, 5, 4, 31 }; U32 v = val; unsigned r; v |= v >> 1; v |= v >> 2; v |= v >> 4; v |= v >> 8; v |= v >> 16; r = DeBruijnClz[ (U32) (v * 0x07C4ACDDU) >> 27]; return r; # endif } /********************************************************** * bitStream decoding **********************************************************/ /*!BIT_initDStream * Initialize a BIT_DStream_t. 
* @bitD : a pointer to an already allocated BIT_DStream_t structure * @srcBuffer must point at the beginning of a bitStream * @srcSize must be the exact size of the bitStream * @result : size of stream (== srcSize) or an errorCode if a problem is detected */ MEM_STATIC size_t BIT_initDStream(BIT_DStream_t* bitD, const void* srcBuffer, size_t srcSize) { if (srcSize < 1) { memset(bitD, 0, sizeof(*bitD)); return ERROR(srcSize_wrong); } if (srcSize >= sizeof(size_t)) /* normal case */ { U32 contain32; bitD->start = (const char*)srcBuffer; bitD->ptr = (const char*)srcBuffer + srcSize - sizeof(size_t); bitD->bitContainer = MEM_readLEST(bitD->ptr); contain32 = ((const BYTE*)srcBuffer)[srcSize-1]; if (contain32 == 0) return ERROR(GENERIC); /* endMark not present */ bitD->bitsConsumed = 8 - BIT_highbit32(contain32); } else { U32 contain32; bitD->start = (const char*)srcBuffer; bitD->ptr = bitD->start; bitD->bitContainer = *(const BYTE*)(bitD->start); switch(srcSize) { case 7: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[6]) << (sizeof(size_t)*8 - 16);/* fall-through */ case 6: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[5]) << (sizeof(size_t)*8 - 24);/* fall-through */ case 5: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[4]) << (sizeof(size_t)*8 - 32);/* fall-through */ case 4: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[3]) << 24; /* fall-through */ case 3: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[2]) << 16; /* fall-through */ case 2: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[1]) << 8; /* fall-through */ default: break; } contain32 = ((const BYTE*)srcBuffer)[srcSize-1]; if (contain32 == 0) return ERROR(GENERIC); /* endMark not present */ bitD->bitsConsumed = 8 - BIT_highbit32(contain32); bitD->bitsConsumed += (U32)(sizeof(size_t) - srcSize)*8; } return srcSize; } MEM_STATIC size_t BIT_lookBits(BIT_DStream_t* bitD, U32 nbBits) { const U32 bitMask = sizeof(bitD->bitContainer)*8 - 1; return ((bitD->bitContainer << (bitD->bitsConsumed & bitMask)) >> 1) >> ((bitMask-nbBits) & bitMask); } /*! 
BIT_lookBitsFast : * unsafe version; only works only if nbBits >= 1 */ MEM_STATIC size_t BIT_lookBitsFast(BIT_DStream_t* bitD, U32 nbBits) { const U32 bitMask = sizeof(bitD->bitContainer)*8 - 1; return (bitD->bitContainer << (bitD->bitsConsumed & bitMask)) >> (((bitMask+1)-nbBits) & bitMask); } MEM_STATIC void BIT_skipBits(BIT_DStream_t* bitD, U32 nbBits) { bitD->bitsConsumed += nbBits; } MEM_STATIC size_t BIT_readBits(BIT_DStream_t* bitD, U32 nbBits) { size_t value = BIT_lookBits(bitD, nbBits); BIT_skipBits(bitD, nbBits); return value; } /*!BIT_readBitsFast : * unsafe version; only works only if nbBits >= 1 */ MEM_STATIC size_t BIT_readBitsFast(BIT_DStream_t* bitD, U32 nbBits) { size_t value = BIT_lookBitsFast(bitD, nbBits); BIT_skipBits(bitD, nbBits); return value; } MEM_STATIC BIT_DStream_status BIT_reloadDStream(BIT_DStream_t* bitD) { if (bitD->bitsConsumed > (sizeof(bitD->bitContainer)*8)) /* should never happen */ return BIT_DStream_overflow; if (bitD->ptr >= bitD->start + sizeof(bitD->bitContainer)) { bitD->ptr -= bitD->bitsConsumed >> 3; bitD->bitsConsumed &= 7; bitD->bitContainer = MEM_readLEST(bitD->ptr); return BIT_DStream_unfinished; } if (bitD->ptr == bitD->start) { if (bitD->bitsConsumed < sizeof(bitD->bitContainer)*8) return BIT_DStream_endOfBuffer; return BIT_DStream_completed; } { U32 nbBytes = bitD->bitsConsumed >> 3; BIT_DStream_status result = BIT_DStream_unfinished; if (bitD->ptr - nbBytes < bitD->start) { nbBytes = (U32)(bitD->ptr - bitD->start); /* ptr > start */ result = BIT_DStream_endOfBuffer; } bitD->ptr -= nbBytes; bitD->bitsConsumed -= nbBytes*8; bitD->bitContainer = MEM_readLEST(bitD->ptr); /* reminder : srcSize > sizeof(bitD) */ return result; } } /*! BIT_endOfDStream * @return Tells if DStream has reached its exact end */ MEM_STATIC unsigned BIT_endOfDStream(const BIT_DStream_t* DStream) { return ((DStream->ptr == DStream->start) && (DStream->bitsConsumed == sizeof(DStream->bitContainer)*8)); } #if defined (__cplusplus) } #endif #endif /* BITSTREAM_H_MODULE */ /* ****************************************************************** FSE : Finite State Entropy coder header file for static linking (only) Copyright (C) 2013-2015, Yann Collet BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
You can contact the author at : - Source repository : https://github.com/Cyan4973/FiniteStateEntropy - Public forum : https://groups.google.com/forum/#!forum/lz4c ****************************************************************** */ #ifndef FSE_STATIC_H #define FSE_STATIC_H #if defined (__cplusplus) extern "C" { #endif /* ***************************************** * Static allocation *******************************************/ /* FSE buffer bounds */ #define FSE_NCOUNTBOUND 512 #define FSE_BLOCKBOUND(size) (size + (size>>7)) #define FSE_COMPRESSBOUND(size) (FSE_NCOUNTBOUND + FSE_BLOCKBOUND(size)) /* Macro version, useful for static allocation */ /* It is possible to statically allocate FSE CTable/DTable as a table of unsigned using below macros */ #define FSE_CTABLE_SIZE_U32(maxTableLog, maxSymbolValue) (1 + (1<<(maxTableLog-1)) + ((maxSymbolValue+1)*2)) #define FSE_DTABLE_SIZE_U32(maxTableLog) (1 + (1<= 1 (otherwise, result will be corrupted) */ /* ***************************************** * Implementation of inlined functions *******************************************/ /* decompression */ typedef struct { U16 tableLog; U16 fastMode; } FSE_DTableHeader; /* sizeof U32 */ typedef struct { unsigned short newState; unsigned char symbol; unsigned char nbBits; } FSE_decode_t; /* size == U32 */ MEM_STATIC void FSE_initDState(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD, const FSE_DTable* dt) { FSE_DTableHeader DTableH; memcpy(&DTableH, dt, sizeof(DTableH)); DStatePtr->state = BIT_readBits(bitD, DTableH.tableLog); BIT_reloadDStream(bitD); DStatePtr->table = dt + 1; } MEM_STATIC BYTE FSE_decodeSymbol(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD) { const FSE_decode_t DInfo = ((const FSE_decode_t*)(DStatePtr->table))[DStatePtr->state]; const U32 nbBits = DInfo.nbBits; BYTE symbol = DInfo.symbol; size_t lowBits = BIT_readBits(bitD, nbBits); DStatePtr->state = DInfo.newState + lowBits; return symbol; } MEM_STATIC BYTE FSE_decodeSymbolFast(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD) { const FSE_decode_t DInfo = ((const FSE_decode_t*)(DStatePtr->table))[DStatePtr->state]; const U32 nbBits = DInfo.nbBits; BYTE symbol = DInfo.symbol; size_t lowBits = BIT_readBitsFast(bitD, nbBits); DStatePtr->state = DInfo.newState + lowBits; return symbol; } MEM_STATIC unsigned FSE_endOfDState(const FSE_DState_t* DStatePtr) { return DStatePtr->state == 0; } #if defined (__cplusplus) } #endif #endif /* FSE_STATIC_H */ /* ****************************************************************** FSE : Finite State Entropy coder Copyright (C) 2013-2015, Yann Collet. BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. You can contact the author at : - FSE source repository : https://github.com/Cyan4973/FiniteStateEntropy - Public forum : https://groups.google.com/forum/#!forum/lz4c ****************************************************************** */ #ifndef FSE_COMMONDEFS_ONLY /* ************************************************************** * Tuning parameters ****************************************************************/ /*!MEMORY_USAGE : * Memory usage formula : N->2^N Bytes (examples : 10 -> 1KB; 12 -> 4KB ; 16 -> 64KB; 20 -> 1MB; etc.) * Increasing memory usage improves compression ratio * Reduced memory usage can improve speed, due to cache effect * Recommended max value is 14, for 16KB, which nicely fits into Intel x86 L1 cache */ #define FSE_MAX_MEMORY_USAGE 14 #define FSE_DEFAULT_MEMORY_USAGE 13 /*!FSE_MAX_SYMBOL_VALUE : * Maximum symbol value authorized. * Required for proper stack allocation */ #define FSE_MAX_SYMBOL_VALUE 255 /* ************************************************************** * template functions type & suffix ****************************************************************/ #define FSE_FUNCTION_TYPE BYTE #define FSE_FUNCTION_EXTENSION #define FSE_DECODE_TYPE FSE_decode_t #endif /* !FSE_COMMONDEFS_ONLY */ /* ************************************************************** * Compiler specifics ****************************************************************/ #ifdef _MSC_VER /* Visual Studio */ # define FORCE_INLINE static __forceinline # include /* For Visual 2005 */ # pragma warning(disable : 4127) /* disable: C4127: conditional expression is constant */ # pragma warning(disable : 4214) /* disable: C4214: non-int bitfields */ #else # if defined (__cplusplus) || defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L /* C99 */ # ifdef __GNUC__ # define FORCE_INLINE static inline __attribute__((always_inline)) # else # define FORCE_INLINE static inline # endif # else # define FORCE_INLINE static # endif /* __STDC_VERSION__ */ #endif /* ************************************************************** * Dependencies ****************************************************************/ #include /* malloc, free, qsort */ #include /* memcpy, memset */ #include /* printf (debug) */ /* *************************************************************** * Constants *****************************************************************/ #define FSE_MAX_TABLELOG (FSE_MAX_MEMORY_USAGE-2) #define FSE_MAX_TABLESIZE (1U< FSE_TABLELOG_ABSOLUTE_MAX #error "FSE_MAX_TABLELOG > FSE_TABLELOG_ABSOLUTE_MAX is not supported" #endif /* ************************************************************** * Error Management ****************************************************************/ #define FSE_STATIC_ASSERT(c) { enum { FSE_static_assert = 1/(int)(!!(c)) }; } /* use only *after* variable declarations */ /* ************************************************************** * Complex types ****************************************************************/ typedef U32 
DTable_max_t[FSE_DTABLE_SIZE_U32(FSE_MAX_TABLELOG)]; /*-************************************************************** * Templates ****************************************************************/ /* designed to be included for type-specific functions (template emulation in C) Objective is to write these functions only once, for improved maintenance */ /* safety checks */ #ifndef FSE_FUNCTION_EXTENSION # error "FSE_FUNCTION_EXTENSION must be defined" #endif #ifndef FSE_FUNCTION_TYPE # error "FSE_FUNCTION_TYPE must be defined" #endif /* Function names */ #define FSE_CAT(X,Y) X##Y #define FSE_FUNCTION_NAME(X,Y) FSE_CAT(X,Y) #define FSE_TYPE_NAME(X,Y) FSE_CAT(X,Y) static U32 FSE_tableStep(U32 tableSize) { return (tableSize>>1) + (tableSize>>3) + 3; } static size_t FSE_buildDTable(FSE_DTable* dt, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog) { FSE_DTableHeader DTableH; void* const tdPtr = dt+1; /* because dt is unsigned, 32-bits aligned on 32-bits */ FSE_DECODE_TYPE* const tableDecode = (FSE_DECODE_TYPE*) (tdPtr); const U32 tableSize = 1 << tableLog; const U32 tableMask = tableSize-1; const U32 step = FSE_tableStep(tableSize); U16 symbolNext[FSE_MAX_SYMBOL_VALUE+1]; U32 position = 0; U32 highThreshold = tableSize-1; const S16 largeLimit= (S16)(1 << (tableLog-1)); U32 noLarge = 1; U32 s; /* Sanity Checks */ if (maxSymbolValue > FSE_MAX_SYMBOL_VALUE) return ERROR(maxSymbolValue_tooLarge); if (tableLog > FSE_MAX_TABLELOG) return ERROR(tableLog_tooLarge); /* Init, lay down lowprob symbols */ memset(tableDecode, 0, sizeof(FSE_DECODE_TYPE) * (maxSymbolValue+1) ); /* useless init, but keep static analyzer happy, and we don't need to performance optimize legacy decoders */ DTableH.tableLog = (U16)tableLog; for (s=0; s<=maxSymbolValue; s++) { if (normalizedCounter[s]==-1) { tableDecode[highThreshold--].symbol = (FSE_FUNCTION_TYPE)s; symbolNext[s] = 1; } else { if (normalizedCounter[s] >= largeLimit) noLarge=0; symbolNext[s] = normalizedCounter[s]; } } /* Spread symbols */ for (s=0; s<=maxSymbolValue; s++) { int i; for (i=0; i highThreshold) position = (position + step) & tableMask; /* lowprob area */ } } if (position!=0) return ERROR(GENERIC); /* position must reach all cells once, otherwise normalizedCounter is incorrect */ /* Build Decoding table */ { U32 i; for (i=0; i FSE_TABLELOG_ABSOLUTE_MAX) return ERROR(tableLog_tooLarge); bitStream >>= 4; bitCount = 4; *tableLogPtr = nbBits; remaining = (1<1) && (charnum<=*maxSVPtr)) { if (previous0) { unsigned n0 = charnum; while ((bitStream & 0xFFFF) == 0xFFFF) { n0+=24; if (ip < iend-5) { ip+=2; bitStream = MEM_readLE32(ip) >> bitCount; } else { bitStream >>= 16; bitCount+=16; } } while ((bitStream & 3) == 3) { n0+=3; bitStream>>=2; bitCount+=2; } n0 += bitStream & 3; bitCount += 2; if (n0 > *maxSVPtr) return ERROR(maxSymbolValue_tooSmall); while (charnum < n0) normalizedCounter[charnum++] = 0; if ((ip <= iend-7) || (ip + (bitCount>>3) <= iend-4)) { ip += bitCount>>3; bitCount &= 7; bitStream = MEM_readLE32(ip) >> bitCount; } else bitStream >>= 2; } { const short max = (short)((2*threshold-1)-remaining); short count; if ((bitStream & (threshold-1)) < (U32)max) { count = (short)(bitStream & (threshold-1)); bitCount += nbBits-1; } else { count = (short)(bitStream & (2*threshold-1)); if (count >= threshold) count -= max; bitCount += nbBits; } count--; /* extra accuracy */ remaining -= FSE_abs(count); normalizedCounter[charnum++] = count; previous0 = !count; while (remaining < threshold) { nbBits--; threshold >>= 1; } { if ((ip <= 
iend-7) || (ip + (bitCount>>3) <= iend-4)) { ip += bitCount>>3; bitCount &= 7; } else { bitCount -= (int)(8 * (iend - 4 - ip)); ip = iend - 4; } bitStream = MEM_readLE32(ip) >> (bitCount & 31); } } } if (remaining != 1) return ERROR(GENERIC); *maxSVPtr = charnum-1; ip += (bitCount+7)>>3; if ((size_t)(ip-istart) > hbSize) return ERROR(srcSize_wrong); return ip-istart; } /********************************************************* * Decompression (Byte symbols) *********************************************************/ static size_t FSE_buildDTable_rle (FSE_DTable* dt, BYTE symbolValue) { void* ptr = dt; FSE_DTableHeader* const DTableH = (FSE_DTableHeader*)ptr; void* dPtr = dt + 1; FSE_decode_t* const cell = (FSE_decode_t*)dPtr; DTableH->tableLog = 0; DTableH->fastMode = 0; cell->newState = 0; cell->symbol = symbolValue; cell->nbBits = 0; return 0; } static size_t FSE_buildDTable_raw (FSE_DTable* dt, unsigned nbBits) { void* ptr = dt; FSE_DTableHeader* const DTableH = (FSE_DTableHeader*)ptr; void* dPtr = dt + 1; FSE_decode_t* const dinfo = (FSE_decode_t*)dPtr; const unsigned tableSize = 1 << nbBits; const unsigned tableMask = tableSize - 1; const unsigned maxSymbolValue = tableMask; unsigned s; /* Sanity checks */ if (nbBits < 1) return ERROR(GENERIC); /* min size */ /* Build Decoding Table */ DTableH->tableLog = (U16)nbBits; DTableH->fastMode = 1; for (s=0; s<=maxSymbolValue; s++) { dinfo[s].newState = 0; dinfo[s].symbol = (BYTE)s; dinfo[s].nbBits = (BYTE)nbBits; } return 0; } FORCE_INLINE size_t FSE_decompress_usingDTable_generic( void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const FSE_DTable* dt, const unsigned fast) { BYTE* const ostart = (BYTE*) dst; BYTE* op = ostart; BYTE* const omax = op + maxDstSize; BYTE* const olimit = omax-3; BIT_DStream_t bitD; FSE_DState_t state1; FSE_DState_t state2; size_t errorCode; /* Init */ errorCode = BIT_initDStream(&bitD, cSrc, cSrcSize); /* replaced last arg by maxCompressed Size */ if (FSE_isError(errorCode)) return errorCode; FSE_initDState(&state1, &bitD, dt); FSE_initDState(&state2, &bitD, dt); #define FSE_GETSYMBOL(statePtr) fast ? FSE_decodeSymbolFast(statePtr, &bitD) : FSE_decodeSymbol(statePtr, &bitD) /* 4 symbols per loop */ for ( ; (BIT_reloadDStream(&bitD)==BIT_DStream_unfinished) && (op sizeof(bitD.bitContainer)*8) /* This test must be static */ BIT_reloadDStream(&bitD); op[1] = FSE_GETSYMBOL(&state2); if (FSE_MAX_TABLELOG*4+7 > sizeof(bitD.bitContainer)*8) /* This test must be static */ { if (BIT_reloadDStream(&bitD) > BIT_DStream_unfinished) { op+=2; break; } } op[2] = FSE_GETSYMBOL(&state1); if (FSE_MAX_TABLELOG*2+7 > sizeof(bitD.bitContainer)*8) /* This test must be static */ BIT_reloadDStream(&bitD); op[3] = FSE_GETSYMBOL(&state2); } /* tail */ /* note : BIT_reloadDStream(&bitD) >= FSE_DStream_partiallyFilled; Ends at exactly BIT_DStream_completed */ while (1) { if ( (BIT_reloadDStream(&bitD)>BIT_DStream_completed) || (op==omax) || (BIT_endOfDStream(&bitD) && (fast || FSE_endOfDState(&state1))) ) break; *op++ = FSE_GETSYMBOL(&state1); if ( (BIT_reloadDStream(&bitD)>BIT_DStream_completed) || (op==omax) || (BIT_endOfDStream(&bitD) && (fast || FSE_endOfDState(&state2))) ) break; *op++ = FSE_GETSYMBOL(&state2); } /* end ? 
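   Three outcomes are distinguished below : bitstream and both FSE states fully consumed = success
   (return the regenerated size) ; output buffer exhausted first = dstSize_tooSmall ; anything else
   is reported as corruption_detected.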
*/ if (BIT_endOfDStream(&bitD) && FSE_endOfDState(&state1) && FSE_endOfDState(&state2)) return op-ostart; if (op==omax) return ERROR(dstSize_tooSmall); /* dst buffer is full, but cSrc unfinished */ return ERROR(corruption_detected); } static size_t FSE_decompress_usingDTable(void* dst, size_t originalSize, const void* cSrc, size_t cSrcSize, const FSE_DTable* dt) { FSE_DTableHeader DTableH; U32 fastMode; memcpy(&DTableH, dt, sizeof(DTableH)); fastMode = DTableH.fastMode; /* select fast mode (static) */ if (fastMode) return FSE_decompress_usingDTable_generic(dst, originalSize, cSrc, cSrcSize, dt, 1); return FSE_decompress_usingDTable_generic(dst, originalSize, cSrc, cSrcSize, dt, 0); } static size_t FSE_decompress(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize) { const BYTE* const istart = (const BYTE*)cSrc; const BYTE* ip = istart; short counting[FSE_MAX_SYMBOL_VALUE+1]; DTable_max_t dt; /* Static analyzer seems unable to understand this table will be properly initialized later */ unsigned tableLog; unsigned maxSymbolValue = FSE_MAX_SYMBOL_VALUE; size_t errorCode; if (cSrcSize<2) return ERROR(srcSize_wrong); /* too small input size */ /* normal FSE decoding mode */ errorCode = FSE_readNCount (counting, &maxSymbolValue, &tableLog, istart, cSrcSize); if (FSE_isError(errorCode)) return errorCode; if (errorCode >= cSrcSize) return ERROR(srcSize_wrong); /* too small input size */ ip += errorCode; cSrcSize -= errorCode; errorCode = FSE_buildDTable (dt, counting, maxSymbolValue, tableLog); if (FSE_isError(errorCode)) return errorCode; /* always return, even if it is an error code */ return FSE_decompress_usingDTable (dst, maxDstSize, ip, cSrcSize, dt); } #endif /* FSE_COMMONDEFS_ONLY */ /* ****************************************************************** Huff0 : Huffman coder, part of New Generation Entropy library header file Copyright (C) 2013-2015, Yann Collet. BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
You can contact the author at : - Source repository : https://github.com/Cyan4973/FiniteStateEntropy - Public forum : https://groups.google.com/forum/#!forum/lz4c ****************************************************************** */ #ifndef HUFF0_H #define HUFF0_H #if defined (__cplusplus) extern "C" { #endif /* **************************************** * Dependency ******************************************/ #include /* size_t */ /* **************************************** * Huff0 simple functions ******************************************/ static size_t HUF_decompress(void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /*! HUF_decompress(): Decompress Huff0 data from buffer 'cSrc', of size 'cSrcSize', into already allocated destination buffer 'dst', of size 'dstSize'. 'dstSize' must be the exact size of original (uncompressed) data. Note : in contrast with FSE, HUF_decompress can regenerate RLE (cSrcSize==1) and uncompressed (cSrcSize==dstSize) data, because it knows size to regenerate. @return : size of regenerated data (== dstSize) or an error code, which can be tested using HUF_isError() */ /* **************************************** * Tool functions ******************************************/ /* Error Management */ static unsigned HUF_isError(size_t code); /* tells if a return value is an error code */ #if defined (__cplusplus) } #endif #endif /* HUFF0_H */ /* ****************************************************************** Huff0 : Huffman coder, part of New Generation Entropy library header file for static linking (only) Copyright (C) 2013-2015, Yann Collet BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
You can contact the author at : - Source repository : https://github.com/Cyan4973/FiniteStateEntropy - Public forum : https://groups.google.com/forum/#!forum/lz4c ****************************************************************** */ #ifndef HUFF0_STATIC_H #define HUFF0_STATIC_H #if defined (__cplusplus) extern "C" { #endif /* **************************************** * Static allocation macros ******************************************/ /* static allocation of Huff0's DTable */ #define HUF_DTABLE_SIZE(maxTableLog) (1 + (1<= 199901L) /* C99 */) /* inline is defined */ #elif defined(_MSC_VER) # define inline __inline #else # define inline /* disable inline */ #endif #ifdef _MSC_VER /* Visual Studio */ # pragma warning(disable : 4127) /* disable: C4127: conditional expression is constant */ #endif /* ************************************************************** * Includes ****************************************************************/ #include /* malloc, free, qsort */ #include /* memcpy, memset */ #include /* printf (debug) */ /* ************************************************************** * Constants ****************************************************************/ #define HUF_ABSOLUTEMAX_TABLELOG 16 /* absolute limit of HUF_MAX_TABLELOG. Beyond that value, code does not work */ #define HUF_MAX_TABLELOG 12 /* max configured tableLog (for static allocation); can be modified up to HUF_ABSOLUTEMAX_TABLELOG */ #define HUF_DEFAULT_TABLELOG HUF_MAX_TABLELOG /* tableLog by default, when not specified */ #define HUF_MAX_SYMBOL_VALUE 255 #if (HUF_MAX_TABLELOG > HUF_ABSOLUTEMAX_TABLELOG) # error "HUF_MAX_TABLELOG is too large !" #endif /* ************************************************************** * Error Management ****************************************************************/ static unsigned HUF_isError(size_t code) { return ERR_isError(code); } #define HUF_STATIC_ASSERT(c) { enum { HUF_static_assert = 1/(int)(!!(c)) }; } /* use only *after* variable declarations */ /*-******************************************************* * Huff0 : Huffman block decompression *********************************************************/ typedef struct { BYTE byte; BYTE nbBits; } HUF_DEltX2; /* single-symbol decoding */ typedef struct { U16 sequence; BYTE nbBits; BYTE length; } HUF_DEltX4; /* double-symbols decoding */ typedef struct { BYTE symbol; BYTE weight; } sortedSymbol_t; /*! HUF_readStats Read compact Huffman tree, saved by HUF_writeCTable @huffWeight : destination buffer @return : size read from `src` */ static size_t HUF_readStats(BYTE* huffWeight, size_t hwSize, U32* rankStats, U32* nbSymbolsPtr, U32* tableLogPtr, const void* src, size_t srcSize) { U32 weightTotal; U32 tableLog; const BYTE* ip = (const BYTE*) src; size_t iSize; size_t oSize; U32 n; if (!srcSize) return ERROR(srcSize_wrong); iSize = ip[0]; //memset(huffWeight, 0, hwSize); /* is not necessary, even though some analyzer complain ... 
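   (added note : every huffWeight cell read below, indices 0 to oSize, is first written by one of the
   three branches that follow, RLE memset, nibble unpacking, or FSE_decompress, so skipping the memset
   cannot feed uninitialized data into the decoder)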
*/ if (iSize >= 128) /* special header */ { if (iSize >= (242)) /* RLE */ { static int l[14] = { 1, 2, 3, 4, 7, 8, 15, 16, 31, 32, 63, 64, 127, 128 }; oSize = l[iSize-242]; memset(huffWeight, 1, hwSize); iSize = 0; } else /* Incompressible */ { oSize = iSize - 127; iSize = ((oSize+1)/2); if (iSize+1 > srcSize) return ERROR(srcSize_wrong); if (oSize >= hwSize) return ERROR(corruption_detected); ip += 1; for (n=0; n> 4; huffWeight[n+1] = ip[n/2] & 15; } } } else /* header compressed with FSE (normal case) */ { if (iSize+1 > srcSize) return ERROR(srcSize_wrong); oSize = FSE_decompress(huffWeight, hwSize-1, ip+1, iSize); /* max (hwSize-1) values decoded, as last one is implied */ if (FSE_isError(oSize)) return oSize; } /* collect weight stats */ memset(rankStats, 0, (HUF_ABSOLUTEMAX_TABLELOG + 1) * sizeof(U32)); weightTotal = 0; for (n=0; n= HUF_ABSOLUTEMAX_TABLELOG) return ERROR(corruption_detected); rankStats[huffWeight[n]]++; weightTotal += (1 << huffWeight[n]) >> 1; } if (weightTotal == 0) return ERROR(corruption_detected); /* get last non-null symbol weight (implied, total must be 2^n) */ tableLog = BIT_highbit32(weightTotal) + 1; if (tableLog > HUF_ABSOLUTEMAX_TABLELOG) return ERROR(corruption_detected); { U32 total = 1 << tableLog; U32 rest = total - weightTotal; U32 verif = 1 << BIT_highbit32(rest); U32 lastWeight = BIT_highbit32(rest) + 1; if (verif != rest) return ERROR(corruption_detected); /* last value must be a clean power of 2 */ huffWeight[oSize] = (BYTE)lastWeight; rankStats[lastWeight]++; } /* check tree construction validity */ if ((rankStats[1] < 2) || (rankStats[1] & 1)) return ERROR(corruption_detected); /* by construction : at least 2 elts of rank 1, must be even */ /* results */ *nbSymbolsPtr = (U32)(oSize+1); *tableLogPtr = tableLog; return iSize+1; } /**************************/ /* single-symbol decoding */ /**************************/ static size_t HUF_readDTableX2 (U16* DTable, const void* src, size_t srcSize) { BYTE huffWeight[HUF_MAX_SYMBOL_VALUE + 1]; U32 rankVal[HUF_ABSOLUTEMAX_TABLELOG + 1]; /* large enough for values from 0 to 16 */ U32 tableLog = 0; size_t iSize; U32 nbSymbols = 0; U32 n; U32 nextRankStart; void* const dtPtr = DTable + 1; HUF_DEltX2* const dt = (HUF_DEltX2*)dtPtr; HUF_STATIC_ASSERT(sizeof(HUF_DEltX2) == sizeof(U16)); /* if compilation fails here, assertion is false */ //memset(huffWeight, 0, sizeof(huffWeight)); /* is not necessary, even though some analyzer complain ... 
*/ iSize = HUF_readStats(huffWeight, HUF_MAX_SYMBOL_VALUE + 1, rankVal, &nbSymbols, &tableLog, src, srcSize); if (HUF_isError(iSize)) return iSize; /* check result */ if (tableLog > DTable[0]) return ERROR(tableLog_tooLarge); /* DTable is too small */ DTable[0] = (U16)tableLog; /* maybe should separate sizeof DTable, as allocated, from used size of DTable, in case of DTable re-use */ /* Prepare ranks */ nextRankStart = 0; for (n=1; n<=tableLog; n++) { U32 current = nextRankStart; nextRankStart += (rankVal[n] << (n-1)); rankVal[n] = current; } /* fill DTable */ for (n=0; n> 1; U32 i; HUF_DEltX2 D; D.byte = (BYTE)n; D.nbBits = (BYTE)(tableLog + 1 - w); for (i = rankVal[w]; i < rankVal[w] + length; i++) dt[i] = D; rankVal[w] += length; } return iSize; } static BYTE HUF_decodeSymbolX2(BIT_DStream_t* Dstream, const HUF_DEltX2* dt, const U32 dtLog) { const size_t val = BIT_lookBitsFast(Dstream, dtLog); /* note : dtLog >= 1 */ const BYTE c = dt[val].byte; BIT_skipBits(Dstream, dt[val].nbBits); return c; } #define HUF_DECODE_SYMBOLX2_0(ptr, DStreamPtr) \ *ptr++ = HUF_decodeSymbolX2(DStreamPtr, dt, dtLog) #define HUF_DECODE_SYMBOLX2_1(ptr, DStreamPtr) \ if (MEM_64bits() || (HUF_MAX_TABLELOG<=12)) \ HUF_DECODE_SYMBOLX2_0(ptr, DStreamPtr) #define HUF_DECODE_SYMBOLX2_2(ptr, DStreamPtr) \ if (MEM_64bits()) \ HUF_DECODE_SYMBOLX2_0(ptr, DStreamPtr) static inline size_t HUF_decodeStreamX2(BYTE* p, BIT_DStream_t* const bitDPtr, BYTE* const pEnd, const HUF_DEltX2* const dt, const U32 dtLog) { BYTE* const pStart = p; /* up to 4 symbols at a time */ while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) && (p <= pEnd-4)) { HUF_DECODE_SYMBOLX2_2(p, bitDPtr); HUF_DECODE_SYMBOLX2_1(p, bitDPtr); HUF_DECODE_SYMBOLX2_2(p, bitDPtr); HUF_DECODE_SYMBOLX2_0(p, bitDPtr); } /* closer to the end */ while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) && (p < pEnd)) HUF_DECODE_SYMBOLX2_0(p, bitDPtr); /* no more data to retrieve from bitstream, hence no need to reload */ while (p < pEnd) HUF_DECODE_SYMBOLX2_0(p, bitDPtr); return pEnd-pStart; } static size_t HUF_decompress4X2_usingDTable( void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, const U16* DTable) { if (cSrcSize < 10) return ERROR(corruption_detected); /* strict minimum : jump table + 1 byte per stream */ { const BYTE* const istart = (const BYTE*) cSrc; BYTE* const ostart = (BYTE*) dst; BYTE* const oend = ostart + dstSize; const void* const dtPtr = DTable; const HUF_DEltX2* const dt = ((const HUF_DEltX2*)dtPtr) +1; const U32 dtLog = DTable[0]; size_t errorCode; /* Init */ BIT_DStream_t bitD1; BIT_DStream_t bitD2; BIT_DStream_t bitD3; BIT_DStream_t bitD4; const size_t length1 = MEM_readLE16(istart); const size_t length2 = MEM_readLE16(istart+2); const size_t length3 = MEM_readLE16(istart+4); size_t length4; const BYTE* const istart1 = istart + 6; /* jumpTable */ const BYTE* const istart2 = istart1 + length1; const BYTE* const istart3 = istart2 + length2; const BYTE* const istart4 = istart3 + length3; const size_t segmentSize = (dstSize+3) / 4; BYTE* const opStart2 = ostart + segmentSize; BYTE* const opStart3 = opStart2 + segmentSize; BYTE* const opStart4 = opStart3 + segmentSize; BYTE* op1 = ostart; BYTE* op2 = opStart2; BYTE* op3 = opStart3; BYTE* op4 = opStart4; U32 endSignal; length4 = cSrcSize - (length1 + length2 + length3 + 6); if (length4 > cSrcSize) return ERROR(corruption_detected); /* overflow */ errorCode = BIT_initDStream(&bitD1, istart1, length1); if (HUF_isError(errorCode)) return errorCode; errorCode = 
BIT_initDStream(&bitD2, istart2, length2); if (HUF_isError(errorCode)) return errorCode; errorCode = BIT_initDStream(&bitD3, istart3, length3); if (HUF_isError(errorCode)) return errorCode; errorCode = BIT_initDStream(&bitD4, istart4, length4); if (HUF_isError(errorCode)) return errorCode; /* 16-32 symbols per loop (4-8 symbols per stream) */ endSignal = BIT_reloadDStream(&bitD1) | BIT_reloadDStream(&bitD2) | BIT_reloadDStream(&bitD3) | BIT_reloadDStream(&bitD4); for ( ; (endSignal==BIT_DStream_unfinished) && (op4<(oend-7)) ; ) { HUF_DECODE_SYMBOLX2_2(op1, &bitD1); HUF_DECODE_SYMBOLX2_2(op2, &bitD2); HUF_DECODE_SYMBOLX2_2(op3, &bitD3); HUF_DECODE_SYMBOLX2_2(op4, &bitD4); HUF_DECODE_SYMBOLX2_1(op1, &bitD1); HUF_DECODE_SYMBOLX2_1(op2, &bitD2); HUF_DECODE_SYMBOLX2_1(op3, &bitD3); HUF_DECODE_SYMBOLX2_1(op4, &bitD4); HUF_DECODE_SYMBOLX2_2(op1, &bitD1); HUF_DECODE_SYMBOLX2_2(op2, &bitD2); HUF_DECODE_SYMBOLX2_2(op3, &bitD3); HUF_DECODE_SYMBOLX2_2(op4, &bitD4); HUF_DECODE_SYMBOLX2_0(op1, &bitD1); HUF_DECODE_SYMBOLX2_0(op2, &bitD2); HUF_DECODE_SYMBOLX2_0(op3, &bitD3); HUF_DECODE_SYMBOLX2_0(op4, &bitD4); endSignal = BIT_reloadDStream(&bitD1) | BIT_reloadDStream(&bitD2) | BIT_reloadDStream(&bitD3) | BIT_reloadDStream(&bitD4); } /* check corruption */ if (op1 > opStart2) return ERROR(corruption_detected); if (op2 > opStart3) return ERROR(corruption_detected); if (op3 > opStart4) return ERROR(corruption_detected); /* note : op4 supposed already verified within main loop */ /* finish bitStreams one by one */ HUF_decodeStreamX2(op1, &bitD1, opStart2, dt, dtLog); HUF_decodeStreamX2(op2, &bitD2, opStart3, dt, dtLog); HUF_decodeStreamX2(op3, &bitD3, opStart4, dt, dtLog); HUF_decodeStreamX2(op4, &bitD4, oend, dt, dtLog); /* check */ endSignal = BIT_endOfDStream(&bitD1) & BIT_endOfDStream(&bitD2) & BIT_endOfDStream(&bitD3) & BIT_endOfDStream(&bitD4); if (!endSignal) return ERROR(corruption_detected); /* decoded size */ return dstSize; } } static size_t HUF_decompress4X2 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize) { HUF_CREATE_STATIC_DTABLEX2(DTable, HUF_MAX_TABLELOG); const BYTE* ip = (const BYTE*) cSrc; size_t errorCode; errorCode = HUF_readDTableX2 (DTable, cSrc, cSrcSize); if (HUF_isError(errorCode)) return errorCode; if (errorCode >= cSrcSize) return ERROR(srcSize_wrong); ip += errorCode; cSrcSize -= errorCode; return HUF_decompress4X2_usingDTable (dst, dstSize, ip, cSrcSize, DTable); } /***************************/ /* double-symbols decoding */ /***************************/ static void HUF_fillDTableX4Level2(HUF_DEltX4* DTable, U32 sizeLog, const U32 consumed, const U32* rankValOrigin, const int minWeight, const sortedSymbol_t* sortedSymbols, const U32 sortedListSize, U32 nbBitsBaseline, U16 baseSeq) { HUF_DEltX4 DElt; U32 rankVal[HUF_ABSOLUTEMAX_TABLELOG + 1]; U32 s; /* get pre-calculated rankVal */ memcpy(rankVal, rankValOrigin, sizeof(rankVal)); /* fill skipped values */ if (minWeight>1) { U32 i, skipSize = rankVal[minWeight]; MEM_writeLE16(&(DElt.sequence), baseSeq); DElt.nbBits = (BYTE)(consumed); DElt.length = 1; for (i = 0; i < skipSize; i++) DTable[i] = DElt; } /* fill DTable */ for (s=0; s= 1 */ rankVal[weight] += length; } } typedef U32 rankVal_t[HUF_ABSOLUTEMAX_TABLELOG][HUF_ABSOLUTEMAX_TABLELOG + 1]; static void HUF_fillDTableX4(HUF_DEltX4* DTable, const U32 targetLog, const sortedSymbol_t* sortedList, const U32 sortedListSize, const U32* rankStart, rankVal_t rankValOrigin, const U32 maxWeight, const U32 nbBitsBaseline) { U32 rankVal[HUF_ABSOLUTEMAX_TABLELOG + 1]; const int 
scaleLog = nbBitsBaseline - targetLog; /* note : targetLog >= srcLog, hence scaleLog <= 1 */ const U32 minBits = nbBitsBaseline - maxWeight; U32 s; memcpy(rankVal, rankValOrigin, sizeof(rankVal)); /* fill DTable */ for (s=0; s= minBits) /* enough room for a second symbol */ { U32 sortedRank; int minWeight = nbBits + scaleLog; if (minWeight < 1) minWeight = 1; sortedRank = rankStart[minWeight]; HUF_fillDTableX4Level2(DTable+start, targetLog-nbBits, nbBits, rankValOrigin[nbBits], minWeight, sortedList+sortedRank, sortedListSize-sortedRank, nbBitsBaseline, symbol); } else { U32 i; const U32 end = start + length; HUF_DEltX4 DElt; MEM_writeLE16(&(DElt.sequence), symbol); DElt.nbBits = (BYTE)(nbBits); DElt.length = 1; for (i = start; i < end; i++) DTable[i] = DElt; } rankVal[weight] += length; } } static size_t HUF_readDTableX4 (U32* DTable, const void* src, size_t srcSize) { BYTE weightList[HUF_MAX_SYMBOL_VALUE + 1]; sortedSymbol_t sortedSymbol[HUF_MAX_SYMBOL_VALUE + 1]; U32 rankStats[HUF_ABSOLUTEMAX_TABLELOG + 1] = { 0 }; U32 rankStart0[HUF_ABSOLUTEMAX_TABLELOG + 2] = { 0 }; U32* const rankStart = rankStart0+1; rankVal_t rankVal; U32 tableLog, maxW, sizeOfSort, nbSymbols; const U32 memLog = DTable[0]; size_t iSize; void* dtPtr = DTable; HUF_DEltX4* const dt = ((HUF_DEltX4*)dtPtr) + 1; HUF_STATIC_ASSERT(sizeof(HUF_DEltX4) == sizeof(U32)); /* if compilation fails here, assertion is false */ if (memLog > HUF_ABSOLUTEMAX_TABLELOG) return ERROR(tableLog_tooLarge); //memset(weightList, 0, sizeof(weightList)); /* is not necessary, even though some analyzer complain ... */ iSize = HUF_readStats(weightList, HUF_MAX_SYMBOL_VALUE + 1, rankStats, &nbSymbols, &tableLog, src, srcSize); if (HUF_isError(iSize)) return iSize; /* check result */ if (tableLog > memLog) return ERROR(tableLog_tooLarge); /* DTable can't fit code depth */ /* find maxWeight */ for (maxW = tableLog; rankStats[maxW]==0; maxW--) { if (!maxW) return ERROR(GENERIC); } /* necessarily finds a solution before maxW==0 */ /* Get start index of each weight */ { U32 w, nextRankStart = 0; for (w=1; w<=maxW; w++) { U32 current = nextRankStart; nextRankStart += rankStats[w]; rankStart[w] = current; } rankStart[0] = nextRankStart; /* put all 0w symbols at the end of sorted list*/ sizeOfSort = nextRankStart; } /* sort symbols by weight */ { U32 s; for (s=0; s> consumed; } } } HUF_fillDTableX4(dt, memLog, sortedSymbol, sizeOfSort, rankStart0, rankVal, maxW, tableLog+1); return iSize; } static U32 HUF_decodeSymbolX4(void* op, BIT_DStream_t* DStream, const HUF_DEltX4* dt, const U32 dtLog) { const size_t val = BIT_lookBitsFast(DStream, dtLog); /* note : dtLog >= 1 */ memcpy(op, dt+val, 2); BIT_skipBits(DStream, dt[val].nbBits); return dt[val].length; } static U32 HUF_decodeLastSymbolX4(void* op, BIT_DStream_t* DStream, const HUF_DEltX4* dt, const U32 dtLog) { const size_t val = BIT_lookBitsFast(DStream, dtLog); /* note : dtLog >= 1 */ memcpy(op, dt+val, 1); if (dt[val].length==1) BIT_skipBits(DStream, dt[val].nbBits); else { if (DStream->bitsConsumed < (sizeof(DStream->bitContainer)*8)) { BIT_skipBits(DStream, dt[val].nbBits); if (DStream->bitsConsumed > (sizeof(DStream->bitContainer)*8)) DStream->bitsConsumed = (sizeof(DStream->bitContainer)*8); /* ugly hack; works only because it's the last symbol. 
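   The looked-up entry describes a two-symbol sequence, but only its first byte is emitted here, so
   skipping the entry's full nbBits may over-consume ; clamping bitsConsumed is harmless because
   decoding stops immediately after this last symbol.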
Note : can't easily extract nbBits from just this symbol */ } } return 1; } #define HUF_DECODE_SYMBOLX4_0(ptr, DStreamPtr) \ ptr += HUF_decodeSymbolX4(ptr, DStreamPtr, dt, dtLog) #define HUF_DECODE_SYMBOLX4_1(ptr, DStreamPtr) \ if (MEM_64bits() || (HUF_MAX_TABLELOG<=12)) \ ptr += HUF_decodeSymbolX4(ptr, DStreamPtr, dt, dtLog) #define HUF_DECODE_SYMBOLX4_2(ptr, DStreamPtr) \ if (MEM_64bits()) \ ptr += HUF_decodeSymbolX4(ptr, DStreamPtr, dt, dtLog) static inline size_t HUF_decodeStreamX4(BYTE* p, BIT_DStream_t* bitDPtr, BYTE* const pEnd, const HUF_DEltX4* const dt, const U32 dtLog) { BYTE* const pStart = p; /* up to 8 symbols at a time */ while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) && (p < pEnd-7)) { HUF_DECODE_SYMBOLX4_2(p, bitDPtr); HUF_DECODE_SYMBOLX4_1(p, bitDPtr); HUF_DECODE_SYMBOLX4_2(p, bitDPtr); HUF_DECODE_SYMBOLX4_0(p, bitDPtr); } /* closer to the end */ while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) && (p <= pEnd-2)) HUF_DECODE_SYMBOLX4_0(p, bitDPtr); while (p <= pEnd-2) HUF_DECODE_SYMBOLX4_0(p, bitDPtr); /* no need to reload : reached the end of DStream */ if (p < pEnd) p += HUF_decodeLastSymbolX4(p, bitDPtr, dt, dtLog); return p-pStart; } static size_t HUF_decompress4X4_usingDTable( void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, const U32* DTable) { if (cSrcSize < 10) return ERROR(corruption_detected); /* strict minimum : jump table + 1 byte per stream */ { const BYTE* const istart = (const BYTE*) cSrc; BYTE* const ostart = (BYTE*) dst; BYTE* const oend = ostart + dstSize; const void* const dtPtr = DTable; const HUF_DEltX4* const dt = ((const HUF_DEltX4*)dtPtr) +1; const U32 dtLog = DTable[0]; size_t errorCode; /* Init */ BIT_DStream_t bitD1; BIT_DStream_t bitD2; BIT_DStream_t bitD3; BIT_DStream_t bitD4; const size_t length1 = MEM_readLE16(istart); const size_t length2 = MEM_readLE16(istart+2); const size_t length3 = MEM_readLE16(istart+4); size_t length4; const BYTE* const istart1 = istart + 6; /* jumpTable */ const BYTE* const istart2 = istart1 + length1; const BYTE* const istart3 = istart2 + length2; const BYTE* const istart4 = istart3 + length3; const size_t segmentSize = (dstSize+3) / 4; BYTE* const opStart2 = ostart + segmentSize; BYTE* const opStart3 = opStart2 + segmentSize; BYTE* const opStart4 = opStart3 + segmentSize; BYTE* op1 = ostart; BYTE* op2 = opStart2; BYTE* op3 = opStart3; BYTE* op4 = opStart4; U32 endSignal; length4 = cSrcSize - (length1 + length2 + length3 + 6); if (length4 > cSrcSize) return ERROR(corruption_detected); /* overflow */ errorCode = BIT_initDStream(&bitD1, istart1, length1); if (HUF_isError(errorCode)) return errorCode; errorCode = BIT_initDStream(&bitD2, istart2, length2); if (HUF_isError(errorCode)) return errorCode; errorCode = BIT_initDStream(&bitD3, istart3, length3); if (HUF_isError(errorCode)) return errorCode; errorCode = BIT_initDStream(&bitD4, istart4, length4); if (HUF_isError(errorCode)) return errorCode; /* 16-32 symbols per loop (4-8 symbols per stream) */ endSignal = BIT_reloadDStream(&bitD1) | BIT_reloadDStream(&bitD2) | BIT_reloadDStream(&bitD3) | BIT_reloadDStream(&bitD4); for ( ; (endSignal==BIT_DStream_unfinished) && (op4<(oend-7)) ; ) { HUF_DECODE_SYMBOLX4_2(op1, &bitD1); HUF_DECODE_SYMBOLX4_2(op2, &bitD2); HUF_DECODE_SYMBOLX4_2(op3, &bitD3); HUF_DECODE_SYMBOLX4_2(op4, &bitD4); HUF_DECODE_SYMBOLX4_1(op1, &bitD1); HUF_DECODE_SYMBOLX4_1(op2, &bitD2); HUF_DECODE_SYMBOLX4_1(op3, &bitD3); HUF_DECODE_SYMBOLX4_1(op4, &bitD4); HUF_DECODE_SYMBOLX4_2(op1, &bitD1); 
HUF_DECODE_SYMBOLX4_2(op2, &bitD2); HUF_DECODE_SYMBOLX4_2(op3, &bitD3); HUF_DECODE_SYMBOLX4_2(op4, &bitD4); HUF_DECODE_SYMBOLX4_0(op1, &bitD1); HUF_DECODE_SYMBOLX4_0(op2, &bitD2); HUF_DECODE_SYMBOLX4_0(op3, &bitD3); HUF_DECODE_SYMBOLX4_0(op4, &bitD4); endSignal = BIT_reloadDStream(&bitD1) | BIT_reloadDStream(&bitD2) | BIT_reloadDStream(&bitD3) | BIT_reloadDStream(&bitD4); } /* check corruption */ if (op1 > opStart2) return ERROR(corruption_detected); if (op2 > opStart3) return ERROR(corruption_detected); if (op3 > opStart4) return ERROR(corruption_detected); /* note : op4 supposed already verified within main loop */ /* finish bitStreams one by one */ HUF_decodeStreamX4(op1, &bitD1, opStart2, dt, dtLog); HUF_decodeStreamX4(op2, &bitD2, opStart3, dt, dtLog); HUF_decodeStreamX4(op3, &bitD3, opStart4, dt, dtLog); HUF_decodeStreamX4(op4, &bitD4, oend, dt, dtLog); /* check */ endSignal = BIT_endOfDStream(&bitD1) & BIT_endOfDStream(&bitD2) & BIT_endOfDStream(&bitD3) & BIT_endOfDStream(&bitD4); if (!endSignal) return ERROR(corruption_detected); /* decoded size */ return dstSize; } } static size_t HUF_decompress4X4 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize) { HUF_CREATE_STATIC_DTABLEX4(DTable, HUF_MAX_TABLELOG); const BYTE* ip = (const BYTE*) cSrc; size_t hSize = HUF_readDTableX4 (DTable, cSrc, cSrcSize); if (HUF_isError(hSize)) return hSize; if (hSize >= cSrcSize) return ERROR(srcSize_wrong); ip += hSize; cSrcSize -= hSize; return HUF_decompress4X4_usingDTable (dst, dstSize, ip, cSrcSize, DTable); } /**********************************/ /* Generic decompression selector */ /**********************************/ typedef struct { U32 tableTime; U32 decode256Time; } algo_time_t; static const algo_time_t algoTime[16 /* Quantization */][3 /* single, double, quad */] = { /* single, double, quad */ {{0,0}, {1,1}, {2,2}}, /* Q==0 : impossible */ {{0,0}, {1,1}, {2,2}}, /* Q==1 : impossible */ {{ 38,130}, {1313, 74}, {2151, 38}}, /* Q == 2 : 12-18% */ {{ 448,128}, {1353, 74}, {2238, 41}}, /* Q == 3 : 18-25% */ {{ 556,128}, {1353, 74}, {2238, 47}}, /* Q == 4 : 25-32% */ {{ 714,128}, {1418, 74}, {2436, 53}}, /* Q == 5 : 32-38% */ {{ 883,128}, {1437, 74}, {2464, 61}}, /* Q == 6 : 38-44% */ {{ 897,128}, {1515, 75}, {2622, 68}}, /* Q == 7 : 44-50% */ {{ 926,128}, {1613, 75}, {2730, 75}}, /* Q == 8 : 50-56% */ {{ 947,128}, {1729, 77}, {3359, 77}}, /* Q == 9 : 56-62% */ {{1107,128}, {2083, 81}, {4006, 84}}, /* Q ==10 : 62-69% */ {{1177,128}, {2379, 87}, {4785, 88}}, /* Q ==11 : 69-75% */ {{1242,128}, {2415, 93}, {5155, 84}}, /* Q ==12 : 75-81% */ {{1349,128}, {2644,106}, {5260,106}}, /* Q ==13 : 81-87% */ {{1455,128}, {2422,124}, {4174,124}}, /* Q ==14 : 87-93% */ {{ 722,128}, {1891,145}, {1936,146}}, /* Q ==15 : 93-99% */ }; typedef size_t (*decompressionAlgo)(void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); static size_t HUF_decompress (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize) { static const decompressionAlgo decompress[3] = { HUF_decompress4X2, HUF_decompress4X4, NULL }; /* estimate decompression time */ U32 Q; const U32 D256 = (U32)(dstSize >> 8); U32 Dtime[3]; U32 algoNb = 0; int n; /* validation checks */ if (dstSize == 0) return ERROR(dstSize_tooSmall); if (cSrcSize > dstSize) return ERROR(corruption_detected); /* invalid */ if (cSrcSize == dstSize) { memcpy(dst, cSrc, dstSize); return dstSize; } /* not compressed */ if (cSrcSize == 1) { memset(dst, *(const BYTE*)cSrc, dstSize); return dstSize; } /* RLE */ /* decoder timing evaluation */ Q = 
(U32)(cSrcSize * 16 / dstSize); /* Q < 16 since dstSize > cSrcSize */ for (n=0; n<3; n++) Dtime[n] = algoTime[Q][n].tableTime + (algoTime[Q][n].decode256Time * D256); Dtime[1] += Dtime[1] >> 4; Dtime[2] += Dtime[2] >> 3; /* advantage to algorithms using less memory, for cache eviction */ if (Dtime[1] < Dtime[0]) algoNb = 1; return decompress[algoNb](dst, dstSize, cSrc, cSrcSize); //return HUF_decompress4X2(dst, dstSize, cSrc, cSrcSize); /* multi-streams single-symbol decoding */ //return HUF_decompress4X4(dst, dstSize, cSrc, cSrcSize); /* multi-streams double-symbols decoding */ //return HUF_decompress4X6(dst, dstSize, cSrc, cSrcSize); /* multi-streams quad-symbols decoding */ } #endif /* ZSTD_CCOMMON_H_MODULE */ /* zstd - decompression module fo v0.4 legacy format Copyright (C) 2015-2016, Yann Collet. BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. You can contact the author at : - zstd source repository : https://github.com/Cyan4973/zstd - ztsd public forum : https://groups.google.com/forum/#!forum/lz4c */ /* *************************************************************** * Tuning parameters *****************************************************************/ /*! 
* HEAPMODE : * Select how default decompression function ZSTD_decompress() will allocate memory, * in memory stack (0), or in memory heap (1, requires malloc()) */ #ifndef ZSTD_HEAPMODE # define ZSTD_HEAPMODE 1 #endif /* ******************************************************* * Includes *********************************************************/ #include <stdlib.h> /* calloc */ #include <string.h> /* memcpy, memmove */ #include <stdio.h> /* debug : printf */ /* ******************************************************* * Compiler specifics *********************************************************/ #ifdef _MSC_VER /* Visual Studio */ # include <intrin.h> /* For Visual 2005 */ # pragma warning(disable : 4127) /* disable: C4127: conditional expression is constant */ # pragma warning(disable : 4324) /* disable: C4324: padded structure */ #endif /* ************************************* * Local types ***************************************/ typedef struct { blockType_t blockType; U32 origSize; } blockProperties_t; /* ******************************************************* * Memory operations **********************************************************/ static void ZSTD_copy4(void* dst, const void* src) { memcpy(dst, src, 4); } /* ************************************* * Error Management ***************************************/ /*! ZSTD_isError * tells if a return value is an error code */ static unsigned ZSTD_isError(size_t code) { return ERR_isError(code); } /* ************************************************************* * Context management ***************************************************************/ typedef enum { ZSTDds_getFrameHeaderSize, ZSTDds_decodeFrameHeader, ZSTDds_decodeBlockHeader, ZSTDds_decompressBlock } ZSTD_dStage; struct ZSTDv04_Dctx_s { U32 LLTable[FSE_DTABLE_SIZE_U32(LLFSELog)]; U32 OffTable[FSE_DTABLE_SIZE_U32(OffFSELog)]; U32 MLTable[FSE_DTABLE_SIZE_U32(MLFSELog)]; const void* previousDstEnd; const void* base; const void* vBase; const void* dictEnd; size_t expected; size_t headerSize; ZSTD_parameters params; blockType_t bType; ZSTD_dStage stage; const BYTE* litPtr; size_t litSize; BYTE litBuffer[BLOCKSIZE + 8 /* margin for wildcopy */]; BYTE headerBuffer[ZSTD_frameHeaderSize_max]; }; /* typedef'd to ZSTD_DCtx within "zstd_static.h" */ static size_t ZSTD_resetDCtx(ZSTD_DCtx* dctx) { dctx->expected = ZSTD_frameHeaderSize_min; dctx->stage = ZSTDds_getFrameHeaderSize; dctx->previousDstEnd = NULL; dctx->base = NULL; dctx->vBase = NULL; dctx->dictEnd = NULL; return 0; } static ZSTD_DCtx* ZSTD_createDCtx(void) { ZSTD_DCtx* dctx = (ZSTD_DCtx*)malloc(sizeof(ZSTD_DCtx)); if (dctx==NULL) return NULL; ZSTD_resetDCtx(dctx); return dctx; } static size_t ZSTD_freeDCtx(ZSTD_DCtx* dctx) { free(dctx); return 0; } /* ************************************************************* * Decompression section ***************************************************************/ /** ZSTD_decodeFrameHeader_Part1 * decode the 1st part of the Frame Header, which tells Frame Header size. 
* srcSize must be == ZSTD_frameHeaderSize_min * @return : the full size of the Frame Header */ static size_t ZSTD_decodeFrameHeader_Part1(ZSTD_DCtx* zc, const void* src, size_t srcSize) { U32 magicNumber; if (srcSize != ZSTD_frameHeaderSize_min) return ERROR(srcSize_wrong); magicNumber = MEM_readLE32(src); if (magicNumber != ZSTD_MAGICNUMBER) return ERROR(prefix_unknown); zc->headerSize = ZSTD_frameHeaderSize_min; return zc->headerSize; } static size_t ZSTD_getFrameParams(ZSTD_parameters* params, const void* src, size_t srcSize) { U32 magicNumber; if (srcSize < ZSTD_frameHeaderSize_min) return ZSTD_frameHeaderSize_max; magicNumber = MEM_readLE32(src); if (magicNumber != ZSTD_MAGICNUMBER) return ERROR(prefix_unknown); memset(params, 0, sizeof(*params)); params->windowLog = (((const BYTE*)src)[4] & 15) + ZSTD_WINDOWLOG_ABSOLUTEMIN; if ((((const BYTE*)src)[4] >> 4) != 0) return ERROR(frameParameter_unsupported); /* reserved bits */ return 0; } /** ZSTD_decodeFrameHeader_Part2 * decode the full Frame Header * srcSize must be the size provided by ZSTD_decodeFrameHeader_Part1 * @return : 0, or an error code, which can be tested using ZSTD_isError() */ static size_t ZSTD_decodeFrameHeader_Part2(ZSTD_DCtx* zc, const void* src, size_t srcSize) { size_t result; if (srcSize != zc->headerSize) return ERROR(srcSize_wrong); result = ZSTD_getFrameParams(&(zc->params), src, srcSize); if ((MEM_32bits()) && (zc->params.windowLog > 25)) return ERROR(frameParameter_unsupported); return result; } static size_t ZSTD_getcBlockSize(const void* src, size_t srcSize, blockProperties_t* bpPtr) { const BYTE* const in = (const BYTE* const)src; BYTE headerFlags; U32 cSize; if (srcSize < 3) return ERROR(srcSize_wrong); headerFlags = *in; cSize = in[2] + (in[1]<<8) + ((in[0] & 7)<<16); bpPtr->blockType = (blockType_t)(headerFlags >> 6); bpPtr->origSize = (bpPtr->blockType == bt_rle) ? 
cSize : 0; if (bpPtr->blockType == bt_end) return 0; if (bpPtr->blockType == bt_rle) return 1; return cSize; } static size_t ZSTD_copyRawBlock(void* dst, size_t maxDstSize, const void* src, size_t srcSize) { if (srcSize > maxDstSize) return ERROR(dstSize_tooSmall); if (srcSize > 0) { memcpy(dst, src, srcSize); } return srcSize; } /** ZSTD_decompressLiterals @return : nb of bytes read from src, or an error code*/ static size_t ZSTD_decompressLiterals(void* dst, size_t* maxDstSizePtr, const void* src, size_t srcSize) { const BYTE* ip = (const BYTE*)src; const size_t litSize = (MEM_readLE32(src) & 0x1FFFFF) >> 2; /* no buffer issue : srcSize >= MIN_CBLOCK_SIZE */ const size_t litCSize = (MEM_readLE32(ip+2) & 0xFFFFFF) >> 5; /* no buffer issue : srcSize >= MIN_CBLOCK_SIZE */ if (litSize > *maxDstSizePtr) return ERROR(corruption_detected); if (litCSize + 5 > srcSize) return ERROR(corruption_detected); if (HUF_isError(HUF_decompress(dst, litSize, ip+5, litCSize))) return ERROR(corruption_detected); *maxDstSizePtr = litSize; return litCSize + 5; } /** ZSTD_decodeLiteralsBlock @return : nb of bytes read from src (< srcSize ) */ static size_t ZSTD_decodeLiteralsBlock(ZSTD_DCtx* dctx, const void* src, size_t srcSize) /* note : srcSize < BLOCKSIZE */ { const BYTE* const istart = (const BYTE*) src; /* any compressed block with literals segment must be at least this size */ if (srcSize < MIN_CBLOCK_SIZE) return ERROR(corruption_detected); switch(*istart & 3) { /* compressed */ case 0: { size_t litSize = BLOCKSIZE; const size_t readSize = ZSTD_decompressLiterals(dctx->litBuffer, &litSize, src, srcSize); dctx->litPtr = dctx->litBuffer; dctx->litSize = litSize; memset(dctx->litBuffer + dctx->litSize, 0, 8); return readSize; /* works if it's an error too */ } case IS_RAW: { const size_t litSize = (MEM_readLE32(istart) & 0xFFFFFF) >> 2; /* no buffer issue : srcSize >= MIN_CBLOCK_SIZE */ if (litSize > srcSize-11) /* risk of reading too far with wildcopy */ { if (litSize > BLOCKSIZE) return ERROR(corruption_detected); if (litSize > srcSize-3) return ERROR(corruption_detected); memcpy(dctx->litBuffer, istart, litSize); dctx->litPtr = dctx->litBuffer; dctx->litSize = litSize; memset(dctx->litBuffer + dctx->litSize, 0, 8); return litSize+3; } /* direct reference into compressed stream */ dctx->litPtr = istart+3; dctx->litSize = litSize; return litSize+3; } case IS_RLE: { const size_t litSize = (MEM_readLE32(istart) & 0xFFFFFF) >> 2; /* no buffer issue : srcSize >= MIN_CBLOCK_SIZE */ if (litSize > BLOCKSIZE) return ERROR(corruption_detected); memset(dctx->litBuffer, istart[3], litSize + 8); dctx->litPtr = dctx->litBuffer; dctx->litSize = litSize; return 4; } default: return ERROR(corruption_detected); /* forbidden nominal case */ } } static size_t ZSTD_decodeSeqHeaders(int* nbSeq, const BYTE** dumpsPtr, size_t* dumpsLengthPtr, FSE_DTable* DTableLL, FSE_DTable* DTableML, FSE_DTable* DTableOffb, const void* src, size_t srcSize) { const BYTE* const istart = (const BYTE* const)src; const BYTE* ip = istart; const BYTE* const iend = istart + srcSize; U32 LLtype, Offtype, MLtype; U32 LLlog, Offlog, MLlog; size_t dumpsLength; /* check */ if (srcSize < 5) return ERROR(srcSize_wrong); /* SeqHead */ *nbSeq = MEM_readLE16(ip); ip+=2; LLtype = *ip >> 6; Offtype = (*ip >> 4) & 3; MLtype = (*ip >> 2) & 3; if (*ip & 2) { dumpsLength = ip[2]; dumpsLength += ip[1] << 8; ip += 3; } else { dumpsLength = ip[1]; dumpsLength += (ip[0] & 1) << 8; ip += 2; } *dumpsPtr = ip; ip += dumpsLength; *dumpsLengthPtr = dumpsLength; /* check */ if 
(ip > iend-3) return ERROR(srcSize_wrong); /* min : all 3 are "raw", hence no header, but at least xxLog bits per type */ /* sequences */ { S16 norm[MaxML+1]; /* assumption : MaxML >= MaxLL >= MaxOff */ size_t headerSize; /* Build DTables */ switch(LLtype) { case bt_rle : LLlog = 0; FSE_buildDTable_rle(DTableLL, *ip++); break; case bt_raw : LLlog = LLbits; FSE_buildDTable_raw(DTableLL, LLbits); break; default : { U32 max = MaxLL; headerSize = FSE_readNCount(norm, &max, &LLlog, ip, iend-ip); if (FSE_isError(headerSize)) return ERROR(GENERIC); if (LLlog > LLFSELog) return ERROR(corruption_detected); ip += headerSize; FSE_buildDTable(DTableLL, norm, max, LLlog); } } switch(Offtype) { case bt_rle : Offlog = 0; if (ip > iend-2) return ERROR(srcSize_wrong); /* min : "raw", hence no header, but at least xxLog bits */ FSE_buildDTable_rle(DTableOffb, *ip++ & MaxOff); /* if *ip > MaxOff, data is corrupted */ break; case bt_raw : Offlog = Offbits; FSE_buildDTable_raw(DTableOffb, Offbits); break; default : { U32 max = MaxOff; headerSize = FSE_readNCount(norm, &max, &Offlog, ip, iend-ip); if (FSE_isError(headerSize)) return ERROR(GENERIC); if (Offlog > OffFSELog) return ERROR(corruption_detected); ip += headerSize; FSE_buildDTable(DTableOffb, norm, max, Offlog); } } switch(MLtype) { case bt_rle : MLlog = 0; if (ip > iend-2) return ERROR(srcSize_wrong); /* min : "raw", hence no header, but at least xxLog bits */ FSE_buildDTable_rle(DTableML, *ip++); break; case bt_raw : MLlog = MLbits; FSE_buildDTable_raw(DTableML, MLbits); break; default : { U32 max = MaxML; headerSize = FSE_readNCount(norm, &max, &MLlog, ip, iend-ip); if (FSE_isError(headerSize)) return ERROR(GENERIC); if (MLlog > MLFSELog) return ERROR(corruption_detected); ip += headerSize; FSE_buildDTable(DTableML, norm, max, MLlog); } } } return ip-istart; } typedef struct { size_t litLength; size_t offset; size_t matchLength; } seq_t; typedef struct { BIT_DStream_t DStream; FSE_DState_t stateLL; FSE_DState_t stateOffb; FSE_DState_t stateML; size_t prevOffset; const BYTE* dumps; const BYTE* dumpsEnd; } seqState_t; static void ZSTD_decodeSequence(seq_t* seq, seqState_t* seqState) { size_t litLength; size_t prevOffset; size_t offset; size_t matchLength; const BYTE* dumps = seqState->dumps; const BYTE* const de = seqState->dumpsEnd; /* Literal length */ litLength = FSE_decodeSymbol(&(seqState->stateLL), &(seqState->DStream)); prevOffset = litLength ? 
seq->offset : seqState->prevOffset; if (litLength == MaxLL) { const U32 add = dumps<de ? *dumps++ : 0; if (add < 255) litLength += add; else if (dumps + 3 <= de) { litLength = MEM_readLE32(dumps) & 0xFFFFFF; dumps += 3; } if (dumps >= de) { dumps = de-1; } /* late correction, to avoid read overflow (data is now corrupted anyway) */ } /* Offset */ { static const U32 offsetPrefix[MaxOff+1] = { 1 /*fake*/, 1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024, 2048, 4096, 8192, 16384, 32768, 65536, 131072, 262144, 524288, 1048576, 2097152, 4194304, 8388608, 16777216, 33554432, /*fake*/ 1, 1, 1, 1, 1 }; U32 offsetCode, nbBits; offsetCode = FSE_decodeSymbol(&(seqState->stateOffb), &(seqState->DStream)); /* <= maxOff, by table construction */ if (MEM_32bits()) BIT_reloadDStream(&(seqState->DStream)); nbBits = offsetCode - 1; if (offsetCode==0) nbBits = 0; /* cmove */ offset = offsetPrefix[offsetCode] + BIT_readBits(&(seqState->DStream), nbBits); if (MEM_32bits()) BIT_reloadDStream(&(seqState->DStream)); if (offsetCode==0) offset = prevOffset; /* cmove */ if (offsetCode | !litLength) seqState->prevOffset = seq->offset; /* cmove */ } /* MatchLength */ matchLength = FSE_decodeSymbol(&(seqState->stateML), &(seqState->DStream)); if (matchLength == MaxML) { const U32 add = dumps<de ? *dumps++ : 0; if (add < 255) matchLength += add; else if (dumps + 3 <= de) { matchLength = MEM_readLE32(dumps) & 0xFFFFFF; dumps += 3; } if (dumps >= de) { dumps = de-1; } /* late correction, to avoid read overflow (data is now corrupted anyway) */ } matchLength += MINMATCH; /* save result */ seq->litLength = litLength; seq->offset = offset; seq->matchLength = matchLength; seqState->dumps = dumps; } static size_t ZSTD_execSequence(BYTE* op, BYTE* const oend, seq_t sequence, const BYTE** litPtr, const BYTE* const litLimit, const BYTE* const base, const BYTE* const vBase, const BYTE* const dictEnd) { static const int dec32table[] = { 0, 1, 2, 1, 4, 4, 4, 4 }; /* added */ static const int dec64table[] = { 8, 8, 8, 7, 8, 9,10,11 }; /* subtracted */ BYTE* const oLitEnd = op + sequence.litLength; const size_t sequenceLength = sequence.litLength + sequence.matchLength; BYTE* const oMatchEnd = op + sequenceLength; /* risk : address space overflow (32-bits) */ BYTE* const oend_8 = oend-8; const BYTE* const litEnd = *litPtr + sequence.litLength; const BYTE* match = oLitEnd - sequence.offset; /* check */ if (oLitEnd > oend_8) return ERROR(dstSize_tooSmall); /* last match must start at a minimum distance of 8 from oend */ if (oMatchEnd > oend) return ERROR(dstSize_tooSmall); /* overwrite beyond dst buffer */ if (litEnd > litLimit) return ERROR(corruption_detected); /* risk read beyond lit buffer */ /* copy Literals */ ZSTD_wildcopy(op, *litPtr, sequence.litLength); /* note : oLitEnd <= oend-8 : no risk of overwrite beyond oend */ op = oLitEnd; *litPtr = litEnd; /* update for next sequence */ /* copy Match */ if (sequence.offset > (size_t)(oLitEnd - base)) { /* offset beyond prefix */ if (sequence.offset > (size_t)(oLitEnd - vBase)) return ERROR(corruption_detected); match = dictEnd - (base-match); if (match + sequence.matchLength <= dictEnd) { memmove(oLitEnd, match, sequence.matchLength); return sequenceLength; } /* span extDict & currentPrefixSegment */ { size_t length1 = dictEnd - match; memmove(oLitEnd, match, length1); op = oLitEnd + length1; sequence.matchLength -= length1; match = base; if (op > oend_8 || sequence.matchLength < MINMATCH) { while (op < oMatchEnd) *op++ = *match++; return sequenceLength; } } } /* Requirement: op <= oend_8 */ /* match within prefix */ if (sequence.offset < 8) { /* close range match, overlap */ const int sub2 = dec64table[sequence.offset]; op[0] = match[0]; op[1] = match[1]; op[2] = match[2]; op[3] = match[3]; match += dec32table[sequence.offset]; ZSTD_copy4(op+4, match); match -= sub2; } else { 
ZSTD_copy8(op, match); } op += 8; match += 8; if (oMatchEnd > oend-(16-MINMATCH)) { if (op < oend_8) { ZSTD_wildcopy(op, match, oend_8 - op); match += oend_8 - op; op = oend_8; } while (op < oMatchEnd) *op++ = *match++; } else { ZSTD_wildcopy(op, match, (ptrdiff_t)sequence.matchLength-8); /* works even if matchLength < 8, but must be signed */ } return sequenceLength; } static size_t ZSTD_decompressSequences( ZSTD_DCtx* dctx, void* dst, size_t maxDstSize, const void* seqStart, size_t seqSize) { const BYTE* ip = (const BYTE*)seqStart; const BYTE* const iend = ip + seqSize; BYTE* const ostart = (BYTE* const)dst; BYTE* op = ostart; BYTE* const oend = ostart + maxDstSize; size_t errorCode, dumpsLength; const BYTE* litPtr = dctx->litPtr; const BYTE* const litEnd = litPtr + dctx->litSize; int nbSeq; const BYTE* dumps; U32* DTableLL = dctx->LLTable; U32* DTableML = dctx->MLTable; U32* DTableOffb = dctx->OffTable; const BYTE* const base = (const BYTE*) (dctx->base); const BYTE* const vBase = (const BYTE*) (dctx->vBase); const BYTE* const dictEnd = (const BYTE*) (dctx->dictEnd); /* Build Decoding Tables */ errorCode = ZSTD_decodeSeqHeaders(&nbSeq, &dumps, &dumpsLength, DTableLL, DTableML, DTableOffb, ip, iend-ip); if (ZSTD_isError(errorCode)) return errorCode; ip += errorCode; /* Regen sequences */ { seq_t sequence; seqState_t seqState; memset(&sequence, 0, sizeof(sequence)); sequence.offset = 4; seqState.dumps = dumps; seqState.dumpsEnd = dumps + dumpsLength; seqState.prevOffset = 4; errorCode = BIT_initDStream(&(seqState.DStream), ip, iend-ip); if (ERR_isError(errorCode)) return ERROR(corruption_detected); FSE_initDState(&(seqState.stateLL), &(seqState.DStream), DTableLL); FSE_initDState(&(seqState.stateOffb), &(seqState.DStream), DTableOffb); FSE_initDState(&(seqState.stateML), &(seqState.DStream), DTableML); for ( ; (BIT_reloadDStream(&(seqState.DStream)) <= BIT_DStream_completed) && nbSeq ; ) { size_t oneSeqSize; nbSeq--; ZSTD_decodeSequence(&sequence, &seqState); oneSeqSize = ZSTD_execSequence(op, oend, sequence, &litPtr, litEnd, base, vBase, dictEnd); if (ZSTD_isError(oneSeqSize)) return oneSeqSize; op += oneSeqSize; } /* check if reached exact end */ if ( !BIT_endOfDStream(&(seqState.DStream)) ) return ERROR(corruption_detected); /* DStream should be entirely and exactly consumed; otherwise data is corrupted */ /* last literal segment */ { size_t lastLLSize = litEnd - litPtr; if (litPtr > litEnd) return ERROR(corruption_detected); if (op+lastLLSize > oend) return ERROR(dstSize_tooSmall); if (lastLLSize > 0) { if (op != litPtr) memcpy(op, litPtr, lastLLSize); op += lastLLSize; } } } return op-ostart; } static void ZSTD_checkContinuity(ZSTD_DCtx* dctx, const void* dst) { if (dst != dctx->previousDstEnd) /* not contiguous */ { dctx->dictEnd = dctx->previousDstEnd; dctx->vBase = (const char*)dst - ((const char*)(dctx->previousDstEnd) - (const char*)(dctx->base)); dctx->base = dst; dctx->previousDstEnd = dst; } } static size_t ZSTD_decompressBlock_internal(ZSTD_DCtx* dctx, void* dst, size_t maxDstSize, const void* src, size_t srcSize) { /* blockType == blockCompressed */ const BYTE* ip = (const BYTE*)src; size_t litCSize; if (srcSize > BLOCKSIZE) return ERROR(corruption_detected); /* Decode literals sub-block */ litCSize = ZSTD_decodeLiteralsBlock(dctx, src, srcSize); if (ZSTD_isError(litCSize)) return litCSize; ip += litCSize; srcSize -= litCSize; return ZSTD_decompressSequences(dctx, dst, maxDstSize, ip, srcSize); } static size_t ZSTD_decompress_usingDict(ZSTD_DCtx* ctx, void* dst, size_t 
maxDstSize, const void* src, size_t srcSize, const void* dict, size_t dictSize) { const BYTE* ip = (const BYTE*)src; const BYTE* iend = ip + srcSize; BYTE* const ostart = (BYTE* const)dst; BYTE* op = ostart; BYTE* const oend = ostart + maxDstSize; size_t remainingSize = srcSize; blockProperties_t blockProperties; /* init */ ZSTD_resetDCtx(ctx); if (dict) { ZSTD_decompress_insertDictionary(ctx, dict, dictSize); ctx->dictEnd = ctx->previousDstEnd; ctx->vBase = (const char*)dst - ((const char*)(ctx->previousDstEnd) - (const char*)(ctx->base)); ctx->base = dst; } else { ctx->vBase = ctx->base = ctx->dictEnd = dst; } /* Frame Header */ { size_t frameHeaderSize; if (srcSize < ZSTD_frameHeaderSize_min+ZSTD_blockHeaderSize) return ERROR(srcSize_wrong); frameHeaderSize = ZSTD_decodeFrameHeader_Part1(ctx, src, ZSTD_frameHeaderSize_min); if (ZSTD_isError(frameHeaderSize)) return frameHeaderSize; if (srcSize < frameHeaderSize+ZSTD_blockHeaderSize) return ERROR(srcSize_wrong); ip += frameHeaderSize; remainingSize -= frameHeaderSize; frameHeaderSize = ZSTD_decodeFrameHeader_Part2(ctx, src, frameHeaderSize); if (ZSTD_isError(frameHeaderSize)) return frameHeaderSize; } /* Loop on each block */ while (1) { size_t decodedSize=0; size_t cBlockSize = ZSTD_getcBlockSize(ip, iend-ip, &blockProperties); if (ZSTD_isError(cBlockSize)) return cBlockSize; ip += ZSTD_blockHeaderSize; remainingSize -= ZSTD_blockHeaderSize; if (cBlockSize > remainingSize) return ERROR(srcSize_wrong); switch(blockProperties.blockType) { case bt_compressed: decodedSize = ZSTD_decompressBlock_internal(ctx, op, oend-op, ip, cBlockSize); break; case bt_raw : decodedSize = ZSTD_copyRawBlock(op, oend-op, ip, cBlockSize); break; case bt_rle : return ERROR(GENERIC); /* not yet supported */ break; case bt_end : /* end of frame */ if (remainingSize) return ERROR(srcSize_wrong); break; default: return ERROR(GENERIC); /* impossible */ } if (cBlockSize == 0) break; /* bt_end */ if (ZSTD_isError(decodedSize)) return decodedSize; op += decodedSize; ip += cBlockSize; remainingSize -= cBlockSize; } return op-ostart; } /* ZSTD_errorFrameSizeInfoLegacy() : assumes `cSize` and `dBound` are _not_ NULL */ static void ZSTD_errorFrameSizeInfoLegacy(size_t* cSize, unsigned long long* dBound, size_t ret) { *cSize = ret; *dBound = ZSTD_CONTENTSIZE_ERROR; } void ZSTDv04_findFrameSizeInfoLegacy(const void *src, size_t srcSize, size_t* cSize, unsigned long long* dBound) { const BYTE* ip = (const BYTE*)src; size_t remainingSize = srcSize; size_t nbBlocks = 0; blockProperties_t blockProperties; /* Frame Header */ if (srcSize < ZSTD_frameHeaderSize_min) { ZSTD_errorFrameSizeInfoLegacy(cSize, dBound, ERROR(srcSize_wrong)); return; } if (MEM_readLE32(src) != ZSTD_MAGICNUMBER) { ZSTD_errorFrameSizeInfoLegacy(cSize, dBound, ERROR(prefix_unknown)); return; } ip += ZSTD_frameHeaderSize_min; remainingSize -= ZSTD_frameHeaderSize_min; /* Loop on each block */ while (1) { size_t cBlockSize = ZSTD_getcBlockSize(ip, remainingSize, &blockProperties); if (ZSTD_isError(cBlockSize)) { ZSTD_errorFrameSizeInfoLegacy(cSize, dBound, cBlockSize); return; } ip += ZSTD_blockHeaderSize; remainingSize -= ZSTD_blockHeaderSize; if (cBlockSize > remainingSize) { ZSTD_errorFrameSizeInfoLegacy(cSize, dBound, ERROR(srcSize_wrong)); return; } if (cBlockSize == 0) break; /* bt_end */ ip += cBlockSize; remainingSize -= cBlockSize; nbBlocks++; } *cSize = ip - (const BYTE*)src; *dBound = nbBlocks * BLOCKSIZE; } /* ****************************** * Streaming Decompression API 
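*
* (Editor's note, not part of the original source : the two functions below implement a
* pull-style protocol. A hedged usage sketch follows; the caller-side buffer names
* (inBuff, outPtr, outCap) and the error handling are assumptions, not library API :
*
*     size_t toRead = ZSTD_nextSrcSizeToDecompress(dctx);
*     while (toRead != 0) {
*         // caller must supply exactly `toRead` bytes in inBuff, as checked by the function
*         size_t const produced = ZSTD_decompressContinue(dctx, outPtr, outCap, inBuff, toRead);
*         if (ZSTD_isError(produced)) break;        // corrupted or truncated input
*         outPtr += produced; outCap -= produced;   // produced is 0 for header/block-header steps
*         toRead = ZSTD_nextSrcSizeToDecompress(dctx);   // 0 once the frame is fully decoded
*     }
* )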
********************************/ static size_t ZSTD_nextSrcSizeToDecompress(ZSTD_DCtx* dctx) { return dctx->expected; } static size_t ZSTD_decompressContinue(ZSTD_DCtx* ctx, void* dst, size_t maxDstSize, const void* src, size_t srcSize) { /* Sanity check */ if (srcSize != ctx->expected) return ERROR(srcSize_wrong); ZSTD_checkContinuity(ctx, dst); /* Decompress : frame header; part 1 */ switch (ctx->stage) { case ZSTDds_getFrameHeaderSize : /* get frame header size */ if (srcSize != ZSTD_frameHeaderSize_min) return ERROR(srcSize_wrong); /* impossible */ ctx->headerSize = ZSTD_decodeFrameHeader_Part1(ctx, src, ZSTD_frameHeaderSize_min); if (ZSTD_isError(ctx->headerSize)) return ctx->headerSize; memcpy(ctx->headerBuffer, src, ZSTD_frameHeaderSize_min); if (ctx->headerSize > ZSTD_frameHeaderSize_min) return ERROR(GENERIC); /* impossible */ ctx->expected = 0; /* not necessary to copy more */ /* fallthrough */ case ZSTDds_decodeFrameHeader: /* get frame header */ { size_t const result = ZSTD_decodeFrameHeader_Part2(ctx, ctx->headerBuffer, ctx->headerSize); if (ZSTD_isError(result)) return result; ctx->expected = ZSTD_blockHeaderSize; ctx->stage = ZSTDds_decodeBlockHeader; return 0; } case ZSTDds_decodeBlockHeader: /* Decode block header */ { blockProperties_t bp; size_t const blockSize = ZSTD_getcBlockSize(src, ZSTD_blockHeaderSize, &bp); if (ZSTD_isError(blockSize)) return blockSize; if (bp.blockType == bt_end) { ctx->expected = 0; ctx->stage = ZSTDds_getFrameHeaderSize; } else { ctx->expected = blockSize; ctx->bType = bp.blockType; ctx->stage = ZSTDds_decompressBlock; } return 0; } case ZSTDds_decompressBlock: { /* Decompress : block content */ size_t rSize; switch(ctx->bType) { case bt_compressed: rSize = ZSTD_decompressBlock_internal(ctx, dst, maxDstSize, src, srcSize); break; case bt_raw : rSize = ZSTD_copyRawBlock(dst, maxDstSize, src, srcSize); break; case bt_rle : return ERROR(GENERIC); /* not yet handled */ break; case bt_end : /* should never happen (filtered at phase 1) */ rSize = 0; break; default: return ERROR(GENERIC); } ctx->stage = ZSTDds_decodeBlockHeader; ctx->expected = ZSTD_blockHeaderSize; ctx->previousDstEnd = (char*)dst + rSize; return rSize; } default: return ERROR(GENERIC); /* impossible */ } } static void ZSTD_decompress_insertDictionary(ZSTD_DCtx* ctx, const void* dict, size_t dictSize) { ctx->dictEnd = ctx->previousDstEnd; ctx->vBase = (const char*)dict - ((const char*)(ctx->previousDstEnd) - (const char*)(ctx->base)); ctx->base = dict; ctx->previousDstEnd = (const char*)dict + dictSize; } /* Buffered version of Zstd compression library Copyright (C) 2015, Yann Collet. BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. You can contact the author at : - zstd source repository : https://github.com/Cyan4973/zstd - ztsd public forum : https://groups.google.com/forum/#!forum/lz4c */ /* The objects defined into this file should be considered experimental. * They are not labelled stable, as their prototype may change in the future. * You can use them for tests, provide feedback, or if you can endure risk of future changes. */ /* ************************************* * Includes ***************************************/ #include <stdlib.h> /** ************************************************ * Streaming decompression * * A ZBUFF_DCtx object is required to track streaming operation. * Use ZBUFF_createDCtx() and ZBUFF_freeDCtx() to create/release resources. * Use ZBUFF_decompressInit() to start a new decompression operation. * ZBUFF_DCtx objects can be reused multiple times. * * Use ZBUFF_decompressContinue() repetitively to consume your input. * *srcSizePtr and *maxDstSizePtr can be any size. * The function will report how many bytes were read or written by modifying *srcSizePtr and *maxDstSizePtr. * Note that it may not consume the entire input, in which case it's up to the caller to call again the function with remaining input. * The content of dst will be overwritten (up to *maxDstSizePtr) at each function call, so save its content if it matters or change dst . * return : a hint to preferred nb of bytes to use as input for next function call (it's only a hint, to improve latency) * or 0 when a frame is completely decoded * or an error code, which can be tested using ZBUFF_isError(). * * Hint : recommended buffer sizes (not compulsory) * output : 128 KB block size is the internal unit, it ensures it's always possible to write a full block when it's decoded. * input : just follow indications from ZBUFF_decompressContinue() to minimize latency. It should always be <= 128 KB + 3 . 
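*
* (Editor's note, not part of the original documentation : an illustrative, hedged
* sketch of the loop described above. The fillInput()/writeOutput() helpers and the
* buffer names are assumptions, not library API, and unconsumed input handling is
* simplified :
*
*     ZBUFF_DCtx* zbd = ZBUFF_createDCtx();
*     ZBUFF_decompressInit(zbd);
*     size_t hint = 1;
*     while (hint != 0 && !ZBUFF_isError(hint)) {
*         size_t srcSize = fillInput(inBuff, sizeof(inBuff));      // caller-provided input
*         size_t dstSize = sizeof(outBuff);
*         hint = ZBUFF_decompressContinue(zbd, outBuff, &dstSize, inBuff, &srcSize);
*         writeOutput(outBuff, dstSize);                           // caller-provided output
*         // if srcSize < bytes supplied, the remainder must be re-submitted next call
*     }
*     ZBUFF_freeDCtx(zbd);
* )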
* **************************************************/ typedef enum { ZBUFFds_init, ZBUFFds_readHeader, ZBUFFds_loadHeader, ZBUFFds_decodeHeader, ZBUFFds_read, ZBUFFds_load, ZBUFFds_flush } ZBUFF_dStage; /* *** Resource management *** */ #define ZSTD_frameHeaderSize_max 5 /* too magical, should come from reference */ struct ZBUFFv04_DCtx_s { ZSTD_DCtx* zc; ZSTD_parameters params; char* inBuff; size_t inBuffSize; size_t inPos; char* outBuff; size_t outBuffSize; size_t outStart; size_t outEnd; size_t hPos; const char* dict; size_t dictSize; ZBUFF_dStage stage; unsigned char headerBuffer[ZSTD_frameHeaderSize_max]; }; /* typedef'd to ZBUFF_DCtx within "zstd_buffered.h" */ typedef ZBUFFv04_DCtx ZBUFF_DCtx; static ZBUFF_DCtx* ZBUFF_createDCtx(void) { ZBUFF_DCtx* zbc = (ZBUFF_DCtx*)malloc(sizeof(ZBUFF_DCtx)); if (zbc==NULL) return NULL; memset(zbc, 0, sizeof(*zbc)); zbc->zc = ZSTD_createDCtx(); zbc->stage = ZBUFFds_init; return zbc; } static size_t ZBUFF_freeDCtx(ZBUFF_DCtx* zbc) { if (zbc==NULL) return 0; /* support free on null */ ZSTD_freeDCtx(zbc->zc); free(zbc->inBuff); free(zbc->outBuff); free(zbc); return 0; } /* *** Initialization *** */ static size_t ZBUFF_decompressInit(ZBUFF_DCtx* zbc) { zbc->stage = ZBUFFds_readHeader; zbc->hPos = zbc->inPos = zbc->outStart = zbc->outEnd = zbc->dictSize = 0; return ZSTD_resetDCtx(zbc->zc); } static size_t ZBUFF_decompressWithDictionary(ZBUFF_DCtx* zbc, const void* src, size_t srcSize) { zbc->dict = (const char*)src; zbc->dictSize = srcSize; return 0; } static size_t ZBUFF_limitCopy(void* dst, size_t maxDstSize, const void* src, size_t srcSize) { size_t length = MIN(maxDstSize, srcSize); if (length > 0) { memcpy(dst, src, length); } return length; } /* *** Decompression *** */ static size_t ZBUFF_decompressContinue(ZBUFF_DCtx* zbc, void* dst, size_t* maxDstSizePtr, const void* src, size_t* srcSizePtr) { const char* const istart = (const char*)src; const char* ip = istart; const char* const iend = istart + *srcSizePtr; char* const ostart = (char*)dst; char* op = ostart; char* const oend = ostart + *maxDstSizePtr; U32 notDone = 1; DEBUGLOG(5, "ZBUFF_decompressContinue"); while (notDone) { switch(zbc->stage) { case ZBUFFds_init : DEBUGLOG(5, "ZBUFF_decompressContinue: stage==ZBUFFds_init => ERROR(init_missing)"); return ERROR(init_missing); case ZBUFFds_readHeader : /* read header from src */ { size_t const headerSize = ZSTD_getFrameParams(&(zbc->params), src, *srcSizePtr); if (ZSTD_isError(headerSize)) return headerSize; if (headerSize) { /* not enough input to decode header : tell how many bytes would be necessary */ memcpy(zbc->headerBuffer+zbc->hPos, src, *srcSizePtr); zbc->hPos += *srcSizePtr; *maxDstSizePtr = 0; zbc->stage = ZBUFFds_loadHeader; return headerSize - zbc->hPos; } zbc->stage = ZBUFFds_decodeHeader; break; } case ZBUFFds_loadHeader: /* complete header from src */ { size_t headerSize = ZBUFF_limitCopy( zbc->headerBuffer + zbc->hPos, ZSTD_frameHeaderSize_max - zbc->hPos, src, *srcSizePtr); zbc->hPos += headerSize; ip += headerSize; headerSize = ZSTD_getFrameParams(&(zbc->params), zbc->headerBuffer, zbc->hPos); if (ZSTD_isError(headerSize)) return headerSize; if (headerSize) { /* not enough input to decode header : tell how many bytes would be necessary */ *maxDstSizePtr = 0; return headerSize - zbc->hPos; } } /* intentional fallthrough */ case ZBUFFds_decodeHeader: /* apply header to create / resize buffers */ { size_t const neededOutSize = (size_t)1 << zbc->params.windowLog; size_t const neededInSize = BLOCKSIZE; /* a block is never > 
BLOCKSIZE */ if (zbc->inBuffSize < neededInSize) { free(zbc->inBuff); zbc->inBuffSize = neededInSize; zbc->inBuff = (char*)malloc(neededInSize); if (zbc->inBuff == NULL) return ERROR(memory_allocation); } if (zbc->outBuffSize < neededOutSize) { free(zbc->outBuff); zbc->outBuffSize = neededOutSize; zbc->outBuff = (char*)malloc(neededOutSize); if (zbc->outBuff == NULL) return ERROR(memory_allocation); } } if (zbc->dictSize) ZSTD_decompress_insertDictionary(zbc->zc, zbc->dict, zbc->dictSize); if (zbc->hPos) { /* some data already loaded into headerBuffer : transfer into inBuff */ memcpy(zbc->inBuff, zbc->headerBuffer, zbc->hPos); zbc->inPos = zbc->hPos; zbc->hPos = 0; zbc->stage = ZBUFFds_load; break; } zbc->stage = ZBUFFds_read; /* fall-through */ case ZBUFFds_read: { size_t neededInSize = ZSTD_nextSrcSizeToDecompress(zbc->zc); if (neededInSize==0) /* end of frame */ { zbc->stage = ZBUFFds_init; notDone = 0; break; } if ((size_t)(iend-ip) >= neededInSize) { /* directly decode from src */ size_t decodedSize = ZSTD_decompressContinue(zbc->zc, zbc->outBuff + zbc->outStart, zbc->outBuffSize - zbc->outStart, ip, neededInSize); if (ZSTD_isError(decodedSize)) return decodedSize; ip += neededInSize; if (!decodedSize) break; /* this was just a header */ zbc->outEnd = zbc->outStart + decodedSize; zbc->stage = ZBUFFds_flush; break; } if (ip==iend) { notDone = 0; break; } /* no more input */ zbc->stage = ZBUFFds_load; } /* fall-through */ case ZBUFFds_load: { size_t neededInSize = ZSTD_nextSrcSizeToDecompress(zbc->zc); size_t toLoad = neededInSize - zbc->inPos; /* should always be <= remaining space within inBuff */ size_t loadedSize; if (toLoad > zbc->inBuffSize - zbc->inPos) return ERROR(corruption_detected); /* should never happen */ loadedSize = ZBUFF_limitCopy(zbc->inBuff + zbc->inPos, toLoad, ip, iend-ip); ip += loadedSize; zbc->inPos += loadedSize; if (loadedSize < toLoad) { notDone = 0; break; } /* not enough input, wait for more */ { size_t decodedSize = ZSTD_decompressContinue(zbc->zc, zbc->outBuff + zbc->outStart, zbc->outBuffSize - zbc->outStart, zbc->inBuff, neededInSize); if (ZSTD_isError(decodedSize)) return decodedSize; zbc->inPos = 0; /* input is consumed */ if (!decodedSize) { zbc->stage = ZBUFFds_read; break; } /* this was just a header */ zbc->outEnd = zbc->outStart + decodedSize; zbc->stage = ZBUFFds_flush; /* ZBUFFds_flush follows */ } } /* fall-through */ case ZBUFFds_flush: { size_t toFlushSize = zbc->outEnd - zbc->outStart; size_t flushedSize = ZBUFF_limitCopy(op, oend-op, zbc->outBuff + zbc->outStart, toFlushSize); op += flushedSize; zbc->outStart += flushedSize; if (flushedSize == toFlushSize) { zbc->stage = ZBUFFds_read; if (zbc->outStart + BLOCKSIZE > zbc->outBuffSize) zbc->outStart = zbc->outEnd = 0; break; } /* cannot flush everything */ notDone = 0; break; } default: return ERROR(GENERIC); /* impossible */ } } *srcSizePtr = ip-istart; *maxDstSizePtr = op-ostart; { size_t nextSrcSizeHint = ZSTD_nextSrcSizeToDecompress(zbc->zc); if (nextSrcSizeHint > 3) nextSrcSizeHint+= 3; /* get the next block header while at it */ nextSrcSizeHint -= zbc->inPos; /* already loaded*/ return nextSrcSizeHint; } } /* ************************************* * Tool functions ***************************************/ unsigned ZBUFFv04_isError(size_t errorCode) { return ERR_isError(errorCode); } const char* ZBUFFv04_getErrorName(size_t errorCode) { return ERR_getErrorName(errorCode); } size_t ZBUFFv04_recommendedDInSize() { return BLOCKSIZE + 3; } size_t ZBUFFv04_recommendedDOutSize() { return 
BLOCKSIZE; } /*- ========================================================================= -*/ /* final wrapping stage */ size_t ZSTDv04_decompressDCtx(ZSTD_DCtx* dctx, void* dst, size_t maxDstSize, const void* src, size_t srcSize) { return ZSTD_decompress_usingDict(dctx, dst, maxDstSize, src, srcSize, NULL, 0); } size_t ZSTDv04_decompress(void* dst, size_t maxDstSize, const void* src, size_t srcSize) { #if defined(ZSTD_HEAPMODE) && (ZSTD_HEAPMODE==1) size_t regenSize; ZSTD_DCtx* dctx = ZSTD_createDCtx(); if (dctx==NULL) return ERROR(memory_allocation); regenSize = ZSTDv04_decompressDCtx(dctx, dst, maxDstSize, src, srcSize); ZSTD_freeDCtx(dctx); return regenSize; #else ZSTD_DCtx dctx; return ZSTDv04_decompressDCtx(&dctx, dst, maxDstSize, src, srcSize); #endif } size_t ZSTDv04_resetDCtx(ZSTDv04_Dctx* dctx) { return ZSTD_resetDCtx(dctx); } size_t ZSTDv04_nextSrcSizeToDecompress(ZSTDv04_Dctx* dctx) { return ZSTD_nextSrcSizeToDecompress(dctx); } size_t ZSTDv04_decompressContinue(ZSTDv04_Dctx* dctx, void* dst, size_t maxDstSize, const void* src, size_t srcSize) { return ZSTD_decompressContinue(dctx, dst, maxDstSize, src, srcSize); } ZBUFFv04_DCtx* ZBUFFv04_createDCtx(void) { return ZBUFF_createDCtx(); } size_t ZBUFFv04_freeDCtx(ZBUFFv04_DCtx* dctx) { return ZBUFF_freeDCtx(dctx); } size_t ZBUFFv04_decompressInit(ZBUFFv04_DCtx* dctx) { return ZBUFF_decompressInit(dctx); } size_t ZBUFFv04_decompressWithDictionary(ZBUFFv04_DCtx* dctx, const void* src, size_t srcSize) { return ZBUFF_decompressWithDictionary(dctx, src, srcSize); } size_t ZBUFFv04_decompressContinue(ZBUFFv04_DCtx* dctx, void* dst, size_t* maxDstSizePtr, const void* src, size_t* srcSizePtr) { DEBUGLOG(5, "ZBUFFv04_decompressContinue"); return ZBUFF_decompressContinue(dctx, dst, maxDstSizePtr, src, srcSizePtr); } ZSTD_DCtx* ZSTDv04_createDCtx(void) { return ZSTD_createDCtx(); } size_t ZSTDv04_freeDCtx(ZSTD_DCtx* dctx) { return ZSTD_freeDCtx(dctx); } borgbackup-1.1.15/src/borg/algorithms/zstd/lib/legacy/zstd_legacy.h0000644000175000017500000003312513771325506025261 0ustar useruser00000000000000/* * Copyright (c) 2016-2020, Yann Collet, Facebook, Inc. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the * LICENSE file in the root directory of this source tree) and the GPLv2 (found * in the COPYING file in the root directory of this source tree). * You may select, at your option, one of the above-listed licenses. */ #ifndef ZSTD_LEGACY_H #define ZSTD_LEGACY_H #if defined (__cplusplus) extern "C" { #endif /* ************************************* * Includes ***************************************/ #include "../common/mem.h" /* MEM_STATIC */ #include "../common/error_private.h" /* ERROR */ #include "../common/zstd_internal.h" /* ZSTD_inBuffer, ZSTD_outBuffer, ZSTD_frameSizeInfo */ #if !defined (ZSTD_LEGACY_SUPPORT) || (ZSTD_LEGACY_SUPPORT == 0) # undef ZSTD_LEGACY_SUPPORT # define ZSTD_LEGACY_SUPPORT 8 #endif #if (ZSTD_LEGACY_SUPPORT <= 1) # include "zstd_v01.h" #endif #if (ZSTD_LEGACY_SUPPORT <= 2) # include "zstd_v02.h" #endif #if (ZSTD_LEGACY_SUPPORT <= 3) # include "zstd_v03.h" #endif #if (ZSTD_LEGACY_SUPPORT <= 4) # include "zstd_v04.h" #endif #if (ZSTD_LEGACY_SUPPORT <= 5) # include "zstd_v05.h" #endif #if (ZSTD_LEGACY_SUPPORT <= 6) # include "zstd_v06.h" #endif #if (ZSTD_LEGACY_SUPPORT <= 7) # include "zstd_v07.h" #endif /** ZSTD_isLegacy() : @return : > 0 if supported by legacy decoder. 0 otherwise. return value is the version. 
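@note : (editor's addition, illustrative only) detection reads the first 4 bytes of src as a
        little-endian magic number, so srcSize must be at least 4 for a non-zero result.
        A hedged dispatch sketch; the variable names are assumptions :
          unsigned const version = ZSTD_isLegacy(src, srcSize);
          if (version)   /* 1..7 */
              result = ZSTD_decompressLegacy(dst, dstCapacity, src, srcSize, dict, dictSize);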
*/ MEM_STATIC unsigned ZSTD_isLegacy(const void* src, size_t srcSize) { U32 magicNumberLE; if (srcSize<4) return 0; magicNumberLE = MEM_readLE32(src); switch(magicNumberLE) { #if (ZSTD_LEGACY_SUPPORT <= 1) case ZSTDv01_magicNumberLE:return 1; #endif #if (ZSTD_LEGACY_SUPPORT <= 2) case ZSTDv02_magicNumber : return 2; #endif #if (ZSTD_LEGACY_SUPPORT <= 3) case ZSTDv03_magicNumber : return 3; #endif #if (ZSTD_LEGACY_SUPPORT <= 4) case ZSTDv04_magicNumber : return 4; #endif #if (ZSTD_LEGACY_SUPPORT <= 5) case ZSTDv05_MAGICNUMBER : return 5; #endif #if (ZSTD_LEGACY_SUPPORT <= 6) case ZSTDv06_MAGICNUMBER : return 6; #endif #if (ZSTD_LEGACY_SUPPORT <= 7) case ZSTDv07_MAGICNUMBER : return 7; #endif default : return 0; } } MEM_STATIC unsigned long long ZSTD_getDecompressedSize_legacy(const void* src, size_t srcSize) { U32 const version = ZSTD_isLegacy(src, srcSize); if (version < 5) return 0; /* no decompressed size in frame header, or not a legacy format */ #if (ZSTD_LEGACY_SUPPORT <= 5) if (version==5) { ZSTDv05_parameters fParams; size_t const frResult = ZSTDv05_getFrameParams(&fParams, src, srcSize); if (frResult != 0) return 0; return fParams.srcSize; } #endif #if (ZSTD_LEGACY_SUPPORT <= 6) if (version==6) { ZSTDv06_frameParams fParams; size_t const frResult = ZSTDv06_getFrameParams(&fParams, src, srcSize); if (frResult != 0) return 0; return fParams.frameContentSize; } #endif #if (ZSTD_LEGACY_SUPPORT <= 7) if (version==7) { ZSTDv07_frameParams fParams; size_t const frResult = ZSTDv07_getFrameParams(&fParams, src, srcSize); if (frResult != 0) return 0; return fParams.frameContentSize; } #endif return 0; /* should not be possible */ } MEM_STATIC size_t ZSTD_decompressLegacy( void* dst, size_t dstCapacity, const void* src, size_t compressedSize, const void* dict,size_t dictSize) { U32 const version = ZSTD_isLegacy(src, compressedSize); (void)dst; (void)dstCapacity; (void)dict; (void)dictSize; /* unused when ZSTD_LEGACY_SUPPORT >= 8 */ switch(version) { #if (ZSTD_LEGACY_SUPPORT <= 1) case 1 : return ZSTDv01_decompress(dst, dstCapacity, src, compressedSize); #endif #if (ZSTD_LEGACY_SUPPORT <= 2) case 2 : return ZSTDv02_decompress(dst, dstCapacity, src, compressedSize); #endif #if (ZSTD_LEGACY_SUPPORT <= 3) case 3 : return ZSTDv03_decompress(dst, dstCapacity, src, compressedSize); #endif #if (ZSTD_LEGACY_SUPPORT <= 4) case 4 : return ZSTDv04_decompress(dst, dstCapacity, src, compressedSize); #endif #if (ZSTD_LEGACY_SUPPORT <= 5) case 5 : { size_t result; ZSTDv05_DCtx* const zd = ZSTDv05_createDCtx(); if (zd==NULL) return ERROR(memory_allocation); result = ZSTDv05_decompress_usingDict(zd, dst, dstCapacity, src, compressedSize, dict, dictSize); ZSTDv05_freeDCtx(zd); return result; } #endif #if (ZSTD_LEGACY_SUPPORT <= 6) case 6 : { size_t result; ZSTDv06_DCtx* const zd = ZSTDv06_createDCtx(); if (zd==NULL) return ERROR(memory_allocation); result = ZSTDv06_decompress_usingDict(zd, dst, dstCapacity, src, compressedSize, dict, dictSize); ZSTDv06_freeDCtx(zd); return result; } #endif #if (ZSTD_LEGACY_SUPPORT <= 7) case 7 : { size_t result; ZSTDv07_DCtx* const zd = ZSTDv07_createDCtx(); if (zd==NULL) return ERROR(memory_allocation); result = ZSTDv07_decompress_usingDict(zd, dst, dstCapacity, src, compressedSize, dict, dictSize); ZSTDv07_freeDCtx(zd); return result; } #endif default : return ERROR(prefix_unknown); } } MEM_STATIC ZSTD_frameSizeInfo ZSTD_findFrameSizeInfoLegacy(const void *src, size_t srcSize) { ZSTD_frameSizeInfo frameSizeInfo; U32 const version = ZSTD_isLegacy(src, srcSize); switch(version) 
{ #if (ZSTD_LEGACY_SUPPORT <= 1) case 1 : ZSTDv01_findFrameSizeInfoLegacy(src, srcSize, &frameSizeInfo.compressedSize, &frameSizeInfo.decompressedBound); break; #endif #if (ZSTD_LEGACY_SUPPORT <= 2) case 2 : ZSTDv02_findFrameSizeInfoLegacy(src, srcSize, &frameSizeInfo.compressedSize, &frameSizeInfo.decompressedBound); break; #endif #if (ZSTD_LEGACY_SUPPORT <= 3) case 3 : ZSTDv03_findFrameSizeInfoLegacy(src, srcSize, &frameSizeInfo.compressedSize, &frameSizeInfo.decompressedBound); break; #endif #if (ZSTD_LEGACY_SUPPORT <= 4) case 4 : ZSTDv04_findFrameSizeInfoLegacy(src, srcSize, &frameSizeInfo.compressedSize, &frameSizeInfo.decompressedBound); break; #endif #if (ZSTD_LEGACY_SUPPORT <= 5) case 5 : ZSTDv05_findFrameSizeInfoLegacy(src, srcSize, &frameSizeInfo.compressedSize, &frameSizeInfo.decompressedBound); break; #endif #if (ZSTD_LEGACY_SUPPORT <= 6) case 6 : ZSTDv06_findFrameSizeInfoLegacy(src, srcSize, &frameSizeInfo.compressedSize, &frameSizeInfo.decompressedBound); break; #endif #if (ZSTD_LEGACY_SUPPORT <= 7) case 7 : ZSTDv07_findFrameSizeInfoLegacy(src, srcSize, &frameSizeInfo.compressedSize, &frameSizeInfo.decompressedBound); break; #endif default : frameSizeInfo.compressedSize = ERROR(prefix_unknown); frameSizeInfo.decompressedBound = ZSTD_CONTENTSIZE_ERROR; break; } if (!ZSTD_isError(frameSizeInfo.compressedSize) && frameSizeInfo.compressedSize > srcSize) { frameSizeInfo.compressedSize = ERROR(srcSize_wrong); frameSizeInfo.decompressedBound = ZSTD_CONTENTSIZE_ERROR; } return frameSizeInfo; } MEM_STATIC size_t ZSTD_findFrameCompressedSizeLegacy(const void *src, size_t srcSize) { ZSTD_frameSizeInfo frameSizeInfo = ZSTD_findFrameSizeInfoLegacy(src, srcSize); return frameSizeInfo.compressedSize; } MEM_STATIC size_t ZSTD_freeLegacyStreamContext(void* legacyContext, U32 version) { switch(version) { default : case 1 : case 2 : case 3 : (void)legacyContext; return ERROR(version_unsupported); #if (ZSTD_LEGACY_SUPPORT <= 4) case 4 : return ZBUFFv04_freeDCtx((ZBUFFv04_DCtx*)legacyContext); #endif #if (ZSTD_LEGACY_SUPPORT <= 5) case 5 : return ZBUFFv05_freeDCtx((ZBUFFv05_DCtx*)legacyContext); #endif #if (ZSTD_LEGACY_SUPPORT <= 6) case 6 : return ZBUFFv06_freeDCtx((ZBUFFv06_DCtx*)legacyContext); #endif #if (ZSTD_LEGACY_SUPPORT <= 7) case 7 : return ZBUFFv07_freeDCtx((ZBUFFv07_DCtx*)legacyContext); #endif } } MEM_STATIC size_t ZSTD_initLegacyStream(void** legacyContext, U32 prevVersion, U32 newVersion, const void* dict, size_t dictSize) { DEBUGLOG(5, "ZSTD_initLegacyStream for v0.%u", newVersion); if (prevVersion != newVersion) ZSTD_freeLegacyStreamContext(*legacyContext, prevVersion); switch(newVersion) { default : case 1 : case 2 : case 3 : (void)dict; (void)dictSize; return 0; #if (ZSTD_LEGACY_SUPPORT <= 4) case 4 : { ZBUFFv04_DCtx* dctx = (prevVersion != newVersion) ? ZBUFFv04_createDCtx() : (ZBUFFv04_DCtx*)*legacyContext; if (dctx==NULL) return ERROR(memory_allocation); ZBUFFv04_decompressInit(dctx); ZBUFFv04_decompressWithDictionary(dctx, dict, dictSize); *legacyContext = dctx; return 0; } #endif #if (ZSTD_LEGACY_SUPPORT <= 5) case 5 : { ZBUFFv05_DCtx* dctx = (prevVersion != newVersion) ? ZBUFFv05_createDCtx() : (ZBUFFv05_DCtx*)*legacyContext; if (dctx==NULL) return ERROR(memory_allocation); ZBUFFv05_decompressInitDictionary(dctx, dict, dictSize); *legacyContext = dctx; return 0; } #endif #if (ZSTD_LEGACY_SUPPORT <= 6) case 6 : { ZBUFFv06_DCtx* dctx = (prevVersion != newVersion) ? 
ZBUFFv06_createDCtx() : (ZBUFFv06_DCtx*)*legacyContext; if (dctx==NULL) return ERROR(memory_allocation); ZBUFFv06_decompressInitDictionary(dctx, dict, dictSize); *legacyContext = dctx; return 0; } #endif #if (ZSTD_LEGACY_SUPPORT <= 7) case 7 : { ZBUFFv07_DCtx* dctx = (prevVersion != newVersion) ? ZBUFFv07_createDCtx() : (ZBUFFv07_DCtx*)*legacyContext; if (dctx==NULL) return ERROR(memory_allocation); ZBUFFv07_decompressInitDictionary(dctx, dict, dictSize); *legacyContext = dctx; return 0; } #endif } } MEM_STATIC size_t ZSTD_decompressLegacyStream(void* legacyContext, U32 version, ZSTD_outBuffer* output, ZSTD_inBuffer* input) { DEBUGLOG(5, "ZSTD_decompressLegacyStream for v0.%u", version); switch(version) { default : case 1 : case 2 : case 3 : (void)legacyContext; (void)output; (void)input; return ERROR(version_unsupported); #if (ZSTD_LEGACY_SUPPORT <= 4) case 4 : { ZBUFFv04_DCtx* dctx = (ZBUFFv04_DCtx*) legacyContext; const void* src = (const char*)input->src + input->pos; size_t readSize = input->size - input->pos; void* dst = (char*)output->dst + output->pos; size_t decodedSize = output->size - output->pos; size_t const hintSize = ZBUFFv04_decompressContinue(dctx, dst, &decodedSize, src, &readSize); output->pos += decodedSize; input->pos += readSize; return hintSize; } #endif #if (ZSTD_LEGACY_SUPPORT <= 5) case 5 : { ZBUFFv05_DCtx* dctx = (ZBUFFv05_DCtx*) legacyContext; const void* src = (const char*)input->src + input->pos; size_t readSize = input->size - input->pos; void* dst = (char*)output->dst + output->pos; size_t decodedSize = output->size - output->pos; size_t const hintSize = ZBUFFv05_decompressContinue(dctx, dst, &decodedSize, src, &readSize); output->pos += decodedSize; input->pos += readSize; return hintSize; } #endif #if (ZSTD_LEGACY_SUPPORT <= 6) case 6 : { ZBUFFv06_DCtx* dctx = (ZBUFFv06_DCtx*) legacyContext; const void* src = (const char*)input->src + input->pos; size_t readSize = input->size - input->pos; void* dst = (char*)output->dst + output->pos; size_t decodedSize = output->size - output->pos; size_t const hintSize = ZBUFFv06_decompressContinue(dctx, dst, &decodedSize, src, &readSize); output->pos += decodedSize; input->pos += readSize; return hintSize; } #endif #if (ZSTD_LEGACY_SUPPORT <= 7) case 7 : { ZBUFFv07_DCtx* dctx = (ZBUFFv07_DCtx*) legacyContext; const void* src = (const char*)input->src + input->pos; size_t readSize = input->size - input->pos; void* dst = (char*)output->dst + output->pos; size_t decodedSize = output->size - output->pos; size_t const hintSize = ZBUFFv07_decompressContinue(dctx, dst, &decodedSize, src, &readSize); output->pos += decodedSize; input->pos += readSize; return hintSize; } #endif } } #if defined (__cplusplus) } #endif #endif /* ZSTD_LEGACY_H */ borgbackup-1.1.15/src/borg/algorithms/zstd/lib/legacy/zstd_v01.c0000644000175000017500000021342513771325506024421 0ustar useruser00000000000000/* * Copyright (c) 2016-2020, Yann Collet, Facebook, Inc. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the * LICENSE file in the root directory of this source tree) and the GPLv2 (found * in the COPYING file in the root directory of this source tree). * You may select, at your option, one of the above-listed licenses. 
*/ /****************************** * Includes ******************************/ #include <stddef.h> /* size_t, ptrdiff_t */ #include "zstd_v01.h" #include "../common/error_private.h" /****************************** * Static allocation ******************************/ /* You can statically allocate FSE CTable/DTable as a table of unsigned using below macro */ #define FSE_DTABLE_SIZE_U32(maxTableLog) (1 + (1<<maxTableLog)) /**************************************************************** * Tuning parameters ****************************************************************/ /* FSE_MAX_MEMORY_USAGE : * Maximum memory usage authorized. * Memory usage formula : N->2^N Bytes (examples : 10 -> 1KB; 12 -> 4KB ; 16 -> 64KB; 20 -> 1MB; etc.) * Increasing memory usage improves compression ratio * Reduced memory usage can improve speed, due to cache effect * Recommended max value is 14, for 16KB, which nicely fits into Intel x86 L1 cache */ #define FSE_MAX_MEMORY_USAGE 14 #define FSE_DEFAULT_MEMORY_USAGE 13 /* FSE_MAX_SYMBOL_VALUE : * Maximum symbol value authorized. * Required for proper stack allocation */ #define FSE_MAX_SYMBOL_VALUE 255 /**************************************************************** * template functions type & suffix ****************************************************************/ #define FSE_FUNCTION_TYPE BYTE #define FSE_FUNCTION_EXTENSION /**************************************************************** * Byte symbol type ****************************************************************/ typedef struct { unsigned short newState; unsigned char symbol; unsigned char nbBits; } FSE_decode_t; /* size == U32 */ /**************************************************************** * Compiler specifics ****************************************************************/ #ifdef _MSC_VER /* Visual Studio */ # define FORCE_INLINE static __forceinline # include <intrin.h> /* For Visual 2005 */ # pragma warning(disable : 4127) /* disable: C4127: conditional expression is constant */ # pragma warning(disable : 4214) /* disable: C4214: non-int bitfields */ #else # define GCC_VERSION (__GNUC__ * 100 + __GNUC_MINOR__) # if defined (__cplusplus) || defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L /* C99 */ # ifdef __GNUC__ # define FORCE_INLINE static inline __attribute__((always_inline)) # else # define FORCE_INLINE static inline # endif # else # define FORCE_INLINE static # endif /* __STDC_VERSION__ */ #endif /**************************************************************** * Includes ****************************************************************/ #include <stdlib.h> /* malloc, free, qsort */ #include <string.h> /* memcpy, memset */ #include <stdio.h> /* printf (debug) */ #ifndef MEM_ACCESS_MODULE #define MEM_ACCESS_MODULE /**************************************************************** * Basic Types *****************************************************************/ #if defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L /* C99 */ # include <stdint.h> typedef uint8_t BYTE; typedef uint16_t U16; typedef int16_t S16; typedef uint32_t U32; typedef int32_t S32; typedef uint64_t U64; typedef int64_t S64; #else typedef unsigned char BYTE; typedef unsigned short U16; typedef signed short S16; typedef unsigned int U32; typedef signed int S32; typedef unsigned long long U64; typedef signed long long S64; #endif #endif /* MEM_ACCESS_MODULE */ /**************************************************************** * Memory I/O *****************************************************************/ /* FSE_FORCE_MEMORY_ACCESS * By default, access to unaligned memory is controlled by `memcpy()`, which is safe and portable. * Unfortunately, on some target/compiler combinations, the generated assembly is sub-optimal. 
* The below switch allow to select different access method for improved performance. * Method 0 (default) : use `memcpy()`. Safe and portable. * Method 1 : `__packed` statement. It depends on compiler extension (ie, not portable). * This method is safe if your compiler supports it, and *generally* as fast or faster than `memcpy`. * Method 2 : direct access. This method is portable but violate C standard. * It can generate buggy code on targets generating assembly depending on alignment. * But in some circumstances, it's the only known way to get the most performance (ie GCC + ARMv6) * See http://fastcompression.blogspot.fr/2015/08/accessing-unaligned-memory.html for details. * Prefer these methods in priority order (0 > 1 > 2) */ #ifndef FSE_FORCE_MEMORY_ACCESS /* can be defined externally, on command line for example */ # if defined(__GNUC__) && ( defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) || defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6Z__) || defined(__ARM_ARCH_6ZK__) || defined(__ARM_ARCH_6T2__) ) # define FSE_FORCE_MEMORY_ACCESS 2 # elif (defined(__INTEL_COMPILER) && !defined(WIN32)) || \ (defined(__GNUC__) && ( defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__) || defined(__ARM_ARCH_7R__) || defined(__ARM_ARCH_7M__) || defined(__ARM_ARCH_7S__) )) # define FSE_FORCE_MEMORY_ACCESS 1 # endif #endif static unsigned FSE_32bits(void) { return sizeof(void*)==4; } static unsigned FSE_isLittleEndian(void) { const union { U32 i; BYTE c[4]; } one = { 1 }; /* don't use static : performance detrimental */ return one.c[0]; } #if defined(FSE_FORCE_MEMORY_ACCESS) && (FSE_FORCE_MEMORY_ACCESS==2) static U16 FSE_read16(const void* memPtr) { return *(const U16*) memPtr; } static U32 FSE_read32(const void* memPtr) { return *(const U32*) memPtr; } static U64 FSE_read64(const void* memPtr) { return *(const U64*) memPtr; } #elif defined(FSE_FORCE_MEMORY_ACCESS) && (FSE_FORCE_MEMORY_ACCESS==1) /* __pack instructions are safer, but compiler specific, hence potentially problematic for some compilers */ /* currently only defined for gcc and icc */ typedef union { U16 u16; U32 u32; U64 u64; } __attribute__((packed)) unalign; static U16 FSE_read16(const void* ptr) { return ((const unalign*)ptr)->u16; } static U32 FSE_read32(const void* ptr) { return ((const unalign*)ptr)->u32; } static U64 FSE_read64(const void* ptr) { return ((const unalign*)ptr)->u64; } #else static U16 FSE_read16(const void* memPtr) { U16 val; memcpy(&val, memPtr, sizeof(val)); return val; } static U32 FSE_read32(const void* memPtr) { U32 val; memcpy(&val, memPtr, sizeof(val)); return val; } static U64 FSE_read64(const void* memPtr) { U64 val; memcpy(&val, memPtr, sizeof(val)); return val; } #endif /* FSE_FORCE_MEMORY_ACCESS */ static U16 FSE_readLE16(const void* memPtr) { if (FSE_isLittleEndian()) return FSE_read16(memPtr); else { const BYTE* p = (const BYTE*)memPtr; return (U16)(p[0] + (p[1]<<8)); } } static U32 FSE_readLE32(const void* memPtr) { if (FSE_isLittleEndian()) return FSE_read32(memPtr); else { const BYTE* p = (const BYTE*)memPtr; return (U32)((U32)p[0] + ((U32)p[1]<<8) + ((U32)p[2]<<16) + ((U32)p[3]<<24)); } } static U64 FSE_readLE64(const void* memPtr) { if (FSE_isLittleEndian()) return FSE_read64(memPtr); else { const BYTE* p = (const BYTE*)memPtr; return (U64)((U64)p[0] + ((U64)p[1]<<8) + ((U64)p[2]<<16) + ((U64)p[3]<<24) + ((U64)p[4]<<32) + ((U64)p[5]<<40) + ((U64)p[6]<<48) + ((U64)p[7]<<56)); } } static size_t FSE_readLEST(const void* memPtr) { if (FSE_32bits()) return (size_t)FSE_readLE32(memPtr); else return 
(size_t)FSE_readLE64(memPtr); } /**************************************************************** * Constants *****************************************************************/ #define FSE_MAX_TABLELOG (FSE_MAX_MEMORY_USAGE-2) #define FSE_MAX_TABLESIZE (1U<<FSE_MAX_TABLELOG) #define FSE_MAXTABLESIZE_MASK (FSE_MAX_TABLESIZE-1) #define FSE_DEFAULT_TABLELOG (FSE_DEFAULT_MEMORY_USAGE-2) #define FSE_MIN_TABLELOG 5 #define FSE_TABLELOG_ABSOLUTE_MAX 15 #if FSE_MAX_TABLELOG > FSE_TABLELOG_ABSOLUTE_MAX #error "FSE_MAX_TABLELOG > FSE_TABLELOG_ABSOLUTE_MAX is not supported" #endif /**************************************************************** * Error Management ****************************************************************/ #define FSE_STATIC_ASSERT(c) { enum { FSE_static_assert = 1/(int)(!!(c)) }; } /* use only *after* variable declarations */ /**************************************************************** * Complex types ****************************************************************/ typedef struct { int deltaFindState; U32 deltaNbBits; } FSE_symbolCompressionTransform; /* total 8 bytes */ typedef U32 DTable_max_t[FSE_DTABLE_SIZE_U32(FSE_MAX_TABLELOG)]; /**************************************************************** * Internal functions ****************************************************************/ FORCE_INLINE unsigned FSE_highbit32 (U32 val) { # if defined(_MSC_VER) /* Visual */ unsigned long r; _BitScanReverse ( &r, val ); return (unsigned) r; # elif defined(__GNUC__) && (GCC_VERSION >= 304) /* GCC Intrinsic */ return __builtin_clz (val) ^ 31; # else /* Software version */ static const unsigned DeBruijnClz[32] = { 0, 9, 1, 10, 13, 21, 2, 29, 11, 14, 16, 18, 22, 25, 3, 30, 8, 12, 20, 28, 15, 17, 24, 7, 19, 27, 23, 6, 26, 5, 4, 31 }; U32 v = val; unsigned r; v |= v >> 1; v |= v >> 2; v |= v >> 4; v |= v >> 8; v |= v >> 16; r = DeBruijnClz[ (U32) (v * 0x07C4ACDDU) >> 27]; return r; # endif } /**************************************************************** * Templates ****************************************************************/ /* designed to be included for type-specific functions (template emulation in C) Objective is to write these functions only once, for improved maintenance */ /* safety checks */ #ifndef FSE_FUNCTION_EXTENSION # error "FSE_FUNCTION_EXTENSION must be defined" #endif #ifndef FSE_FUNCTION_TYPE # error "FSE_FUNCTION_TYPE must be defined" #endif /* Function names */ #define FSE_CAT(X,Y) X##Y #define FSE_FUNCTION_NAME(X,Y) FSE_CAT(X,Y) #define FSE_TYPE_NAME(X,Y) FSE_CAT(X,Y) static U32 FSE_tableStep(U32 tableSize) { return (tableSize>>1) + (tableSize>>3) + 3; } #define FSE_DECODE_TYPE FSE_decode_t typedef struct { U16 tableLog; U16 fastMode; } FSE_DTableHeader; /* sizeof U32 */ static size_t FSE_buildDTable (FSE_DTable* dt, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog) { void* ptr = dt; FSE_DTableHeader* const DTableH = (FSE_DTableHeader*)ptr; FSE_DECODE_TYPE* const tableDecode = (FSE_DECODE_TYPE*)(ptr) + 1; /* because dt is unsigned, 32-bits aligned on 32-bits */ const U32 tableSize = 1 << tableLog; const U32 tableMask = tableSize-1; const U32 step = FSE_tableStep(tableSize); U16 symbolNext[FSE_MAX_SYMBOL_VALUE+1]; U32 position = 0; U32 highThreshold = tableSize-1; const S16 largeLimit= (S16)(1 << (tableLog-1)); U32 noLarge = 1; U32 s; /* Sanity Checks */ if (maxSymbolValue > FSE_MAX_SYMBOL_VALUE) return (size_t)-FSE_ERROR_maxSymbolValue_tooLarge; if (tableLog > FSE_MAX_TABLELOG) return (size_t)-FSE_ERROR_tableLog_tooLarge; /* Init, lay down lowprob symbols */ DTableH[0].tableLog = (U16)tableLog; for (s=0; s<=maxSymbolValue; s++) { if (normalizedCounter[s]==-1) { tableDecode[highThreshold--].symbol = (FSE_FUNCTION_TYPE)s; symbolNext[s] = 1; } 
else { if (normalizedCounter[s] >= largeLimit) noLarge=0; symbolNext[s] = normalizedCounter[s]; } } /* Spread symbols */ for (s=0; s<=maxSymbolValue; s++) { int i; for (i=0; i highThreshold) position = (position + step) & tableMask; /* lowprob area */ } } if (position!=0) return (size_t)-FSE_ERROR_GENERIC; /* position must reach all cells once, otherwise normalizedCounter is incorrect */ /* Build Decoding table */ { U32 i; for (i=0; ifastMode = (U16)noLarge; return 0; } /****************************************** * FSE byte symbol ******************************************/ #ifndef FSE_COMMONDEFS_ONLY static unsigned FSE_isError(size_t code) { return (code > (size_t)(-FSE_ERROR_maxCode)); } static short FSE_abs(short a) { return a<0? -a : a; } /**************************************************************** * Header bitstream management ****************************************************************/ static size_t FSE_readNCount (short* normalizedCounter, unsigned* maxSVPtr, unsigned* tableLogPtr, const void* headerBuffer, size_t hbSize) { const BYTE* const istart = (const BYTE*) headerBuffer; const BYTE* const iend = istart + hbSize; const BYTE* ip = istart; int nbBits; int remaining; int threshold; U32 bitStream; int bitCount; unsigned charnum = 0; int previous0 = 0; if (hbSize < 4) return (size_t)-FSE_ERROR_srcSize_wrong; bitStream = FSE_readLE32(ip); nbBits = (bitStream & 0xF) + FSE_MIN_TABLELOG; /* extract tableLog */ if (nbBits > FSE_TABLELOG_ABSOLUTE_MAX) return (size_t)-FSE_ERROR_tableLog_tooLarge; bitStream >>= 4; bitCount = 4; *tableLogPtr = nbBits; remaining = (1<1) && (charnum<=*maxSVPtr)) { if (previous0) { unsigned n0 = charnum; while ((bitStream & 0xFFFF) == 0xFFFF) { n0+=24; if (ip < iend-5) { ip+=2; bitStream = FSE_readLE32(ip) >> bitCount; } else { bitStream >>= 16; bitCount+=16; } } while ((bitStream & 3) == 3) { n0+=3; bitStream>>=2; bitCount+=2; } n0 += bitStream & 3; bitCount += 2; if (n0 > *maxSVPtr) return (size_t)-FSE_ERROR_maxSymbolValue_tooSmall; while (charnum < n0) normalizedCounter[charnum++] = 0; if ((ip <= iend-7) || (ip + (bitCount>>3) <= iend-4)) { ip += bitCount>>3; bitCount &= 7; bitStream = FSE_readLE32(ip) >> bitCount; } else bitStream >>= 2; } { const short max = (short)((2*threshold-1)-remaining); short count; if ((bitStream & (threshold-1)) < (U32)max) { count = (short)(bitStream & (threshold-1)); bitCount += nbBits-1; } else { count = (short)(bitStream & (2*threshold-1)); if (count >= threshold) count -= max; bitCount += nbBits; } count--; /* extra accuracy */ remaining -= FSE_abs(count); normalizedCounter[charnum++] = count; previous0 = !count; while (remaining < threshold) { nbBits--; threshold >>= 1; } { if ((ip <= iend-7) || (ip + (bitCount>>3) <= iend-4)) { ip += bitCount>>3; bitCount &= 7; } else { bitCount -= (int)(8 * (iend - 4 - ip)); ip = iend - 4; } bitStream = FSE_readLE32(ip) >> (bitCount & 31); } } } if (remaining != 1) return (size_t)-FSE_ERROR_GENERIC; *maxSVPtr = charnum-1; ip += (bitCount+7)>>3; if ((size_t)(ip-istart) > hbSize) return (size_t)-FSE_ERROR_srcSize_wrong; return ip-istart; } /********************************************************* * Decompression (Byte symbols) *********************************************************/ static size_t FSE_buildDTable_rle (FSE_DTable* dt, BYTE symbolValue) { void* ptr = dt; FSE_DTableHeader* const DTableH = (FSE_DTableHeader*)ptr; FSE_decode_t* const cell = (FSE_decode_t*)(ptr) + 1; /* because dt is unsigned */ DTableH->tableLog = 0; DTableH->fastMode = 0; cell->newState = 0; 
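/* single-cell table : with nbBits == 0 and newState == 0, every FSE_decodeSymbol() call
   returns symbolValue and never consumes a bit from the stream */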
cell->symbol = symbolValue; cell->nbBits = 0; return 0; } static size_t FSE_buildDTable_raw (FSE_DTable* dt, unsigned nbBits) { void* ptr = dt; FSE_DTableHeader* const DTableH = (FSE_DTableHeader*)ptr; FSE_decode_t* const dinfo = (FSE_decode_t*)(ptr) + 1; /* because dt is unsigned */ const unsigned tableSize = 1 << nbBits; const unsigned tableMask = tableSize - 1; const unsigned maxSymbolValue = tableMask; unsigned s; /* Sanity checks */ if (nbBits < 1) return (size_t)-FSE_ERROR_GENERIC; /* min size */ /* Build Decoding Table */ DTableH->tableLog = (U16)nbBits; DTableH->fastMode = 1; for (s=0; s<=maxSymbolValue; s++) { dinfo[s].newState = 0; dinfo[s].symbol = (BYTE)s; dinfo[s].nbBits = (BYTE)nbBits; } return 0; } /* FSE_initDStream * Initialize a FSE_DStream_t. * srcBuffer must point at the beginning of an FSE block. * The function result is the size of the FSE_block (== srcSize). * If srcSize is too small, the function will return an errorCode; */ static size_t FSE_initDStream(FSE_DStream_t* bitD, const void* srcBuffer, size_t srcSize) { if (srcSize < 1) return (size_t)-FSE_ERROR_srcSize_wrong; if (srcSize >= sizeof(size_t)) { U32 contain32; bitD->start = (const char*)srcBuffer; bitD->ptr = (const char*)srcBuffer + srcSize - sizeof(size_t); bitD->bitContainer = FSE_readLEST(bitD->ptr); contain32 = ((const BYTE*)srcBuffer)[srcSize-1]; if (contain32 == 0) return (size_t)-FSE_ERROR_GENERIC; /* stop bit not present */ bitD->bitsConsumed = 8 - FSE_highbit32(contain32); } else { U32 contain32; bitD->start = (const char*)srcBuffer; bitD->ptr = bitD->start; bitD->bitContainer = *(const BYTE*)(bitD->start); switch(srcSize) { case 7: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[6]) << (sizeof(size_t)*8 - 16); /* fallthrough */ case 6: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[5]) << (sizeof(size_t)*8 - 24); /* fallthrough */ case 5: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[4]) << (sizeof(size_t)*8 - 32); /* fallthrough */ case 4: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[3]) << 24; /* fallthrough */ case 3: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[2]) << 16; /* fallthrough */ case 2: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[1]) << 8; /* fallthrough */ default:; } contain32 = ((const BYTE*)srcBuffer)[srcSize-1]; if (contain32 == 0) return (size_t)-FSE_ERROR_GENERIC; /* stop bit not present */ bitD->bitsConsumed = 8 - FSE_highbit32(contain32); bitD->bitsConsumed += (U32)(sizeof(size_t) - srcSize)*8; } return srcSize; } /*!FSE_lookBits * Provides next n bits from the bitContainer. * bitContainer is not modified (bits are still present for next read/look) * On 32-bits, maxNbBits==25 * On 64-bits, maxNbBits==57 * return : value extracted. */ static size_t FSE_lookBits(FSE_DStream_t* bitD, U32 nbBits) { const U32 bitMask = sizeof(bitD->bitContainer)*8 - 1; return ((bitD->bitContainer << (bitD->bitsConsumed & bitMask)) >> 1) >> ((bitMask-nbBits) & bitMask); } static size_t FSE_lookBitsFast(FSE_DStream_t* bitD, U32 nbBits) /* only if nbBits >= 1 !! */ { const U32 bitMask = sizeof(bitD->bitContainer)*8 - 1; return (bitD->bitContainer << (bitD->bitsConsumed & bitMask)) >> (((bitMask+1)-nbBits) & bitMask); } static void FSE_skipBits(FSE_DStream_t* bitD, U32 nbBits) { bitD->bitsConsumed += nbBits; } /*!FSE_readBits * Read next n bits from the bitContainer. * On 32-bits, don't read more than maxNbBits==25 * On 64-bits, don't read more than maxNbBits==57 * Use the fast variant *only* if n >= 1. 
* return : value extracted. */ static size_t FSE_readBits(FSE_DStream_t* bitD, U32 nbBits) { size_t value = FSE_lookBits(bitD, nbBits); FSE_skipBits(bitD, nbBits); return value; } static size_t FSE_readBitsFast(FSE_DStream_t* bitD, U32 nbBits) /* only if nbBits >= 1 !! */ { size_t value = FSE_lookBitsFast(bitD, nbBits); FSE_skipBits(bitD, nbBits); return value; } static unsigned FSE_reloadDStream(FSE_DStream_t* bitD) { if (bitD->bitsConsumed > (sizeof(bitD->bitContainer)*8)) /* should never happen */ return FSE_DStream_tooFar; if (bitD->ptr >= bitD->start + sizeof(bitD->bitContainer)) { bitD->ptr -= bitD->bitsConsumed >> 3; bitD->bitsConsumed &= 7; bitD->bitContainer = FSE_readLEST(bitD->ptr); return FSE_DStream_unfinished; } if (bitD->ptr == bitD->start) { if (bitD->bitsConsumed < sizeof(bitD->bitContainer)*8) return FSE_DStream_endOfBuffer; return FSE_DStream_completed; } { U32 nbBytes = bitD->bitsConsumed >> 3; U32 result = FSE_DStream_unfinished; if (bitD->ptr - nbBytes < bitD->start) { nbBytes = (U32)(bitD->ptr - bitD->start); /* ptr > start */ result = FSE_DStream_endOfBuffer; } bitD->ptr -= nbBytes; bitD->bitsConsumed -= nbBytes*8; bitD->bitContainer = FSE_readLEST(bitD->ptr); /* reminder : srcSize > sizeof(bitD) */ return result; } } static void FSE_initDState(FSE_DState_t* DStatePtr, FSE_DStream_t* bitD, const FSE_DTable* dt) { const void* ptr = dt; const FSE_DTableHeader* const DTableH = (const FSE_DTableHeader*)ptr; DStatePtr->state = FSE_readBits(bitD, DTableH->tableLog); FSE_reloadDStream(bitD); DStatePtr->table = dt + 1; } static BYTE FSE_decodeSymbol(FSE_DState_t* DStatePtr, FSE_DStream_t* bitD) { const FSE_decode_t DInfo = ((const FSE_decode_t*)(DStatePtr->table))[DStatePtr->state]; const U32 nbBits = DInfo.nbBits; BYTE symbol = DInfo.symbol; size_t lowBits = FSE_readBits(bitD, nbBits); DStatePtr->state = DInfo.newState + lowBits; return symbol; } static BYTE FSE_decodeSymbolFast(FSE_DState_t* DStatePtr, FSE_DStream_t* bitD) { const FSE_decode_t DInfo = ((const FSE_decode_t*)(DStatePtr->table))[DStatePtr->state]; const U32 nbBits = DInfo.nbBits; BYTE symbol = DInfo.symbol; size_t lowBits = FSE_readBitsFast(bitD, nbBits); DStatePtr->state = DInfo.newState + lowBits; return symbol; } /* FSE_endOfDStream Tells if bitD has reached end of bitStream or not */ static unsigned FSE_endOfDStream(const FSE_DStream_t* bitD) { return ((bitD->ptr == bitD->start) && (bitD->bitsConsumed == sizeof(bitD->bitContainer)*8)); } static unsigned FSE_endOfDState(const FSE_DState_t* DStatePtr) { return DStatePtr->state == 0; } FORCE_INLINE size_t FSE_decompress_usingDTable_generic( void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const FSE_DTable* dt, const unsigned fast) { BYTE* const ostart = (BYTE*) dst; BYTE* op = ostart; BYTE* const omax = op + maxDstSize; BYTE* const olimit = omax-3; FSE_DStream_t bitD; FSE_DState_t state1; FSE_DState_t state2; size_t errorCode; /* Init */ errorCode = FSE_initDStream(&bitD, cSrc, cSrcSize); /* replaced last arg by maxCompressed Size */ if (FSE_isError(errorCode)) return errorCode; FSE_initDState(&state1, &bitD, dt); FSE_initDState(&state2, &bitD, dt); #define FSE_GETSYMBOL(statePtr) fast ? 
FSE_decodeSymbolFast(statePtr, &bitD) : FSE_decodeSymbol(statePtr, &bitD) /* 4 symbols per loop */ for ( ; (FSE_reloadDStream(&bitD)==FSE_DStream_unfinished) && (op sizeof(bitD.bitContainer)*8) /* This test must be static */ FSE_reloadDStream(&bitD); op[1] = FSE_GETSYMBOL(&state2); if (FSE_MAX_TABLELOG*4+7 > sizeof(bitD.bitContainer)*8) /* This test must be static */ { if (FSE_reloadDStream(&bitD) > FSE_DStream_unfinished) { op+=2; break; } } op[2] = FSE_GETSYMBOL(&state1); if (FSE_MAX_TABLELOG*2+7 > sizeof(bitD.bitContainer)*8) /* This test must be static */ FSE_reloadDStream(&bitD); op[3] = FSE_GETSYMBOL(&state2); } /* tail */ /* note : FSE_reloadDStream(&bitD) >= FSE_DStream_partiallyFilled; Ends at exactly FSE_DStream_completed */ while (1) { if ( (FSE_reloadDStream(&bitD)>FSE_DStream_completed) || (op==omax) || (FSE_endOfDStream(&bitD) && (fast || FSE_endOfDState(&state1))) ) break; *op++ = FSE_GETSYMBOL(&state1); if ( (FSE_reloadDStream(&bitD)>FSE_DStream_completed) || (op==omax) || (FSE_endOfDStream(&bitD) && (fast || FSE_endOfDState(&state2))) ) break; *op++ = FSE_GETSYMBOL(&state2); } /* end ? */ if (FSE_endOfDStream(&bitD) && FSE_endOfDState(&state1) && FSE_endOfDState(&state2)) return op-ostart; if (op==omax) return (size_t)-FSE_ERROR_dstSize_tooSmall; /* dst buffer is full, but cSrc unfinished */ return (size_t)-FSE_ERROR_corruptionDetected; } static size_t FSE_decompress_usingDTable(void* dst, size_t originalSize, const void* cSrc, size_t cSrcSize, const FSE_DTable* dt) { FSE_DTableHeader DTableH; memcpy(&DTableH, dt, sizeof(DTableH)); /* memcpy() into local variable, to avoid strict aliasing warning */ /* select fast mode (static) */ if (DTableH.fastMode) return FSE_decompress_usingDTable_generic(dst, originalSize, cSrc, cSrcSize, dt, 1); return FSE_decompress_usingDTable_generic(dst, originalSize, cSrc, cSrcSize, dt, 0); } static size_t FSE_decompress(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize) { const BYTE* const istart = (const BYTE*)cSrc; const BYTE* ip = istart; short counting[FSE_MAX_SYMBOL_VALUE+1]; DTable_max_t dt; /* Static analyzer seems unable to understand this table will be properly initialized later */ unsigned tableLog; unsigned maxSymbolValue = FSE_MAX_SYMBOL_VALUE; size_t errorCode; if (cSrcSize<2) return (size_t)-FSE_ERROR_srcSize_wrong; /* too small input size */ /* normal FSE decoding mode */ errorCode = FSE_readNCount (counting, &maxSymbolValue, &tableLog, istart, cSrcSize); if (FSE_isError(errorCode)) return errorCode; if (errorCode >= cSrcSize) return (size_t)-FSE_ERROR_srcSize_wrong; /* too small input size */ ip += errorCode; cSrcSize -= errorCode; errorCode = FSE_buildDTable (dt, counting, maxSymbolValue, tableLog); if (FSE_isError(errorCode)) return errorCode; /* always return, even if it is an error code */ return FSE_decompress_usingDTable (dst, maxDstSize, ip, cSrcSize, dt); } /* ******************************************************* * Huff0 : Huffman block compression *********************************************************/ #define HUF_MAX_SYMBOL_VALUE 255 #define HUF_DEFAULT_TABLELOG 12 /* used by default, when not specified */ #define HUF_MAX_TABLELOG 12 /* max possible tableLog; for allocation purpose; can be modified */ #define HUF_ABSOLUTEMAX_TABLELOG 16 /* absolute limit of HUF_MAX_TABLELOG. Beyond that value, code does not work */ #if (HUF_MAX_TABLELOG > HUF_ABSOLUTEMAX_TABLELOG) # error "HUF_MAX_TABLELOG is too large !" 
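/* HUF_readDTable() below sizes its rank-counting array with HUF_ABSOLUTEMAX_TABLELOG+1 entries
   and rejects any weight >= HUF_ABSOLUTEMAX_TABLELOG, so a larger HUF_MAX_TABLELOG cannot work here */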
#endif typedef struct HUF_CElt_s { U16 val; BYTE nbBits; } HUF_CElt ; typedef struct nodeElt_s { U32 count; U16 parent; BYTE byte; BYTE nbBits; } nodeElt; /* ******************************************************* * Huff0 : Huffman block decompression *********************************************************/ typedef struct { BYTE byte; BYTE nbBits; } HUF_DElt; static size_t HUF_readDTable (U16* DTable, const void* src, size_t srcSize) { BYTE huffWeight[HUF_MAX_SYMBOL_VALUE + 1]; U32 rankVal[HUF_ABSOLUTEMAX_TABLELOG + 1]; /* large enough for values from 0 to 16 */ U32 weightTotal; U32 maxBits; const BYTE* ip = (const BYTE*) src; size_t iSize; size_t oSize; U32 n; U32 nextRankStart; void* ptr = DTable+1; HUF_DElt* const dt = (HUF_DElt*)ptr; if (!srcSize) return (size_t)-FSE_ERROR_srcSize_wrong; iSize = ip[0]; FSE_STATIC_ASSERT(sizeof(HUF_DElt) == sizeof(U16)); /* if compilation fails here, assertion is false */ //memset(huffWeight, 0, sizeof(huffWeight)); /* should not be necessary, but some analyzer complain ... */ if (iSize >= 128) /* special header */ { if (iSize >= (242)) /* RLE */ { static int l[14] = { 1, 2, 3, 4, 7, 8, 15, 16, 31, 32, 63, 64, 127, 128 }; oSize = l[iSize-242]; memset(huffWeight, 1, sizeof(huffWeight)); iSize = 0; } else /* Incompressible */ { oSize = iSize - 127; iSize = ((oSize+1)/2); if (iSize+1 > srcSize) return (size_t)-FSE_ERROR_srcSize_wrong; ip += 1; for (n=0; n> 4; huffWeight[n+1] = ip[n/2] & 15; } } } else /* header compressed with FSE (normal case) */ { if (iSize+1 > srcSize) return (size_t)-FSE_ERROR_srcSize_wrong; oSize = FSE_decompress(huffWeight, HUF_MAX_SYMBOL_VALUE, ip+1, iSize); /* max 255 values decoded, last one is implied */ if (FSE_isError(oSize)) return oSize; } /* collect weight stats */ memset(rankVal, 0, sizeof(rankVal)); weightTotal = 0; for (n=0; n= HUF_ABSOLUTEMAX_TABLELOG) return (size_t)-FSE_ERROR_corruptionDetected; rankVal[huffWeight[n]]++; weightTotal += (1 << huffWeight[n]) >> 1; } if (weightTotal == 0) return (size_t)-FSE_ERROR_corruptionDetected; /* get last non-null symbol weight (implied, total must be 2^n) */ maxBits = FSE_highbit32(weightTotal) + 1; if (maxBits > DTable[0]) return (size_t)-FSE_ERROR_tableLog_tooLarge; /* DTable is too small */ DTable[0] = (U16)maxBits; { U32 total = 1 << maxBits; U32 rest = total - weightTotal; U32 verif = 1 << FSE_highbit32(rest); U32 lastWeight = FSE_highbit32(rest) + 1; if (verif != rest) return (size_t)-FSE_ERROR_corruptionDetected; /* last value must be a clean power of 2 */ huffWeight[oSize] = (BYTE)lastWeight; rankVal[lastWeight]++; } /* check tree construction validity */ if ((rankVal[1] < 2) || (rankVal[1] & 1)) return (size_t)-FSE_ERROR_corruptionDetected; /* by construction : at least 2 elts of rank 1, must be even */ /* Prepare ranks */ nextRankStart = 0; for (n=1; n<=maxBits; n++) { U32 current = nextRankStart; nextRankStart += (rankVal[n] << (n-1)); rankVal[n] = current; } /* fill DTable */ for (n=0; n<=oSize; n++) { const U32 w = huffWeight[n]; const U32 length = (1 << w) >> 1; U32 i; HUF_DElt D; D.byte = (BYTE)n; D.nbBits = (BYTE)(maxBits + 1 - w); for (i = rankVal[w]; i < rankVal[w] + length; i++) dt[i] = D; rankVal[w] += length; } return iSize+1; } static BYTE HUF_decodeSymbol(FSE_DStream_t* Dstream, const HUF_DElt* dt, const U32 dtLog) { const size_t val = FSE_lookBitsFast(Dstream, dtLog); /* note : dtLog >= 1 */ const BYTE c = dt[val].byte; FSE_skipBits(Dstream, dt[val].nbBits); return c; } static size_t HUF_decompress_usingDTable( /* -3% slower when non static */ void* dst, 
size_t maxDstSize, const void* cSrc, size_t cSrcSize, const U16* DTable) { if (cSrcSize < 6) return (size_t)-FSE_ERROR_srcSize_wrong; { BYTE* const ostart = (BYTE*) dst; BYTE* op = ostart; BYTE* const omax = op + maxDstSize; BYTE* const olimit = maxDstSize < 15 ? op : omax-15; const void* ptr = DTable; const HUF_DElt* const dt = (const HUF_DElt*)(ptr)+1; const U32 dtLog = DTable[0]; size_t errorCode; U32 reloadStatus; /* Init */ const U16* jumpTable = (const U16*)cSrc; const size_t length1 = FSE_readLE16(jumpTable); const size_t length2 = FSE_readLE16(jumpTable+1); const size_t length3 = FSE_readLE16(jumpTable+2); const size_t length4 = cSrcSize - 6 - length1 - length2 - length3; /* check coherency !! */ const char* const start1 = (const char*)(cSrc) + 6; const char* const start2 = start1 + length1; const char* const start3 = start2 + length2; const char* const start4 = start3 + length3; FSE_DStream_t bitD1, bitD2, bitD3, bitD4; if (length1+length2+length3+6 >= cSrcSize) return (size_t)-FSE_ERROR_srcSize_wrong; errorCode = FSE_initDStream(&bitD1, start1, length1); if (FSE_isError(errorCode)) return errorCode; errorCode = FSE_initDStream(&bitD2, start2, length2); if (FSE_isError(errorCode)) return errorCode; errorCode = FSE_initDStream(&bitD3, start3, length3); if (FSE_isError(errorCode)) return errorCode; errorCode = FSE_initDStream(&bitD4, start4, length4); if (FSE_isError(errorCode)) return errorCode; reloadStatus=FSE_reloadDStream(&bitD2); /* 16 symbols per loop */ for ( ; (reloadStatus12)) FSE_reloadDStream(&Dstream) #define HUF_DECODE_SYMBOL_2(n, Dstream) \ op[n] = HUF_decodeSymbol(&Dstream, dt, dtLog); \ if (FSE_32bits()) FSE_reloadDStream(&Dstream) HUF_DECODE_SYMBOL_1( 0, bitD1); HUF_DECODE_SYMBOL_1( 1, bitD2); HUF_DECODE_SYMBOL_1( 2, bitD3); HUF_DECODE_SYMBOL_1( 3, bitD4); HUF_DECODE_SYMBOL_2( 4, bitD1); HUF_DECODE_SYMBOL_2( 5, bitD2); HUF_DECODE_SYMBOL_2( 6, bitD3); HUF_DECODE_SYMBOL_2( 7, bitD4); HUF_DECODE_SYMBOL_1( 8, bitD1); HUF_DECODE_SYMBOL_1( 9, bitD2); HUF_DECODE_SYMBOL_1(10, bitD3); HUF_DECODE_SYMBOL_1(11, bitD4); HUF_DECODE_SYMBOL_0(12, bitD1); HUF_DECODE_SYMBOL_0(13, bitD2); HUF_DECODE_SYMBOL_0(14, bitD3); HUF_DECODE_SYMBOL_0(15, bitD4); } if (reloadStatus!=FSE_DStream_completed) /* not complete : some bitStream might be FSE_DStream_unfinished */ return (size_t)-FSE_ERROR_corruptionDetected; /* tail */ { /* bitTail = bitD1; */ /* *much* slower : -20% !??! */ FSE_DStream_t bitTail; bitTail.ptr = bitD1.ptr; bitTail.bitsConsumed = bitD1.bitsConsumed; bitTail.bitContainer = bitD1.bitContainer; /* required in case of FSE_DStream_endOfBuffer */ bitTail.start = start1; for ( ; (FSE_reloadDStream(&bitTail) < FSE_DStream_completed) && (op= cSrcSize) return (size_t)-FSE_ERROR_srcSize_wrong; ip += errorCode; cSrcSize -= errorCode; return HUF_decompress_usingDTable (dst, maxDstSize, ip, cSrcSize, DTable); } #endif /* FSE_COMMONDEFS_ONLY */ /* zstd - standard compression library Copyright (C) 2014-2015, Yann Collet. BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. You can contact the author at : - zstd source repository : https://github.com/Cyan4973/zstd - ztsd public forum : https://groups.google.com/forum/#!forum/lz4c */ /**************************************************************** * Tuning parameters *****************************************************************/ /* MEMORY_USAGE : * Memory usage formula : N->2^N Bytes (examples : 10 -> 1KB; 12 -> 4KB ; 16 -> 64KB; 20 -> 1MB; etc.) * Increasing memory usage improves compression ratio * Reduced memory usage can improve speed, due to cache effect */ #define ZSTD_MEMORY_USAGE 17 /************************************** CPU Feature Detection **************************************/ /* * Automated efficient unaligned memory access detection * Based on known hardware architectures * This list will be updated thanks to feedbacks */ #if defined(CPU_HAS_EFFICIENT_UNALIGNED_MEMORY_ACCESS) \ || defined(__ARM_FEATURE_UNALIGNED) \ || defined(__i386__) || defined(__x86_64__) \ || defined(_M_IX86) || defined(_M_X64) \ || defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_8__) \ || (defined(_M_ARM) && (_M_ARM >= 7)) # define ZSTD_UNALIGNED_ACCESS 1 #else # define ZSTD_UNALIGNED_ACCESS 0 #endif /******************************************************** * Includes *********************************************************/ #include /* calloc */ #include /* memcpy, memmove */ #include /* debug : printf */ /******************************************************** * Compiler specifics *********************************************************/ #ifdef __AVX2__ # include /* AVX2 intrinsics */ #endif #ifdef _MSC_VER /* Visual Studio */ # include /* For Visual 2005 */ # pragma warning(disable : 4127) /* disable: C4127: conditional expression is constant */ # pragma warning(disable : 4324) /* disable: C4324: padded structure */ #endif #ifndef MEM_ACCESS_MODULE #define MEM_ACCESS_MODULE /******************************************************** * Basic Types *********************************************************/ #if defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L /* C99 */ # include typedef uint8_t BYTE; typedef uint16_t U16; typedef int16_t S16; typedef uint32_t U32; typedef int32_t S32; typedef uint64_t U64; #else typedef unsigned char BYTE; typedef unsigned short U16; typedef signed short S16; typedef unsigned int U32; typedef signed int S32; typedef unsigned long long U64; #endif #endif /* MEM_ACCESS_MODULE */ /******************************************************** * Constants *********************************************************/ static const U32 ZSTD_magicNumber = 0xFD2FB51E; /* 3rd version : seqNb header */ #define HASH_LOG (ZSTD_MEMORY_USAGE - 2) #define HASH_TABLESIZE (1 << HASH_LOG) #define HASH_MASK (HASH_TABLESIZE - 1) #define KNUTH 2654435761 #define BIT7 
128 #define BIT6 64 #define BIT5 32 #define BIT4 16 #define KB *(1 <<10) #define MB *(1 <<20) #define GB *(1U<<30) #define BLOCKSIZE (128 KB) /* define, for static allocation */ #define WORKPLACESIZE (BLOCKSIZE*3) #define MINMATCH 4 #define MLbits 7 #define LLbits 6 #define Offbits 5 #define MaxML ((1<>3]; #else U32 hashTable[HASH_TABLESIZE]; #endif BYTE buffer[WORKPLACESIZE]; } cctxi_t; /************************************** * Error Management **************************************/ /* published entry point */ unsigned ZSTDv01_isError(size_t code) { return ERR_isError(code); } /************************************** * Tool functions **************************************/ #define ZSTD_VERSION_MAJOR 0 /* for breaking interface changes */ #define ZSTD_VERSION_MINOR 1 /* for new (non-breaking) interface capabilities */ #define ZSTD_VERSION_RELEASE 3 /* for tweaks, bug-fixes, or development */ #define ZSTD_VERSION_NUMBER (ZSTD_VERSION_MAJOR *100*100 + ZSTD_VERSION_MINOR *100 + ZSTD_VERSION_RELEASE) /************************************************************** * Decompression code **************************************************************/ static size_t ZSTDv01_getcBlockSize(const void* src, size_t srcSize, blockProperties_t* bpPtr) { const BYTE* const in = (const BYTE* const)src; BYTE headerFlags; U32 cSize; if (srcSize < 3) return ERROR(srcSize_wrong); headerFlags = *in; cSize = in[2] + (in[1]<<8) + ((in[0] & 7)<<16); bpPtr->blockType = (blockType_t)(headerFlags >> 6); bpPtr->origSize = (bpPtr->blockType == bt_rle) ? cSize : 0; if (bpPtr->blockType == bt_end) return 0; if (bpPtr->blockType == bt_rle) return 1; return cSize; } static size_t ZSTD_copyUncompressedBlock(void* dst, size_t maxDstSize, const void* src, size_t srcSize) { if (srcSize > maxDstSize) return ERROR(dstSize_tooSmall); if (srcSize > 0) { memcpy(dst, src, srcSize); } return srcSize; } static size_t ZSTD_decompressLiterals(void* ctx, void* dst, size_t maxDstSize, const void* src, size_t srcSize) { BYTE* op = (BYTE*)dst; BYTE* const oend = op + maxDstSize; const BYTE* ip = (const BYTE*)src; size_t errorCode; size_t litSize; /* check : minimum 2, for litSize, +1, for content */ if (srcSize <= 3) return ERROR(corruption_detected); litSize = ip[1] + (ip[0]<<8); litSize += ((ip[-3] >> 3) & 7) << 16; /* mmmmh.... 
*/ op = oend - litSize; (void)ctx; if (litSize > maxDstSize) return ERROR(dstSize_tooSmall); errorCode = HUF_decompress(op, litSize, ip+2, srcSize-2); if (FSE_isError(errorCode)) return ERROR(GENERIC); return litSize; } static size_t ZSTDv01_decodeLiteralsBlock(void* ctx, void* dst, size_t maxDstSize, const BYTE** litStart, size_t* litSize, const void* src, size_t srcSize) { const BYTE* const istart = (const BYTE* const)src; const BYTE* ip = istart; BYTE* const ostart = (BYTE* const)dst; BYTE* const oend = ostart + maxDstSize; blockProperties_t litbp; size_t litcSize = ZSTDv01_getcBlockSize(src, srcSize, &litbp); if (ZSTDv01_isError(litcSize)) return litcSize; if (litcSize > srcSize - ZSTD_blockHeaderSize) return ERROR(srcSize_wrong); ip += ZSTD_blockHeaderSize; switch(litbp.blockType) { case bt_raw: *litStart = ip; ip += litcSize; *litSize = litcSize; break; case bt_rle: { size_t rleSize = litbp.origSize; if (rleSize>maxDstSize) return ERROR(dstSize_tooSmall); if (!srcSize) return ERROR(srcSize_wrong); if (rleSize > 0) { memset(oend - rleSize, *ip, rleSize); } *litStart = oend - rleSize; *litSize = rleSize; ip++; break; } case bt_compressed: { size_t decodedLitSize = ZSTD_decompressLiterals(ctx, dst, maxDstSize, ip, litcSize); if (ZSTDv01_isError(decodedLitSize)) return decodedLitSize; *litStart = oend - decodedLitSize; *litSize = decodedLitSize; ip += litcSize; break; } case bt_end: default: return ERROR(GENERIC); } return ip-istart; } static size_t ZSTDv01_decodeSeqHeaders(int* nbSeq, const BYTE** dumpsPtr, size_t* dumpsLengthPtr, FSE_DTable* DTableLL, FSE_DTable* DTableML, FSE_DTable* DTableOffb, const void* src, size_t srcSize) { const BYTE* const istart = (const BYTE* const)src; const BYTE* ip = istart; const BYTE* const iend = istart + srcSize; U32 LLtype, Offtype, MLtype; U32 LLlog, Offlog, MLlog; size_t dumpsLength; /* check */ if (srcSize < 5) return ERROR(srcSize_wrong); /* SeqHead */ *nbSeq = ZSTD_readLE16(ip); ip+=2; LLtype = *ip >> 6; Offtype = (*ip >> 4) & 3; MLtype = (*ip >> 2) & 3; if (*ip & 2) { dumpsLength = ip[2]; dumpsLength += ip[1] << 8; ip += 3; } else { dumpsLength = ip[1]; dumpsLength += (ip[0] & 1) << 8; ip += 2; } *dumpsPtr = ip; ip += dumpsLength; *dumpsLengthPtr = dumpsLength; /* check */ if (ip > iend-3) return ERROR(srcSize_wrong); /* min : all 3 are "raw", hence no header, but at least xxLog bits per type */ /* sequences */ { S16 norm[MaxML+1]; /* assumption : MaxML >= MaxLL and MaxOff */ size_t headerSize; /* Build DTables */ switch(LLtype) { case bt_rle : LLlog = 0; FSE_buildDTable_rle(DTableLL, *ip++); break; case bt_raw : LLlog = LLbits; FSE_buildDTable_raw(DTableLL, LLbits); break; default : { U32 max = MaxLL; headerSize = FSE_readNCount(norm, &max, &LLlog, ip, iend-ip); if (FSE_isError(headerSize)) return ERROR(GENERIC); if (LLlog > LLFSELog) return ERROR(corruption_detected); ip += headerSize; FSE_buildDTable(DTableLL, norm, max, LLlog); } } switch(Offtype) { case bt_rle : Offlog = 0; if (ip > iend-2) return ERROR(srcSize_wrong); /* min : "raw", hence no header, but at least xxLog bits */ FSE_buildDTable_rle(DTableOffb, *ip++); break; case bt_raw : Offlog = Offbits; FSE_buildDTable_raw(DTableOffb, Offbits); break; default : { U32 max = MaxOff; headerSize = FSE_readNCount(norm, &max, &Offlog, ip, iend-ip); if (FSE_isError(headerSize)) return ERROR(GENERIC); if (Offlog > OffFSELog) return ERROR(corruption_detected); ip += headerSize; FSE_buildDTable(DTableOffb, norm, max, Offlog); } } switch(MLtype) { case bt_rle : MLlog = 0; if (ip > iend-2) return 
ERROR(srcSize_wrong); /* min : "raw", hence no header, but at least xxLog bits */ FSE_buildDTable_rle(DTableML, *ip++); break; case bt_raw : MLlog = MLbits; FSE_buildDTable_raw(DTableML, MLbits); break; default : { U32 max = MaxML; headerSize = FSE_readNCount(norm, &max, &MLlog, ip, iend-ip); if (FSE_isError(headerSize)) return ERROR(GENERIC); if (MLlog > MLFSELog) return ERROR(corruption_detected); ip += headerSize; FSE_buildDTable(DTableML, norm, max, MLlog); } } } return ip-istart; } typedef struct { size_t litLength; size_t offset; size_t matchLength; } seq_t; typedef struct { FSE_DStream_t DStream; FSE_DState_t stateLL; FSE_DState_t stateOffb; FSE_DState_t stateML; size_t prevOffset; const BYTE* dumps; const BYTE* dumpsEnd; } seqState_t; static void ZSTD_decodeSequence(seq_t* seq, seqState_t* seqState) { size_t litLength; size_t prevOffset; size_t offset; size_t matchLength; const BYTE* dumps = seqState->dumps; const BYTE* const de = seqState->dumpsEnd; /* Literal length */ litLength = FSE_decodeSymbol(&(seqState->stateLL), &(seqState->DStream)); prevOffset = litLength ? seq->offset : seqState->prevOffset; seqState->prevOffset = seq->offset; if (litLength == MaxLL) { const U32 add = dumpsstateOffb), &(seqState->DStream)); if (ZSTD_32bits()) FSE_reloadDStream(&(seqState->DStream)); nbBits = offsetCode - 1; if (offsetCode==0) nbBits = 0; /* cmove */ offset = ((size_t)1 << (nbBits & ((sizeof(offset)*8)-1))) + FSE_readBits(&(seqState->DStream), nbBits); if (ZSTD_32bits()) FSE_reloadDStream(&(seqState->DStream)); if (offsetCode==0) offset = prevOffset; } /* MatchLength */ matchLength = FSE_decodeSymbol(&(seqState->stateML), &(seqState->DStream)); if (matchLength == MaxML) { const U32 add = dumpslitLength = litLength; seq->offset = offset; seq->matchLength = matchLength; seqState->dumps = dumps; } static size_t ZSTD_execSequence(BYTE* op, seq_t sequence, const BYTE** litPtr, const BYTE* const litLimit, BYTE* const base, BYTE* const oend) { static const int dec32table[] = {0, 1, 2, 1, 4, 4, 4, 4}; /* added */ static const int dec64table[] = {8, 8, 8, 7, 8, 9,10,11}; /* subtracted */ const BYTE* const ostart = op; const size_t litLength = sequence.litLength; BYTE* const endMatch = op + litLength + sequence.matchLength; /* risk : address space overflow (32-bits) */ const BYTE* const litEnd = *litPtr + litLength; /* check */ if (endMatch > oend) return ERROR(dstSize_tooSmall); /* overwrite beyond dst buffer */ if (litEnd > litLimit) return ERROR(corruption_detected); if (sequence.matchLength > (size_t)(*litPtr-op)) return ERROR(dstSize_tooSmall); /* overwrite literal segment */ /* copy Literals */ if (((size_t)(*litPtr - op) < 8) || ((size_t)(oend-litEnd) < 8) || (op+litLength > oend-8)) memmove(op, *litPtr, litLength); /* overwrite risk */ else ZSTD_wildcopy(op, *litPtr, litLength); op += litLength; *litPtr = litEnd; /* update for next sequence */ /* check : last match must be at a minimum distance of 8 from end of dest buffer */ if (oend-op < 8) return ERROR(dstSize_tooSmall); /* copy Match */ { const U32 overlapRisk = (((size_t)(litEnd - endMatch)) < 12); const BYTE* match = op - sequence.offset; /* possible underflow at op - offset ? 
*/ size_t qutt = 12; U64 saved[2]; /* check */ if (match < base) return ERROR(corruption_detected); if (sequence.offset > (size_t)base) return ERROR(corruption_detected); /* save beginning of literal sequence, in case of write overlap */ if (overlapRisk) { if ((endMatch + qutt) > oend) qutt = oend-endMatch; memcpy(saved, endMatch, qutt); } if (sequence.offset < 8) { const int dec64 = dec64table[sequence.offset]; op[0] = match[0]; op[1] = match[1]; op[2] = match[2]; op[3] = match[3]; match += dec32table[sequence.offset]; ZSTD_copy4(op+4, match); match -= dec64; } else { ZSTD_copy8(op, match); } op += 8; match += 8; if (endMatch > oend-(16-MINMATCH)) { if (op < oend-8) { ZSTD_wildcopy(op, match, (oend-8) - op); match += (oend-8) - op; op = oend-8; } while (opLLTable; U32* DTableML = dctx->MLTable; U32* DTableOffb = dctx->OffTable; BYTE* const base = (BYTE*) (dctx->base); /* Build Decoding Tables */ errorCode = ZSTDv01_decodeSeqHeaders(&nbSeq, &dumps, &dumpsLength, DTableLL, DTableML, DTableOffb, ip, iend-ip); if (ZSTDv01_isError(errorCode)) return errorCode; ip += errorCode; /* Regen sequences */ { seq_t sequence; seqState_t seqState; memset(&sequence, 0, sizeof(sequence)); seqState.dumps = dumps; seqState.dumpsEnd = dumps + dumpsLength; seqState.prevOffset = 1; errorCode = FSE_initDStream(&(seqState.DStream), ip, iend-ip); if (FSE_isError(errorCode)) return ERROR(corruption_detected); FSE_initDState(&(seqState.stateLL), &(seqState.DStream), DTableLL); FSE_initDState(&(seqState.stateOffb), &(seqState.DStream), DTableOffb); FSE_initDState(&(seqState.stateML), &(seqState.DStream), DTableML); for ( ; (FSE_reloadDStream(&(seqState.DStream)) <= FSE_DStream_completed) && (nbSeq>0) ; ) { size_t oneSeqSize; nbSeq--; ZSTD_decodeSequence(&sequence, &seqState); oneSeqSize = ZSTD_execSequence(op, sequence, &litPtr, litEnd, base, oend); if (ZSTDv01_isError(oneSeqSize)) return oneSeqSize; op += oneSeqSize; } /* check if reached exact end */ if ( !FSE_endOfDStream(&(seqState.DStream)) ) return ERROR(corruption_detected); /* requested too much : data is corrupted */ if (nbSeq<0) return ERROR(corruption_detected); /* requested too many sequences : data is corrupted */ /* last literal segment */ { size_t lastLLSize = litEnd - litPtr; if (op+lastLLSize > oend) return ERROR(dstSize_tooSmall); if (lastLLSize > 0) { if (op != litPtr) memmove(op, litPtr, lastLLSize); op += lastLLSize; } } } return op-ostart; } static size_t ZSTD_decompressBlock( void* ctx, void* dst, size_t maxDstSize, const void* src, size_t srcSize) { /* blockType == blockCompressed, srcSize is trusted */ const BYTE* ip = (const BYTE*)src; const BYTE* litPtr = NULL; size_t litSize = 0; size_t errorCode; /* Decode literals sub-block */ errorCode = ZSTDv01_decodeLiteralsBlock(ctx, dst, maxDstSize, &litPtr, &litSize, src, srcSize); if (ZSTDv01_isError(errorCode)) return errorCode; ip += errorCode; srcSize -= errorCode; return ZSTD_decompressSequences(ctx, dst, maxDstSize, ip, srcSize, litPtr, litSize); } size_t ZSTDv01_decompressDCtx(void* ctx, void* dst, size_t maxDstSize, const void* src, size_t srcSize) { const BYTE* ip = (const BYTE*)src; const BYTE* iend = ip + srcSize; BYTE* const ostart = (BYTE* const)dst; BYTE* op = ostart; BYTE* const oend = ostart + maxDstSize; size_t remainingSize = srcSize; U32 magicNumber; size_t errorCode=0; blockProperties_t blockProperties; /* Frame Header */ if (srcSize < ZSTD_frameHeaderSize+ZSTD_blockHeaderSize) return ERROR(srcSize_wrong); magicNumber = ZSTD_readBE32(src); if (magicNumber != ZSTD_magicNumber) 
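/* not a v0.1 frame : the first 4 bytes must hold the big-endian magic number 0xFD2FB51E */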
return ERROR(prefix_unknown); ip += ZSTD_frameHeaderSize; remainingSize -= ZSTD_frameHeaderSize; /* Loop on each block */ while (1) { size_t blockSize = ZSTDv01_getcBlockSize(ip, iend-ip, &blockProperties); if (ZSTDv01_isError(blockSize)) return blockSize; ip += ZSTD_blockHeaderSize; remainingSize -= ZSTD_blockHeaderSize; if (blockSize > remainingSize) return ERROR(srcSize_wrong); switch(blockProperties.blockType) { case bt_compressed: errorCode = ZSTD_decompressBlock(ctx, op, oend-op, ip, blockSize); break; case bt_raw : errorCode = ZSTD_copyUncompressedBlock(op, oend-op, ip, blockSize); break; case bt_rle : return ERROR(GENERIC); /* not yet supported */ break; case bt_end : /* end of frame */ if (remainingSize) return ERROR(srcSize_wrong); break; default: return ERROR(GENERIC); } if (blockSize == 0) break; /* bt_end */ if (ZSTDv01_isError(errorCode)) return errorCode; op += errorCode; ip += blockSize; remainingSize -= blockSize; } return op-ostart; } size_t ZSTDv01_decompress(void* dst, size_t maxDstSize, const void* src, size_t srcSize) { dctx_t ctx; ctx.base = dst; return ZSTDv01_decompressDCtx(&ctx, dst, maxDstSize, src, srcSize); } /* ZSTD_errorFrameSizeInfoLegacy() : assumes `cSize` and `dBound` are _not_ NULL */ static void ZSTD_errorFrameSizeInfoLegacy(size_t* cSize, unsigned long long* dBound, size_t ret) { *cSize = ret; *dBound = ZSTD_CONTENTSIZE_ERROR; } void ZSTDv01_findFrameSizeInfoLegacy(const void *src, size_t srcSize, size_t* cSize, unsigned long long* dBound) { const BYTE* ip = (const BYTE*)src; size_t remainingSize = srcSize; size_t nbBlocks = 0; U32 magicNumber; blockProperties_t blockProperties; /* Frame Header */ if (srcSize < ZSTD_frameHeaderSize+ZSTD_blockHeaderSize) { ZSTD_errorFrameSizeInfoLegacy(cSize, dBound, ERROR(srcSize_wrong)); return; } magicNumber = ZSTD_readBE32(src); if (magicNumber != ZSTD_magicNumber) { ZSTD_errorFrameSizeInfoLegacy(cSize, dBound, ERROR(prefix_unknown)); return; } ip += ZSTD_frameHeaderSize; remainingSize -= ZSTD_frameHeaderSize; /* Loop on each block */ while (1) { size_t blockSize = ZSTDv01_getcBlockSize(ip, remainingSize, &blockProperties); if (ZSTDv01_isError(blockSize)) { ZSTD_errorFrameSizeInfoLegacy(cSize, dBound, blockSize); return; } ip += ZSTD_blockHeaderSize; remainingSize -= ZSTD_blockHeaderSize; if (blockSize > remainingSize) { ZSTD_errorFrameSizeInfoLegacy(cSize, dBound, ERROR(srcSize_wrong)); return; } if (blockSize == 0) break; /* bt_end */ ip += blockSize; remainingSize -= blockSize; nbBlocks++; } *cSize = ip - (const BYTE*)src; *dBound = nbBlocks * BLOCKSIZE; } /******************************* * Streaming Decompression API *******************************/ size_t ZSTDv01_resetDCtx(ZSTDv01_Dctx* dctx) { dctx->expected = ZSTD_frameHeaderSize; dctx->phase = 0; dctx->previousDstEnd = NULL; dctx->base = NULL; return 0; } ZSTDv01_Dctx* ZSTDv01_createDCtx(void) { ZSTDv01_Dctx* dctx = (ZSTDv01_Dctx*)malloc(sizeof(ZSTDv01_Dctx)); if (dctx==NULL) return NULL; ZSTDv01_resetDCtx(dctx); return dctx; } size_t ZSTDv01_freeDCtx(ZSTDv01_Dctx* dctx) { free(dctx); return 0; } size_t ZSTDv01_nextSrcSizeToDecompress(ZSTDv01_Dctx* dctx) { return ((dctx_t*)dctx)->expected; } size_t ZSTDv01_decompressContinue(ZSTDv01_Dctx* dctx, void* dst, size_t maxDstSize, const void* src, size_t srcSize) { dctx_t* ctx = (dctx_t*)dctx; /* Sanity check */ if (srcSize != ctx->expected) return ERROR(srcSize_wrong); if (dst != ctx->previousDstEnd) /* not contiguous */ ctx->base = dst; /* Decompress : frame header */ if (ctx->phase == 0) { /* Check frame magic 
header */ U32 magicNumber = ZSTD_readBE32(src); if (magicNumber != ZSTD_magicNumber) return ERROR(prefix_unknown); ctx->phase = 1; ctx->expected = ZSTD_blockHeaderSize; return 0; } /* Decompress : block header */ if (ctx->phase == 1) { blockProperties_t bp; size_t blockSize = ZSTDv01_getcBlockSize(src, ZSTD_blockHeaderSize, &bp); if (ZSTDv01_isError(blockSize)) return blockSize; if (bp.blockType == bt_end) { ctx->expected = 0; ctx->phase = 0; } else { ctx->expected = blockSize; ctx->bType = bp.blockType; ctx->phase = 2; } return 0; } /* Decompress : block content */ { size_t rSize; switch(ctx->bType) { case bt_compressed: rSize = ZSTD_decompressBlock(ctx, dst, maxDstSize, src, srcSize); break; case bt_raw : rSize = ZSTD_copyUncompressedBlock(dst, maxDstSize, src, srcSize); break; case bt_rle : return ERROR(GENERIC); /* not yet handled */ break; case bt_end : /* should never happen (filtered at phase 1) */ rSize = 0; break; default: return ERROR(GENERIC); } ctx->phase = 1; ctx->expected = ZSTD_blockHeaderSize; ctx->previousDstEnd = (void*)( ((char*)dst) + rSize); return rSize; } } borgbackup-1.1.15/src/borg/algorithms/zstd/lib/legacy/zstd_v03.h0000644000175000017500000000721513771325506024426 0ustar useruser00000000000000/* * Copyright (c) 2016-2020, Yann Collet, Facebook, Inc. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the * LICENSE file in the root directory of this source tree) and the GPLv2 (found * in the COPYING file in the root directory of this source tree). * You may select, at your option, one of the above-listed licenses. */ #ifndef ZSTD_V03_H_298734209782 #define ZSTD_V03_H_298734209782 #if defined (__cplusplus) extern "C" { #endif /* ************************************* * Includes ***************************************/ #include /* size_t */ /* ************************************* * Simple one-step function ***************************************/ /** ZSTDv03_decompress() : decompress ZSTD frames compliant with v0.3.x format compressedSize : is the exact source size maxOriginalSize : is the size of the 'dst' buffer, which must be already allocated. It must be equal or larger than originalSize, otherwise decompression will fail. return : the number of bytes decompressed into destination buffer (originalSize) or an errorCode if it fails (which can be tested using ZSTDv01_isError()) */ size_t ZSTDv03_decompress( void* dst, size_t maxOriginalSize, const void* src, size_t compressedSize); /** ZSTDv03_findFrameSizeInfoLegacy() : get the source length and decompressed bound of a ZSTD frame compliant with v0.3.x format srcSize : The size of the 'src' buffer, at least as large as the frame pointed to by 'src' cSize (output parameter) : the number of bytes that would be read to decompress this frame or an error code if it fails (which can be tested using ZSTDv01_isError()) dBound (output parameter) : an upper-bound for the decompressed size of the data in the frame or ZSTD_CONTENTSIZE_ERROR if an error occurs note : assumes `cSize` and `dBound` are _not_ NULL. 
*/ void ZSTDv03_findFrameSizeInfoLegacy(const void *src, size_t srcSize, size_t* cSize, unsigned long long* dBound); /** ZSTDv03_isError() : tells if the result of ZSTDv03_decompress() is an error */ unsigned ZSTDv03_isError(size_t code); /* ************************************* * Advanced functions ***************************************/ typedef struct ZSTDv03_Dctx_s ZSTDv03_Dctx; ZSTDv03_Dctx* ZSTDv03_createDCtx(void); size_t ZSTDv03_freeDCtx(ZSTDv03_Dctx* dctx); size_t ZSTDv03_decompressDCtx(void* ctx, void* dst, size_t maxOriginalSize, const void* src, size_t compressedSize); /* ************************************* * Streaming functions ***************************************/ size_t ZSTDv03_resetDCtx(ZSTDv03_Dctx* dctx); size_t ZSTDv03_nextSrcSizeToDecompress(ZSTDv03_Dctx* dctx); size_t ZSTDv03_decompressContinue(ZSTDv03_Dctx* dctx, void* dst, size_t maxDstSize, const void* src, size_t srcSize); /** Use above functions alternatively. ZSTD_nextSrcSizeToDecompress() tells how much bytes to provide as 'srcSize' to ZSTD_decompressContinue(). ZSTD_decompressContinue() will use previous data blocks to improve compression if they are located prior to current block. Result is the number of bytes regenerated within 'dst'. It can be zero, which is not an error; it just means ZSTD_decompressContinue() has decoded some header. */ /* ************************************* * Prefix - version detection ***************************************/ #define ZSTDv03_magicNumber 0xFD2FB523 /* v0.3 */ #if defined (__cplusplus) } #endif #endif /* ZSTD_V03_H_298734209782 */ borgbackup-1.1.15/src/borg/algorithms/zstd/lib/legacy/zstd_v07.c0000644000175000017500000055154613771325506024440 0ustar useruser00000000000000/* * Copyright (c) 2016-2020, Yann Collet, Facebook, Inc. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the * LICENSE file in the root directory of this source tree) and the GPLv2 (found * in the COPYING file in the root directory of this source tree). * You may select, at your option, one of the above-listed licenses. */ /*- Dependencies -*/ #include /* size_t, ptrdiff_t */ #include /* memcpy */ #include /* malloc, free, qsort */ #ifndef XXH_STATIC_LINKING_ONLY # define XXH_STATIC_LINKING_ONLY /* XXH64_state_t */ #endif #include "../common/xxhash.h" /* XXH64_* */ #include "zstd_v07.h" #define FSEv07_STATIC_LINKING_ONLY /* FSEv07_MIN_TABLELOG */ #define HUFv07_STATIC_LINKING_ONLY /* HUFv07_TABLELOG_ABSOLUTEMAX */ #define ZSTDv07_STATIC_LINKING_ONLY #include "../common/error_private.h" #ifdef ZSTDv07_STATIC_LINKING_ONLY /* ==================================================================================== * The definitions in this section are considered experimental. * They should never be used with a dynamic library, as they may change in the future. * They are provided for advanced usages. * Use them only in association with static linking. * ==================================================================================== */ /*--- Constants ---*/ #define ZSTDv07_MAGIC_SKIPPABLE_START 0x184D2A50U #define ZSTDv07_WINDOWLOG_MAX_32 25 #define ZSTDv07_WINDOWLOG_MAX_64 27 #define ZSTDv07_WINDOWLOG_MAX ((U32)(MEM_32bits() ? 
ZSTDv07_WINDOWLOG_MAX_32 : ZSTDv07_WINDOWLOG_MAX_64)) #define ZSTDv07_WINDOWLOG_MIN 18 #define ZSTDv07_CHAINLOG_MAX (ZSTDv07_WINDOWLOG_MAX+1) #define ZSTDv07_CHAINLOG_MIN 4 #define ZSTDv07_HASHLOG_MAX ZSTDv07_WINDOWLOG_MAX #define ZSTDv07_HASHLOG_MIN 12 #define ZSTDv07_HASHLOG3_MAX 17 #define ZSTDv07_SEARCHLOG_MAX (ZSTDv07_WINDOWLOG_MAX-1) #define ZSTDv07_SEARCHLOG_MIN 1 #define ZSTDv07_SEARCHLENGTH_MAX 7 #define ZSTDv07_SEARCHLENGTH_MIN 3 #define ZSTDv07_TARGETLENGTH_MIN 4 #define ZSTDv07_TARGETLENGTH_MAX 999 #define ZSTDv07_FRAMEHEADERSIZE_MAX 18 /* for static allocation */ static const size_t ZSTDv07_frameHeaderSize_min = 5; static const size_t ZSTDv07_frameHeaderSize_max = ZSTDv07_FRAMEHEADERSIZE_MAX; static const size_t ZSTDv07_skippableHeaderSize = 8; /* magic number + skippable frame length */ /* custom memory allocation functions */ typedef void* (*ZSTDv07_allocFunction) (void* opaque, size_t size); typedef void (*ZSTDv07_freeFunction) (void* opaque, void* address); typedef struct { ZSTDv07_allocFunction customAlloc; ZSTDv07_freeFunction customFree; void* opaque; } ZSTDv07_customMem; /*--- Advanced Decompression functions ---*/ /*! ZSTDv07_estimateDCtxSize() : * Gives the potential amount of memory allocated to create a ZSTDv07_DCtx */ ZSTDLIBv07_API size_t ZSTDv07_estimateDCtxSize(void); /*! ZSTDv07_createDCtx_advanced() : * Create a ZSTD decompression context using external alloc and free functions */ ZSTDLIBv07_API ZSTDv07_DCtx* ZSTDv07_createDCtx_advanced(ZSTDv07_customMem customMem); /*! ZSTDv07_sizeofDCtx() : * Gives the amount of memory used by a given ZSTDv07_DCtx */ ZSTDLIBv07_API size_t ZSTDv07_sizeofDCtx(const ZSTDv07_DCtx* dctx); /* ****************************************************************** * Buffer-less streaming functions (synchronous mode) ********************************************************************/ ZSTDLIBv07_API size_t ZSTDv07_decompressBegin(ZSTDv07_DCtx* dctx); ZSTDLIBv07_API size_t ZSTDv07_decompressBegin_usingDict(ZSTDv07_DCtx* dctx, const void* dict, size_t dictSize); ZSTDLIBv07_API void ZSTDv07_copyDCtx(ZSTDv07_DCtx* dctx, const ZSTDv07_DCtx* preparedDCtx); ZSTDLIBv07_API size_t ZSTDv07_nextSrcSizeToDecompress(ZSTDv07_DCtx* dctx); ZSTDLIBv07_API size_t ZSTDv07_decompressContinue(ZSTDv07_DCtx* dctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize); /* Buffer-less streaming decompression (synchronous mode) A ZSTDv07_DCtx object is required to track streaming operations. Use ZSTDv07_createDCtx() / ZSTDv07_freeDCtx() to manage it. A ZSTDv07_DCtx object can be re-used multiple times. First optional operation is to retrieve frame parameters, using ZSTDv07_getFrameParams(), which doesn't consume the input. It can provide the minimum size of rolling buffer required to properly decompress data (`windowSize`), and optionally the final size of uncompressed content. (Note : content size is an optional info that may not be present. 0 means : content size unknown) Frame parameters are extracted from the beginning of compressed frame. The amount of data to read is variable, from ZSTDv07_frameHeaderSize_min to ZSTDv07_frameHeaderSize_max (so if `srcSize` >= ZSTDv07_frameHeaderSize_max, it will always work) If `srcSize` is too small for operation to succeed, function will return the minimum size it requires to produce a result. Result : 0 when successful, it means the ZSTDv07_frameParams structure has been filled. >0 : means there is not enough data into `src`. Provides the expected size to successfully decode header. 
errorCode, which can be tested using ZSTDv07_isError() Start decompression, with ZSTDv07_decompressBegin() or ZSTDv07_decompressBegin_usingDict(). Alternatively, you can copy a prepared context, using ZSTDv07_copyDCtx(). Then use ZSTDv07_nextSrcSizeToDecompress() and ZSTDv07_decompressContinue() alternatively. ZSTDv07_nextSrcSizeToDecompress() tells how much bytes to provide as 'srcSize' to ZSTDv07_decompressContinue(). ZSTDv07_decompressContinue() requires this exact amount of bytes, or it will fail. @result of ZSTDv07_decompressContinue() is the number of bytes regenerated within 'dst' (necessarily <= dstCapacity). It can be zero, which is not an error; it just means ZSTDv07_decompressContinue() has decoded some header. ZSTDv07_decompressContinue() needs previous data blocks during decompression, up to `windowSize`. They should preferably be located contiguously, prior to current block. Alternatively, a round buffer of sufficient size is also possible. Sufficient size is determined by frame parameters. ZSTDv07_decompressContinue() is very sensitive to contiguity, if 2 blocks don't follow each other, make sure that either the compressor breaks contiguity at the same place, or that previous contiguous segment is large enough to properly handle maximum back-reference. A frame is fully decoded when ZSTDv07_nextSrcSizeToDecompress() returns zero. Context can then be reset to start a new decompression. == Special case : skippable frames == Skippable frames allow the integration of user-defined data into a flow of concatenated frames. Skippable frames will be ignored (skipped) by a decompressor. The format of skippable frame is following: a) Skippable frame ID - 4 Bytes, Little endian format, any value from 0x184D2A50 to 0x184D2A5F b) Frame Size - 4 Bytes, Little endian format, unsigned 32-bits c) Frame Content - any content (User Data) of length equal to Frame Size For skippable frames ZSTDv07_decompressContinue() always returns 0. For skippable frames ZSTDv07_getFrameParams() returns fparamsPtr->windowLog==0 what means that a frame is skippable. It also returns Frame Size as fparamsPtr->frameContentSize. */ /* ************************************** * Block functions ****************************************/ /*! Block functions produce and decode raw zstd blocks, without frame metadata. Frame metadata cost is typically ~18 bytes, which can be non-negligible for very small blocks (< 100 bytes). User will have to take in charge required information to regenerate data, such as compressed and content sizes. A few rules to respect : - Compressing and decompressing require a context structure + Use ZSTDv07_createCCtx() and ZSTDv07_createDCtx() - It is necessary to init context before starting + compression : ZSTDv07_compressBegin() + decompression : ZSTDv07_decompressBegin() + variants _usingDict() are also allowed + copyCCtx() and copyDCtx() work too - Block size is limited, it must be <= ZSTDv07_getBlockSizeMax() + If you need to compress more, cut data into multiple blocks + Consider using the regular ZSTDv07_compress() instead, as frame metadata costs become negligible when source size is large. - When a block is considered not compressible enough, ZSTDv07_compressBlock() result will be zero. In which case, nothing is produced into `dst`. + User must test for such outcome and deal directly with uncompressed data + ZSTDv07_decompressBlock() doesn't accept uncompressed data as input !!! 
+ In case of multiple successive blocks, decoder must be informed of uncompressed block existence to follow proper history. Use ZSTDv07_insertBlock() in such a case. */ #define ZSTDv07_BLOCKSIZE_ABSOLUTEMAX (128 * 1024) /* define, for static allocation */ ZSTDLIBv07_API size_t ZSTDv07_decompressBlock(ZSTDv07_DCtx* dctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize); ZSTDLIBv07_API size_t ZSTDv07_insertBlock(ZSTDv07_DCtx* dctx, const void* blockStart, size_t blockSize); /**< insert block into `dctx` history. Useful for uncompressed blocks */ #endif /* ZSTDv07_STATIC_LINKING_ONLY */ /* ****************************************************************** mem.h low-level memory access routines Copyright (C) 2013-2015, Yann Collet. BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
You can contact the author at : - FSE source repository : https://github.com/Cyan4973/FiniteStateEntropy - Public forum : https://groups.google.com/forum/#!forum/lz4c ****************************************************************** */ #ifndef MEM_H_MODULE #define MEM_H_MODULE #if defined (__cplusplus) extern "C" { #endif /*-**************************************** * Compiler specifics ******************************************/ #if defined(_MSC_VER) /* Visual Studio */ # include /* _byteswap_ulong */ # include /* _byteswap_* */ #endif #if defined(__GNUC__) # define MEM_STATIC static __attribute__((unused)) #elif defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) # define MEM_STATIC static inline #elif defined(_MSC_VER) # define MEM_STATIC static __inline #else # define MEM_STATIC static /* this version may generate warnings for unused static functions; disable the relevant warning */ #endif /*-************************************************************** * Basic Types *****************************************************************/ #if !defined (__VMS) && (defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) ) # include typedef uint8_t BYTE; typedef uint16_t U16; typedef int16_t S16; typedef uint32_t U32; typedef int32_t S32; typedef uint64_t U64; typedef int64_t S64; #else typedef unsigned char BYTE; typedef unsigned short U16; typedef signed short S16; typedef unsigned int U32; typedef signed int S32; typedef unsigned long long U64; typedef signed long long S64; #endif /*-************************************************************** * Memory I/O *****************************************************************/ /* MEM_FORCE_MEMORY_ACCESS : * By default, access to unaligned memory is controlled by `memcpy()`, which is safe and portable. * Unfortunately, on some target/compiler combinations, the generated assembly is sub-optimal. * The below switch allow to select different access method for improved performance. * Method 0 (default) : use `memcpy()`. Safe and portable. * Method 1 : `__packed` statement. It depends on compiler extension (ie, not portable). * This method is safe if your compiler supports it, and *generally* as fast or faster than `memcpy`. * Method 2 : direct access. This method is portable but violate C standard. * It can generate buggy code on targets depending on alignment. * In some circumstances, it's the only known way to get the most performance (ie GCC + ARMv6) * See http://fastcompression.blogspot.fr/2015/08/accessing-unaligned-memory.html for details. 
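 * (method 0 relies on the compiler recognizing the fixed-size memcpy() and turning it
 *  into a single load/store, which current gcc/clang and MSVC generally do)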
* Prefer these methods in priority order (0 > 1 > 2) */ #ifndef MEM_FORCE_MEMORY_ACCESS /* can be defined externally, on command line for example */ # if defined(__GNUC__) && ( defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) || defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6Z__) || defined(__ARM_ARCH_6ZK__) || defined(__ARM_ARCH_6T2__) ) # define MEM_FORCE_MEMORY_ACCESS 2 # elif (defined(__INTEL_COMPILER) && !defined(WIN32)) || \ (defined(__GNUC__) && ( defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__) || defined(__ARM_ARCH_7R__) || defined(__ARM_ARCH_7M__) || defined(__ARM_ARCH_7S__) )) # define MEM_FORCE_MEMORY_ACCESS 1 # endif #endif MEM_STATIC unsigned MEM_32bits(void) { return sizeof(size_t)==4; } MEM_STATIC unsigned MEM_64bits(void) { return sizeof(size_t)==8; } MEM_STATIC unsigned MEM_isLittleEndian(void) { const union { U32 u; BYTE c[4]; } one = { 1 }; /* don't use static : performance detrimental */ return one.c[0]; } #if defined(MEM_FORCE_MEMORY_ACCESS) && (MEM_FORCE_MEMORY_ACCESS==2) /* violates C standard, by lying on structure alignment. Only use if no other choice to achieve best performance on target platform */ MEM_STATIC U16 MEM_read16(const void* memPtr) { return *(const U16*) memPtr; } MEM_STATIC U32 MEM_read32(const void* memPtr) { return *(const U32*) memPtr; } MEM_STATIC U64 MEM_read64(const void* memPtr) { return *(const U64*) memPtr; } MEM_STATIC void MEM_write16(void* memPtr, U16 value) { *(U16*)memPtr = value; } #elif defined(MEM_FORCE_MEMORY_ACCESS) && (MEM_FORCE_MEMORY_ACCESS==1) /* __pack instructions are safer, but compiler specific, hence potentially problematic for some compilers */ /* currently only defined for gcc and icc */ typedef union { U16 u16; U32 u32; U64 u64; size_t st; } __attribute__((packed)) unalign; MEM_STATIC U16 MEM_read16(const void* ptr) { return ((const unalign*)ptr)->u16; } MEM_STATIC U32 MEM_read32(const void* ptr) { return ((const unalign*)ptr)->u32; } MEM_STATIC U64 MEM_read64(const void* ptr) { return ((const unalign*)ptr)->u64; } MEM_STATIC void MEM_write16(void* memPtr, U16 value) { ((unalign*)memPtr)->u16 = value; } #else /* default method, safe and standard. 
can sometimes prove slower */ MEM_STATIC U16 MEM_read16(const void* memPtr) { U16 val; memcpy(&val, memPtr, sizeof(val)); return val; } MEM_STATIC U32 MEM_read32(const void* memPtr) { U32 val; memcpy(&val, memPtr, sizeof(val)); return val; } MEM_STATIC U64 MEM_read64(const void* memPtr) { U64 val; memcpy(&val, memPtr, sizeof(val)); return val; } MEM_STATIC void MEM_write16(void* memPtr, U16 value) { memcpy(memPtr, &value, sizeof(value)); } #endif /* MEM_FORCE_MEMORY_ACCESS */ MEM_STATIC U32 MEM_swap32(U32 in) { #if defined(_MSC_VER) /* Visual Studio */ return _byteswap_ulong(in); #elif defined (__GNUC__) && (__GNUC__ * 100 + __GNUC_MINOR__ >= 403) return __builtin_bswap32(in); #else return ((in << 24) & 0xff000000 ) | ((in << 8) & 0x00ff0000 ) | ((in >> 8) & 0x0000ff00 ) | ((in >> 24) & 0x000000ff ); #endif } MEM_STATIC U64 MEM_swap64(U64 in) { #if defined(_MSC_VER) /* Visual Studio */ return _byteswap_uint64(in); #elif defined (__GNUC__) && (__GNUC__ * 100 + __GNUC_MINOR__ >= 403) return __builtin_bswap64(in); #else return ((in << 56) & 0xff00000000000000ULL) | ((in << 40) & 0x00ff000000000000ULL) | ((in << 24) & 0x0000ff0000000000ULL) | ((in << 8) & 0x000000ff00000000ULL) | ((in >> 8) & 0x00000000ff000000ULL) | ((in >> 24) & 0x0000000000ff0000ULL) | ((in >> 40) & 0x000000000000ff00ULL) | ((in >> 56) & 0x00000000000000ffULL); #endif } /*=== Little endian r/w ===*/ MEM_STATIC U16 MEM_readLE16(const void* memPtr) { if (MEM_isLittleEndian()) return MEM_read16(memPtr); else { const BYTE* p = (const BYTE*)memPtr; return (U16)(p[0] + (p[1]<<8)); } } MEM_STATIC void MEM_writeLE16(void* memPtr, U16 val) { if (MEM_isLittleEndian()) { MEM_write16(memPtr, val); } else { BYTE* p = (BYTE*)memPtr; p[0] = (BYTE)val; p[1] = (BYTE)(val>>8); } } MEM_STATIC U32 MEM_readLE32(const void* memPtr) { if (MEM_isLittleEndian()) return MEM_read32(memPtr); else return MEM_swap32(MEM_read32(memPtr)); } MEM_STATIC U64 MEM_readLE64(const void* memPtr) { if (MEM_isLittleEndian()) return MEM_read64(memPtr); else return MEM_swap64(MEM_read64(memPtr)); } MEM_STATIC size_t MEM_readLEST(const void* memPtr) { if (MEM_32bits()) return (size_t)MEM_readLE32(memPtr); else return (size_t)MEM_readLE64(memPtr); } #if defined (__cplusplus) } #endif #endif /* MEM_H_MODULE */ /* ****************************************************************** bitstream Part of FSE library header file (to include) Copyright (C) 2013-2016, Yann Collet. BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

You can contact the author at :
- Source repository : https://github.com/Cyan4973/FiniteStateEntropy
****************************************************************** */
#ifndef BITSTREAM_H_MODULE
#define BITSTREAM_H_MODULE

#if defined (__cplusplus)
extern "C" {
#endif

/*
*  This API consists of small unitary functions, which must be inlined for best performance.
*  Since link-time-optimization is not available for all compilers,
*  these functions are defined into a .h to be included.
*/

/*=========================================
*  Target specific
=========================================*/
#if defined(__BMI__) && defined(__GNUC__)
#  include <immintrin.h>   /* support for bextr (experimental) */
#endif

/*-********************************************
*  bitStream decoding API (read backward)
**********************************************/
typedef struct {
    size_t   bitContainer;
    unsigned bitsConsumed;
    const char* ptr;
    const char* start;
} BITv07_DStream_t;

typedef enum { BITv07_DStream_unfinished = 0,
               BITv07_DStream_endOfBuffer = 1,
               BITv07_DStream_completed = 2,
               BITv07_DStream_overflow = 3 } BITv07_DStream_status;  /* result of BITv07_reloadDStream() */
               /* 1,2,4,8 would be better for bitmap combinations, but slows down performance a bit ... :( */

MEM_STATIC size_t   BITv07_initDStream(BITv07_DStream_t* bitD, const void* srcBuffer, size_t srcSize);
MEM_STATIC size_t   BITv07_readBits(BITv07_DStream_t* bitD, unsigned nbBits);
MEM_STATIC BITv07_DStream_status BITv07_reloadDStream(BITv07_DStream_t* bitD);
MEM_STATIC unsigned BITv07_endOfDStream(const BITv07_DStream_t* bitD);

/*-****************************************
*  unsafe API
******************************************/
MEM_STATIC size_t BITv07_readBitsFast(BITv07_DStream_t* bitD, unsigned nbBits);
/* faster, but works only if nbBits >= 1 */

/*-**************************************************************
*  Internal functions
****************************************************************/
MEM_STATIC unsigned BITv07_highbit32 (U32 val)
{
#   if defined(_MSC_VER)   /* Visual */
    unsigned long r=0;
    _BitScanReverse ( &r, val );
    return (unsigned) r;
#   elif defined(__GNUC__) && (__GNUC__ >= 3)   /* Use GCC Intrinsic */
    return __builtin_clz (val) ^ 31;
#   else   /* Software version */
    static const unsigned DeBruijnClz[32] = { 0, 9, 1, 10, 13, 21, 2, 29, 11, 14, 16, 18, 22, 25, 3, 30, 8, 12, 20, 28, 15, 17, 24, 7, 19, 27, 23, 6, 26, 5, 4, 31 };
    U32 v = val;
    v |= v >> 1;
    v |= v >> 2;
    v |= v >> 4;
    v |= v >> 8;
    v |= v >> 16;
    return DeBruijnClz[ (U32) (v * 0x07C4ACDDU) >> 27];
#   endif
}

/*-********************************************************
*  bitStream decoding
**********************************************************/
/*! BITv07_initDStream() :
*   Initialize a BITv07_DStream_t.
*   `bitD` : a pointer to an already allocated BITv07_DStream_t structure.
*   `srcSize` must be the *exact* size of the bitStream, in bytes.
* @return : size of stream (== srcSize) or an errorCode if a problem is detected */ MEM_STATIC size_t BITv07_initDStream(BITv07_DStream_t* bitD, const void* srcBuffer, size_t srcSize) { if (srcSize < 1) { memset(bitD, 0, sizeof(*bitD)); return ERROR(srcSize_wrong); } if (srcSize >= sizeof(bitD->bitContainer)) { /* normal case */ bitD->start = (const char*)srcBuffer; bitD->ptr = (const char*)srcBuffer + srcSize - sizeof(bitD->bitContainer); bitD->bitContainer = MEM_readLEST(bitD->ptr); { BYTE const lastByte = ((const BYTE*)srcBuffer)[srcSize-1]; bitD->bitsConsumed = lastByte ? 8 - BITv07_highbit32(lastByte) : 0; if (lastByte == 0) return ERROR(GENERIC); /* endMark not present */ } } else { bitD->start = (const char*)srcBuffer; bitD->ptr = bitD->start; bitD->bitContainer = *(const BYTE*)(bitD->start); switch(srcSize) { case 7: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[6]) << (sizeof(bitD->bitContainer)*8 - 16);/* fall-through */ case 6: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[5]) << (sizeof(bitD->bitContainer)*8 - 24);/* fall-through */ case 5: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[4]) << (sizeof(bitD->bitContainer)*8 - 32);/* fall-through */ case 4: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[3]) << 24; /* fall-through */ case 3: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[2]) << 16; /* fall-through */ case 2: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[1]) << 8; /* fall-through */ default: break; } { BYTE const lastByte = ((const BYTE*)srcBuffer)[srcSize-1]; bitD->bitsConsumed = lastByte ? 8 - BITv07_highbit32(lastByte) : 0; if (lastByte == 0) return ERROR(GENERIC); /* endMark not present */ } bitD->bitsConsumed += (U32)(sizeof(bitD->bitContainer) - srcSize)*8; } return srcSize; } MEM_STATIC size_t BITv07_lookBits(const BITv07_DStream_t* bitD, U32 nbBits) { U32 const bitMask = sizeof(bitD->bitContainer)*8 - 1; return ((bitD->bitContainer << (bitD->bitsConsumed & bitMask)) >> 1) >> ((bitMask-nbBits) & bitMask); } /*! BITv07_lookBitsFast() : * unsafe version; only works only if nbBits >= 1 */ MEM_STATIC size_t BITv07_lookBitsFast(const BITv07_DStream_t* bitD, U32 nbBits) { U32 const bitMask = sizeof(bitD->bitContainer)*8 - 1; return (bitD->bitContainer << (bitD->bitsConsumed & bitMask)) >> (((bitMask+1)-nbBits) & bitMask); } MEM_STATIC void BITv07_skipBits(BITv07_DStream_t* bitD, U32 nbBits) { bitD->bitsConsumed += nbBits; } MEM_STATIC size_t BITv07_readBits(BITv07_DStream_t* bitD, U32 nbBits) { size_t const value = BITv07_lookBits(bitD, nbBits); BITv07_skipBits(bitD, nbBits); return value; } /*! 
BITv07_readBitsFast() : * unsafe version; only works only if nbBits >= 1 */ MEM_STATIC size_t BITv07_readBitsFast(BITv07_DStream_t* bitD, U32 nbBits) { size_t const value = BITv07_lookBitsFast(bitD, nbBits); BITv07_skipBits(bitD, nbBits); return value; } MEM_STATIC BITv07_DStream_status BITv07_reloadDStream(BITv07_DStream_t* bitD) { if (bitD->bitsConsumed > (sizeof(bitD->bitContainer)*8)) /* should not happen => corruption detected */ return BITv07_DStream_overflow; if (bitD->ptr >= bitD->start + sizeof(bitD->bitContainer)) { bitD->ptr -= bitD->bitsConsumed >> 3; bitD->bitsConsumed &= 7; bitD->bitContainer = MEM_readLEST(bitD->ptr); return BITv07_DStream_unfinished; } if (bitD->ptr == bitD->start) { if (bitD->bitsConsumed < sizeof(bitD->bitContainer)*8) return BITv07_DStream_endOfBuffer; return BITv07_DStream_completed; } { U32 nbBytes = bitD->bitsConsumed >> 3; BITv07_DStream_status result = BITv07_DStream_unfinished; if (bitD->ptr - nbBytes < bitD->start) { nbBytes = (U32)(bitD->ptr - bitD->start); /* ptr > start */ result = BITv07_DStream_endOfBuffer; } bitD->ptr -= nbBytes; bitD->bitsConsumed -= nbBytes*8; bitD->bitContainer = MEM_readLEST(bitD->ptr); /* reminder : srcSize > sizeof(bitD) */ return result; } } /*! BITv07_endOfDStream() : * @return Tells if DStream has exactly reached its end (all bits consumed). */ MEM_STATIC unsigned BITv07_endOfDStream(const BITv07_DStream_t* DStream) { return ((DStream->ptr == DStream->start) && (DStream->bitsConsumed == sizeof(DStream->bitContainer)*8)); } #if defined (__cplusplus) } #endif #endif /* BITSTREAM_H_MODULE */ /* ****************************************************************** FSE : Finite State Entropy codec Public Prototypes declaration Copyright (C) 2013-2016, Yann Collet. BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. You can contact the author at : - Source repository : https://github.com/Cyan4973/FiniteStateEntropy ****************************************************************** */ #ifndef FSEv07_H #define FSEv07_H #if defined (__cplusplus) extern "C" { #endif /*-**************************************** * FSE simple functions ******************************************/ /*! 
FSEv07_decompress(): Decompress FSE data from buffer 'cSrc', of size 'cSrcSize', into already allocated destination buffer 'dst', of size 'dstCapacity'. @return : size of regenerated data (<= maxDstSize), or an error code, which can be tested using FSEv07_isError() . ** Important ** : FSEv07_decompress() does not decompress non-compressible nor RLE data !!! Why ? : making this distinction requires a header. Header management is intentionally delegated to the user layer, which can better manage special cases. */ size_t FSEv07_decompress(void* dst, size_t dstCapacity, const void* cSrc, size_t cSrcSize); /* Error Management */ unsigned FSEv07_isError(size_t code); /* tells if a return value is an error code */ const char* FSEv07_getErrorName(size_t code); /* provides error code string (useful for debugging) */ /*-***************************************** * FSE detailed API ******************************************/ /*! FSEv07_decompress() does the following: 1. read normalized counters with readNCount() 2. build decoding table 'DTable' from normalized counters 3. decode the data stream using decoding table 'DTable' The following API allows targeting specific sub-functions for advanced tasks. For example, it's possible to compress several blocks using the same 'CTable', or to save and provide normalized distribution using external method. */ /* *** DECOMPRESSION *** */ /*! FSEv07_readNCount(): Read compactly saved 'normalizedCounter' from 'rBuffer'. @return : size read from 'rBuffer', or an errorCode, which can be tested using FSEv07_isError(). maxSymbolValuePtr[0] and tableLogPtr[0] will also be updated with their respective values */ size_t FSEv07_readNCount (short* normalizedCounter, unsigned* maxSymbolValuePtr, unsigned* tableLogPtr, const void* rBuffer, size_t rBuffSize); /*! Constructor and Destructor of FSEv07_DTable. Note that its size depends on 'tableLog' */ typedef unsigned FSEv07_DTable; /* don't allocate that. It's just a way to be more restrictive than void* */ FSEv07_DTable* FSEv07_createDTable(unsigned tableLog); void FSEv07_freeDTable(FSEv07_DTable* dt); /*! FSEv07_buildDTable(): Builds 'dt', which must be already allocated, using FSEv07_createDTable(). return : 0, or an errorCode, which can be tested using FSEv07_isError() */ size_t FSEv07_buildDTable (FSEv07_DTable* dt, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog); /*! FSEv07_decompress_usingDTable(): Decompress compressed source `cSrc` of size `cSrcSize` using `dt` into `dst` which must be already allocated. @return : size of regenerated data (necessarily <= `dstCapacity`), or an errorCode, which can be tested using FSEv07_isError() */ size_t FSEv07_decompress_usingDTable(void* dst, size_t dstCapacity, const void* cSrc, size_t cSrcSize, const FSEv07_DTable* dt); /*! Tutorial : ---------- (Note : these functions only decompress FSE-compressed blocks. If block is uncompressed, use memcpy() instead If block is a single repeated byte, use memset() instead ) The first step is to obtain the normalized frequencies of symbols. This can be performed by FSEv07_readNCount() if it was saved using FSEv07_writeNCount(). 'normalizedCounter' must be already allocated, and have at least 'maxSymbolValuePtr[0]+1' cells of signed short. In practice, that means it's necessary to know 'maxSymbolValue' beforehand, or size the table to handle worst case situations (typically 256). FSEv07_readNCount() will provide 'tableLog' and 'maxSymbolValue'. 
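For illustration only (this sketch is not part of the original tutorial; the buffer
names match the ones used above, everything else is hypothetical) :

    short    normalizedCounter[256];           // sized for the worst case (maxSymbolValue = 255)
    unsigned maxSymbolValue = 255;
    unsigned tableLog;
    size_t   hSize = FSEv07_readNCount(normalizedCounter, &maxSymbolValue,
                                       &tableLog, rBuffer, rBuffSize);
    if (FSEv07_isError(hSize)) return hSize;   // error code, see FSEv07_getErrorName()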
The result of FSEv07_readNCount() is the number of bytes read from 'rBuffer'.
Note that 'rBufferSize' must be at least 4 bytes, even if useful information is less than that.
If there is an error, the function will return an error code, which can be tested using FSEv07_isError().

The next step is to build the decompression tables 'FSEv07_DTable' from 'normalizedCounter'.
This is performed by the function FSEv07_buildDTable().
The space required by 'FSEv07_DTable' must be already allocated using FSEv07_createDTable().
If there is an error, the function will return an error code, which can be tested using FSEv07_isError().

`FSEv07_DTable` can then be used to decompress `cSrc`, with FSEv07_decompress_usingDTable().
`cSrcSize` must be strictly correct, otherwise decompression will fail.
FSEv07_decompress_usingDTable() result will tell how many bytes were regenerated (<=`dstCapacity`).
If there is an error, the function will return an error code, which can be tested using FSEv07_isError(). (ex: dst buffer too small)
*/

#ifdef FSEv07_STATIC_LINKING_ONLY

/* *****************************************
*  Static allocation
*******************************************/
/* FSE buffer bounds */
#define FSEv07_NCOUNTBOUND 512
#define FSEv07_BLOCKBOUND(size) (size + (size>>7))

/* It is possible to statically allocate FSE CTable/DTable as a table of unsigned using below macros */
#define FSEv07_DTABLE_SIZE_U32(maxTableLog)                   (1 + (1<<maxTableLog))

/* *****************************************
*  FSE advanced API
*******************************************/
size_t FSEv07_buildDTable_raw (FSEv07_DTable* dt, unsigned nbBits);
/**< build a fake FSEv07_DTable, designed to read an uncompressed bitstream where each symbol uses nbBits */

size_t FSEv07_buildDTable_rle (FSEv07_DTable* dt, unsigned char symbolValue);
/**< build a fake FSEv07_DTable, designed to always generate the same symbolValue */

/* *****************************************
*  FSE symbol decompression API
*******************************************/
typedef struct {
    size_t      state;
    const void* table;   /* precise table may vary, depending on U16 */
} FSEv07_DState_t;

static void     FSEv07_initDState(FSEv07_DState_t* DStatePtr, BITv07_DStream_t* bitD, const FSEv07_DTable* dt);

static unsigned char FSEv07_decodeSymbol(FSEv07_DState_t* DStatePtr, BITv07_DStream_t* bitD);

static unsigned char FSEv07_decodeSymbolFast(FSEv07_DState_t* DStatePtr, BITv07_DStream_t* bitD);
/* faster, but works only if nbBits is always >= 1 (otherwise, result will be corrupted) */


/* ======    Decompression    ====== */

typedef struct { U16 tableLog; U16 fastMode; } FSEv07_DTableHeader;   /* sizeof U32 */

typedef struct { unsigned short newState; unsigned char symbol; unsigned char nbBits; } FSEv07_decode_t;   /* size == U32 */

MEM_STATIC void FSEv07_initDState(FSEv07_DState_t* DStatePtr, BITv07_DStream_t* bitD, const FSEv07_DTable* dt)
{
    const void* ptr = dt;
    const FSEv07_DTableHeader* const DTableH = (const FSEv07_DTableHeader*)ptr;
    DStatePtr->state = BITv07_readBits(bitD, DTableH->tableLog);
    BITv07_reloadDStream(bitD);
    DStatePtr->table = dt + 1;
}

MEM_STATIC BYTE FSEv07_peekSymbol(const FSEv07_DState_t* DStatePtr)
{
    FSEv07_decode_t const DInfo = ((const FSEv07_decode_t*)(DStatePtr->table))[DStatePtr->state];
    return DInfo.symbol;
}

MEM_STATIC void FSEv07_updateState(FSEv07_DState_t* DStatePtr, BITv07_DStream_t* bitD)
{
    FSEv07_decode_t const DInfo = ((const FSEv07_decode_t*)(DStatePtr->table))[DStatePtr->state];
    U32 const nbBits = DInfo.nbBits;
    size_t const lowBits = BITv07_readBits(bitD, nbBits);
    DStatePtr->state = DInfo.newState + lowBits;
}

MEM_STATIC BYTE FSEv07_decodeSymbol(FSEv07_DState_t* DStatePtr, BITv07_DStream_t* bitD)
{
    FSEv07_decode_t const DInfo = ((const FSEv07_decode_t*)(DStatePtr->table))[DStatePtr->state];
    U32 const nbBits = DInfo.nbBits;
    BYTE const symbol = DInfo.symbol;
    size_t const lowBits = BITv07_readBits(bitD, nbBits);
    DStatePtr->state = DInfo.newState + lowBits;
    return symbol;
}

/*! FSEv07_decodeSymbolFast() :
    unsafe, only works if no symbol has a probability > 50% */
MEM_STATIC BYTE FSEv07_decodeSymbolFast(FSEv07_DState_t* DStatePtr, BITv07_DStream_t* bitD)
{
    FSEv07_decode_t const DInfo = ((const FSEv07_decode_t*)(DStatePtr->table))[DStatePtr->state];
    U32 const nbBits = DInfo.nbBits;
    BYTE const symbol = DInfo.symbol;
    size_t const lowBits = BITv07_readBitsFast(bitD, nbBits);
    DStatePtr->state = DInfo.newState + lowBits;
    return symbol;
}


#ifndef FSEv07_COMMONDEFS_ONLY

/* **************************************************************
*  Tuning parameters
****************************************************************/
/*!MEMORY_USAGE :
*  Memory usage formula : N->2^N Bytes (examples : 10 -> 1KB; 12 -> 4KB ; 16 -> 64KB; 20 -> 1MB; etc.)
*  Increasing memory usage improves compression ratio
*  Reduced memory usage can improve speed, due to cache effect
*  Recommended max value is 14, for 16KB, which nicely fits into Intel x86 L1 cache */
#define FSEv07_MAX_MEMORY_USAGE 14
#define FSEv07_DEFAULT_MEMORY_USAGE 13

/*!FSEv07_MAX_SYMBOL_VALUE :
*  Maximum symbol value authorized.
*  Required for proper stack allocation */
#define FSEv07_MAX_SYMBOL_VALUE 255

/* **************************************************************
*  template functions type & suffix
****************************************************************/
#define FSEv07_FUNCTION_TYPE BYTE
#define FSEv07_FUNCTION_EXTENSION
#define FSEv07_DECODE_TYPE FSEv07_decode_t

#endif   /* !FSEv07_COMMONDEFS_ONLY */

/* ***************************************************************
*  Constants
*****************************************************************/
#define FSEv07_MAX_TABLELOG  (FSEv07_MAX_MEMORY_USAGE-2)
#define FSEv07_MAX_TABLESIZE (1U<<FSEv07_MAX_TABLELOG)
#define FSEv07_MAXTABLESIZE_MASK (FSEv07_MAX_TABLESIZE-1)
#define FSEv07_DEFAULT_TABLELOG (FSEv07_DEFAULT_MEMORY_USAGE-2)
#define FSEv07_MIN_TABLELOG 5

#define FSEv07_TABLELOG_ABSOLUTE_MAX 15
#if FSEv07_MAX_TABLELOG > FSEv07_TABLELOG_ABSOLUTE_MAX
#  error "FSEv07_MAX_TABLELOG > FSEv07_TABLELOG_ABSOLUTE_MAX is not supported"
#endif

#define FSEv07_TABLESTEP(tableSize) ((tableSize>>1) + (tableSize>>3) + 3)

#endif  /* FSEv07_STATIC_LINKING_ONLY */

#if defined (__cplusplus)
}
#endif

#endif  /* FSEv07_H */
/* ******************************************************************
   Huffman coder, part of New Generation Entropy library
   header file
   Copyright (C) 2013-2016, Yann Collet.

   BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)

   Redistribution and use in source and binary forms, with or without
   modification, are permitted provided that the following conditions are
   met:

       * Redistributions of source code must retain the above copyright
   notice, this list of conditions and the following disclaimer.
       * Redistributions in binary form must reproduce the above
   copyright notice, this list of conditions and the following disclaimer
   in the documentation and/or other materials provided with the
   distribution.

   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
You can contact the author at : - Source repository : https://github.com/Cyan4973/FiniteStateEntropy ****************************************************************** */ #ifndef HUFv07_H_298734234 #define HUFv07_H_298734234 #if defined (__cplusplus) extern "C" { #endif /* *** simple functions *** */ /** HUFv07_decompress() : Decompress HUF data from buffer 'cSrc', of size 'cSrcSize', into already allocated buffer 'dst', of minimum size 'dstSize'. `dstSize` : **must** be the ***exact*** size of original (uncompressed) data. Note : in contrast with FSE, HUFv07_decompress can regenerate RLE (cSrcSize==1) and uncompressed (cSrcSize==dstSize) data, because it knows size to regenerate. @return : size of regenerated data (== dstSize), or an error code, which can be tested using HUFv07_isError() */ size_t HUFv07_decompress(void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /* **************************************** * Tool functions ******************************************/ #define HUFv07_BLOCKSIZE_MAX (128 * 1024) /* Error Management */ unsigned HUFv07_isError(size_t code); /**< tells if a return value is an error code */ const char* HUFv07_getErrorName(size_t code); /**< provides error code string (useful for debugging) */ /* *** Advanced function *** */ #ifdef HUFv07_STATIC_LINKING_ONLY /* *** Constants *** */ #define HUFv07_TABLELOG_ABSOLUTEMAX 16 /* absolute limit of HUFv07_MAX_TABLELOG. Beyond that value, code does not work */ #define HUFv07_TABLELOG_MAX 12 /* max configured tableLog (for static allocation); can be modified up to HUFv07_ABSOLUTEMAX_TABLELOG */ #define HUFv07_TABLELOG_DEFAULT 11 /* tableLog by default, when not specified */ #define HUFv07_SYMBOLVALUE_MAX 255 #if (HUFv07_TABLELOG_MAX > HUFv07_TABLELOG_ABSOLUTEMAX) # error "HUFv07_TABLELOG_MAX is too large !" 
#endif /* **************************************** * Static allocation ******************************************/ /* HUF buffer bounds */ #define HUFv07_BLOCKBOUND(size) (size + (size>>8) + 8) /* only true if incompressible pre-filtered with fast heuristic */ /* static allocation of HUF's DTable */ typedef U32 HUFv07_DTable; #define HUFv07_DTABLE_SIZE(maxTableLog) (1 + (1<<(maxTableLog))) #define HUFv07_CREATE_STATIC_DTABLEX2(DTable, maxTableLog) \ HUFv07_DTable DTable[HUFv07_DTABLE_SIZE((maxTableLog)-1)] = { ((U32)((maxTableLog)-1)*0x1000001) } #define HUFv07_CREATE_STATIC_DTABLEX4(DTable, maxTableLog) \ HUFv07_DTable DTable[HUFv07_DTABLE_SIZE(maxTableLog)] = { ((U32)(maxTableLog)*0x1000001) } /* **************************************** * Advanced decompression functions ******************************************/ size_t HUFv07_decompress4X2 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /**< single-symbol decoder */ size_t HUFv07_decompress4X4 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /**< double-symbols decoder */ size_t HUFv07_decompress4X_DCtx (HUFv07_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /**< decodes RLE and uncompressed */ size_t HUFv07_decompress4X_hufOnly(HUFv07_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /**< considers RLE and uncompressed as errors */ size_t HUFv07_decompress4X2_DCtx(HUFv07_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /**< single-symbol decoder */ size_t HUFv07_decompress4X4_DCtx(HUFv07_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /**< double-symbols decoder */ size_t HUFv07_decompress1X_DCtx (HUFv07_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); size_t HUFv07_decompress1X2_DCtx(HUFv07_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /**< single-symbol decoder */ size_t HUFv07_decompress1X4_DCtx(HUFv07_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /**< double-symbols decoder */ /* **************************************** * HUF detailed API ******************************************/ /*! The following API allows targeting specific sub-functions for advanced tasks. For example, it's possible to compress several blocks using the same 'CTable', or to save and regenerate 'CTable' using external methods. */ /* FSEv07_count() : find it within "fse.h" */ /*! HUFv07_readStats() : Read compact Huffman tree, saved by HUFv07_writeCTable(). `huffWeight` is destination buffer. @return : size read from `src` , or an error Code . Note : Needed by HUFv07_readCTable() and HUFv07_readDTableXn() . */ size_t HUFv07_readStats(BYTE* huffWeight, size_t hwSize, U32* rankStats, U32* nbSymbolsPtr, U32* tableLogPtr, const void* src, size_t srcSize); /* HUFv07_decompress() does the following: 1. select the decompression algorithm (X2, X4) based on pre-computed heuristics 2. build Huffman table from save, using HUFv07_readDTableXn() 3. decode 1 or 4 segments in parallel using HUFv07_decompressSXn_usingDTable */ /** HUFv07_selectDecoder() : * Tells which decoder is likely to decode faster, * based on a set of pre-determined metrics. * @return : 0==HUFv07_decompress4X2, 1==HUFv07_decompress4X4 . 
* Assumption : 0 < cSrcSize < dstSize <= 128 KB */ U32 HUFv07_selectDecoder (size_t dstSize, size_t cSrcSize); size_t HUFv07_readDTableX2 (HUFv07_DTable* DTable, const void* src, size_t srcSize); size_t HUFv07_readDTableX4 (HUFv07_DTable* DTable, const void* src, size_t srcSize); size_t HUFv07_decompress4X_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUFv07_DTable* DTable); size_t HUFv07_decompress4X2_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUFv07_DTable* DTable); size_t HUFv07_decompress4X4_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUFv07_DTable* DTable); /* single stream variants */ size_t HUFv07_decompress1X2 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /* single-symbol decoder */ size_t HUFv07_decompress1X4 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /* double-symbol decoder */ size_t HUFv07_decompress1X_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUFv07_DTable* DTable); size_t HUFv07_decompress1X2_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUFv07_DTable* DTable); size_t HUFv07_decompress1X4_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUFv07_DTable* DTable); #endif /* HUFv07_STATIC_LINKING_ONLY */ #if defined (__cplusplus) } #endif #endif /* HUFv07_H_298734234 */ /* Common functions of New Generation Entropy library Copyright (C) 2016, Yann Collet. BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
You can contact the author at :
- FSE+HUF source repository : https://github.com/Cyan4973/FiniteStateEntropy
- Public forum : https://groups.google.com/forum/#!forum/lz4c
*************************************************************************** */

/*-****************************************
*  FSE Error Management
******************************************/
unsigned FSEv07_isError(size_t code) { return ERR_isError(code); }

const char* FSEv07_getErrorName(size_t code) { return ERR_getErrorName(code); }


/* **************************************************************
*  HUF Error Management
****************************************************************/
unsigned HUFv07_isError(size_t code) { return ERR_isError(code); }

const char* HUFv07_getErrorName(size_t code) { return ERR_getErrorName(code); }


/*-**************************************************************
*  FSE NCount encoding-decoding
****************************************************************/
static short FSEv07_abs(short a) { return (short)(a<0 ? -a : a); }

size_t FSEv07_readNCount (short* normalizedCounter, unsigned* maxSVPtr, unsigned* tableLogPtr,
                          const void* headerBuffer, size_t hbSize)
{
    const BYTE* const istart = (const BYTE*) headerBuffer;
    const BYTE* const iend = istart + hbSize;
    const BYTE* ip = istart;
    int nbBits;
    int remaining;
    int threshold;
    U32 bitStream;
    int bitCount;
    unsigned charnum = 0;
    int previous0 = 0;

    if (hbSize < 4) return ERROR(srcSize_wrong);
    bitStream = MEM_readLE32(ip);
    nbBits = (bitStream & 0xF) + FSEv07_MIN_TABLELOG;   /* extract tableLog */
    if (nbBits > FSEv07_TABLELOG_ABSOLUTE_MAX) return ERROR(tableLog_tooLarge);
    bitStream >>= 4;
    bitCount = 4;
    *tableLogPtr = nbBits;
    remaining = (1<<nbBits)+1;
    threshold = 1<<nbBits;
    nbBits++;

    while ((remaining>1) && (charnum<=*maxSVPtr)) {
        if (previous0) {
            unsigned n0 = charnum;
            while ((bitStream & 0xFFFF) == 0xFFFF) {
                n0+=24;
                if (ip < iend-5) {
                    ip+=2;
                    bitStream = MEM_readLE32(ip) >> bitCount;
                } else {
                    bitStream >>= 16;
                    bitCount+=16;
            }   }
            while ((bitStream & 3) == 3) {
                n0+=3;
                bitStream>>=2;
                bitCount+=2;
            }
            n0 += bitStream & 3;
            bitCount += 2;
            if (n0 > *maxSVPtr) return ERROR(maxSymbolValue_tooSmall);
            while (charnum < n0) normalizedCounter[charnum++] = 0;
            if ((ip <= iend-7) || (ip + (bitCount>>3) <= iend-4)) {
                ip += bitCount>>3;
                bitCount &= 7;
                bitStream = MEM_readLE32(ip) >> bitCount;
            }
            else
                bitStream >>= 2;
        }
        {   short const max = (short)((2*threshold-1)-remaining);
            short count;

            if ((bitStream & (threshold-1)) < (U32)max) {
                count = (short)(bitStream & (threshold-1));
                bitCount += nbBits-1;
            } else {
                count = (short)(bitStream & (2*threshold-1));
                if (count >= threshold) count -= max;
                bitCount += nbBits;
            }

            count--;   /* extra accuracy */
            remaining -= FSEv07_abs(count);
            normalizedCounter[charnum++] = count;
            previous0 = !count;
            while (remaining < threshold) {
                nbBits--;
                threshold >>= 1;
            }

            if ((ip <= iend-7) || (ip + (bitCount>>3) <= iend-4)) {
                ip += bitCount>>3;
                bitCount &= 7;
            } else {
                bitCount -= (int)(8 * (iend - 4 - ip));
                ip = iend - 4;
            }
            bitStream = MEM_readLE32(ip) >> (bitCount & 31);
    }   }   /* while ((remaining>1) && (charnum<=*maxSVPtr)) */
    if (remaining != 1) return ERROR(GENERIC);
    *maxSVPtr = charnum-1;

    ip += (bitCount+7)>>3;
    if ((size_t)(ip-istart) > hbSize) return ERROR(srcSize_wrong);
    return ip-istart;
}


/*! HUFv07_readStats() :
    Read compact Huffman tree, saved by HUFv07_writeCTable().
    `huffWeight` is destination buffer.
    @return : size read from `src` , or an error Code .
    Note : Needed by HUFv07_readCTable() and HUFv07_readDTableXn() .
*/
size_t HUFv07_readStats(BYTE* huffWeight, size_t hwSize, U32* rankStats,
                        U32* nbSymbolsPtr, U32* tableLogPtr,
                        const void* src, size_t srcSize)
{
    U32 weightTotal;
    const BYTE* ip = (const BYTE*) src;
    size_t iSize;
    size_t oSize;

    if (!srcSize) return ERROR(srcSize_wrong);
    iSize = ip[0];
    /* memset(huffWeight, 0, hwSize); */   /* is not necessary, even though some analyzer complain ... */

    if (iSize >= 128) { /* special header */
        if (iSize >= (242)) {  /* RLE */
            static U32 l[14] = { 1, 2, 3, 4, 7, 8, 15, 16, 31, 32, 63, 64, 127, 128 };
            oSize = l[iSize-242];
            memset(huffWeight, 1, hwSize);
            iSize = 0;
        }
        else {   /* Incompressible */
            oSize = iSize - 127;
            iSize = ((oSize+1)/2);
            if (iSize+1 > srcSize) return ERROR(srcSize_wrong);
            if (oSize >= hwSize) return ERROR(corruption_detected);
            ip += 1;
            {   U32 n;
                for (n=0; n<oSize; n+=2) {
                    huffWeight[n]   = ip[n/2] >> 4;
                    huffWeight[n+1] = ip[n/2] & 15;
    }   }   }   }
    else  {   /* header compressed with FSE (normal case) */
        if (iSize+1 > srcSize) return ERROR(srcSize_wrong);
        oSize = FSEv07_decompress(huffWeight, hwSize-1, ip+1, iSize);   /* max (hwSize-1) values decoded, as last one is implied */
        if (FSEv07_isError(oSize)) return oSize;
    }

    /* collect weight stats */
    memset(rankStats, 0, (HUFv07_TABLELOG_ABSOLUTEMAX + 1) * sizeof(U32));
    weightTotal = 0;
    {   U32 n; for (n=0; n<oSize; n++) {
            if (huffWeight[n] >= HUFv07_TABLELOG_ABSOLUTEMAX) return ERROR(corruption_detected);
            rankStats[huffWeight[n]]++;
            weightTotal += (1 << huffWeight[n]) >> 1;
    }   }
    if (weightTotal == 0) return ERROR(corruption_detected);

    /* get last non-null symbol weight (implied, total must be 2^n) */
    {   U32 const tableLog = BITv07_highbit32(weightTotal) + 1;
        if (tableLog > HUFv07_TABLELOG_ABSOLUTEMAX) return ERROR(corruption_detected);
        *tableLogPtr = tableLog;
        /* determine last weight */
        {   U32 const total = 1 << tableLog;
            U32 const rest = total - weightTotal;
            U32 const verif = 1 << BITv07_highbit32(rest);
            U32 const lastWeight = BITv07_highbit32(rest) + 1;
            if (verif != rest) return ERROR(corruption_detected);    /* last value must be a clean power of 2 */
            huffWeight[oSize] = (BYTE)lastWeight;
            rankStats[lastWeight]++;
    }   }

    /* check tree construction validity */
    if ((rankStats[1] < 2) || (rankStats[1] & 1)) return ERROR(corruption_detected);   /* by construction : at least 2 elts of rank 1, must be even */

    /* results */
    *nbSymbolsPtr = (U32)(oSize+1);
    return iSize+1;
}
/* ******************************************************************
   FSE : Finite State Entropy decoder
   Copyright (C) 2013-2015, Yann Collet.

   BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)

   Redistribution and use in source and binary forms, with or without
   modification, are permitted provided that the following conditions are
   met:

       * Redistributions of source code must retain the above copyright
   notice, this list of conditions and the following disclaimer.
       * Redistributions in binary form must reproduce the above
   copyright notice, this list of conditions and the following disclaimer
   in the documentation and/or other materials provided with the
   distribution.

   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
   A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

   You can contact the author at :
   - FSE source repository : https://github.com/Cyan4973/FiniteStateEntropy
   - Public forum : https://groups.google.com/forum/#!forum/lz4c
****************************************************************** */


/* **************************************************************
*  Compiler specifics
****************************************************************/
#ifdef _MSC_VER    /* Visual Studio */
#  define FORCE_INLINE static __forceinline
#  include <intrin.h>                    /* For Visual 2005 */
#  pragma warning(disable : 4127)        /* disable: C4127: conditional expression is constant */
#  pragma warning(disable : 4214)        /* disable: C4214: non-int bitfields */
#else
#  if defined (__cplusplus) || defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L   /* C99 */
#    ifdef __GNUC__
#      define FORCE_INLINE static inline __attribute__((always_inline))
#    else
#      define FORCE_INLINE static inline
#    endif
#  else
#    define FORCE_INLINE static
#  endif /* __STDC_VERSION__ */
#endif


/* **************************************************************
*  Error Management
****************************************************************/
#define FSEv07_isError ERR_isError
#define FSEv07_STATIC_ASSERT(c) { enum { FSEv07_static_assert = 1/(int)(!!(c)) }; }   /* use only *after* variable declarations */


/* **************************************************************
*  Complex types
****************************************************************/
typedef U32 DTable_max_t[FSEv07_DTABLE_SIZE_U32(FSEv07_MAX_TABLELOG)];


/* **************************************************************
*  Templates
****************************************************************/
/*
  designed to be included
  for type-specific functions (template emulation in C)
  Objective is to write these functions only once, for improved maintenance
*/

/* safety checks */
#ifndef FSEv07_FUNCTION_EXTENSION
#  error "FSEv07_FUNCTION_EXTENSION must be defined"
#endif
#ifndef FSEv07_FUNCTION_TYPE
#  error "FSEv07_FUNCTION_TYPE must be defined"
#endif

/* Function names */
#define FSEv07_CAT(X,Y) X##Y
#define FSEv07_FUNCTION_NAME(X,Y) FSEv07_CAT(X,Y)
#define FSEv07_TYPE_NAME(X,Y) FSEv07_CAT(X,Y)


/* Function templates */
FSEv07_DTable* FSEv07_createDTable (unsigned tableLog)
{
    if (tableLog > FSEv07_TABLELOG_ABSOLUTE_MAX) tableLog = FSEv07_TABLELOG_ABSOLUTE_MAX;
    return (FSEv07_DTable*)malloc( FSEv07_DTABLE_SIZE_U32(tableLog) * sizeof (U32) );
}

void FSEv07_freeDTable (FSEv07_DTable* dt)
{
    free(dt);
}

size_t FSEv07_buildDTable(FSEv07_DTable* dt, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog)
{
    void* const tdPtr = dt+1;   /* because *dt is unsigned, 32-bits aligned on 32-bits */
    FSEv07_DECODE_TYPE* const tableDecode = (FSEv07_DECODE_TYPE*) (tdPtr);
    U16 symbolNext[FSEv07_MAX_SYMBOL_VALUE+1];

    U32 const maxSV1 = maxSymbolValue + 1;
    U32 const tableSize = 1 << tableLog;
    U32 highThreshold = tableSize-1;

    /* Sanity Checks */
    if (maxSymbolValue > FSEv07_MAX_SYMBOL_VALUE) return ERROR(maxSymbolValue_tooLarge);
    if (tableLog > FSEv07_MAX_TABLELOG)
        return ERROR(tableLog_tooLarge);

    /* Init, lay down lowprob symbols */
    {   FSEv07_DTableHeader DTableH;
        DTableH.tableLog = (U16)tableLog;
        DTableH.fastMode = 1;
        {   S16 const largeLimit= (S16)(1 << (tableLog-1));
            U32 s;
            for (s=0; s<maxSV1; s++) {
                if (normalizedCounter[s]==-1) {
                    tableDecode[highThreshold--].symbol = (FSEv07_FUNCTION_TYPE)s;
                    symbolNext[s] = 1;
                } else {
                    if (normalizedCounter[s] >= largeLimit) DTableH.fastMode=0;
                    symbolNext[s] = normalizedCounter[s];
        }   }   }
        memcpy(dt, &DTableH, sizeof(DTableH));
    }

    /* Spread symbols */
    {   U32 const tableMask = tableSize-1;
        U32 const step = FSEv07_TABLESTEP(tableSize);
        U32 s, position = 0;
        for (s=0; s<maxSV1; s++) {
            int i;
            for (i=0; i<normalizedCounter[s]; i++) {
                tableDecode[position].symbol = (FSEv07_FUNCTION_TYPE)s;
                position = (position + step) & tableMask;
                while (position > highThreshold) position = (position + step) & tableMask;   /* lowprob area */
        }   }

        if (position!=0) return ERROR(GENERIC);   /* position must reach all cells once, otherwise normalizedCounter is incorrect */
    }

    /* Build Decoding table */
    {   U32 u;
        for (u=0; u<tableSize; u++) {
            FSEv07_FUNCTION_TYPE const symbol = (FSEv07_FUNCTION_TYPE)(tableDecode[u].symbol);
            U16 nextState = symbolNext[symbol]++;
            tableDecode[u].nbBits = (BYTE) (tableLog - BITv07_highbit32 ((U32)nextState) );
            tableDecode[u].newState = (U16) ( (nextState << tableDecode[u].nbBits) - tableSize);
    }   }

    return 0;
}


#ifndef FSEv07_COMMONDEFS_ONLY

/*-*******************************************************
*  Decompression (Byte symbols)
*********************************************************/
size_t FSEv07_buildDTable_rle (FSEv07_DTable* dt, BYTE symbolValue)
{
    void* ptr = dt;
    FSEv07_DTableHeader* const DTableH = (FSEv07_DTableHeader*)ptr;
    void* dPtr = dt + 1;
    FSEv07_decode_t* const cell = (FSEv07_decode_t*)dPtr;

    DTableH->tableLog = 0;
    DTableH->fastMode = 0;

    cell->newState = 0;
    cell->symbol = symbolValue;
    cell->nbBits = 0;

    return 0;
}

size_t FSEv07_buildDTable_raw (FSEv07_DTable* dt, unsigned nbBits)
{
    void* ptr = dt;
    FSEv07_DTableHeader* const DTableH = (FSEv07_DTableHeader*)ptr;
    void* dPtr = dt + 1;
    FSEv07_decode_t* const dinfo = (FSEv07_decode_t*)dPtr;
    const unsigned tableSize = 1 << nbBits;
    const unsigned tableMask = tableSize - 1;
    const unsigned maxSV1 = tableMask+1;
    unsigned s;

    /* Sanity checks */
    if (nbBits < 1) return ERROR(GENERIC);         /* min size */

    /* Build Decoding Table */
    DTableH->tableLog = (U16)nbBits;
    DTableH->fastMode = 1;
    for (s=0; s<maxSV1; s++) {
        dinfo[s].newState = 0;
        dinfo[s].symbol = (BYTE)s;
        dinfo[s].nbBits = (BYTE)nbBits;
    }

    return 0;
}

FORCE_INLINE size_t FSEv07_decompress_usingDTable_generic(
          void* dst, size_t maxDstSize,
    const void* cSrc, size_t cSrcSize,
    const FSEv07_DTable* dt, const unsigned fast)
{
    BYTE* const ostart = (BYTE*) dst;
    BYTE* op = ostart;
    BYTE* const omax = op + maxDstSize;
    BYTE* const olimit = omax-3;

    BITv07_DStream_t bitD;
    FSEv07_DState_t state1;
    FSEv07_DState_t state2;

    /* Init */
    { size_t const errorCode = BITv07_initDStream(&bitD, cSrc, cSrcSize);
      if (FSEv07_isError(errorCode)) return errorCode; }

    FSEv07_initDState(&state1, &bitD, dt);
    FSEv07_initDState(&state2, &bitD, dt);

#define FSEv07_GETSYMBOL(statePtr) fast ? FSEv07_decodeSymbolFast(statePtr, &bitD) : FSEv07_decodeSymbol(statePtr, &bitD)

    /* 4 symbols per loop */
    for ( ; (BITv07_reloadDStream(&bitD)==BITv07_DStream_unfinished) && (op<olimit) ; op+=4) {
        op[0] = FSEv07_GETSYMBOL(&state1);

        if (FSEv07_MAX_TABLELOG*2+7 > sizeof(bitD.bitContainer)*8)    /* This test must be static */
            BITv07_reloadDStream(&bitD);

        op[1] = FSEv07_GETSYMBOL(&state2);

        if (FSEv07_MAX_TABLELOG*4+7 > sizeof(bitD.bitContainer)*8)    /* This test must be static */
            { if (BITv07_reloadDStream(&bitD) > BITv07_DStream_unfinished) { op+=2; break; } }

        op[2] = FSEv07_GETSYMBOL(&state1);

        if (FSEv07_MAX_TABLELOG*2+7 > sizeof(bitD.bitContainer)*8)    /* This test must be static */
            BITv07_reloadDStream(&bitD);

        op[3] = FSEv07_GETSYMBOL(&state2);
    }

    /* tail */
    /* note : BITv07_reloadDStream(&bitD) >= FSEv07_DStream_partiallyFilled; Ends at exactly BITv07_DStream_completed */
    while (1) {
        if (op>(omax-2)) return ERROR(dstSize_tooSmall);

        *op++ = FSEv07_GETSYMBOL(&state1);

        if (BITv07_reloadDStream(&bitD)==BITv07_DStream_overflow) {
            *op++ = FSEv07_GETSYMBOL(&state2);
            break;
        }

        if (op>(omax-2)) return ERROR(dstSize_tooSmall);

        *op++ = FSEv07_GETSYMBOL(&state2);

        if (BITv07_reloadDStream(&bitD)==BITv07_DStream_overflow) {
            *op++ = FSEv07_GETSYMBOL(&state1);
            break;
    }   }

    return op-ostart;
}


size_t FSEv07_decompress_usingDTable(void* dst, size_t originalSize,
                            const void* cSrc, size_t cSrcSize,
                            const FSEv07_DTable* dt)
{
    const void* ptr = dt;
    const FSEv07_DTableHeader* DTableH = (const FSEv07_DTableHeader*)ptr;
    const U32 fastMode = DTableH->fastMode;

    /* select fast mode (static) */
    if (fastMode) return FSEv07_decompress_usingDTable_generic(dst, originalSize, cSrc, cSrcSize, dt, 1);
    return FSEv07_decompress_usingDTable_generic(dst, originalSize, cSrc, cSrcSize, dt, 0);
}


size_t FSEv07_decompress(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize)
{
    const BYTE* const istart = (const BYTE*)cSrc;
    const BYTE* ip = istart;
    short counting[FSEv07_MAX_SYMBOL_VALUE+1];
    DTable_max_t dt;   /* Static analyzer seems unable to understand this table will be properly initialized later */
    unsigned tableLog;
    unsigned maxSymbolValue = FSEv07_MAX_SYMBOL_VALUE;

    if (cSrcSize<2) return ERROR(srcSize_wrong);   /* too small input size */

    /* normal FSE decoding mode */
    {   size_t const NCountLength = FSEv07_readNCount (counting, &maxSymbolValue, &tableLog, istart, cSrcSize);
        if (FSEv07_isError(NCountLength)) return NCountLength;
        if (NCountLength
>= cSrcSize) return ERROR(srcSize_wrong); /* too small input size */ ip += NCountLength; cSrcSize -= NCountLength; } { size_t const errorCode = FSEv07_buildDTable (dt, counting, maxSymbolValue, tableLog); if (FSEv07_isError(errorCode)) return errorCode; } return FSEv07_decompress_usingDTable (dst, maxDstSize, ip, cSrcSize, dt); /* always return, even if it is an error code */ } #endif /* FSEv07_COMMONDEFS_ONLY */ /* ****************************************************************** Huffman decoder, part of New Generation Entropy library Copyright (C) 2013-2016, Yann Collet. BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
You can contact the author at : - FSE+HUF source repository : https://github.com/Cyan4973/FiniteStateEntropy - Public forum : https://groups.google.com/forum/#!forum/lz4c ****************************************************************** */ /* ************************************************************** * Compiler specifics ****************************************************************/ #if defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) /* inline is defined */ #elif defined(_MSC_VER) # define inline __inline #else # define inline /* disable inline */ #endif #ifdef _MSC_VER /* Visual Studio */ # pragma warning(disable : 4127) /* disable: C4127: conditional expression is constant */ #endif /* ************************************************************** * Error Management ****************************************************************/ #define HUFv07_STATIC_ASSERT(c) { enum { HUFv07_static_assert = 1/(int)(!!(c)) }; } /* use only *after* variable declarations */ /*-***************************/ /* generic DTableDesc */ /*-***************************/ typedef struct { BYTE maxTableLog; BYTE tableType; BYTE tableLog; BYTE reserved; } DTableDesc; static DTableDesc HUFv07_getDTableDesc(const HUFv07_DTable* table) { DTableDesc dtd; memcpy(&dtd, table, sizeof(dtd)); return dtd; } /*-***************************/ /* single-symbol decoding */ /*-***************************/ typedef struct { BYTE byte; BYTE nbBits; } HUFv07_DEltX2; /* single-symbol decoding */ size_t HUFv07_readDTableX2 (HUFv07_DTable* DTable, const void* src, size_t srcSize) { BYTE huffWeight[HUFv07_SYMBOLVALUE_MAX + 1]; U32 rankVal[HUFv07_TABLELOG_ABSOLUTEMAX + 1]; /* large enough for values from 0 to 16 */ U32 tableLog = 0; U32 nbSymbols = 0; size_t iSize; void* const dtPtr = DTable + 1; HUFv07_DEltX2* const dt = (HUFv07_DEltX2*)dtPtr; HUFv07_STATIC_ASSERT(sizeof(DTableDesc) == sizeof(HUFv07_DTable)); /* memset(huffWeight, 0, sizeof(huffWeight)); */ /* is not necessary, even though some analyzer complain ... 
*/
    iSize = HUFv07_readStats(huffWeight, HUFv07_SYMBOLVALUE_MAX + 1, rankVal, &nbSymbols, &tableLog, src, srcSize);
    if (HUFv07_isError(iSize)) return iSize;

    /* Table header */
    {   DTableDesc dtd = HUFv07_getDTableDesc(DTable);
        if (tableLog > (U32)(dtd.maxTableLog+1)) return ERROR(tableLog_tooLarge);   /* DTable too small, huffman tree cannot fit in */
        dtd.tableType = 0;
        dtd.tableLog = (BYTE)tableLog;
        memcpy(DTable, &dtd, sizeof(dtd));
    }

    /* Prepare ranks */
    {   U32 n, nextRankStart = 0;
        for (n=1; n<tableLog+1; n++) {
            U32 current = nextRankStart;
            nextRankStart += (rankVal[n] << (n-1));
            rankVal[n] = current;
    }   }

    /* fill DTable */
    {   U32 n;
        for (n=0; n<nbSymbols; n++) {
            U32 const w = huffWeight[n];
            U32 const length = (1 << w) >> 1;
            U32 i;
            HUFv07_DEltX2 D;
            D.byte = (BYTE)n; D.nbBits = (BYTE)(tableLog + 1 - w);
            for (i = rankVal[w]; i < rankVal[w] + length; i++)
                dt[i] = D;
            rankVal[w] += length;
    }   }

    return iSize;
}


static BYTE HUFv07_decodeSymbolX2(BITv07_DStream_t* Dstream, const HUFv07_DEltX2* dt, const U32 dtLog)
{
    size_t const val = BITv07_lookBitsFast(Dstream, dtLog);   /* note : dtLog >= 1 */
    BYTE const c = dt[val].byte;
    BITv07_skipBits(Dstream, dt[val].nbBits);
    return c;
}

#define HUFv07_DECODE_SYMBOLX2_0(ptr, DStreamPtr) \
    *ptr++ = HUFv07_decodeSymbolX2(DStreamPtr, dt, dtLog)

#define HUFv07_DECODE_SYMBOLX2_1(ptr, DStreamPtr) \
    if (MEM_64bits() || (HUFv07_TABLELOG_MAX<=12)) \
        HUFv07_DECODE_SYMBOLX2_0(ptr, DStreamPtr)

#define HUFv07_DECODE_SYMBOLX2_2(ptr, DStreamPtr) \
    if (MEM_64bits()) \
        HUFv07_DECODE_SYMBOLX2_0(ptr, DStreamPtr)

static inline size_t HUFv07_decodeStreamX2(BYTE* p, BITv07_DStream_t* const bitDPtr, BYTE* const pEnd, const HUFv07_DEltX2* const dt, const U32 dtLog)
{
    BYTE* const pStart = p;

    /* up to 4 symbols at a time */
    while ((BITv07_reloadDStream(bitDPtr) == BITv07_DStream_unfinished) && (p <= pEnd-4)) {
        HUFv07_DECODE_SYMBOLX2_2(p, bitDPtr);
        HUFv07_DECODE_SYMBOLX2_1(p, bitDPtr);
        HUFv07_DECODE_SYMBOLX2_2(p, bitDPtr);
        HUFv07_DECODE_SYMBOLX2_0(p, bitDPtr);
    }

    /* closer to the end */
    while ((BITv07_reloadDStream(bitDPtr) == BITv07_DStream_unfinished) && (p < pEnd))
        HUFv07_DECODE_SYMBOLX2_0(p, bitDPtr);

    /* no more data to retrieve from bitstream, hence no need to reload */
    while (p < pEnd)
        HUFv07_DECODE_SYMBOLX2_0(p, bitDPtr);

    return pEnd-pStart;
}

static size_t HUFv07_decompress1X2_usingDTable_internal(
          void* dst,  size_t dstSize,
    const void* cSrc, size_t cSrcSize,
    const HUFv07_DTable* DTable)
{
    BYTE* op = (BYTE*)dst;
    BYTE* const oend = op + dstSize;
    const void* dtPtr = DTable + 1;
    const HUFv07_DEltX2* const dt = (const HUFv07_DEltX2*)dtPtr;
    BITv07_DStream_t bitD;
    DTableDesc const dtd = HUFv07_getDTableDesc(DTable);
    U32 const dtLog = dtd.tableLog;

    { size_t const errorCode = BITv07_initDStream(&bitD, cSrc, cSrcSize);
      if (HUFv07_isError(errorCode)) return errorCode; }

    HUFv07_decodeStreamX2(op, &bitD, oend, dt, dtLog);

    /* check */
    if (!BITv07_endOfDStream(&bitD)) return ERROR(corruption_detected);

    return dstSize;
}

size_t HUFv07_decompress1X2_usingDTable(
          void* dst,  size_t dstSize,
    const void* cSrc, size_t cSrcSize,
    const HUFv07_DTable* DTable)
{
    DTableDesc dtd = HUFv07_getDTableDesc(DTable);
    if (dtd.tableType != 0) return ERROR(GENERIC);
    return HUFv07_decompress1X2_usingDTable_internal(dst, dstSize, cSrc, cSrcSize, DTable);
}

size_t HUFv07_decompress1X2_DCtx (HUFv07_DTable* DCtx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
{
    const BYTE* ip = (const BYTE*) cSrc;

    size_t const hSize = HUFv07_readDTableX2 (DCtx, cSrc, cSrcSize);
    if (HUFv07_isError(hSize)) return hSize;
    if (hSize >= cSrcSize) return ERROR(srcSize_wrong);
    ip += hSize; cSrcSize -= hSize;

    return HUFv07_decompress1X2_usingDTable_internal (dst, dstSize, ip, cSrcSize, DCtx);
}

size_t HUFv07_decompress1X2 (void* dst, size_t dstSize, const void*
cSrc, size_t cSrcSize) { HUFv07_CREATE_STATIC_DTABLEX2(DTable, HUFv07_TABLELOG_MAX); return HUFv07_decompress1X2_DCtx (DTable, dst, dstSize, cSrc, cSrcSize); } static size_t HUFv07_decompress4X2_usingDTable_internal( void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, const HUFv07_DTable* DTable) { /* Check */ if (cSrcSize < 10) return ERROR(corruption_detected); /* strict minimum : jump table + 1 byte per stream */ { const BYTE* const istart = (const BYTE*) cSrc; BYTE* const ostart = (BYTE*) dst; BYTE* const oend = ostart + dstSize; const void* const dtPtr = DTable + 1; const HUFv07_DEltX2* const dt = (const HUFv07_DEltX2*)dtPtr; /* Init */ BITv07_DStream_t bitD1; BITv07_DStream_t bitD2; BITv07_DStream_t bitD3; BITv07_DStream_t bitD4; size_t const length1 = MEM_readLE16(istart); size_t const length2 = MEM_readLE16(istart+2); size_t const length3 = MEM_readLE16(istart+4); size_t const length4 = cSrcSize - (length1 + length2 + length3 + 6); const BYTE* const istart1 = istart + 6; /* jumpTable */ const BYTE* const istart2 = istart1 + length1; const BYTE* const istart3 = istart2 + length2; const BYTE* const istart4 = istart3 + length3; const size_t segmentSize = (dstSize+3) / 4; BYTE* const opStart2 = ostart + segmentSize; BYTE* const opStart3 = opStart2 + segmentSize; BYTE* const opStart4 = opStart3 + segmentSize; BYTE* op1 = ostart; BYTE* op2 = opStart2; BYTE* op3 = opStart3; BYTE* op4 = opStart4; U32 endSignal; DTableDesc const dtd = HUFv07_getDTableDesc(DTable); U32 const dtLog = dtd.tableLog; if (length4 > cSrcSize) return ERROR(corruption_detected); /* overflow */ { size_t const errorCode = BITv07_initDStream(&bitD1, istart1, length1); if (HUFv07_isError(errorCode)) return errorCode; } { size_t const errorCode = BITv07_initDStream(&bitD2, istart2, length2); if (HUFv07_isError(errorCode)) return errorCode; } { size_t const errorCode = BITv07_initDStream(&bitD3, istart3, length3); if (HUFv07_isError(errorCode)) return errorCode; } { size_t const errorCode = BITv07_initDStream(&bitD4, istart4, length4); if (HUFv07_isError(errorCode)) return errorCode; } /* 16-32 symbols per loop (4-8 symbols per stream) */ endSignal = BITv07_reloadDStream(&bitD1) | BITv07_reloadDStream(&bitD2) | BITv07_reloadDStream(&bitD3) | BITv07_reloadDStream(&bitD4); for ( ; (endSignal==BITv07_DStream_unfinished) && (op4<(oend-7)) ; ) { HUFv07_DECODE_SYMBOLX2_2(op1, &bitD1); HUFv07_DECODE_SYMBOLX2_2(op2, &bitD2); HUFv07_DECODE_SYMBOLX2_2(op3, &bitD3); HUFv07_DECODE_SYMBOLX2_2(op4, &bitD4); HUFv07_DECODE_SYMBOLX2_1(op1, &bitD1); HUFv07_DECODE_SYMBOLX2_1(op2, &bitD2); HUFv07_DECODE_SYMBOLX2_1(op3, &bitD3); HUFv07_DECODE_SYMBOLX2_1(op4, &bitD4); HUFv07_DECODE_SYMBOLX2_2(op1, &bitD1); HUFv07_DECODE_SYMBOLX2_2(op2, &bitD2); HUFv07_DECODE_SYMBOLX2_2(op3, &bitD3); HUFv07_DECODE_SYMBOLX2_2(op4, &bitD4); HUFv07_DECODE_SYMBOLX2_0(op1, &bitD1); HUFv07_DECODE_SYMBOLX2_0(op2, &bitD2); HUFv07_DECODE_SYMBOLX2_0(op3, &bitD3); HUFv07_DECODE_SYMBOLX2_0(op4, &bitD4); endSignal = BITv07_reloadDStream(&bitD1) | BITv07_reloadDStream(&bitD2) | BITv07_reloadDStream(&bitD3) | BITv07_reloadDStream(&bitD4); } /* check corruption */ if (op1 > opStart2) return ERROR(corruption_detected); if (op2 > opStart3) return ERROR(corruption_detected); if (op3 > opStart4) return ERROR(corruption_detected); /* note : op4 supposed already verified within main loop */ /* finish bitStreams one by one */ HUFv07_decodeStreamX2(op1, &bitD1, opStart2, dt, dtLog); HUFv07_decodeStreamX2(op2, &bitD2, opStart3, dt, dtLog); HUFv07_decodeStreamX2(op3, &bitD3, 
opStart4, dt, dtLog); HUFv07_decodeStreamX2(op4, &bitD4, oend, dt, dtLog); /* check */ endSignal = BITv07_endOfDStream(&bitD1) & BITv07_endOfDStream(&bitD2) & BITv07_endOfDStream(&bitD3) & BITv07_endOfDStream(&bitD4); if (!endSignal) return ERROR(corruption_detected); /* decoded size */ return dstSize; } } size_t HUFv07_decompress4X2_usingDTable( void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, const HUFv07_DTable* DTable) { DTableDesc dtd = HUFv07_getDTableDesc(DTable); if (dtd.tableType != 0) return ERROR(GENERIC); return HUFv07_decompress4X2_usingDTable_internal(dst, dstSize, cSrc, cSrcSize, DTable); } size_t HUFv07_decompress4X2_DCtx (HUFv07_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize) { const BYTE* ip = (const BYTE*) cSrc; size_t const hSize = HUFv07_readDTableX2 (dctx, cSrc, cSrcSize); if (HUFv07_isError(hSize)) return hSize; if (hSize >= cSrcSize) return ERROR(srcSize_wrong); ip += hSize; cSrcSize -= hSize; return HUFv07_decompress4X2_usingDTable_internal (dst, dstSize, ip, cSrcSize, dctx); } size_t HUFv07_decompress4X2 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize) { HUFv07_CREATE_STATIC_DTABLEX2(DTable, HUFv07_TABLELOG_MAX); return HUFv07_decompress4X2_DCtx(DTable, dst, dstSize, cSrc, cSrcSize); } /* *************************/ /* double-symbols decoding */ /* *************************/ typedef struct { U16 sequence; BYTE nbBits; BYTE length; } HUFv07_DEltX4; /* double-symbols decoding */ typedef struct { BYTE symbol; BYTE weight; } sortedSymbol_t; static void HUFv07_fillDTableX4Level2(HUFv07_DEltX4* DTable, U32 sizeLog, const U32 consumed, const U32* rankValOrigin, const int minWeight, const sortedSymbol_t* sortedSymbols, const U32 sortedListSize, U32 nbBitsBaseline, U16 baseSeq) { HUFv07_DEltX4 DElt; U32 rankVal[HUFv07_TABLELOG_ABSOLUTEMAX + 1]; /* get pre-calculated rankVal */ memcpy(rankVal, rankValOrigin, sizeof(rankVal)); /* fill skipped values */ if (minWeight>1) { U32 i, skipSize = rankVal[minWeight]; MEM_writeLE16(&(DElt.sequence), baseSeq); DElt.nbBits = (BYTE)(consumed); DElt.length = 1; for (i = 0; i < skipSize; i++) DTable[i] = DElt; } /* fill DTable */ { U32 s; for (s=0; s= 1 */ rankVal[weight] += length; }} } typedef U32 rankVal_t[HUFv07_TABLELOG_ABSOLUTEMAX][HUFv07_TABLELOG_ABSOLUTEMAX + 1]; static void HUFv07_fillDTableX4(HUFv07_DEltX4* DTable, const U32 targetLog, const sortedSymbol_t* sortedList, const U32 sortedListSize, const U32* rankStart, rankVal_t rankValOrigin, const U32 maxWeight, const U32 nbBitsBaseline) { U32 rankVal[HUFv07_TABLELOG_ABSOLUTEMAX + 1]; const int scaleLog = nbBitsBaseline - targetLog; /* note : targetLog >= srcLog, hence scaleLog <= 1 */ const U32 minBits = nbBitsBaseline - maxWeight; U32 s; memcpy(rankVal, rankValOrigin, sizeof(rankVal)); /* fill DTable */ for (s=0; s= minBits) { /* enough room for a second symbol */ U32 sortedRank; int minWeight = nbBits + scaleLog; if (minWeight < 1) minWeight = 1; sortedRank = rankStart[minWeight]; HUFv07_fillDTableX4Level2(DTable+start, targetLog-nbBits, nbBits, rankValOrigin[nbBits], minWeight, sortedList+sortedRank, sortedListSize-sortedRank, nbBitsBaseline, symbol); } else { HUFv07_DEltX4 DElt; MEM_writeLE16(&(DElt.sequence), symbol); DElt.nbBits = (BYTE)(nbBits); DElt.length = 1; { U32 u; const U32 end = start + length; for (u = start; u < end; u++) DTable[u] = DElt; } } rankVal[weight] += length; } } size_t HUFv07_readDTableX4 (HUFv07_DTable* DTable, const void* src, size_t srcSize) { BYTE weightList[HUFv07_SYMBOLVALUE_MAX + 1]; 
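    /* Descriptive note (added; not in the upstream zstd v0.7 sources): the locals
       declared around this point are scratch space for building the double-symbol
       table: weightList receives the Huffman weights decoded by HUFv07_readStats(),
       rankStats counts how many symbols carry each weight, and sortedSymbol holds
       the symbols ordered by weight before HUFv07_fillDTableX4() fills the table. */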
sortedSymbol_t sortedSymbol[HUFv07_SYMBOLVALUE_MAX + 1]; U32 rankStats[HUFv07_TABLELOG_ABSOLUTEMAX + 1] = { 0 }; U32 rankStart0[HUFv07_TABLELOG_ABSOLUTEMAX + 2] = { 0 }; U32* const rankStart = rankStart0+1; rankVal_t rankVal; U32 tableLog, maxW, sizeOfSort, nbSymbols; DTableDesc dtd = HUFv07_getDTableDesc(DTable); U32 const maxTableLog = dtd.maxTableLog; size_t iSize; void* dtPtr = DTable+1; /* force compiler to avoid strict-aliasing */ HUFv07_DEltX4* const dt = (HUFv07_DEltX4*)dtPtr; HUFv07_STATIC_ASSERT(sizeof(HUFv07_DEltX4) == sizeof(HUFv07_DTable)); /* if compilation fails here, assertion is false */ if (maxTableLog > HUFv07_TABLELOG_ABSOLUTEMAX) return ERROR(tableLog_tooLarge); /* memset(weightList, 0, sizeof(weightList)); */ /* is not necessary, even though some analyzer complain ... */ iSize = HUFv07_readStats(weightList, HUFv07_SYMBOLVALUE_MAX + 1, rankStats, &nbSymbols, &tableLog, src, srcSize); if (HUFv07_isError(iSize)) return iSize; /* check result */ if (tableLog > maxTableLog) return ERROR(tableLog_tooLarge); /* DTable can't fit code depth */ /* find maxWeight */ for (maxW = tableLog; rankStats[maxW]==0; maxW--) {} /* necessarily finds a solution before 0 */ /* Get start index of each weight */ { U32 w, nextRankStart = 0; for (w=1; w> consumed; } } } } HUFv07_fillDTableX4(dt, maxTableLog, sortedSymbol, sizeOfSort, rankStart0, rankVal, maxW, tableLog+1); dtd.tableLog = (BYTE)maxTableLog; dtd.tableType = 1; memcpy(DTable, &dtd, sizeof(dtd)); return iSize; } static U32 HUFv07_decodeSymbolX4(void* op, BITv07_DStream_t* DStream, const HUFv07_DEltX4* dt, const U32 dtLog) { const size_t val = BITv07_lookBitsFast(DStream, dtLog); /* note : dtLog >= 1 */ memcpy(op, dt+val, 2); BITv07_skipBits(DStream, dt[val].nbBits); return dt[val].length; } static U32 HUFv07_decodeLastSymbolX4(void* op, BITv07_DStream_t* DStream, const HUFv07_DEltX4* dt, const U32 dtLog) { const size_t val = BITv07_lookBitsFast(DStream, dtLog); /* note : dtLog >= 1 */ memcpy(op, dt+val, 1); if (dt[val].length==1) BITv07_skipBits(DStream, dt[val].nbBits); else { if (DStream->bitsConsumed < (sizeof(DStream->bitContainer)*8)) { BITv07_skipBits(DStream, dt[val].nbBits); if (DStream->bitsConsumed > (sizeof(DStream->bitContainer)*8)) DStream->bitsConsumed = (sizeof(DStream->bitContainer)*8); /* ugly hack; works only because it's the last symbol. 
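                     Only the first byte of the 2-symbol entry is emitted above, and the bit
                     cost of that single symbol alone is not stored in the table, so skipping
                     dt[val].nbBits can overshoot; clamping bitsConsumed to the container size
                     is harmless precisely because no further symbol will be decoded.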
Note : can't easily extract nbBits from just this symbol */ } } return 1; } #define HUFv07_DECODE_SYMBOLX4_0(ptr, DStreamPtr) \ ptr += HUFv07_decodeSymbolX4(ptr, DStreamPtr, dt, dtLog) #define HUFv07_DECODE_SYMBOLX4_1(ptr, DStreamPtr) \ if (MEM_64bits() || (HUFv07_TABLELOG_MAX<=12)) \ ptr += HUFv07_decodeSymbolX4(ptr, DStreamPtr, dt, dtLog) #define HUFv07_DECODE_SYMBOLX4_2(ptr, DStreamPtr) \ if (MEM_64bits()) \ ptr += HUFv07_decodeSymbolX4(ptr, DStreamPtr, dt, dtLog) static inline size_t HUFv07_decodeStreamX4(BYTE* p, BITv07_DStream_t* bitDPtr, BYTE* const pEnd, const HUFv07_DEltX4* const dt, const U32 dtLog) { BYTE* const pStart = p; /* up to 8 symbols at a time */ while ((BITv07_reloadDStream(bitDPtr) == BITv07_DStream_unfinished) && (p < pEnd-7)) { HUFv07_DECODE_SYMBOLX4_2(p, bitDPtr); HUFv07_DECODE_SYMBOLX4_1(p, bitDPtr); HUFv07_DECODE_SYMBOLX4_2(p, bitDPtr); HUFv07_DECODE_SYMBOLX4_0(p, bitDPtr); } /* closer to end : up to 2 symbols at a time */ while ((BITv07_reloadDStream(bitDPtr) == BITv07_DStream_unfinished) && (p <= pEnd-2)) HUFv07_DECODE_SYMBOLX4_0(p, bitDPtr); while (p <= pEnd-2) HUFv07_DECODE_SYMBOLX4_0(p, bitDPtr); /* no need to reload : reached the end of DStream */ if (p < pEnd) p += HUFv07_decodeLastSymbolX4(p, bitDPtr, dt, dtLog); return p-pStart; } static size_t HUFv07_decompress1X4_usingDTable_internal( void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, const HUFv07_DTable* DTable) { BITv07_DStream_t bitD; /* Init */ { size_t const errorCode = BITv07_initDStream(&bitD, cSrc, cSrcSize); if (HUFv07_isError(errorCode)) return errorCode; } /* decode */ { BYTE* const ostart = (BYTE*) dst; BYTE* const oend = ostart + dstSize; const void* const dtPtr = DTable+1; /* force compiler to not use strict-aliasing */ const HUFv07_DEltX4* const dt = (const HUFv07_DEltX4*)dtPtr; DTableDesc const dtd = HUFv07_getDTableDesc(DTable); HUFv07_decodeStreamX4(ostart, &bitD, oend, dt, dtd.tableLog); } /* check */ if (!BITv07_endOfDStream(&bitD)) return ERROR(corruption_detected); /* decoded size */ return dstSize; } size_t HUFv07_decompress1X4_usingDTable( void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, const HUFv07_DTable* DTable) { DTableDesc dtd = HUFv07_getDTableDesc(DTable); if (dtd.tableType != 1) return ERROR(GENERIC); return HUFv07_decompress1X4_usingDTable_internal(dst, dstSize, cSrc, cSrcSize, DTable); } size_t HUFv07_decompress1X4_DCtx (HUFv07_DTable* DCtx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize) { const BYTE* ip = (const BYTE*) cSrc; size_t const hSize = HUFv07_readDTableX4 (DCtx, cSrc, cSrcSize); if (HUFv07_isError(hSize)) return hSize; if (hSize >= cSrcSize) return ERROR(srcSize_wrong); ip += hSize; cSrcSize -= hSize; return HUFv07_decompress1X4_usingDTable_internal (dst, dstSize, ip, cSrcSize, DCtx); } size_t HUFv07_decompress1X4 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize) { HUFv07_CREATE_STATIC_DTABLEX4(DTable, HUFv07_TABLELOG_MAX); return HUFv07_decompress1X4_DCtx(DTable, dst, dstSize, cSrc, cSrcSize); } static size_t HUFv07_decompress4X4_usingDTable_internal( void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, const HUFv07_DTable* DTable) { if (cSrcSize < 10) return ERROR(corruption_detected); /* strict minimum : jump table + 1 byte per stream */ { const BYTE* const istart = (const BYTE*) cSrc; BYTE* const ostart = (BYTE*) dst; BYTE* const oend = ostart + dstSize; const void* const dtPtr = DTable+1; const HUFv07_DEltX4* const dt = (const HUFv07_DEltX4*)dtPtr; /* Init */ BITv07_DStream_t bitD1; 
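        /* Descriptive note (added): the 4-stream block starts with a 6-byte jump
           table holding three little-endian 16-bit lengths for streams 1-3; the
           length of stream 4 is implied (cSrcSize minus the jump table and the
           first three lengths, see length4 below). Each stream regenerates one
           quarter of the output, segmentSize = (dstSize+3)/4. */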
BITv07_DStream_t bitD2; BITv07_DStream_t bitD3; BITv07_DStream_t bitD4; size_t const length1 = MEM_readLE16(istart); size_t const length2 = MEM_readLE16(istart+2); size_t const length3 = MEM_readLE16(istart+4); size_t const length4 = cSrcSize - (length1 + length2 + length3 + 6); const BYTE* const istart1 = istart + 6; /* jumpTable */ const BYTE* const istart2 = istart1 + length1; const BYTE* const istart3 = istart2 + length2; const BYTE* const istart4 = istart3 + length3; size_t const segmentSize = (dstSize+3) / 4; BYTE* const opStart2 = ostart + segmentSize; BYTE* const opStart3 = opStart2 + segmentSize; BYTE* const opStart4 = opStart3 + segmentSize; BYTE* op1 = ostart; BYTE* op2 = opStart2; BYTE* op3 = opStart3; BYTE* op4 = opStart4; U32 endSignal; DTableDesc const dtd = HUFv07_getDTableDesc(DTable); U32 const dtLog = dtd.tableLog; if (length4 > cSrcSize) return ERROR(corruption_detected); /* overflow */ { size_t const errorCode = BITv07_initDStream(&bitD1, istart1, length1); if (HUFv07_isError(errorCode)) return errorCode; } { size_t const errorCode = BITv07_initDStream(&bitD2, istart2, length2); if (HUFv07_isError(errorCode)) return errorCode; } { size_t const errorCode = BITv07_initDStream(&bitD3, istart3, length3); if (HUFv07_isError(errorCode)) return errorCode; } { size_t const errorCode = BITv07_initDStream(&bitD4, istart4, length4); if (HUFv07_isError(errorCode)) return errorCode; } /* 16-32 symbols per loop (4-8 symbols per stream) */ endSignal = BITv07_reloadDStream(&bitD1) | BITv07_reloadDStream(&bitD2) | BITv07_reloadDStream(&bitD3) | BITv07_reloadDStream(&bitD4); for ( ; (endSignal==BITv07_DStream_unfinished) && (op4<(oend-7)) ; ) { HUFv07_DECODE_SYMBOLX4_2(op1, &bitD1); HUFv07_DECODE_SYMBOLX4_2(op2, &bitD2); HUFv07_DECODE_SYMBOLX4_2(op3, &bitD3); HUFv07_DECODE_SYMBOLX4_2(op4, &bitD4); HUFv07_DECODE_SYMBOLX4_1(op1, &bitD1); HUFv07_DECODE_SYMBOLX4_1(op2, &bitD2); HUFv07_DECODE_SYMBOLX4_1(op3, &bitD3); HUFv07_DECODE_SYMBOLX4_1(op4, &bitD4); HUFv07_DECODE_SYMBOLX4_2(op1, &bitD1); HUFv07_DECODE_SYMBOLX4_2(op2, &bitD2); HUFv07_DECODE_SYMBOLX4_2(op3, &bitD3); HUFv07_DECODE_SYMBOLX4_2(op4, &bitD4); HUFv07_DECODE_SYMBOLX4_0(op1, &bitD1); HUFv07_DECODE_SYMBOLX4_0(op2, &bitD2); HUFv07_DECODE_SYMBOLX4_0(op3, &bitD3); HUFv07_DECODE_SYMBOLX4_0(op4, &bitD4); endSignal = BITv07_reloadDStream(&bitD1) | BITv07_reloadDStream(&bitD2) | BITv07_reloadDStream(&bitD3) | BITv07_reloadDStream(&bitD4); } /* check corruption */ if (op1 > opStart2) return ERROR(corruption_detected); if (op2 > opStart3) return ERROR(corruption_detected); if (op3 > opStart4) return ERROR(corruption_detected); /* note : op4 supposed already verified within main loop */ /* finish bitStreams one by one */ HUFv07_decodeStreamX4(op1, &bitD1, opStart2, dt, dtLog); HUFv07_decodeStreamX4(op2, &bitD2, opStart3, dt, dtLog); HUFv07_decodeStreamX4(op3, &bitD3, opStart4, dt, dtLog); HUFv07_decodeStreamX4(op4, &bitD4, oend, dt, dtLog); /* check */ { U32 const endCheck = BITv07_endOfDStream(&bitD1) & BITv07_endOfDStream(&bitD2) & BITv07_endOfDStream(&bitD3) & BITv07_endOfDStream(&bitD4); if (!endCheck) return ERROR(corruption_detected); } /* decoded size */ return dstSize; } } size_t HUFv07_decompress4X4_usingDTable( void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, const HUFv07_DTable* DTable) { DTableDesc dtd = HUFv07_getDTableDesc(DTable); if (dtd.tableType != 1) return ERROR(GENERIC); return HUFv07_decompress4X4_usingDTable_internal(dst, dstSize, cSrc, cSrcSize, DTable); } size_t HUFv07_decompress4X4_DCtx 
(HUFv07_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize) { const BYTE* ip = (const BYTE*) cSrc; size_t hSize = HUFv07_readDTableX4 (dctx, cSrc, cSrcSize); if (HUFv07_isError(hSize)) return hSize; if (hSize >= cSrcSize) return ERROR(srcSize_wrong); ip += hSize; cSrcSize -= hSize; return HUFv07_decompress4X4_usingDTable_internal(dst, dstSize, ip, cSrcSize, dctx); } size_t HUFv07_decompress4X4 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize) { HUFv07_CREATE_STATIC_DTABLEX4(DTable, HUFv07_TABLELOG_MAX); return HUFv07_decompress4X4_DCtx(DTable, dst, dstSize, cSrc, cSrcSize); } /* ********************************/ /* Generic decompression selector */ /* ********************************/ size_t HUFv07_decompress1X_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUFv07_DTable* DTable) { DTableDesc const dtd = HUFv07_getDTableDesc(DTable); return dtd.tableType ? HUFv07_decompress1X4_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable) : HUFv07_decompress1X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable); } size_t HUFv07_decompress4X_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUFv07_DTable* DTable) { DTableDesc const dtd = HUFv07_getDTableDesc(DTable); return dtd.tableType ? HUFv07_decompress4X4_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable) : HUFv07_decompress4X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable); } typedef struct { U32 tableTime; U32 decode256Time; } algo_time_t; static const algo_time_t algoTime[16 /* Quantization */][3 /* single, double, quad */] = { /* single, double, quad */ {{0,0}, {1,1}, {2,2}}, /* Q==0 : impossible */ {{0,0}, {1,1}, {2,2}}, /* Q==1 : impossible */ {{ 38,130}, {1313, 74}, {2151, 38}}, /* Q == 2 : 12-18% */ {{ 448,128}, {1353, 74}, {2238, 41}}, /* Q == 3 : 18-25% */ {{ 556,128}, {1353, 74}, {2238, 47}}, /* Q == 4 : 25-32% */ {{ 714,128}, {1418, 74}, {2436, 53}}, /* Q == 5 : 32-38% */ {{ 883,128}, {1437, 74}, {2464, 61}}, /* Q == 6 : 38-44% */ {{ 897,128}, {1515, 75}, {2622, 68}}, /* Q == 7 : 44-50% */ {{ 926,128}, {1613, 75}, {2730, 75}}, /* Q == 8 : 50-56% */ {{ 947,128}, {1729, 77}, {3359, 77}}, /* Q == 9 : 56-62% */ {{1107,128}, {2083, 81}, {4006, 84}}, /* Q ==10 : 62-69% */ {{1177,128}, {2379, 87}, {4785, 88}}, /* Q ==11 : 69-75% */ {{1242,128}, {2415, 93}, {5155, 84}}, /* Q ==12 : 75-81% */ {{1349,128}, {2644,106}, {5260,106}}, /* Q ==13 : 81-87% */ {{1455,128}, {2422,124}, {4174,124}}, /* Q ==14 : 87-93% */ {{ 722,128}, {1891,145}, {1936,146}}, /* Q ==15 : 93-99% */ }; /** HUFv07_selectDecoder() : * Tells which decoder is likely to decode faster, * based on a set of pre-determined metrics. * @return : 0==HUFv07_decompress4X2, 1==HUFv07_decompress4X4 . 
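*   Worked example (added for illustration, not part of the original comment) :
*   with dstSize = 64 KB and cSrcSize = 32 KB, Q = 8 and D256 = 256, so
*   DTime0 = 926 + 128*256 = 33694 and DTime1 = 1613 + 75*256 = 20813;
*   after the +1/8 penalty DTime1 becomes 23414 < 33694, hence the
*   double-symbol decoder (HUFv07_decompress4X4) is selected.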
* Assumption : 0 < cSrcSize < dstSize <= 128 KB */ U32 HUFv07_selectDecoder (size_t dstSize, size_t cSrcSize) { /* decoder timing evaluation */ U32 const Q = (U32)(cSrcSize * 16 / dstSize); /* Q < 16 since dstSize > cSrcSize */ U32 const D256 = (U32)(dstSize >> 8); U32 const DTime0 = algoTime[Q][0].tableTime + (algoTime[Q][0].decode256Time * D256); U32 DTime1 = algoTime[Q][1].tableTime + (algoTime[Q][1].decode256Time * D256); DTime1 += DTime1 >> 3; /* advantage to algorithm using less memory, for cache eviction */ return DTime1 < DTime0; } typedef size_t (*decompressionAlgo)(void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); size_t HUFv07_decompress (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize) { static const decompressionAlgo decompress[2] = { HUFv07_decompress4X2, HUFv07_decompress4X4 }; /* validation checks */ if (dstSize == 0) return ERROR(dstSize_tooSmall); if (cSrcSize > dstSize) return ERROR(corruption_detected); /* invalid */ if (cSrcSize == dstSize) { memcpy(dst, cSrc, dstSize); return dstSize; } /* not compressed */ if (cSrcSize == 1) { memset(dst, *(const BYTE*)cSrc, dstSize); return dstSize; } /* RLE */ { U32 const algoNb = HUFv07_selectDecoder(dstSize, cSrcSize); return decompress[algoNb](dst, dstSize, cSrc, cSrcSize); } /* return HUFv07_decompress4X2(dst, dstSize, cSrc, cSrcSize); */ /* multi-streams single-symbol decoding */ /* return HUFv07_decompress4X4(dst, dstSize, cSrc, cSrcSize); */ /* multi-streams double-symbols decoding */ } size_t HUFv07_decompress4X_DCtx (HUFv07_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize) { /* validation checks */ if (dstSize == 0) return ERROR(dstSize_tooSmall); if (cSrcSize > dstSize) return ERROR(corruption_detected); /* invalid */ if (cSrcSize == dstSize) { memcpy(dst, cSrc, dstSize); return dstSize; } /* not compressed */ if (cSrcSize == 1) { memset(dst, *(const BYTE*)cSrc, dstSize); return dstSize; } /* RLE */ { U32 const algoNb = HUFv07_selectDecoder(dstSize, cSrcSize); return algoNb ? HUFv07_decompress4X4_DCtx(dctx, dst, dstSize, cSrc, cSrcSize) : HUFv07_decompress4X2_DCtx(dctx, dst, dstSize, cSrc, cSrcSize) ; } } size_t HUFv07_decompress4X_hufOnly (HUFv07_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize) { /* validation checks */ if (dstSize == 0) return ERROR(dstSize_tooSmall); if ((cSrcSize >= dstSize) || (cSrcSize <= 1)) return ERROR(corruption_detected); /* invalid */ { U32 const algoNb = HUFv07_selectDecoder(dstSize, cSrcSize); return algoNb ? HUFv07_decompress4X4_DCtx(dctx, dst, dstSize, cSrc, cSrcSize) : HUFv07_decompress4X2_DCtx(dctx, dst, dstSize, cSrc, cSrcSize) ; } } size_t HUFv07_decompress1X_DCtx (HUFv07_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize) { /* validation checks */ if (dstSize == 0) return ERROR(dstSize_tooSmall); if (cSrcSize > dstSize) return ERROR(corruption_detected); /* invalid */ if (cSrcSize == dstSize) { memcpy(dst, cSrc, dstSize); return dstSize; } /* not compressed */ if (cSrcSize == 1) { memset(dst, *(const BYTE*)cSrc, dstSize); return dstSize; } /* RLE */ { U32 const algoNb = HUFv07_selectDecoder(dstSize, cSrcSize); return algoNb ? HUFv07_decompress1X4_DCtx(dctx, dst, dstSize, cSrc, cSrcSize) : HUFv07_decompress1X2_DCtx(dctx, dst, dstSize, cSrc, cSrcSize) ; } } /* Common functions of Zstd compression library Copyright (C) 2015-2016, Yann Collet. 
BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. You can contact the author at : - zstd homepage : http://www.zstd.net/ */ /*-**************************************** * ZSTD Error Management ******************************************/ /*! ZSTDv07_isError() : * tells if a return value is an error code */ unsigned ZSTDv07_isError(size_t code) { return ERR_isError(code); } /*! ZSTDv07_getErrorName() : * provides error code string from function result (useful for debugging) */ const char* ZSTDv07_getErrorName(size_t code) { return ERR_getErrorName(code); } /* ************************************************************** * ZBUFF Error Management ****************************************************************/ unsigned ZBUFFv07_isError(size_t errorCode) { return ERR_isError(errorCode); } const char* ZBUFFv07_getErrorName(size_t errorCode) { return ERR_getErrorName(errorCode); } static void* ZSTDv07_defaultAllocFunction(void* opaque, size_t size) { void* address = malloc(size); (void)opaque; /* printf("alloc %p, %d opaque=%p \n", address, (int)size, opaque); */ return address; } static void ZSTDv07_defaultFreeFunction(void* opaque, void* address) { (void)opaque; /* if (address) printf("free %p opaque=%p \n", address, opaque); */ free(address); } /* zstd_internal - common functions to include Header File for include Copyright (C) 2014-2016, Yann Collet. BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. You can contact the author at : - zstd homepage : https://www.zstd.net */ #ifndef ZSTDv07_CCOMMON_H_MODULE #define ZSTDv07_CCOMMON_H_MODULE /*-************************************* * Common macros ***************************************/ #define MIN(a,b) ((a)<(b) ? (a) : (b)) #define MAX(a,b) ((a)>(b) ? (a) : (b)) /*-************************************* * Common constants ***************************************/ #define ZSTDv07_OPT_NUM (1<<12) #define ZSTDv07_DICT_MAGIC 0xEC30A437 /* v0.7 */ #define ZSTDv07_REP_NUM 3 #define ZSTDv07_REP_INIT ZSTDv07_REP_NUM #define ZSTDv07_REP_MOVE (ZSTDv07_REP_NUM-1) static const U32 repStartValue[ZSTDv07_REP_NUM] = { 1, 4, 8 }; #define KB *(1 <<10) #define MB *(1 <<20) #define GB *(1U<<30) #define BIT7 128 #define BIT6 64 #define BIT5 32 #define BIT4 16 #define BIT1 2 #define BIT0 1 #define ZSTDv07_WINDOWLOG_ABSOLUTEMIN 10 static const size_t ZSTDv07_fcs_fieldSize[4] = { 0, 2, 4, 8 }; static const size_t ZSTDv07_did_fieldSize[4] = { 0, 1, 2, 4 }; #define ZSTDv07_BLOCKHEADERSIZE 3 /* C standard doesn't allow `static const` variable to be init using another `static const` variable */ static const size_t ZSTDv07_blockHeaderSize = ZSTDv07_BLOCKHEADERSIZE; typedef enum { bt_compressed, bt_raw, bt_rle, bt_end } blockType_t; #define MIN_SEQUENCES_SIZE 1 /* nbSeq==0 */ #define MIN_CBLOCK_SIZE (1 /*litCSize*/ + 1 /* RLE or RAW */ + MIN_SEQUENCES_SIZE /* nbSeq==0 */) /* for a non-null block */ #define HufLog 12 typedef enum { lbt_huffman, lbt_repeat, lbt_raw, lbt_rle } litBlockType_t; #define LONGNBSEQ 0x7F00 #define MINMATCH 3 #define EQUAL_READ32 4 #define Litbits 8 #define MaxLit ((1< /* For Visual 2005 */ # pragma warning(disable : 4127) /* disable: C4127: conditional expression is constant */ # pragma warning(disable : 4324) /* disable: C4324: padded structure */ # pragma warning(disable : 4100) /* disable: C4100: unreferenced formal parameter */ #endif /*-************************************* * Macros ***************************************/ #define ZSTDv07_isError ERR_isError /* for inlining */ #define FSEv07_isError ERR_isError #define HUFv07_isError ERR_isError /*_******************************************************* * Memory operations **********************************************************/ static void ZSTDv07_copy4(void* dst, const void* src) { memcpy(dst, src, 4); } /*-************************************************************* * Context management ***************************************************************/ typedef enum { ZSTDds_getFrameHeaderSize, ZSTDds_decodeFrameHeader, ZSTDds_decodeBlockHeader, ZSTDds_decompressBlock, ZSTDds_decodeSkippableHeader, ZSTDds_skipFrame } ZSTDv07_dStage; struct ZSTDv07_DCtx_s { FSEv07_DTable LLTable[FSEv07_DTABLE_SIZE_U32(LLFSELog)]; FSEv07_DTable OffTable[FSEv07_DTABLE_SIZE_U32(OffFSELog)]; FSEv07_DTable MLTable[FSEv07_DTABLE_SIZE_U32(MLFSELog)]; HUFv07_DTable hufTable[HUFv07_DTABLE_SIZE(HufLog)]; /* can accommodate HUFv07_decompress4X */ const void* previousDstEnd; const void* base; const void* 
vBase; const void* dictEnd; size_t expected; U32 rep[3]; ZSTDv07_frameParams fParams; blockType_t bType; /* used in ZSTDv07_decompressContinue(), to transfer blockType between header decoding and block decoding stages */ ZSTDv07_dStage stage; U32 litEntropy; U32 fseEntropy; XXH64_state_t xxhState; size_t headerSize; U32 dictID; const BYTE* litPtr; ZSTDv07_customMem customMem; size_t litSize; BYTE litBuffer[ZSTDv07_BLOCKSIZE_ABSOLUTEMAX + WILDCOPY_OVERLENGTH]; BYTE headerBuffer[ZSTDv07_FRAMEHEADERSIZE_MAX]; }; /* typedef'd to ZSTDv07_DCtx within "zstd_static.h" */ int ZSTDv07_isSkipFrame(ZSTDv07_DCtx* dctx); size_t ZSTDv07_sizeofDCtx (const ZSTDv07_DCtx* dctx) { return sizeof(*dctx); } size_t ZSTDv07_estimateDCtxSize(void) { return sizeof(ZSTDv07_DCtx); } size_t ZSTDv07_decompressBegin(ZSTDv07_DCtx* dctx) { dctx->expected = ZSTDv07_frameHeaderSize_min; dctx->stage = ZSTDds_getFrameHeaderSize; dctx->previousDstEnd = NULL; dctx->base = NULL; dctx->vBase = NULL; dctx->dictEnd = NULL; dctx->hufTable[0] = (HUFv07_DTable)((HufLog)*0x1000001); dctx->litEntropy = dctx->fseEntropy = 0; dctx->dictID = 0; { int i; for (i=0; irep[i] = repStartValue[i]; } return 0; } ZSTDv07_DCtx* ZSTDv07_createDCtx_advanced(ZSTDv07_customMem customMem) { ZSTDv07_DCtx* dctx; if (!customMem.customAlloc && !customMem.customFree) customMem = defaultCustomMem; if (!customMem.customAlloc || !customMem.customFree) return NULL; dctx = (ZSTDv07_DCtx*) customMem.customAlloc(customMem.opaque, sizeof(ZSTDv07_DCtx)); if (!dctx) return NULL; memcpy(&dctx->customMem, &customMem, sizeof(ZSTDv07_customMem)); ZSTDv07_decompressBegin(dctx); return dctx; } ZSTDv07_DCtx* ZSTDv07_createDCtx(void) { return ZSTDv07_createDCtx_advanced(defaultCustomMem); } size_t ZSTDv07_freeDCtx(ZSTDv07_DCtx* dctx) { if (dctx==NULL) return 0; /* support free on NULL */ dctx->customMem.customFree(dctx->customMem.opaque, dctx); return 0; /* reserved as a potential error code in the future */ } void ZSTDv07_copyDCtx(ZSTDv07_DCtx* dstDCtx, const ZSTDv07_DCtx* srcDCtx) { memcpy(dstDCtx, srcDCtx, sizeof(ZSTDv07_DCtx) - (ZSTDv07_BLOCKSIZE_ABSOLUTEMAX+WILDCOPY_OVERLENGTH + ZSTDv07_frameHeaderSize_max)); /* no need to copy workspace */ } /*-************************************************************* * Decompression section ***************************************************************/ /* Frame format description Frame Header - [ Block Header - Block ] - Frame End 1) Frame Header - 4 bytes - Magic Number : ZSTDv07_MAGICNUMBER (defined within zstd.h) - 1 byte - Frame Descriptor 2) Block Header - 3 bytes, starting with a 2-bits descriptor Uncompressed, Compressed, Frame End, unused 3) Block See Block Format Description 4) Frame End - 3 bytes, compatible with Block Header */ /* Frame Header : 1 byte - FrameHeaderDescription : bit 0-1 : dictID (0, 1, 2 or 4 bytes) bit 2 : checksumFlag bit 3 : reserved (must be zero) bit 4 : reserved (unused, can be any value) bit 5 : Single Segment (if 1, WindowLog byte is not present) bit 6-7 : FrameContentFieldSize (0, 2, 4, or 8) if (SkippedWindowLog && !FrameContentFieldsize) FrameContentFieldsize=1; Optional : WindowLog (0 or 1 byte) bit 0-2 : octal Fractional (1/8th) bit 3-7 : Power of 2, with 0 = 1 KB (up to 2 TB) Optional : dictID (0, 1, 2 or 4 bytes) Automatic adaptation 0 : no dictID 1 : 1 - 255 2 : 256 - 65535 4 : all other values Optional : content size (0, 1, 2, 4 or 8 bytes) 0 : unknown (fcfs==0 and swl==0) 1 : 0-255 bytes (fcfs==0 and swl==1) 2 : 256 - 65535+256 (fcfs==1) 4 : 0 - 4GB-1 (fcfs==2) 8 : 0 - 16EB-1 (fcfs==3) 
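   Worked example (added for illustration, not part of the original description) :
   a Frame Header Description byte of 0xA4 (binary 1010 0100) encodes
   dictID size code 0 (no dictID field), checksumFlag = 1,
   Single Segment = 1 (so no WindowLog byte follows) and
   FrameContentFieldSize code 2 (a 4-byte content size field).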
*/ /* Compressed Block, format description Block = Literal Section - Sequences Section Prerequisite : size of (compressed) block, maximum size of regenerated data 1) Literal Section 1.1) Header : 1-5 bytes flags: 2 bits 00 compressed by Huff0 01 unused 10 is Raw (uncompressed) 11 is Rle Note : using 01 => Huff0 with precomputed table ? Note : delta map ? => compressed ? 1.1.1) Huff0-compressed literal block : 3-5 bytes srcSize < 1 KB => 3 bytes (2-2-10-10) => single stream srcSize < 1 KB => 3 bytes (2-2-10-10) srcSize < 16KB => 4 bytes (2-2-14-14) else => 5 bytes (2-2-18-18) big endian convention 1.1.2) Raw (uncompressed) literal block header : 1-3 bytes size : 5 bits: (IS_RAW<<6) + (0<<4) + size 12 bits: (IS_RAW<<6) + (2<<4) + (size>>8) size&255 20 bits: (IS_RAW<<6) + (3<<4) + (size>>16) size>>8&255 size&255 1.1.3) Rle (repeated single byte) literal block header : 1-3 bytes size : 5 bits: (IS_RLE<<6) + (0<<4) + size 12 bits: (IS_RLE<<6) + (2<<4) + (size>>8) size&255 20 bits: (IS_RLE<<6) + (3<<4) + (size>>16) size>>8&255 size&255 1.1.4) Huff0-compressed literal block, using precomputed CTables : 3-5 bytes srcSize < 1 KB => 3 bytes (2-2-10-10) => single stream srcSize < 1 KB => 3 bytes (2-2-10-10) srcSize < 16KB => 4 bytes (2-2-14-14) else => 5 bytes (2-2-18-18) big endian convention 1- CTable available (stored into workspace ?) 2- Small input (fast heuristic ? Full comparison ? depend on clevel ?) 1.2) Literal block content 1.2.1) Huff0 block, using sizes from header See Huff0 format 1.2.2) Huff0 block, using prepared table 1.2.3) Raw content 1.2.4) single byte 2) Sequences section TO DO */ /** ZSTDv07_frameHeaderSize() : * srcSize must be >= ZSTDv07_frameHeaderSize_min. * @return : size of the Frame Header */ static size_t ZSTDv07_frameHeaderSize(const void* src, size_t srcSize) { if (srcSize < ZSTDv07_frameHeaderSize_min) return ERROR(srcSize_wrong); { BYTE const fhd = ((const BYTE*)src)[4]; U32 const dictID= fhd & 3; U32 const directMode = (fhd >> 5) & 1; U32 const fcsId = fhd >> 6; return ZSTDv07_frameHeaderSize_min + !directMode + ZSTDv07_did_fieldSize[dictID] + ZSTDv07_fcs_fieldSize[fcsId] + (directMode && !ZSTDv07_fcs_fieldSize[fcsId]); } } /** ZSTDv07_getFrameParams() : * decode Frame Header, or require larger `srcSize`. 
* @return : 0, `fparamsPtr` is correctly filled, * >0, `srcSize` is too small, result is expected `srcSize`, * or an error code, which can be tested using ZSTDv07_isError() */ size_t ZSTDv07_getFrameParams(ZSTDv07_frameParams* fparamsPtr, const void* src, size_t srcSize) { const BYTE* ip = (const BYTE*)src; if (srcSize < ZSTDv07_frameHeaderSize_min) return ZSTDv07_frameHeaderSize_min; memset(fparamsPtr, 0, sizeof(*fparamsPtr)); if (MEM_readLE32(src) != ZSTDv07_MAGICNUMBER) { if ((MEM_readLE32(src) & 0xFFFFFFF0U) == ZSTDv07_MAGIC_SKIPPABLE_START) { if (srcSize < ZSTDv07_skippableHeaderSize) return ZSTDv07_skippableHeaderSize; /* magic number + skippable frame length */ fparamsPtr->frameContentSize = MEM_readLE32((const char *)src + 4); fparamsPtr->windowSize = 0; /* windowSize==0 means a frame is skippable */ return 0; } return ERROR(prefix_unknown); } /* ensure there is enough `srcSize` to fully read/decode frame header */ { size_t const fhsize = ZSTDv07_frameHeaderSize(src, srcSize); if (srcSize < fhsize) return fhsize; } { BYTE const fhdByte = ip[4]; size_t pos = 5; U32 const dictIDSizeCode = fhdByte&3; U32 const checksumFlag = (fhdByte>>2)&1; U32 const directMode = (fhdByte>>5)&1; U32 const fcsID = fhdByte>>6; U32 const windowSizeMax = 1U << ZSTDv07_WINDOWLOG_MAX; U32 windowSize = 0; U32 dictID = 0; U64 frameContentSize = 0; if ((fhdByte & 0x08) != 0) /* reserved bits, which must be zero */ return ERROR(frameParameter_unsupported); if (!directMode) { BYTE const wlByte = ip[pos++]; U32 const windowLog = (wlByte >> 3) + ZSTDv07_WINDOWLOG_ABSOLUTEMIN; if (windowLog > ZSTDv07_WINDOWLOG_MAX) return ERROR(frameParameter_unsupported); windowSize = (1U << windowLog); windowSize += (windowSize >> 3) * (wlByte&7); } switch(dictIDSizeCode) { default: /* impossible */ case 0 : break; case 1 : dictID = ip[pos]; pos++; break; case 2 : dictID = MEM_readLE16(ip+pos); pos+=2; break; case 3 : dictID = MEM_readLE32(ip+pos); pos+=4; break; } switch(fcsID) { default: /* impossible */ case 0 : if (directMode) frameContentSize = ip[pos]; break; case 1 : frameContentSize = MEM_readLE16(ip+pos)+256; break; case 2 : frameContentSize = MEM_readLE32(ip+pos); break; case 3 : frameContentSize = MEM_readLE64(ip+pos); break; } if (!windowSize) windowSize = (U32)frameContentSize; if (windowSize > windowSizeMax) return ERROR(frameParameter_unsupported); fparamsPtr->frameContentSize = frameContentSize; fparamsPtr->windowSize = windowSize; fparamsPtr->dictID = dictID; fparamsPtr->checksumFlag = checksumFlag; } return 0; } /** ZSTDv07_getDecompressedSize() : * compatible with legacy mode * @return : decompressed size if known, 0 otherwise note : 0 can mean any of the following : - decompressed size is not provided within frame header - frame header unknown / not supported - frame header not completely provided (`srcSize` too small) */ unsigned long long ZSTDv07_getDecompressedSize(const void* src, size_t srcSize) { ZSTDv07_frameParams fparams; size_t const frResult = ZSTDv07_getFrameParams(&fparams, src, srcSize); if (frResult!=0) return 0; return fparams.frameContentSize; } /** ZSTDv07_decodeFrameHeader() : * `srcSize` must be the size provided by ZSTDv07_frameHeaderSize(). 
* @return : 0 if success, or an error code, which can be tested using ZSTDv07_isError() */ static size_t ZSTDv07_decodeFrameHeader(ZSTDv07_DCtx* dctx, const void* src, size_t srcSize) { size_t const result = ZSTDv07_getFrameParams(&(dctx->fParams), src, srcSize); if (dctx->fParams.dictID && (dctx->dictID != dctx->fParams.dictID)) return ERROR(dictionary_wrong); if (dctx->fParams.checksumFlag) XXH64_reset(&dctx->xxhState, 0); return result; } typedef struct { blockType_t blockType; U32 origSize; } blockProperties_t; /*! ZSTDv07_getcBlockSize() : * Provides the size of compressed block from block header `src` */ static size_t ZSTDv07_getcBlockSize(const void* src, size_t srcSize, blockProperties_t* bpPtr) { const BYTE* const in = (const BYTE* const)src; U32 cSize; if (srcSize < ZSTDv07_blockHeaderSize) return ERROR(srcSize_wrong); bpPtr->blockType = (blockType_t)((*in) >> 6); cSize = in[2] + (in[1]<<8) + ((in[0] & 7)<<16); bpPtr->origSize = (bpPtr->blockType == bt_rle) ? cSize : 0; if (bpPtr->blockType == bt_end) return 0; if (bpPtr->blockType == bt_rle) return 1; return cSize; } static size_t ZSTDv07_copyRawBlock(void* dst, size_t dstCapacity, const void* src, size_t srcSize) { if (srcSize > dstCapacity) return ERROR(dstSize_tooSmall); if (srcSize > 0) { memcpy(dst, src, srcSize); } return srcSize; } /*! ZSTDv07_decodeLiteralsBlock() : @return : nb of bytes read from src (< srcSize ) */ static size_t ZSTDv07_decodeLiteralsBlock(ZSTDv07_DCtx* dctx, const void* src, size_t srcSize) /* note : srcSize < BLOCKSIZE */ { const BYTE* const istart = (const BYTE*) src; if (srcSize < MIN_CBLOCK_SIZE) return ERROR(corruption_detected); switch((litBlockType_t)(istart[0]>> 6)) { case lbt_huffman: { size_t litSize, litCSize, singleStream=0; U32 lhSize = (istart[0] >> 4) & 3; if (srcSize < 5) return ERROR(corruption_detected); /* srcSize >= MIN_CBLOCK_SIZE == 3; here we need up to 5 for lhSize, + cSize (+nbSeq) */ switch(lhSize) { case 0: case 1: default: /* note : default is impossible, since lhSize into [0..3] */ /* 2 - 2 - 10 - 10 */ lhSize=3; singleStream = istart[0] & 16; litSize = ((istart[0] & 15) << 6) + (istart[1] >> 2); litCSize = ((istart[1] & 3) << 8) + istart[2]; break; case 2: /* 2 - 2 - 14 - 14 */ lhSize=4; litSize = ((istart[0] & 15) << 10) + (istart[1] << 2) + (istart[2] >> 6); litCSize = ((istart[2] & 63) << 8) + istart[3]; break; case 3: /* 2 - 2 - 18 - 18 */ lhSize=5; litSize = ((istart[0] & 15) << 14) + (istart[1] << 6) + (istart[2] >> 2); litCSize = ((istart[2] & 3) << 16) + (istart[3] << 8) + istart[4]; break; } if (litSize > ZSTDv07_BLOCKSIZE_ABSOLUTEMAX) return ERROR(corruption_detected); if (litCSize + lhSize > srcSize) return ERROR(corruption_detected); if (HUFv07_isError(singleStream ? 
HUFv07_decompress1X2_DCtx(dctx->hufTable, dctx->litBuffer, litSize, istart+lhSize, litCSize) : HUFv07_decompress4X_hufOnly (dctx->hufTable, dctx->litBuffer, litSize, istart+lhSize, litCSize) )) return ERROR(corruption_detected); dctx->litPtr = dctx->litBuffer; dctx->litSize = litSize; dctx->litEntropy = 1; memset(dctx->litBuffer + dctx->litSize, 0, WILDCOPY_OVERLENGTH); return litCSize + lhSize; } case lbt_repeat: { size_t litSize, litCSize; U32 lhSize = ((istart[0]) >> 4) & 3; if (lhSize != 1) /* only case supported for now : small litSize, single stream */ return ERROR(corruption_detected); if (dctx->litEntropy==0) return ERROR(dictionary_corrupted); /* 2 - 2 - 10 - 10 */ lhSize=3; litSize = ((istart[0] & 15) << 6) + (istart[1] >> 2); litCSize = ((istart[1] & 3) << 8) + istart[2]; if (litCSize + lhSize > srcSize) return ERROR(corruption_detected); { size_t const errorCode = HUFv07_decompress1X4_usingDTable(dctx->litBuffer, litSize, istart+lhSize, litCSize, dctx->hufTable); if (HUFv07_isError(errorCode)) return ERROR(corruption_detected); } dctx->litPtr = dctx->litBuffer; dctx->litSize = litSize; memset(dctx->litBuffer + dctx->litSize, 0, WILDCOPY_OVERLENGTH); return litCSize + lhSize; } case lbt_raw: { size_t litSize; U32 lhSize = ((istart[0]) >> 4) & 3; switch(lhSize) { case 0: case 1: default: /* note : default is impossible, since lhSize into [0..3] */ lhSize=1; litSize = istart[0] & 31; break; case 2: litSize = ((istart[0] & 15) << 8) + istart[1]; break; case 3: litSize = ((istart[0] & 15) << 16) + (istart[1] << 8) + istart[2]; break; } if (lhSize+litSize+WILDCOPY_OVERLENGTH > srcSize) { /* risk reading beyond src buffer with wildcopy */ if (litSize+lhSize > srcSize) return ERROR(corruption_detected); memcpy(dctx->litBuffer, istart+lhSize, litSize); dctx->litPtr = dctx->litBuffer; dctx->litSize = litSize; memset(dctx->litBuffer + dctx->litSize, 0, WILDCOPY_OVERLENGTH); return lhSize+litSize; } /* direct reference into compressed stream */ dctx->litPtr = istart+lhSize; dctx->litSize = litSize; return lhSize+litSize; } case lbt_rle: { size_t litSize; U32 lhSize = ((istart[0]) >> 4) & 3; switch(lhSize) { case 0: case 1: default: /* note : default is impossible, since lhSize into [0..3] */ lhSize = 1; litSize = istart[0] & 31; break; case 2: litSize = ((istart[0] & 15) << 8) + istart[1]; break; case 3: litSize = ((istart[0] & 15) << 16) + (istart[1] << 8) + istart[2]; if (srcSize<4) return ERROR(corruption_detected); /* srcSize >= MIN_CBLOCK_SIZE == 3; here we need lhSize+1 = 4 */ break; } if (litSize > ZSTDv07_BLOCKSIZE_ABSOLUTEMAX) return ERROR(corruption_detected); memset(dctx->litBuffer, istart[lhSize], litSize + WILDCOPY_OVERLENGTH); dctx->litPtr = dctx->litBuffer; dctx->litSize = litSize; return lhSize+1; } default: return ERROR(corruption_detected); /* impossible */ } } /*! 
ZSTDv07_buildSeqTable() : @return : nb bytes read from src, or an error code if it fails, testable with ZSTDv07_isError() */ static size_t ZSTDv07_buildSeqTable(FSEv07_DTable* DTable, U32 type, U32 max, U32 maxLog, const void* src, size_t srcSize, const S16* defaultNorm, U32 defaultLog, U32 flagRepeatTable) { switch(type) { case FSEv07_ENCODING_RLE : if (!srcSize) return ERROR(srcSize_wrong); if ( (*(const BYTE*)src) > max) return ERROR(corruption_detected); FSEv07_buildDTable_rle(DTable, *(const BYTE*)src); /* if *src > max, data is corrupted */ return 1; case FSEv07_ENCODING_RAW : FSEv07_buildDTable(DTable, defaultNorm, max, defaultLog); return 0; case FSEv07_ENCODING_STATIC: if (!flagRepeatTable) return ERROR(corruption_detected); return 0; default : /* impossible */ case FSEv07_ENCODING_DYNAMIC : { U32 tableLog; S16 norm[MaxSeq+1]; size_t const headerSize = FSEv07_readNCount(norm, &max, &tableLog, src, srcSize); if (FSEv07_isError(headerSize)) return ERROR(corruption_detected); if (tableLog > maxLog) return ERROR(corruption_detected); FSEv07_buildDTable(DTable, norm, max, tableLog); return headerSize; } } } static size_t ZSTDv07_decodeSeqHeaders(int* nbSeqPtr, FSEv07_DTable* DTableLL, FSEv07_DTable* DTableML, FSEv07_DTable* DTableOffb, U32 flagRepeatTable, const void* src, size_t srcSize) { const BYTE* const istart = (const BYTE* const)src; const BYTE* const iend = istart + srcSize; const BYTE* ip = istart; /* check */ if (srcSize < MIN_SEQUENCES_SIZE) return ERROR(srcSize_wrong); /* SeqHead */ { int nbSeq = *ip++; if (!nbSeq) { *nbSeqPtr=0; return 1; } if (nbSeq > 0x7F) { if (nbSeq == 0xFF) { if (ip+2 > iend) return ERROR(srcSize_wrong); nbSeq = MEM_readLE16(ip) + LONGNBSEQ, ip+=2; } else { if (ip >= iend) return ERROR(srcSize_wrong); nbSeq = ((nbSeq-0x80)<<8) + *ip++; } } *nbSeqPtr = nbSeq; } /* FSE table descriptors */ if (ip + 4 > iend) return ERROR(srcSize_wrong); /* min : header byte + all 3 are "raw", hence no header, but at least xxLog bits per type */ { U32 const LLtype = *ip >> 6; U32 const OFtype = (*ip >> 4) & 3; U32 const MLtype = (*ip >> 2) & 3; ip++; /* Build DTables */ { size_t const llhSize = ZSTDv07_buildSeqTable(DTableLL, LLtype, MaxLL, LLFSELog, ip, iend-ip, LL_defaultNorm, LL_defaultNormLog, flagRepeatTable); if (ZSTDv07_isError(llhSize)) return ERROR(corruption_detected); ip += llhSize; } { size_t const ofhSize = ZSTDv07_buildSeqTable(DTableOffb, OFtype, MaxOff, OffFSELog, ip, iend-ip, OF_defaultNorm, OF_defaultNormLog, flagRepeatTable); if (ZSTDv07_isError(ofhSize)) return ERROR(corruption_detected); ip += ofhSize; } { size_t const mlhSize = ZSTDv07_buildSeqTable(DTableML, MLtype, MaxML, MLFSELog, ip, iend-ip, ML_defaultNorm, ML_defaultNormLog, flagRepeatTable); if (ZSTDv07_isError(mlhSize)) return ERROR(corruption_detected); ip += mlhSize; } } return ip-istart; } typedef struct { size_t litLength; size_t matchLength; size_t offset; } seq_t; typedef struct { BITv07_DStream_t DStream; FSEv07_DState_t stateLL; FSEv07_DState_t stateOffb; FSEv07_DState_t stateML; size_t prevOffset[ZSTDv07_REP_INIT]; } seqState_t; static seq_t ZSTDv07_decodeSequence(seqState_t* seqState) { seq_t seq; U32 const llCode = FSEv07_peekSymbol(&(seqState->stateLL)); U32 const mlCode = FSEv07_peekSymbol(&(seqState->stateML)); U32 const ofCode = FSEv07_peekSymbol(&(seqState->stateOffb)); /* <= maxOff, by table construction */ U32 const llBits = LL_bits[llCode]; U32 const mlBits = ML_bits[mlCode]; U32 const ofBits = ofCode; U32 const totalBits = llBits+mlBits+ofBits; static const U32 
LL_base[MaxLL+1] = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 18, 20, 22, 24, 28, 32, 40, 48, 64, 0x80, 0x100, 0x200, 0x400, 0x800, 0x1000, 0x2000, 0x4000, 0x8000, 0x10000 }; static const U32 ML_base[MaxML+1] = { 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 37, 39, 41, 43, 47, 51, 59, 67, 83, 99, 0x83, 0x103, 0x203, 0x403, 0x803, 0x1003, 0x2003, 0x4003, 0x8003, 0x10003 }; static const U32 OF_base[MaxOff+1] = { 0, 1, 1, 5, 0xD, 0x1D, 0x3D, 0x7D, 0xFD, 0x1FD, 0x3FD, 0x7FD, 0xFFD, 0x1FFD, 0x3FFD, 0x7FFD, 0xFFFD, 0x1FFFD, 0x3FFFD, 0x7FFFD, 0xFFFFD, 0x1FFFFD, 0x3FFFFD, 0x7FFFFD, 0xFFFFFD, 0x1FFFFFD, 0x3FFFFFD, 0x7FFFFFD, 0xFFFFFFD }; /* sequence */ { size_t offset; if (!ofCode) offset = 0; else { offset = OF_base[ofCode] + BITv07_readBits(&(seqState->DStream), ofBits); /* <= (ZSTDv07_WINDOWLOG_MAX-1) bits */ if (MEM_32bits()) BITv07_reloadDStream(&(seqState->DStream)); } if (ofCode <= 1) { if ((llCode == 0) & (offset <= 1)) offset = 1-offset; if (offset) { size_t const temp = seqState->prevOffset[offset]; if (offset != 1) seqState->prevOffset[2] = seqState->prevOffset[1]; seqState->prevOffset[1] = seqState->prevOffset[0]; seqState->prevOffset[0] = offset = temp; } else { offset = seqState->prevOffset[0]; } } else { seqState->prevOffset[2] = seqState->prevOffset[1]; seqState->prevOffset[1] = seqState->prevOffset[0]; seqState->prevOffset[0] = offset; } seq.offset = offset; } seq.matchLength = ML_base[mlCode] + ((mlCode>31) ? BITv07_readBits(&(seqState->DStream), mlBits) : 0); /* <= 16 bits */ if (MEM_32bits() && (mlBits+llBits>24)) BITv07_reloadDStream(&(seqState->DStream)); seq.litLength = LL_base[llCode] + ((llCode>15) ? BITv07_readBits(&(seqState->DStream), llBits) : 0); /* <= 16 bits */ if (MEM_32bits() || (totalBits > 64 - 7 - (LLFSELog+MLFSELog+OffFSELog)) ) BITv07_reloadDStream(&(seqState->DStream)); /* ANS state update */ FSEv07_updateState(&(seqState->stateLL), &(seqState->DStream)); /* <= 9 bits */ FSEv07_updateState(&(seqState->stateML), &(seqState->DStream)); /* <= 9 bits */ if (MEM_32bits()) BITv07_reloadDStream(&(seqState->DStream)); /* <= 18 bits */ FSEv07_updateState(&(seqState->stateOffb), &(seqState->DStream)); /* <= 8 bits */ return seq; } static size_t ZSTDv07_execSequence(BYTE* op, BYTE* const oend, seq_t sequence, const BYTE** litPtr, const BYTE* const litLimit, const BYTE* const base, const BYTE* const vBase, const BYTE* const dictEnd) { BYTE* const oLitEnd = op + sequence.litLength; size_t const sequenceLength = sequence.litLength + sequence.matchLength; BYTE* const oMatchEnd = op + sequenceLength; /* risk : address space overflow (32-bits) */ BYTE* const oend_w = oend-WILDCOPY_OVERLENGTH; const BYTE* const iLitEnd = *litPtr + sequence.litLength; const BYTE* match = oLitEnd - sequence.offset; /* check */ if ((oLitEnd>oend_w) | (oMatchEnd>oend)) return ERROR(dstSize_tooSmall); /* last match must start at a minimum distance of WILDCOPY_OVERLENGTH from oend */ if (iLitEnd > litLimit) return ERROR(corruption_detected); /* over-read beyond lit buffer */ /* copy Literals */ ZSTDv07_wildcopy(op, *litPtr, sequence.litLength); /* note : since oLitEnd <= oend-WILDCOPY_OVERLENGTH, no risk of overwrite beyond oend */ op = oLitEnd; *litPtr = iLitEnd; /* update for next sequence */ /* copy Match */ if (sequence.offset > (size_t)(oLitEnd - base)) { /* offset beyond prefix */ if (sequence.offset > (size_t)(oLitEnd - vBase)) return ERROR(corruption_detected); match = dictEnd - (base-match); if 
(match + sequence.matchLength <= dictEnd) { memmove(oLitEnd, match, sequence.matchLength); return sequenceLength; } /* span extDict & currentPrefixSegment */ { size_t const length1 = dictEnd - match; memmove(oLitEnd, match, length1); op = oLitEnd + length1; sequence.matchLength -= length1; match = base; if (op > oend_w || sequence.matchLength < MINMATCH) { while (op < oMatchEnd) *op++ = *match++; return sequenceLength; } } } /* Requirement: op <= oend_w */ /* match within prefix */ if (sequence.offset < 8) { /* close range match, overlap */ static const U32 dec32table[] = { 0, 1, 2, 1, 4, 4, 4, 4 }; /* added */ static const int dec64table[] = { 8, 8, 8, 7, 8, 9,10,11 }; /* subtracted */ int const sub2 = dec64table[sequence.offset]; op[0] = match[0]; op[1] = match[1]; op[2] = match[2]; op[3] = match[3]; match += dec32table[sequence.offset]; ZSTDv07_copy4(op+4, match); match -= sub2; } else { ZSTDv07_copy8(op, match); } op += 8; match += 8; if (oMatchEnd > oend-(16-MINMATCH)) { if (op < oend_w) { ZSTDv07_wildcopy(op, match, oend_w - op); match += oend_w - op; op = oend_w; } while (op < oMatchEnd) *op++ = *match++; } else { ZSTDv07_wildcopy(op, match, (ptrdiff_t)sequence.matchLength-8); /* works even if matchLength < 8 */ } return sequenceLength; } static size_t ZSTDv07_decompressSequences( ZSTDv07_DCtx* dctx, void* dst, size_t maxDstSize, const void* seqStart, size_t seqSize) { const BYTE* ip = (const BYTE*)seqStart; const BYTE* const iend = ip + seqSize; BYTE* const ostart = (BYTE* const)dst; BYTE* const oend = ostart + maxDstSize; BYTE* op = ostart; const BYTE* litPtr = dctx->litPtr; const BYTE* const litEnd = litPtr + dctx->litSize; FSEv07_DTable* DTableLL = dctx->LLTable; FSEv07_DTable* DTableML = dctx->MLTable; FSEv07_DTable* DTableOffb = dctx->OffTable; const BYTE* const base = (const BYTE*) (dctx->base); const BYTE* const vBase = (const BYTE*) (dctx->vBase); const BYTE* const dictEnd = (const BYTE*) (dctx->dictEnd); int nbSeq; /* Build Decoding Tables */ { size_t const seqHSize = ZSTDv07_decodeSeqHeaders(&nbSeq, DTableLL, DTableML, DTableOffb, dctx->fseEntropy, ip, seqSize); if (ZSTDv07_isError(seqHSize)) return seqHSize; ip += seqHSize; } /* Regen sequences */ if (nbSeq) { seqState_t seqState; dctx->fseEntropy = 1; { U32 i; for (i=0; irep[i]; } { size_t const errorCode = BITv07_initDStream(&(seqState.DStream), ip, iend-ip); if (ERR_isError(errorCode)) return ERROR(corruption_detected); } FSEv07_initDState(&(seqState.stateLL), &(seqState.DStream), DTableLL); FSEv07_initDState(&(seqState.stateOffb), &(seqState.DStream), DTableOffb); FSEv07_initDState(&(seqState.stateML), &(seqState.DStream), DTableML); for ( ; (BITv07_reloadDStream(&(seqState.DStream)) <= BITv07_DStream_completed) && nbSeq ; ) { nbSeq--; { seq_t const sequence = ZSTDv07_decodeSequence(&seqState); size_t const oneSeqSize = ZSTDv07_execSequence(op, oend, sequence, &litPtr, litEnd, base, vBase, dictEnd); if (ZSTDv07_isError(oneSeqSize)) return oneSeqSize; op += oneSeqSize; } } /* check if reached exact end */ if (nbSeq) return ERROR(corruption_detected); /* save reps for next block */ { U32 i; for (i=0; irep[i] = (U32)(seqState.prevOffset[i]); } } /* last literal segment */ { size_t const lastLLSize = litEnd - litPtr; /* if (litPtr > litEnd) return ERROR(corruption_detected); */ /* too many literals already used */ if (lastLLSize > (size_t)(oend-op)) return ERROR(dstSize_tooSmall); if (lastLLSize > 0) { memcpy(op, litPtr, lastLLSize); op += lastLLSize; } } return op-ostart; } static void 
ZSTDv07_checkContinuity(ZSTDv07_DCtx* dctx, const void* dst) { if (dst != dctx->previousDstEnd) { /* not contiguous */ dctx->dictEnd = dctx->previousDstEnd; dctx->vBase = (const char*)dst - ((const char*)(dctx->previousDstEnd) - (const char*)(dctx->base)); dctx->base = dst; dctx->previousDstEnd = dst; } } static size_t ZSTDv07_decompressBlock_internal(ZSTDv07_DCtx* dctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize) { /* blockType == blockCompressed */ const BYTE* ip = (const BYTE*)src; if (srcSize >= ZSTDv07_BLOCKSIZE_ABSOLUTEMAX) return ERROR(srcSize_wrong); /* Decode literals sub-block */ { size_t const litCSize = ZSTDv07_decodeLiteralsBlock(dctx, src, srcSize); if (ZSTDv07_isError(litCSize)) return litCSize; ip += litCSize; srcSize -= litCSize; } return ZSTDv07_decompressSequences(dctx, dst, dstCapacity, ip, srcSize); } size_t ZSTDv07_decompressBlock(ZSTDv07_DCtx* dctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize) { size_t dSize; ZSTDv07_checkContinuity(dctx, dst); dSize = ZSTDv07_decompressBlock_internal(dctx, dst, dstCapacity, src, srcSize); dctx->previousDstEnd = (char*)dst + dSize; return dSize; } /** ZSTDv07_insertBlock() : insert `src` block into `dctx` history. Useful to track uncompressed blocks. */ ZSTDLIBv07_API size_t ZSTDv07_insertBlock(ZSTDv07_DCtx* dctx, const void* blockStart, size_t blockSize) { ZSTDv07_checkContinuity(dctx, blockStart); dctx->previousDstEnd = (const char*)blockStart + blockSize; return blockSize; } static size_t ZSTDv07_generateNxBytes(void* dst, size_t dstCapacity, BYTE byte, size_t length) { if (length > dstCapacity) return ERROR(dstSize_tooSmall); if (length > 0) { memset(dst, byte, length); } return length; } /*! ZSTDv07_decompressFrame() : * `dctx` must be properly initialized */ static size_t ZSTDv07_decompressFrame(ZSTDv07_DCtx* dctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize) { const BYTE* ip = (const BYTE*)src; const BYTE* const iend = ip + srcSize; BYTE* const ostart = (BYTE* const)dst; BYTE* const oend = ostart + dstCapacity; BYTE* op = ostart; size_t remainingSize = srcSize; /* check */ if (srcSize < ZSTDv07_frameHeaderSize_min+ZSTDv07_blockHeaderSize) return ERROR(srcSize_wrong); /* Frame Header */ { size_t const frameHeaderSize = ZSTDv07_frameHeaderSize(src, ZSTDv07_frameHeaderSize_min); if (ZSTDv07_isError(frameHeaderSize)) return frameHeaderSize; if (srcSize < frameHeaderSize+ZSTDv07_blockHeaderSize) return ERROR(srcSize_wrong); if (ZSTDv07_decodeFrameHeader(dctx, src, frameHeaderSize)) return ERROR(corruption_detected); ip += frameHeaderSize; remainingSize -= frameHeaderSize; } /* Loop on each block */ while (1) { size_t decodedSize; blockProperties_t blockProperties; size_t const cBlockSize = ZSTDv07_getcBlockSize(ip, iend-ip, &blockProperties); if (ZSTDv07_isError(cBlockSize)) return cBlockSize; ip += ZSTDv07_blockHeaderSize; remainingSize -= ZSTDv07_blockHeaderSize; if (cBlockSize > remainingSize) return ERROR(srcSize_wrong); switch(blockProperties.blockType) { case bt_compressed: decodedSize = ZSTDv07_decompressBlock_internal(dctx, op, oend-op, ip, cBlockSize); break; case bt_raw : decodedSize = ZSTDv07_copyRawBlock(op, oend-op, ip, cBlockSize); break; case bt_rle : decodedSize = ZSTDv07_generateNxBytes(op, oend-op, *ip, blockProperties.origSize); break; case bt_end : /* end of frame */ if (remainingSize) return ERROR(srcSize_wrong); decodedSize = 0; break; default: return ERROR(GENERIC); /* impossible */ } if (blockProperties.blockType == bt_end) break; /* bt_end */ if 
(ZSTDv07_isError(decodedSize)) return decodedSize; if (dctx->fParams.checksumFlag) XXH64_update(&dctx->xxhState, op, decodedSize); op += decodedSize; ip += cBlockSize; remainingSize -= cBlockSize; } return op-ostart; } /*! ZSTDv07_decompress_usingPreparedDCtx() : * Same as ZSTDv07_decompress_usingDict, but using a reference context `preparedDCtx`, where dictionary has been loaded. * It avoids reloading the dictionary each time. * `preparedDCtx` must have been properly initialized using ZSTDv07_decompressBegin_usingDict(). * Requires 2 contexts : 1 for reference (preparedDCtx), which will not be modified, and 1 to run the decompression operation (dctx) */ static size_t ZSTDv07_decompress_usingPreparedDCtx(ZSTDv07_DCtx* dctx, const ZSTDv07_DCtx* refDCtx, void* dst, size_t dstCapacity, const void* src, size_t srcSize) { ZSTDv07_copyDCtx(dctx, refDCtx); ZSTDv07_checkContinuity(dctx, dst); return ZSTDv07_decompressFrame(dctx, dst, dstCapacity, src, srcSize); } size_t ZSTDv07_decompress_usingDict(ZSTDv07_DCtx* dctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize, const void* dict, size_t dictSize) { ZSTDv07_decompressBegin_usingDict(dctx, dict, dictSize); ZSTDv07_checkContinuity(dctx, dst); return ZSTDv07_decompressFrame(dctx, dst, dstCapacity, src, srcSize); } size_t ZSTDv07_decompressDCtx(ZSTDv07_DCtx* dctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize) { return ZSTDv07_decompress_usingDict(dctx, dst, dstCapacity, src, srcSize, NULL, 0); } size_t ZSTDv07_decompress(void* dst, size_t dstCapacity, const void* src, size_t srcSize) { #if defined(ZSTDv07_HEAPMODE) && (ZSTDv07_HEAPMODE==1) size_t regenSize; ZSTDv07_DCtx* const dctx = ZSTDv07_createDCtx(); if (dctx==NULL) return ERROR(memory_allocation); regenSize = ZSTDv07_decompressDCtx(dctx, dst, dstCapacity, src, srcSize); ZSTDv07_freeDCtx(dctx); return regenSize; #else /* stack mode */ ZSTDv07_DCtx dctx; return ZSTDv07_decompressDCtx(&dctx, dst, dstCapacity, src, srcSize); #endif } /* ZSTD_errorFrameSizeInfoLegacy() : assumes `cSize` and `dBound` are _not_ NULL */ static void ZSTD_errorFrameSizeInfoLegacy(size_t* cSize, unsigned long long* dBound, size_t ret) { *cSize = ret; *dBound = ZSTD_CONTENTSIZE_ERROR; } void ZSTDv07_findFrameSizeInfoLegacy(const void *src, size_t srcSize, size_t* cSize, unsigned long long* dBound) { const BYTE* ip = (const BYTE*)src; size_t remainingSize = srcSize; size_t nbBlocks = 0; /* check */ if (srcSize < ZSTDv07_frameHeaderSize_min+ZSTDv07_blockHeaderSize) { ZSTD_errorFrameSizeInfoLegacy(cSize, dBound, ERROR(srcSize_wrong)); return; } /* Frame Header */ { size_t const frameHeaderSize = ZSTDv07_frameHeaderSize(src, srcSize); if (ZSTDv07_isError(frameHeaderSize)) { ZSTD_errorFrameSizeInfoLegacy(cSize, dBound, frameHeaderSize); return; } if (MEM_readLE32(src) != ZSTDv07_MAGICNUMBER) { ZSTD_errorFrameSizeInfoLegacy(cSize, dBound, ERROR(prefix_unknown)); return; } if (srcSize < frameHeaderSize+ZSTDv07_blockHeaderSize) { ZSTD_errorFrameSizeInfoLegacy(cSize, dBound, ERROR(srcSize_wrong)); return; } ip += frameHeaderSize; remainingSize -= frameHeaderSize; } /* Loop on each block */ while (1) { blockProperties_t blockProperties; size_t const cBlockSize = ZSTDv07_getcBlockSize(ip, remainingSize, &blockProperties); if (ZSTDv07_isError(cBlockSize)) { ZSTD_errorFrameSizeInfoLegacy(cSize, dBound, cBlockSize); return; } ip += ZSTDv07_blockHeaderSize; remainingSize -= ZSTDv07_blockHeaderSize; if (blockProperties.blockType == bt_end) break; if (cBlockSize > remainingSize) { 
ZSTD_errorFrameSizeInfoLegacy(cSize, dBound, ERROR(srcSize_wrong)); return; } ip += cBlockSize; remainingSize -= cBlockSize; nbBlocks++; } *cSize = ip - (const BYTE*)src; *dBound = nbBlocks * ZSTDv07_BLOCKSIZE_ABSOLUTEMAX; } /*_****************************** * Streaming Decompression API ********************************/ size_t ZSTDv07_nextSrcSizeToDecompress(ZSTDv07_DCtx* dctx) { return dctx->expected; } int ZSTDv07_isSkipFrame(ZSTDv07_DCtx* dctx) { return dctx->stage == ZSTDds_skipFrame; } /** ZSTDv07_decompressContinue() : * @return : nb of bytes generated into `dst` (necessarily <= `dstCapacity) * or an error code, which can be tested using ZSTDv07_isError() */ size_t ZSTDv07_decompressContinue(ZSTDv07_DCtx* dctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize) { /* Sanity check */ if (srcSize != dctx->expected) return ERROR(srcSize_wrong); if (dstCapacity) ZSTDv07_checkContinuity(dctx, dst); switch (dctx->stage) { case ZSTDds_getFrameHeaderSize : if (srcSize != ZSTDv07_frameHeaderSize_min) return ERROR(srcSize_wrong); /* impossible */ if ((MEM_readLE32(src) & 0xFFFFFFF0U) == ZSTDv07_MAGIC_SKIPPABLE_START) { memcpy(dctx->headerBuffer, src, ZSTDv07_frameHeaderSize_min); dctx->expected = ZSTDv07_skippableHeaderSize - ZSTDv07_frameHeaderSize_min; /* magic number + skippable frame length */ dctx->stage = ZSTDds_decodeSkippableHeader; return 0; } dctx->headerSize = ZSTDv07_frameHeaderSize(src, ZSTDv07_frameHeaderSize_min); if (ZSTDv07_isError(dctx->headerSize)) return dctx->headerSize; memcpy(dctx->headerBuffer, src, ZSTDv07_frameHeaderSize_min); if (dctx->headerSize > ZSTDv07_frameHeaderSize_min) { dctx->expected = dctx->headerSize - ZSTDv07_frameHeaderSize_min; dctx->stage = ZSTDds_decodeFrameHeader; return 0; } dctx->expected = 0; /* not necessary to copy more */ /* fall-through */ case ZSTDds_decodeFrameHeader: { size_t result; memcpy(dctx->headerBuffer + ZSTDv07_frameHeaderSize_min, src, dctx->expected); result = ZSTDv07_decodeFrameHeader(dctx, dctx->headerBuffer, dctx->headerSize); if (ZSTDv07_isError(result)) return result; dctx->expected = ZSTDv07_blockHeaderSize; dctx->stage = ZSTDds_decodeBlockHeader; return 0; } case ZSTDds_decodeBlockHeader: { blockProperties_t bp; size_t const cBlockSize = ZSTDv07_getcBlockSize(src, ZSTDv07_blockHeaderSize, &bp); if (ZSTDv07_isError(cBlockSize)) return cBlockSize; if (bp.blockType == bt_end) { if (dctx->fParams.checksumFlag) { U64 const h64 = XXH64_digest(&dctx->xxhState); U32 const h32 = (U32)(h64>>11) & ((1<<22)-1); const BYTE* const ip = (const BYTE*)src; U32 const check32 = ip[2] + (ip[1] << 8) + ((ip[0] & 0x3F) << 16); if (check32 != h32) return ERROR(checksum_wrong); } dctx->expected = 0; dctx->stage = ZSTDds_getFrameHeaderSize; } else { dctx->expected = cBlockSize; dctx->bType = bp.blockType; dctx->stage = ZSTDds_decompressBlock; } return 0; } case ZSTDds_decompressBlock: { size_t rSize; switch(dctx->bType) { case bt_compressed: rSize = ZSTDv07_decompressBlock_internal(dctx, dst, dstCapacity, src, srcSize); break; case bt_raw : rSize = ZSTDv07_copyRawBlock(dst, dstCapacity, src, srcSize); break; case bt_rle : return ERROR(GENERIC); /* not yet handled */ break; case bt_end : /* should never happen (filtered at phase 1) */ rSize = 0; break; default: return ERROR(GENERIC); /* impossible */ } dctx->stage = ZSTDds_decodeBlockHeader; dctx->expected = ZSTDv07_blockHeaderSize; dctx->previousDstEnd = (char*)dst + rSize; if (ZSTDv07_isError(rSize)) return rSize; if (dctx->fParams.checksumFlag) XXH64_update(&dctx->xxhState, dst, 
rSize); return rSize; } case ZSTDds_decodeSkippableHeader: { memcpy(dctx->headerBuffer + ZSTDv07_frameHeaderSize_min, src, dctx->expected); dctx->expected = MEM_readLE32(dctx->headerBuffer + 4); dctx->stage = ZSTDds_skipFrame; return 0; } case ZSTDds_skipFrame: { dctx->expected = 0; dctx->stage = ZSTDds_getFrameHeaderSize; return 0; } default: return ERROR(GENERIC); /* impossible */ } } static size_t ZSTDv07_refDictContent(ZSTDv07_DCtx* dctx, const void* dict, size_t dictSize) { dctx->dictEnd = dctx->previousDstEnd; dctx->vBase = (const char*)dict - ((const char*)(dctx->previousDstEnd) - (const char*)(dctx->base)); dctx->base = dict; dctx->previousDstEnd = (const char*)dict + dictSize; return 0; } static size_t ZSTDv07_loadEntropy(ZSTDv07_DCtx* dctx, const void* const dict, size_t const dictSize) { const BYTE* dictPtr = (const BYTE*)dict; const BYTE* const dictEnd = dictPtr + dictSize; { size_t const hSize = HUFv07_readDTableX4(dctx->hufTable, dict, dictSize); if (HUFv07_isError(hSize)) return ERROR(dictionary_corrupted); dictPtr += hSize; } { short offcodeNCount[MaxOff+1]; U32 offcodeMaxValue=MaxOff, offcodeLog; size_t const offcodeHeaderSize = FSEv07_readNCount(offcodeNCount, &offcodeMaxValue, &offcodeLog, dictPtr, dictEnd-dictPtr); if (FSEv07_isError(offcodeHeaderSize)) return ERROR(dictionary_corrupted); if (offcodeLog > OffFSELog) return ERROR(dictionary_corrupted); { size_t const errorCode = FSEv07_buildDTable(dctx->OffTable, offcodeNCount, offcodeMaxValue, offcodeLog); if (FSEv07_isError(errorCode)) return ERROR(dictionary_corrupted); } dictPtr += offcodeHeaderSize; } { short matchlengthNCount[MaxML+1]; unsigned matchlengthMaxValue = MaxML, matchlengthLog; size_t const matchlengthHeaderSize = FSEv07_readNCount(matchlengthNCount, &matchlengthMaxValue, &matchlengthLog, dictPtr, dictEnd-dictPtr); if (FSEv07_isError(matchlengthHeaderSize)) return ERROR(dictionary_corrupted); if (matchlengthLog > MLFSELog) return ERROR(dictionary_corrupted); { size_t const errorCode = FSEv07_buildDTable(dctx->MLTable, matchlengthNCount, matchlengthMaxValue, matchlengthLog); if (FSEv07_isError(errorCode)) return ERROR(dictionary_corrupted); } dictPtr += matchlengthHeaderSize; } { short litlengthNCount[MaxLL+1]; unsigned litlengthMaxValue = MaxLL, litlengthLog; size_t const litlengthHeaderSize = FSEv07_readNCount(litlengthNCount, &litlengthMaxValue, &litlengthLog, dictPtr, dictEnd-dictPtr); if (FSEv07_isError(litlengthHeaderSize)) return ERROR(dictionary_corrupted); if (litlengthLog > LLFSELog) return ERROR(dictionary_corrupted); { size_t const errorCode = FSEv07_buildDTable(dctx->LLTable, litlengthNCount, litlengthMaxValue, litlengthLog); if (FSEv07_isError(errorCode)) return ERROR(dictionary_corrupted); } dictPtr += litlengthHeaderSize; } if (dictPtr+12 > dictEnd) return ERROR(dictionary_corrupted); dctx->rep[0] = MEM_readLE32(dictPtr+0); if (dctx->rep[0] == 0 || dctx->rep[0] >= dictSize) return ERROR(dictionary_corrupted); dctx->rep[1] = MEM_readLE32(dictPtr+4); if (dctx->rep[1] == 0 || dctx->rep[1] >= dictSize) return ERROR(dictionary_corrupted); dctx->rep[2] = MEM_readLE32(dictPtr+8); if (dctx->rep[2] == 0 || dctx->rep[2] >= dictSize) return ERROR(dictionary_corrupted); dictPtr += 12; dctx->litEntropy = dctx->fseEntropy = 1; return dictPtr - (const BYTE*)dict; } static size_t ZSTDv07_decompress_insertDictionary(ZSTDv07_DCtx* dctx, const void* dict, size_t dictSize) { if (dictSize < 8) return ZSTDv07_refDictContent(dctx, dict, dictSize); { U32 const magic = MEM_readLE32(dict); if (magic != 
ZSTDv07_DICT_MAGIC) { return ZSTDv07_refDictContent(dctx, dict, dictSize); /* pure content mode */ } } dctx->dictID = MEM_readLE32((const char*)dict + 4); /* load entropy tables */ dict = (const char*)dict + 8; dictSize -= 8; { size_t const eSize = ZSTDv07_loadEntropy(dctx, dict, dictSize); if (ZSTDv07_isError(eSize)) return ERROR(dictionary_corrupted); dict = (const char*)dict + eSize; dictSize -= eSize; } /* reference dictionary content */ return ZSTDv07_refDictContent(dctx, dict, dictSize); } size_t ZSTDv07_decompressBegin_usingDict(ZSTDv07_DCtx* dctx, const void* dict, size_t dictSize) { { size_t const errorCode = ZSTDv07_decompressBegin(dctx); if (ZSTDv07_isError(errorCode)) return errorCode; } if (dict && dictSize) { size_t const errorCode = ZSTDv07_decompress_insertDictionary(dctx, dict, dictSize); if (ZSTDv07_isError(errorCode)) return ERROR(dictionary_corrupted); } return 0; } struct ZSTDv07_DDict_s { void* dict; size_t dictSize; ZSTDv07_DCtx* refContext; }; /* typedef'd tp ZSTDv07_CDict within zstd.h */ static ZSTDv07_DDict* ZSTDv07_createDDict_advanced(const void* dict, size_t dictSize, ZSTDv07_customMem customMem) { if (!customMem.customAlloc && !customMem.customFree) customMem = defaultCustomMem; if (!customMem.customAlloc || !customMem.customFree) return NULL; { ZSTDv07_DDict* const ddict = (ZSTDv07_DDict*) customMem.customAlloc(customMem.opaque, sizeof(*ddict)); void* const dictContent = customMem.customAlloc(customMem.opaque, dictSize); ZSTDv07_DCtx* const dctx = ZSTDv07_createDCtx_advanced(customMem); if (!dictContent || !ddict || !dctx) { customMem.customFree(customMem.opaque, dictContent); customMem.customFree(customMem.opaque, ddict); customMem.customFree(customMem.opaque, dctx); return NULL; } memcpy(dictContent, dict, dictSize); { size_t const errorCode = ZSTDv07_decompressBegin_usingDict(dctx, dictContent, dictSize); if (ZSTDv07_isError(errorCode)) { customMem.customFree(customMem.opaque, dictContent); customMem.customFree(customMem.opaque, ddict); customMem.customFree(customMem.opaque, dctx); return NULL; } } ddict->dict = dictContent; ddict->dictSize = dictSize; ddict->refContext = dctx; return ddict; } } /*! ZSTDv07_createDDict() : * Create a digested dictionary, ready to start decompression without startup delay. * `dict` can be released after `ZSTDv07_DDict` creation */ ZSTDv07_DDict* ZSTDv07_createDDict(const void* dict, size_t dictSize) { ZSTDv07_customMem const allocator = { NULL, NULL, NULL }; return ZSTDv07_createDDict_advanced(dict, dictSize, allocator); } size_t ZSTDv07_freeDDict(ZSTDv07_DDict* ddict) { ZSTDv07_freeFunction const cFree = ddict->refContext->customMem.customFree; void* const opaque = ddict->refContext->customMem.opaque; ZSTDv07_freeDCtx(ddict->refContext); cFree(opaque, ddict->dict); cFree(opaque, ddict); return 0; } /*! ZSTDv07_decompress_usingDDict() : * Decompression using a pre-digested Dictionary * Use dictionary without significant overhead. */ ZSTDLIBv07_API size_t ZSTDv07_decompress_usingDDict(ZSTDv07_DCtx* dctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize, const ZSTDv07_DDict* ddict) { return ZSTDv07_decompress_usingPreparedDCtx(dctx, ddict->refContext, dst, dstCapacity, src, srcSize); } /* Buffered version of Zstd compression library Copyright (C) 2015-2016, Yann Collet. 
BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. You can contact the author at : - zstd homepage : http://www.zstd.net/ */ /*-*************************************************************************** * Streaming decompression howto * * A ZBUFFv07_DCtx object is required to track streaming operations. * Use ZBUFFv07_createDCtx() and ZBUFFv07_freeDCtx() to create/release resources. * Use ZBUFFv07_decompressInit() to start a new decompression operation, * or ZBUFFv07_decompressInitDictionary() if decompression requires a dictionary. * Note that ZBUFFv07_DCtx objects can be re-init multiple times. * * Use ZBUFFv07_decompressContinue() repetitively to consume your input. * *srcSizePtr and *dstCapacityPtr can be any size. * The function will report how many bytes were read or written by modifying *srcSizePtr and *dstCapacityPtr. * Note that it may not consume the entire input, in which case it's up to the caller to present remaining input again. * The content of @dst will be overwritten (up to *dstCapacityPtr) at each function call, so save its content if it matters, or change @dst. * @return : a hint to preferred nb of bytes to use as input for next function call (it's only a hint, to help latency), * or 0 when a frame is completely decoded, * or an error code, which can be tested using ZBUFFv07_isError(). * * Hint : recommended buffer sizes (not compulsory) : ZBUFFv07_recommendedDInSize() and ZBUFFv07_recommendedDOutSize() * output : ZBUFFv07_recommendedDOutSize==128 KB block size is the internal unit, it ensures it's always possible to write a full block when decoded. * input : ZBUFFv07_recommendedDInSize == 128KB + 3; * just follow indications from ZBUFFv07_decompressContinue() to minimize latency. It should always be <= 128 KB + 3 . 
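*
*  A bare-bones sketch of that loop (buffer allocation, error handling and real I/O are
*  omitted; readFromInput() and writeToOutput() are hypothetical helpers standing in for
*  the application's own I/O) :
*
*      ZBUFFv07_DCtx* const zbd = ZBUFFv07_createDCtx();
*      ZBUFFv07_decompressInit(zbd);
*      size_t hint = 1;
*      while (hint != 0) {
*          size_t srcSize = readFromInput(inBuff, ZBUFFv07_recommendedDInSize());
*          size_t dstSize = ZBUFFv07_recommendedDOutSize();
*          hint = ZBUFFv07_decompressContinue(zbd, outBuff, &dstSize, inBuff, &srcSize);
*          if (ZBUFFv07_isError(hint)) break;
*          writeToOutput(outBuff, dstSize);
*      }
*      ZBUFFv07_freeDCtx(zbd);
*
*  Note : if srcSize comes back smaller than the amount that was read, the unconsumed
*  tail of inBuff must be presented again on the next call.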
* *******************************************************************************/ typedef enum { ZBUFFds_init, ZBUFFds_loadHeader, ZBUFFds_read, ZBUFFds_load, ZBUFFds_flush } ZBUFFv07_dStage; /* *** Resource management *** */ struct ZBUFFv07_DCtx_s { ZSTDv07_DCtx* zd; ZSTDv07_frameParams fParams; ZBUFFv07_dStage stage; char* inBuff; size_t inBuffSize; size_t inPos; char* outBuff; size_t outBuffSize; size_t outStart; size_t outEnd; size_t blockSize; BYTE headerBuffer[ZSTDv07_FRAMEHEADERSIZE_MAX]; size_t lhSize; ZSTDv07_customMem customMem; }; /* typedef'd to ZBUFFv07_DCtx within "zstd_buffered.h" */ ZSTDLIBv07_API ZBUFFv07_DCtx* ZBUFFv07_createDCtx_advanced(ZSTDv07_customMem customMem); ZBUFFv07_DCtx* ZBUFFv07_createDCtx(void) { return ZBUFFv07_createDCtx_advanced(defaultCustomMem); } ZBUFFv07_DCtx* ZBUFFv07_createDCtx_advanced(ZSTDv07_customMem customMem) { ZBUFFv07_DCtx* zbd; if (!customMem.customAlloc && !customMem.customFree) customMem = defaultCustomMem; if (!customMem.customAlloc || !customMem.customFree) return NULL; zbd = (ZBUFFv07_DCtx*)customMem.customAlloc(customMem.opaque, sizeof(ZBUFFv07_DCtx)); if (zbd==NULL) return NULL; memset(zbd, 0, sizeof(ZBUFFv07_DCtx)); memcpy(&zbd->customMem, &customMem, sizeof(ZSTDv07_customMem)); zbd->zd = ZSTDv07_createDCtx_advanced(customMem); if (zbd->zd == NULL) { ZBUFFv07_freeDCtx(zbd); return NULL; } zbd->stage = ZBUFFds_init; return zbd; } size_t ZBUFFv07_freeDCtx(ZBUFFv07_DCtx* zbd) { if (zbd==NULL) return 0; /* support free on null */ ZSTDv07_freeDCtx(zbd->zd); if (zbd->inBuff) zbd->customMem.customFree(zbd->customMem.opaque, zbd->inBuff); if (zbd->outBuff) zbd->customMem.customFree(zbd->customMem.opaque, zbd->outBuff); zbd->customMem.customFree(zbd->customMem.opaque, zbd); return 0; } /* *** Initialization *** */ size_t ZBUFFv07_decompressInitDictionary(ZBUFFv07_DCtx* zbd, const void* dict, size_t dictSize) { zbd->stage = ZBUFFds_loadHeader; zbd->lhSize = zbd->inPos = zbd->outStart = zbd->outEnd = 0; return ZSTDv07_decompressBegin_usingDict(zbd->zd, dict, dictSize); } size_t ZBUFFv07_decompressInit(ZBUFFv07_DCtx* zbd) { return ZBUFFv07_decompressInitDictionary(zbd, NULL, 0); } /* internal util function */ MEM_STATIC size_t ZBUFFv07_limitCopy(void* dst, size_t dstCapacity, const void* src, size_t srcSize) { size_t const length = MIN(dstCapacity, srcSize); if (length > 0) { memcpy(dst, src, length); } return length; } /* *** Decompression *** */ size_t ZBUFFv07_decompressContinue(ZBUFFv07_DCtx* zbd, void* dst, size_t* dstCapacityPtr, const void* src, size_t* srcSizePtr) { const char* const istart = (const char*)src; const char* const iend = istart + *srcSizePtr; const char* ip = istart; char* const ostart = (char*)dst; char* const oend = ostart + *dstCapacityPtr; char* op = ostart; U32 notDone = 1; while (notDone) { switch(zbd->stage) { case ZBUFFds_init : return ERROR(init_missing); case ZBUFFds_loadHeader : { size_t const hSize = ZSTDv07_getFrameParams(&(zbd->fParams), zbd->headerBuffer, zbd->lhSize); if (ZSTDv07_isError(hSize)) return hSize; if (hSize != 0) { size_t const toLoad = hSize - zbd->lhSize; /* if hSize!=0, hSize > zbd->lhSize */ if (toLoad > (size_t)(iend-ip)) { /* not enough input to load full header */ memcpy(zbd->headerBuffer + zbd->lhSize, ip, iend-ip); zbd->lhSize += iend-ip; *dstCapacityPtr = 0; return (hSize - zbd->lhSize) + ZSTDv07_blockHeaderSize; /* remaining header bytes + next block header */ } memcpy(zbd->headerBuffer + zbd->lhSize, ip, toLoad); zbd->lhSize = hSize; ip += toLoad; break; } } /* Consume header */ { 
size_t const h1Size = ZSTDv07_nextSrcSizeToDecompress(zbd->zd); /* == ZSTDv07_frameHeaderSize_min */ size_t const h1Result = ZSTDv07_decompressContinue(zbd->zd, NULL, 0, zbd->headerBuffer, h1Size); if (ZSTDv07_isError(h1Result)) return h1Result; if (h1Size < zbd->lhSize) { /* long header */ size_t const h2Size = ZSTDv07_nextSrcSizeToDecompress(zbd->zd); size_t const h2Result = ZSTDv07_decompressContinue(zbd->zd, NULL, 0, zbd->headerBuffer+h1Size, h2Size); if (ZSTDv07_isError(h2Result)) return h2Result; } } zbd->fParams.windowSize = MAX(zbd->fParams.windowSize, 1U << ZSTDv07_WINDOWLOG_ABSOLUTEMIN); /* Frame header instruct buffer sizes */ { size_t const blockSize = MIN(zbd->fParams.windowSize, ZSTDv07_BLOCKSIZE_ABSOLUTEMAX); zbd->blockSize = blockSize; if (zbd->inBuffSize < blockSize) { zbd->customMem.customFree(zbd->customMem.opaque, zbd->inBuff); zbd->inBuffSize = blockSize; zbd->inBuff = (char*)zbd->customMem.customAlloc(zbd->customMem.opaque, blockSize); if (zbd->inBuff == NULL) return ERROR(memory_allocation); } { size_t const neededOutSize = zbd->fParams.windowSize + blockSize + WILDCOPY_OVERLENGTH * 2; if (zbd->outBuffSize < neededOutSize) { zbd->customMem.customFree(zbd->customMem.opaque, zbd->outBuff); zbd->outBuffSize = neededOutSize; zbd->outBuff = (char*)zbd->customMem.customAlloc(zbd->customMem.opaque, neededOutSize); if (zbd->outBuff == NULL) return ERROR(memory_allocation); } } } zbd->stage = ZBUFFds_read; /* pass-through */ /* fall-through */ case ZBUFFds_read: { size_t const neededInSize = ZSTDv07_nextSrcSizeToDecompress(zbd->zd); if (neededInSize==0) { /* end of frame */ zbd->stage = ZBUFFds_init; notDone = 0; break; } if ((size_t)(iend-ip) >= neededInSize) { /* decode directly from src */ const int isSkipFrame = ZSTDv07_isSkipFrame(zbd->zd); size_t const decodedSize = ZSTDv07_decompressContinue(zbd->zd, zbd->outBuff + zbd->outStart, (isSkipFrame ? 
0 : zbd->outBuffSize - zbd->outStart), ip, neededInSize); if (ZSTDv07_isError(decodedSize)) return decodedSize; ip += neededInSize; if (!decodedSize && !isSkipFrame) break; /* this was just a header */ zbd->outEnd = zbd->outStart + decodedSize; zbd->stage = ZBUFFds_flush; break; } if (ip==iend) { notDone = 0; break; } /* no more input */ zbd->stage = ZBUFFds_load; } /* fall-through */ case ZBUFFds_load: { size_t const neededInSize = ZSTDv07_nextSrcSizeToDecompress(zbd->zd); size_t const toLoad = neededInSize - zbd->inPos; /* should always be <= remaining space within inBuff */ size_t loadedSize; if (toLoad > zbd->inBuffSize - zbd->inPos) return ERROR(corruption_detected); /* should never happen */ loadedSize = ZBUFFv07_limitCopy(zbd->inBuff + zbd->inPos, toLoad, ip, iend-ip); ip += loadedSize; zbd->inPos += loadedSize; if (loadedSize < toLoad) { notDone = 0; break; } /* not enough input, wait for more */ /* decode loaded input */ { const int isSkipFrame = ZSTDv07_isSkipFrame(zbd->zd); size_t const decodedSize = ZSTDv07_decompressContinue(zbd->zd, zbd->outBuff + zbd->outStart, zbd->outBuffSize - zbd->outStart, zbd->inBuff, neededInSize); if (ZSTDv07_isError(decodedSize)) return decodedSize; zbd->inPos = 0; /* input is consumed */ if (!decodedSize && !isSkipFrame) { zbd->stage = ZBUFFds_read; break; } /* this was just a header */ zbd->outEnd = zbd->outStart + decodedSize; zbd->stage = ZBUFFds_flush; /* break; */ /* pass-through */ } } /* fall-through */ case ZBUFFds_flush: { size_t const toFlushSize = zbd->outEnd - zbd->outStart; size_t const flushedSize = ZBUFFv07_limitCopy(op, oend-op, zbd->outBuff + zbd->outStart, toFlushSize); op += flushedSize; zbd->outStart += flushedSize; if (flushedSize == toFlushSize) { zbd->stage = ZBUFFds_read; if (zbd->outStart + zbd->blockSize > zbd->outBuffSize) zbd->outStart = zbd->outEnd = 0; break; } /* cannot flush everything */ notDone = 0; break; } default: return ERROR(GENERIC); /* impossible */ } } /* result */ *srcSizePtr = ip-istart; *dstCapacityPtr = op-ostart; { size_t nextSrcSizeHint = ZSTDv07_nextSrcSizeToDecompress(zbd->zd); nextSrcSizeHint -= zbd->inPos; /* already loaded*/ return nextSrcSizeHint; } } /* ************************************* * Tool functions ***************************************/ size_t ZBUFFv07_recommendedDInSize(void) { return ZSTDv07_BLOCKSIZE_ABSOLUTEMAX + ZSTDv07_blockHeaderSize /* block header size*/ ; } size_t ZBUFFv07_recommendedDOutSize(void) { return ZSTDv07_BLOCKSIZE_ABSOLUTEMAX; } borgbackup-1.1.15/src/borg/algorithms/zstd/lib/legacy/zstd_v02.h0000644000175000017500000000720113771325506024420 0ustar useruser00000000000000/* * Copyright (c) 2016-2020, Yann Collet, Facebook, Inc. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the * LICENSE file in the root directory of this source tree) and the GPLv2 (found * in the COPYING file in the root directory of this source tree). * You may select, at your option, one of the above-listed licenses. */ #ifndef ZSTD_V02_H_4174539423 #define ZSTD_V02_H_4174539423 #if defined (__cplusplus) extern "C" { #endif /* ************************************* * Includes ***************************************/ #include /* size_t */ /* ************************************* * Simple one-step function ***************************************/ /** ZSTDv02_decompress() : decompress ZSTD frames compliant with v0.2.x format compressedSize : is the exact source size maxOriginalSize : is the size of the 'dst' buffer, which must be already allocated. 
It must be equal or larger than originalSize, otherwise decompression will fail. return : the number of bytes decompressed into destination buffer (originalSize) or an errorCode if it fails (which can be tested using ZSTDv01_isError()) */ size_t ZSTDv02_decompress( void* dst, size_t maxOriginalSize, const void* src, size_t compressedSize); /** ZSTDv02_findFrameSizeInfoLegacy() : get the source length and decompressed bound of a ZSTD frame compliant with v0.2.x format srcSize : The size of the 'src' buffer, at least as large as the frame pointed to by 'src' cSize (output parameter) : the number of bytes that would be read to decompress this frame or an error code if it fails (which can be tested using ZSTDv01_isError()) dBound (output parameter) : an upper-bound for the decompressed size of the data in the frame or ZSTD_CONTENTSIZE_ERROR if an error occurs note : assumes `cSize` and `dBound` are _not_ NULL. */ void ZSTDv02_findFrameSizeInfoLegacy(const void *src, size_t srcSize, size_t* cSize, unsigned long long* dBound); /** ZSTDv02_isError() : tells if the result of ZSTDv02_decompress() is an error */ unsigned ZSTDv02_isError(size_t code); /* ************************************* * Advanced functions ***************************************/ typedef struct ZSTDv02_Dctx_s ZSTDv02_Dctx; ZSTDv02_Dctx* ZSTDv02_createDCtx(void); size_t ZSTDv02_freeDCtx(ZSTDv02_Dctx* dctx); size_t ZSTDv02_decompressDCtx(void* ctx, void* dst, size_t maxOriginalSize, const void* src, size_t compressedSize); /* ************************************* * Streaming functions ***************************************/ size_t ZSTDv02_resetDCtx(ZSTDv02_Dctx* dctx); size_t ZSTDv02_nextSrcSizeToDecompress(ZSTDv02_Dctx* dctx); size_t ZSTDv02_decompressContinue(ZSTDv02_Dctx* dctx, void* dst, size_t maxDstSize, const void* src, size_t srcSize); /** Use above functions alternatively. ZSTD_nextSrcSizeToDecompress() tells how much bytes to provide as 'srcSize' to ZSTD_decompressContinue(). ZSTD_decompressContinue() will use previous data blocks to improve compression if they are located prior to current block. Result is the number of bytes regenerated within 'dst'. It can be zero, which is not an error; it just means ZSTD_decompressContinue() has decoded some header. */ /* ************************************* * Prefix - version detection ***************************************/ #define ZSTDv02_magicNumber 0xFD2FB522 /* v0.2 */ #if defined (__cplusplus) } #endif #endif /* ZSTD_V02_H_4174539423 */ borgbackup-1.1.15/src/borg/algorithms/zstd/lib/legacy/zstd_v06.c0000644000175000017500000050437613771325506024436 0ustar useruser00000000000000/* * Copyright (c) 2016-2020, Yann Collet, Facebook, Inc. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the * LICENSE file in the root directory of this source tree) and the GPLv2 (found * in the COPYING file in the root directory of this source tree). * You may select, at your option, one of the above-listed licenses. */ /*- Dependencies -*/ #include "zstd_v06.h" #include /* size_t, ptrdiff_t */ #include /* memcpy */ #include /* malloc, free, qsort */ #include "../common/error_private.h" /* ****************************************************************** mem.h low-level memory access routines Copyright (C) 2013-2015, Yann Collet. 
BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. You can contact the author at : - FSE source repository : https://github.com/Cyan4973/FiniteStateEntropy - Public forum : https://groups.google.com/forum/#!forum/lz4c ****************************************************************** */ #ifndef MEM_H_MODULE #define MEM_H_MODULE #if defined (__cplusplus) extern "C" { #endif /*-**************************************** * Compiler specifics ******************************************/ #if defined(_MSC_VER) /* Visual Studio */ # include /* _byteswap_ulong */ # include /* _byteswap_* */ #endif #if defined(__GNUC__) # define MEM_STATIC static __attribute__((unused)) #elif defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) # define MEM_STATIC static inline #elif defined(_MSC_VER) # define MEM_STATIC static __inline #else # define MEM_STATIC static /* this version may generate warnings for unused static functions; disable the relevant warning */ #endif /*-************************************************************** * Basic Types *****************************************************************/ #if !defined (__VMS) && (defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) ) # include typedef uint8_t BYTE; typedef uint16_t U16; typedef int16_t S16; typedef uint32_t U32; typedef int32_t S32; typedef uint64_t U64; typedef int64_t S64; #else typedef unsigned char BYTE; typedef unsigned short U16; typedef signed short S16; typedef unsigned int U32; typedef signed int S32; typedef unsigned long long U64; typedef signed long long S64; #endif /*-************************************************************** * Memory I/O *****************************************************************/ /* MEM_FORCE_MEMORY_ACCESS : * By default, access to unaligned memory is controlled by `memcpy()`, which is safe and portable. * Unfortunately, on some target/compiler combinations, the generated assembly is sub-optimal. * The below switch allow to select different access method for improved performance. * Method 0 (default) : use `memcpy()`. Safe and portable. * Method 1 : `__packed` statement. It depends on compiler extension (ie, not portable). 
* This method is safe if your compiler supports it, and *generally* as fast or faster than `memcpy`. * Method 2 : direct access. This method is portable but violate C standard. * It can generate buggy code on targets depending on alignment. * In some circumstances, it's the only known way to get the most performance (ie GCC + ARMv6) * See http://fastcompression.blogspot.fr/2015/08/accessing-unaligned-memory.html for details. * Prefer these methods in priority order (0 > 1 > 2) */ #ifndef MEM_FORCE_MEMORY_ACCESS /* can be defined externally, on command line for example */ # if defined(__GNUC__) && ( defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) || defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6Z__) || defined(__ARM_ARCH_6ZK__) || defined(__ARM_ARCH_6T2__) ) # define MEM_FORCE_MEMORY_ACCESS 2 # elif (defined(__INTEL_COMPILER) && !defined(WIN32)) || \ (defined(__GNUC__) && ( defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__) || defined(__ARM_ARCH_7R__) || defined(__ARM_ARCH_7M__) || defined(__ARM_ARCH_7S__) )) # define MEM_FORCE_MEMORY_ACCESS 1 # endif #endif MEM_STATIC unsigned MEM_32bits(void) { return sizeof(size_t)==4; } MEM_STATIC unsigned MEM_64bits(void) { return sizeof(size_t)==8; } MEM_STATIC unsigned MEM_isLittleEndian(void) { const union { U32 u; BYTE c[4]; } one = { 1 }; /* don't use static : performance detrimental */ return one.c[0]; } #if defined(MEM_FORCE_MEMORY_ACCESS) && (MEM_FORCE_MEMORY_ACCESS==2) /* violates C standard, by lying on structure alignment. Only use if no other choice to achieve best performance on target platform */ MEM_STATIC U16 MEM_read16(const void* memPtr) { return *(const U16*) memPtr; } MEM_STATIC U32 MEM_read32(const void* memPtr) { return *(const U32*) memPtr; } MEM_STATIC U64 MEM_read64(const void* memPtr) { return *(const U64*) memPtr; } MEM_STATIC void MEM_write16(void* memPtr, U16 value) { *(U16*)memPtr = value; } #elif defined(MEM_FORCE_MEMORY_ACCESS) && (MEM_FORCE_MEMORY_ACCESS==1) /* __pack instructions are safer, but compiler specific, hence potentially problematic for some compilers */ /* currently only defined for gcc and icc */ typedef union { U16 u16; U32 u32; U64 u64; size_t st; } __attribute__((packed)) unalign; MEM_STATIC U16 MEM_read16(const void* ptr) { return ((const unalign*)ptr)->u16; } MEM_STATIC U32 MEM_read32(const void* ptr) { return ((const unalign*)ptr)->u32; } MEM_STATIC U64 MEM_read64(const void* ptr) { return ((const unalign*)ptr)->u64; } MEM_STATIC void MEM_write16(void* memPtr, U16 value) { ((unalign*)memPtr)->u16 = value; } #else /* default method, safe and standard. 
can sometimes prove slower */ MEM_STATIC U16 MEM_read16(const void* memPtr) { U16 val; memcpy(&val, memPtr, sizeof(val)); return val; } MEM_STATIC U32 MEM_read32(const void* memPtr) { U32 val; memcpy(&val, memPtr, sizeof(val)); return val; } MEM_STATIC U64 MEM_read64(const void* memPtr) { U64 val; memcpy(&val, memPtr, sizeof(val)); return val; } MEM_STATIC void MEM_write16(void* memPtr, U16 value) { memcpy(memPtr, &value, sizeof(value)); } #endif /* MEM_FORCE_MEMORY_ACCESS */ MEM_STATIC U32 MEM_swap32(U32 in) { #if defined(_MSC_VER) /* Visual Studio */ return _byteswap_ulong(in); #elif defined (__GNUC__) && (__GNUC__ * 100 + __GNUC_MINOR__ >= 403) return __builtin_bswap32(in); #else return ((in << 24) & 0xff000000 ) | ((in << 8) & 0x00ff0000 ) | ((in >> 8) & 0x0000ff00 ) | ((in >> 24) & 0x000000ff ); #endif } MEM_STATIC U64 MEM_swap64(U64 in) { #if defined(_MSC_VER) /* Visual Studio */ return _byteswap_uint64(in); #elif defined (__GNUC__) && (__GNUC__ * 100 + __GNUC_MINOR__ >= 403) return __builtin_bswap64(in); #else return ((in << 56) & 0xff00000000000000ULL) | ((in << 40) & 0x00ff000000000000ULL) | ((in << 24) & 0x0000ff0000000000ULL) | ((in << 8) & 0x000000ff00000000ULL) | ((in >> 8) & 0x00000000ff000000ULL) | ((in >> 24) & 0x0000000000ff0000ULL) | ((in >> 40) & 0x000000000000ff00ULL) | ((in >> 56) & 0x00000000000000ffULL); #endif } /*=== Little endian r/w ===*/ MEM_STATIC U16 MEM_readLE16(const void* memPtr) { if (MEM_isLittleEndian()) return MEM_read16(memPtr); else { const BYTE* p = (const BYTE*)memPtr; return (U16)(p[0] + (p[1]<<8)); } } MEM_STATIC void MEM_writeLE16(void* memPtr, U16 val) { if (MEM_isLittleEndian()) { MEM_write16(memPtr, val); } else { BYTE* p = (BYTE*)memPtr; p[0] = (BYTE)val; p[1] = (BYTE)(val>>8); } } MEM_STATIC U32 MEM_readLE32(const void* memPtr) { if (MEM_isLittleEndian()) return MEM_read32(memPtr); else return MEM_swap32(MEM_read32(memPtr)); } MEM_STATIC U64 MEM_readLE64(const void* memPtr) { if (MEM_isLittleEndian()) return MEM_read64(memPtr); else return MEM_swap64(MEM_read64(memPtr)); } MEM_STATIC size_t MEM_readLEST(const void* memPtr) { if (MEM_32bits()) return (size_t)MEM_readLE32(memPtr); else return (size_t)MEM_readLE64(memPtr); } #if defined (__cplusplus) } #endif #endif /* MEM_H_MODULE */ /* zstd - standard compression library Header File for static linking only Copyright (C) 2014-2016, Yann Collet. BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. You can contact the author at : - zstd homepage : http://www.zstd.net */ #ifndef ZSTDv06_STATIC_H #define ZSTDv06_STATIC_H /* The prototypes defined within this file are considered experimental. * They should not be used in the context DLL as they may change in the future. * Prefer static linking if you need them, to control breaking version changes issues. */ #if defined (__cplusplus) extern "C" { #endif /*- Advanced Decompression functions -*/ /*! ZSTDv06_decompress_usingPreparedDCtx() : * Same as ZSTDv06_decompress_usingDict, but using a reference context `preparedDCtx`, where dictionary has been loaded. * It avoids reloading the dictionary each time. * `preparedDCtx` must have been properly initialized using ZSTDv06_decompressBegin_usingDict(). * Requires 2 contexts : 1 for reference (preparedDCtx), which will not be modified, and 1 to run the decompression operation (dctx) */ ZSTDLIBv06_API size_t ZSTDv06_decompress_usingPreparedDCtx( ZSTDv06_DCtx* dctx, const ZSTDv06_DCtx* preparedDCtx, void* dst, size_t dstCapacity, const void* src, size_t srcSize); #define ZSTDv06_FRAMEHEADERSIZE_MAX 13 /* for static allocation */ static const size_t ZSTDv06_frameHeaderSize_min = 5; static const size_t ZSTDv06_frameHeaderSize_max = ZSTDv06_FRAMEHEADERSIZE_MAX; ZSTDLIBv06_API size_t ZSTDv06_decompressBegin(ZSTDv06_DCtx* dctx); /* Streaming decompression, direct mode (bufferless) A ZSTDv06_DCtx object is required to track streaming operations. Use ZSTDv06_createDCtx() / ZSTDv06_freeDCtx() to manage it. A ZSTDv06_DCtx object can be re-used multiple times. First optional operation is to retrieve frame parameters, using ZSTDv06_getFrameParams(), which doesn't consume the input. It can provide the minimum size of rolling buffer required to properly decompress data, and optionally the final size of uncompressed content. (Note : content size is an optional info that may not be present. 0 means : content size unknown) Frame parameters are extracted from the beginning of compressed frame. The amount of data to read is variable, from ZSTDv06_frameHeaderSize_min to ZSTDv06_frameHeaderSize_max (so if `srcSize` >= ZSTDv06_frameHeaderSize_max, it will always work) If `srcSize` is too small for operation to succeed, function will return the minimum size it requires to produce a result. Result : 0 when successful, it means the ZSTDv06_frameParams structure has been filled. >0 : means there is not enough data into `src`. Provides the expected size to successfully decode header. errorCode, which can be tested using ZSTDv06_isError() Start decompression, with ZSTDv06_decompressBegin() or ZSTDv06_decompressBegin_usingDict(). Alternatively, you can copy a prepared context, using ZSTDv06_copyDCtx(). Then use ZSTDv06_nextSrcSizeToDecompress() and ZSTDv06_decompressContinue() alternatively. ZSTDv06_nextSrcSizeToDecompress() tells how much bytes to provide as 'srcSize' to ZSTDv06_decompressContinue(). ZSTDv06_decompressContinue() requires this exact amount of bytes, or it will fail. 
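
  A bare-bones sketch of that call sequence (context creation, dictionary loading and
  window management are omitted; fillExactly() is a hypothetical helper that reads exactly
  the requested number of bytes into inBuff) :

      ZSTDv06_decompressBegin(dctx);
      size_t toRead;
      while ((toRead = ZSTDv06_nextSrcSizeToDecompress(dctx)) != 0) {
          fillExactly(inBuff, toRead);
          size_t const produced = ZSTDv06_decompressContinue(dctx, op, (size_t)(oend - op), inBuff, toRead);
          if (ZSTDv06_isError(produced)) return produced;
          op += produced;
      }

  produced can be 0, which only means a header was decoded.
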
ZSTDv06_decompressContinue() needs previous data blocks during decompression, up to (1 << windowlog). They should preferably be located contiguously, prior to current block. Alternatively, a round buffer is also possible. @result of ZSTDv06_decompressContinue() is the number of bytes regenerated within 'dst' (necessarily <= dstCapacity) It can be zero, which is not an error; it just means ZSTDv06_decompressContinue() has decoded some header. A frame is fully decoded when ZSTDv06_nextSrcSizeToDecompress() returns zero. Context can then be reset to start a new decompression. */ /* ************************************** * Block functions ****************************************/ /*! Block functions produce and decode raw zstd blocks, without frame metadata. User will have to take in charge required information to regenerate data, such as compressed and content sizes. A few rules to respect : - Uncompressed block size must be <= ZSTDv06_BLOCKSIZE_MAX (128 KB) - Compressing or decompressing requires a context structure + Use ZSTDv06_createCCtx() and ZSTDv06_createDCtx() - It is necessary to init context before starting + compression : ZSTDv06_compressBegin() + decompression : ZSTDv06_decompressBegin() + variants _usingDict() are also allowed + copyCCtx() and copyDCtx() work too - When a block is considered not compressible enough, ZSTDv06_compressBlock() result will be zero. In which case, nothing is produced into `dst`. + User must test for such outcome and deal directly with uncompressed data + ZSTDv06_decompressBlock() doesn't accept uncompressed data as input !! */ #define ZSTDv06_BLOCKSIZE_MAX (128 * 1024) /* define, for static allocation */ ZSTDLIBv06_API size_t ZSTDv06_decompressBlock(ZSTDv06_DCtx* dctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize); #if defined (__cplusplus) } #endif #endif /* ZSTDv06_STATIC_H */ /* zstd_internal - common functions to include Header File for include Copyright (C) 2014-2016, Yann Collet. BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. You can contact the author at : - zstd homepage : https://www.zstd.net */ #ifndef ZSTDv06_CCOMMON_H_MODULE #define ZSTDv06_CCOMMON_H_MODULE /*-************************************* * Common macros ***************************************/ #define MIN(a,b) ((a)<(b) ? 
(a) : (b)) #define MAX(a,b) ((a)>(b) ? (a) : (b)) /*-************************************* * Common constants ***************************************/ #define ZSTDv06_DICT_MAGIC 0xEC30A436 #define ZSTDv06_REP_NUM 3 #define ZSTDv06_REP_INIT ZSTDv06_REP_NUM #define ZSTDv06_REP_MOVE (ZSTDv06_REP_NUM-1) #define KB *(1 <<10) #define MB *(1 <<20) #define GB *(1U<<30) #define BIT7 128 #define BIT6 64 #define BIT5 32 #define BIT4 16 #define BIT1 2 #define BIT0 1 #define ZSTDv06_WINDOWLOG_ABSOLUTEMIN 12 static const size_t ZSTDv06_fcs_fieldSize[4] = { 0, 1, 2, 8 }; #define ZSTDv06_BLOCKHEADERSIZE 3 /* because C standard does not allow a static const value to be defined using another static const value .... :( */ static const size_t ZSTDv06_blockHeaderSize = ZSTDv06_BLOCKHEADERSIZE; typedef enum { bt_compressed, bt_raw, bt_rle, bt_end } blockType_t; #define MIN_SEQUENCES_SIZE 1 /* nbSeq==0 */ #define MIN_CBLOCK_SIZE (1 /*litCSize*/ + 1 /* RLE or RAW */ + MIN_SEQUENCES_SIZE /* nbSeq==0 */) /* for a non-null block */ #define HufLog 12 #define IS_HUF 0 #define IS_PCH 1 #define IS_RAW 2 #define IS_RLE 3 #define LONGNBSEQ 0x7F00 #define MINMATCH 3 #define EQUAL_READ32 4 #define REPCODE_STARTVALUE 1 #define Litbits 8 #define MaxLit ((1< /* support for bextr (experimental) */ #endif /*-******************************************** * bitStream decoding API (read backward) **********************************************/ typedef struct { size_t bitContainer; unsigned bitsConsumed; const char* ptr; const char* start; } BITv06_DStream_t; typedef enum { BITv06_DStream_unfinished = 0, BITv06_DStream_endOfBuffer = 1, BITv06_DStream_completed = 2, BITv06_DStream_overflow = 3 } BITv06_DStream_status; /* result of BITv06_reloadDStream() */ /* 1,2,4,8 would be better for bitmap combinations, but slows down performance a bit ... :( */ MEM_STATIC size_t BITv06_initDStream(BITv06_DStream_t* bitD, const void* srcBuffer, size_t srcSize); MEM_STATIC size_t BITv06_readBits(BITv06_DStream_t* bitD, unsigned nbBits); MEM_STATIC BITv06_DStream_status BITv06_reloadDStream(BITv06_DStream_t* bitD); MEM_STATIC unsigned BITv06_endOfDStream(const BITv06_DStream_t* bitD); /*-**************************************** * unsafe API ******************************************/ MEM_STATIC size_t BITv06_readBitsFast(BITv06_DStream_t* bitD, unsigned nbBits); /* faster, but works only if nbBits >= 1 */ /*-************************************************************** * Internal functions ****************************************************************/ MEM_STATIC unsigned BITv06_highbit32 ( U32 val) { # if defined(_MSC_VER) /* Visual */ unsigned long r=0; _BitScanReverse ( &r, val ); return (unsigned) r; # elif defined(__GNUC__) && (__GNUC__ >= 3) /* Use GCC Intrinsic */ return __builtin_clz (val) ^ 31; # else /* Software version */ static const unsigned DeBruijnClz[32] = { 0, 9, 1, 10, 13, 21, 2, 29, 11, 14, 16, 18, 22, 25, 3, 30, 8, 12, 20, 28, 15, 17, 24, 7, 19, 27, 23, 6, 26, 5, 4, 31 }; U32 v = val; unsigned r; v |= v >> 1; v |= v >> 2; v |= v >> 4; v |= v >> 8; v |= v >> 16; r = DeBruijnClz[ (U32) (v * 0x07C4ACDDU) >> 27]; return r; # endif } /*-******************************************************** * bitStream decoding **********************************************************/ /*! BITv06_initDStream() : * Initialize a BITv06_DStream_t. * `bitD` : a pointer to an already allocated BITv06_DStream_t structure. * `srcSize` must be the *exact* size of the bitStream, in bytes. 
* @return : size of stream (== srcSize) or an errorCode if a problem is detected */ MEM_STATIC size_t BITv06_initDStream(BITv06_DStream_t* bitD, const void* srcBuffer, size_t srcSize) { if (srcSize < 1) { memset(bitD, 0, sizeof(*bitD)); return ERROR(srcSize_wrong); } if (srcSize >= sizeof(bitD->bitContainer)) { /* normal case */ bitD->start = (const char*)srcBuffer; bitD->ptr = (const char*)srcBuffer + srcSize - sizeof(bitD->bitContainer); bitD->bitContainer = MEM_readLEST(bitD->ptr); { BYTE const lastByte = ((const BYTE*)srcBuffer)[srcSize-1]; if (lastByte == 0) return ERROR(GENERIC); /* endMark not present */ bitD->bitsConsumed = 8 - BITv06_highbit32(lastByte); } } else { bitD->start = (const char*)srcBuffer; bitD->ptr = bitD->start; bitD->bitContainer = *(const BYTE*)(bitD->start); switch(srcSize) { case 7: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[6]) << (sizeof(bitD->bitContainer)*8 - 16);/* fall-through */ case 6: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[5]) << (sizeof(bitD->bitContainer)*8 - 24);/* fall-through */ case 5: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[4]) << (sizeof(bitD->bitContainer)*8 - 32);/* fall-through */ case 4: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[3]) << 24; /* fall-through */ case 3: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[2]) << 16; /* fall-through */ case 2: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[1]) << 8; /* fall-through */ default: break; } { BYTE const lastByte = ((const BYTE*)srcBuffer)[srcSize-1]; if (lastByte == 0) return ERROR(GENERIC); /* endMark not present */ bitD->bitsConsumed = 8 - BITv06_highbit32(lastByte); } bitD->bitsConsumed += (U32)(sizeof(bitD->bitContainer) - srcSize)*8; } return srcSize; } MEM_STATIC size_t BITv06_lookBits(const BITv06_DStream_t* bitD, U32 nbBits) { U32 const bitMask = sizeof(bitD->bitContainer)*8 - 1; return ((bitD->bitContainer << (bitD->bitsConsumed & bitMask)) >> 1) >> ((bitMask-nbBits) & bitMask); } /*! BITv06_lookBitsFast() : * unsafe version; only works only if nbBits >= 1 */ MEM_STATIC size_t BITv06_lookBitsFast(const BITv06_DStream_t* bitD, U32 nbBits) { U32 const bitMask = sizeof(bitD->bitContainer)*8 - 1; return (bitD->bitContainer << (bitD->bitsConsumed & bitMask)) >> (((bitMask+1)-nbBits) & bitMask); } MEM_STATIC void BITv06_skipBits(BITv06_DStream_t* bitD, U32 nbBits) { bitD->bitsConsumed += nbBits; } MEM_STATIC size_t BITv06_readBits(BITv06_DStream_t* bitD, U32 nbBits) { size_t const value = BITv06_lookBits(bitD, nbBits); BITv06_skipBits(bitD, nbBits); return value; } /*! 
BITv06_readBitsFast() : * unsafe version; only works only if nbBits >= 1 */ MEM_STATIC size_t BITv06_readBitsFast(BITv06_DStream_t* bitD, U32 nbBits) { size_t const value = BITv06_lookBitsFast(bitD, nbBits); BITv06_skipBits(bitD, nbBits); return value; } MEM_STATIC BITv06_DStream_status BITv06_reloadDStream(BITv06_DStream_t* bitD) { if (bitD->bitsConsumed > (sizeof(bitD->bitContainer)*8)) /* should never happen */ return BITv06_DStream_overflow; if (bitD->ptr >= bitD->start + sizeof(bitD->bitContainer)) { bitD->ptr -= bitD->bitsConsumed >> 3; bitD->bitsConsumed &= 7; bitD->bitContainer = MEM_readLEST(bitD->ptr); return BITv06_DStream_unfinished; } if (bitD->ptr == bitD->start) { if (bitD->bitsConsumed < sizeof(bitD->bitContainer)*8) return BITv06_DStream_endOfBuffer; return BITv06_DStream_completed; } { U32 nbBytes = bitD->bitsConsumed >> 3; BITv06_DStream_status result = BITv06_DStream_unfinished; if (bitD->ptr - nbBytes < bitD->start) { nbBytes = (U32)(bitD->ptr - bitD->start); /* ptr > start */ result = BITv06_DStream_endOfBuffer; } bitD->ptr -= nbBytes; bitD->bitsConsumed -= nbBytes*8; bitD->bitContainer = MEM_readLEST(bitD->ptr); /* reminder : srcSize > sizeof(bitD) */ return result; } } /*! BITv06_endOfDStream() : * @return Tells if DStream has exactly reached its end (all bits consumed). */ MEM_STATIC unsigned BITv06_endOfDStream(const BITv06_DStream_t* DStream) { return ((DStream->ptr == DStream->start) && (DStream->bitsConsumed == sizeof(DStream->bitContainer)*8)); } #if defined (__cplusplus) } #endif #endif /* BITSTREAM_H_MODULE */ /* ****************************************************************** FSE : Finite State Entropy coder header file for static linking (only) Copyright (C) 2013-2015, Yann Collet BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
You can contact the author at : - Source repository : https://github.com/Cyan4973/FiniteStateEntropy - Public forum : https://groups.google.com/forum/#!forum/lz4c ****************************************************************** */ #ifndef FSEv06_STATIC_H #define FSEv06_STATIC_H #if defined (__cplusplus) extern "C" { #endif /* ***************************************** * Static allocation *******************************************/ /* FSE buffer bounds */ #define FSEv06_NCOUNTBOUND 512 #define FSEv06_BLOCKBOUND(size) (size + (size>>7)) #define FSEv06_COMPRESSBOUND(size) (FSEv06_NCOUNTBOUND + FSEv06_BLOCKBOUND(size)) /* Macro version, useful for static allocation */ /* It is possible to statically allocate FSE CTable/DTable as a table of unsigned using below macros */ #define FSEv06_DTABLE_SIZE_U32(maxTableLog) (1 + (1<= 1 (otherwise, result will be corrupted) */ /* ***************************************** * Implementation of inlined functions *******************************************/ /* ====== Decompression ====== */ typedef struct { U16 tableLog; U16 fastMode; } FSEv06_DTableHeader; /* sizeof U32 */ typedef struct { unsigned short newState; unsigned char symbol; unsigned char nbBits; } FSEv06_decode_t; /* size == U32 */ MEM_STATIC void FSEv06_initDState(FSEv06_DState_t* DStatePtr, BITv06_DStream_t* bitD, const FSEv06_DTable* dt) { const void* ptr = dt; const FSEv06_DTableHeader* const DTableH = (const FSEv06_DTableHeader*)ptr; DStatePtr->state = BITv06_readBits(bitD, DTableH->tableLog); BITv06_reloadDStream(bitD); DStatePtr->table = dt + 1; } MEM_STATIC BYTE FSEv06_peekSymbol(const FSEv06_DState_t* DStatePtr) { FSEv06_decode_t const DInfo = ((const FSEv06_decode_t*)(DStatePtr->table))[DStatePtr->state]; return DInfo.symbol; } MEM_STATIC void FSEv06_updateState(FSEv06_DState_t* DStatePtr, BITv06_DStream_t* bitD) { FSEv06_decode_t const DInfo = ((const FSEv06_decode_t*)(DStatePtr->table))[DStatePtr->state]; U32 const nbBits = DInfo.nbBits; size_t const lowBits = BITv06_readBits(bitD, nbBits); DStatePtr->state = DInfo.newState + lowBits; } MEM_STATIC BYTE FSEv06_decodeSymbol(FSEv06_DState_t* DStatePtr, BITv06_DStream_t* bitD) { FSEv06_decode_t const DInfo = ((const FSEv06_decode_t*)(DStatePtr->table))[DStatePtr->state]; U32 const nbBits = DInfo.nbBits; BYTE const symbol = DInfo.symbol; size_t const lowBits = BITv06_readBits(bitD, nbBits); DStatePtr->state = DInfo.newState + lowBits; return symbol; } /*! FSEv06_decodeSymbolFast() : unsafe, only works if no symbol has a probability > 50% */ MEM_STATIC BYTE FSEv06_decodeSymbolFast(FSEv06_DState_t* DStatePtr, BITv06_DStream_t* bitD) { FSEv06_decode_t const DInfo = ((const FSEv06_decode_t*)(DStatePtr->table))[DStatePtr->state]; U32 const nbBits = DInfo.nbBits; BYTE const symbol = DInfo.symbol; size_t const lowBits = BITv06_readBitsFast(bitD, nbBits); DStatePtr->state = DInfo.newState + lowBits; return symbol; } #ifndef FSEv06_COMMONDEFS_ONLY /* ************************************************************** * Tuning parameters ****************************************************************/ /*!MEMORY_USAGE : * Memory usage formula : N->2^N Bytes (examples : 10 -> 1KB; 12 -> 4KB ; 16 -> 64KB; 20 -> 1MB; etc.) * Increasing memory usage improves compression ratio * Reduced memory usage can improve speed, due to cache effect * Recommended max value is 14, for 16KB, which nicely fits into Intel x86 L1 cache */ #define FSEv06_MAX_MEMORY_USAGE 14 #define FSEv06_DEFAULT_MEMORY_USAGE 13 /*!FSEv06_MAX_SYMBOL_VALUE : * Maximum symbol value authorized. 
* Required for proper stack allocation */ #define FSEv06_MAX_SYMBOL_VALUE 255 /* ************************************************************** * template functions type & suffix ****************************************************************/ #define FSEv06_FUNCTION_TYPE BYTE #define FSEv06_FUNCTION_EXTENSION #define FSEv06_DECODE_TYPE FSEv06_decode_t #endif /* !FSEv06_COMMONDEFS_ONLY */ /* *************************************************************** * Constants *****************************************************************/ #define FSEv06_MAX_TABLELOG (FSEv06_MAX_MEMORY_USAGE-2) #define FSEv06_MAX_TABLESIZE (1U<<FSEv06_MAX_TABLELOG) #define FSEv06_MAXTABLESIZE_MASK (FSEv06_MAX_TABLESIZE-1) #define FSEv06_DEFAULT_TABLELOG (FSEv06_DEFAULT_MEMORY_USAGE-2) #define FSEv06_MIN_TABLELOG 5 #define FSEv06_TABLELOG_ABSOLUTE_MAX 15 #if FSEv06_MAX_TABLELOG > FSEv06_TABLELOG_ABSOLUTE_MAX #error "FSEv06_MAX_TABLELOG > FSEv06_TABLELOG_ABSOLUTE_MAX is not supported" #endif #define FSEv06_TABLESTEP(tableSize) ((tableSize>>1) + (tableSize>>3) + 3) #if defined (__cplusplus) } #endif #endif /* FSEv06_STATIC_H */ /* Common functions of New Generation Entropy library Copyright (C) 2016, Yann Collet. BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. You can contact the author at : - FSE+HUF source repository : https://github.com/Cyan4973/FiniteStateEntropy - Public forum : https://groups.google.com/forum/#!forum/lz4c *************************************************************************** */ /*-**************************************** * FSE Error Management ******************************************/ unsigned FSEv06_isError(size_t code) { return ERR_isError(code); } const char* FSEv06_getErrorName(size_t code) { return ERR_getErrorName(code); } /* ************************************************************** * HUF Error Management ****************************************************************/ static unsigned HUFv06_isError(size_t code) { return ERR_isError(code); } /*-************************************************************** * FSE NCount encoding-decoding ****************************************************************/ static short FSEv06_abs(short a) { return a<0 ?
-a : a; } size_t FSEv06_readNCount (short* normalizedCounter, unsigned* maxSVPtr, unsigned* tableLogPtr, const void* headerBuffer, size_t hbSize) { const BYTE* const istart = (const BYTE*) headerBuffer; const BYTE* const iend = istart + hbSize; const BYTE* ip = istart; int nbBits; int remaining; int threshold; U32 bitStream; int bitCount; unsigned charnum = 0; int previous0 = 0; if (hbSize < 4) return ERROR(srcSize_wrong); bitStream = MEM_readLE32(ip); nbBits = (bitStream & 0xF) + FSEv06_MIN_TABLELOG; /* extract tableLog */ if (nbBits > FSEv06_TABLELOG_ABSOLUTE_MAX) return ERROR(tableLog_tooLarge); bitStream >>= 4; bitCount = 4; *tableLogPtr = nbBits; remaining = (1<<nbBits)+1; threshold = 1<<nbBits; nbBits++; while ((remaining>1) && (charnum<=*maxSVPtr)) { if (previous0) { unsigned n0 = charnum; while ((bitStream & 0xFFFF) == 0xFFFF) { n0+=24; if (ip < iend-5) { ip+=2; bitStream = MEM_readLE32(ip) >> bitCount; } else { bitStream >>= 16; bitCount+=16; } } while ((bitStream & 3) == 3) { n0+=3; bitStream>>=2; bitCount+=2; } n0 += bitStream & 3; bitCount += 2; if (n0 > *maxSVPtr) return ERROR(maxSymbolValue_tooSmall); while (charnum < n0) normalizedCounter[charnum++] = 0; if ((ip <= iend-7) || (ip + (bitCount>>3) <= iend-4)) { ip += bitCount>>3; bitCount &= 7; bitStream = MEM_readLE32(ip) >> bitCount; } else bitStream >>= 2; } { short const max = (short)((2*threshold-1)-remaining); short count; if ((bitStream & (threshold-1)) < (U32)max) { count = (short)(bitStream & (threshold-1)); bitCount += nbBits-1; } else { count = (short)(bitStream & (2*threshold-1)); if (count >= threshold) count -= max; bitCount += nbBits; } count--; /* extra accuracy */ remaining -= FSEv06_abs(count); normalizedCounter[charnum++] = count; previous0 = !count; while (remaining < threshold) { nbBits--; threshold >>= 1; } if ((ip <= iend-7) || (ip + (bitCount>>3) <= iend-4)) { ip += bitCount>>3; bitCount &= 7; } else { bitCount -= (int)(8 * (iend - 4 - ip)); ip = iend - 4; } bitStream = MEM_readLE32(ip) >> (bitCount & 31); } } /* while ((remaining>1) && (charnum<=*maxSVPtr)) */ if (remaining != 1) return ERROR(GENERIC); *maxSVPtr = charnum-1; ip += (bitCount+7)>>3; if ((size_t)(ip-istart) > hbSize) return ERROR(srcSize_wrong); return ip-istart; } /* ****************************************************************** FSE : Finite State Entropy decoder Copyright (C) 2013-2015, Yann Collet. BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. You can contact the author at : - FSE source repository : https://github.com/Cyan4973/FiniteStateEntropy - Public forum : https://groups.google.com/forum/#!forum/lz4c ****************************************************************** */ /* ************************************************************** * Compiler specifics ****************************************************************/ #ifdef _MSC_VER /* Visual Studio */ # define FORCE_INLINE static __forceinline # include <intrin.h> /* For Visual 2005 */ # pragma warning(disable : 4127) /* disable: C4127: conditional expression is constant */ # pragma warning(disable : 4214) /* disable: C4214: non-int bitfields */ #else # if defined (__cplusplus) || defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L /* C99 */ # ifdef __GNUC__ # define FORCE_INLINE static inline __attribute__((always_inline)) # else # define FORCE_INLINE static inline # endif # else # define FORCE_INLINE static # endif /* __STDC_VERSION__ */ #endif /* ************************************************************** * Error Management ****************************************************************/ #define FSEv06_isError ERR_isError #define FSEv06_STATIC_ASSERT(c) { enum { FSEv06_static_assert = 1/(int)(!!(c)) }; } /* use only *after* variable declarations */ /* ************************************************************** * Complex types ****************************************************************/ typedef U32 DTable_max_t[FSEv06_DTABLE_SIZE_U32(FSEv06_MAX_TABLELOG)]; /* ************************************************************** * Templates ****************************************************************/ /* designed to be included for type-specific functions (template emulation in C) Objective is to write these functions only once, for improved maintenance */ /* safety checks */ #ifndef FSEv06_FUNCTION_EXTENSION # error "FSEv06_FUNCTION_EXTENSION must be defined" #endif #ifndef FSEv06_FUNCTION_TYPE # error "FSEv06_FUNCTION_TYPE must be defined" #endif /* Function names */ #define FSEv06_CAT(X,Y) X##Y #define FSEv06_FUNCTION_NAME(X,Y) FSEv06_CAT(X,Y) #define FSEv06_TYPE_NAME(X,Y) FSEv06_CAT(X,Y) /* Function templates */ FSEv06_DTable* FSEv06_createDTable (unsigned tableLog) { if (tableLog > FSEv06_TABLELOG_ABSOLUTE_MAX) tableLog = FSEv06_TABLELOG_ABSOLUTE_MAX; return (FSEv06_DTable*)malloc( FSEv06_DTABLE_SIZE_U32(tableLog) * sizeof (U32) ); } void FSEv06_freeDTable (FSEv06_DTable* dt) { free(dt); } size_t FSEv06_buildDTable(FSEv06_DTable* dt, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog) { void* const tdPtr = dt+1; /* because *dt is unsigned, 32-bits aligned on 32-bits */ FSEv06_DECODE_TYPE* const tableDecode = (FSEv06_DECODE_TYPE*) (tdPtr); U16 symbolNext[FSEv06_MAX_SYMBOL_VALUE+1]; U32 const maxSV1 = maxSymbolValue + 1; U32 const tableSize = 1 << tableLog; U32 highThreshold = tableSize-1; /* Sanity Checks */ if (maxSymbolValue > FSEv06_MAX_SYMBOL_VALUE) return ERROR(maxSymbolValue_tooLarge); if (tableLog > FSEv06_MAX_TABLELOG)
return ERROR(tableLog_tooLarge); /* Init, lay down lowprob symbols */ { FSEv06_DTableHeader DTableH; DTableH.tableLog = (U16)tableLog; DTableH.fastMode = 1; { S16 const largeLimit= (S16)(1 << (tableLog-1)); U32 s; for (s=0; s= largeLimit) DTableH.fastMode=0; symbolNext[s] = normalizedCounter[s]; } } } memcpy(dt, &DTableH, sizeof(DTableH)); } /* Spread symbols */ { U32 const tableMask = tableSize-1; U32 const step = FSEv06_TABLESTEP(tableSize); U32 s, position = 0; for (s=0; s highThreshold) position = (position + step) & tableMask; /* lowprob area */ } } if (position!=0) return ERROR(GENERIC); /* position must reach all cells once, otherwise normalizedCounter is incorrect */ } /* Build Decoding table */ { U32 u; for (u=0; utableLog = 0; DTableH->fastMode = 0; cell->newState = 0; cell->symbol = symbolValue; cell->nbBits = 0; return 0; } size_t FSEv06_buildDTable_raw (FSEv06_DTable* dt, unsigned nbBits) { void* ptr = dt; FSEv06_DTableHeader* const DTableH = (FSEv06_DTableHeader*)ptr; void* dPtr = dt + 1; FSEv06_decode_t* const dinfo = (FSEv06_decode_t*)dPtr; const unsigned tableSize = 1 << nbBits; const unsigned tableMask = tableSize - 1; const unsigned maxSV1 = tableMask+1; unsigned s; /* Sanity checks */ if (nbBits < 1) return ERROR(GENERIC); /* min size */ /* Build Decoding Table */ DTableH->tableLog = (U16)nbBits; DTableH->fastMode = 1; for (s=0; s sizeof(bitD.bitContainer)*8) /* This test must be static */ BITv06_reloadDStream(&bitD); op[1] = FSEv06_GETSYMBOL(&state2); if (FSEv06_MAX_TABLELOG*4+7 > sizeof(bitD.bitContainer)*8) /* This test must be static */ { if (BITv06_reloadDStream(&bitD) > BITv06_DStream_unfinished) { op+=2; break; } } op[2] = FSEv06_GETSYMBOL(&state1); if (FSEv06_MAX_TABLELOG*2+7 > sizeof(bitD.bitContainer)*8) /* This test must be static */ BITv06_reloadDStream(&bitD); op[3] = FSEv06_GETSYMBOL(&state2); } /* tail */ /* note : BITv06_reloadDStream(&bitD) >= FSEv06_DStream_partiallyFilled; Ends at exactly BITv06_DStream_completed */ while (1) { if (op>(omax-2)) return ERROR(dstSize_tooSmall); *op++ = FSEv06_GETSYMBOL(&state1); if (BITv06_reloadDStream(&bitD)==BITv06_DStream_overflow) { *op++ = FSEv06_GETSYMBOL(&state2); break; } if (op>(omax-2)) return ERROR(dstSize_tooSmall); *op++ = FSEv06_GETSYMBOL(&state2); if (BITv06_reloadDStream(&bitD)==BITv06_DStream_overflow) { *op++ = FSEv06_GETSYMBOL(&state1); break; } } return op-ostart; } size_t FSEv06_decompress_usingDTable(void* dst, size_t originalSize, const void* cSrc, size_t cSrcSize, const FSEv06_DTable* dt) { const void* ptr = dt; const FSEv06_DTableHeader* DTableH = (const FSEv06_DTableHeader*)ptr; const U32 fastMode = DTableH->fastMode; /* select fast mode (static) */ if (fastMode) return FSEv06_decompress_usingDTable_generic(dst, originalSize, cSrc, cSrcSize, dt, 1); return FSEv06_decompress_usingDTable_generic(dst, originalSize, cSrc, cSrcSize, dt, 0); } size_t FSEv06_decompress(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize) { const BYTE* const istart = (const BYTE*)cSrc; const BYTE* ip = istart; short counting[FSEv06_MAX_SYMBOL_VALUE+1]; DTable_max_t dt; /* Static analyzer seems unable to understand this table will be properly initialized later */ unsigned tableLog; unsigned maxSymbolValue = FSEv06_MAX_SYMBOL_VALUE; if (cSrcSize<2) return ERROR(srcSize_wrong); /* too small input size */ /* normal FSE decoding mode */ { size_t const NCountLength = FSEv06_readNCount (counting, &maxSymbolValue, &tableLog, istart, cSrcSize); if (FSEv06_isError(NCountLength)) return NCountLength; if (NCountLength 
>= cSrcSize) return ERROR(srcSize_wrong); /* too small input size */ ip += NCountLength; cSrcSize -= NCountLength; } { size_t const errorCode = FSEv06_buildDTable (dt, counting, maxSymbolValue, tableLog); if (FSEv06_isError(errorCode)) return errorCode; } return FSEv06_decompress_usingDTable (dst, maxDstSize, ip, cSrcSize, dt); /* always return, even if it is an error code */ } #endif /* FSEv06_COMMONDEFS_ONLY */ /* ****************************************************************** Huffman coder, part of New Generation Entropy library header file Copyright (C) 2013-2016, Yann Collet. BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. You can contact the author at : - Source repository : https://github.com/Cyan4973/FiniteStateEntropy ****************************************************************** */ #ifndef HUFv06_H #define HUFv06_H #if defined (__cplusplus) extern "C" { #endif /* **************************************** * HUF simple functions ******************************************/ size_t HUFv06_decompress(void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /* HUFv06_decompress() : Decompress HUF data from buffer 'cSrc', of size 'cSrcSize', into already allocated destination buffer 'dst', of size 'dstSize'. `dstSize` : must be the **exact** size of original (uncompressed) data. Note : in contrast with FSE, HUFv06_decompress can regenerate RLE (cSrcSize==1) and uncompressed (cSrcSize==dstSize) data, because it knows size to regenerate. 
@return : size of regenerated data (== dstSize) or an error code, which can be tested using HUFv06_isError() */ /* **************************************** * Tool functions ******************************************/ size_t HUFv06_compressBound(size_t size); /**< maximum compressed size */ #if defined (__cplusplus) } #endif #endif /* HUFv06_H */ /* ****************************************************************** Huffman codec, part of New Generation Entropy library header file, for static linking only Copyright (C) 2013-2016, Yann Collet BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. You can contact the author at : - Source repository : https://github.com/Cyan4973/FiniteStateEntropy ****************************************************************** */ #ifndef HUFv06_STATIC_H #define HUFv06_STATIC_H #if defined (__cplusplus) extern "C" { #endif /* **************************************** * Static allocation ******************************************/ /* HUF buffer bounds */ #define HUFv06_CTABLEBOUND 129 #define HUFv06_BLOCKBOUND(size) (size + (size>>8) + 8) /* only true if incompressible pre-filtered with fast heuristic */ #define HUFv06_COMPRESSBOUND(size) (HUFv06_CTABLEBOUND + HUFv06_BLOCKBOUND(size)) /* Macro version, useful for static allocation */ /* static allocation of HUF's DTable */ #define HUFv06_DTABLE_SIZE(maxTableLog) (1 + (1< HUFv06_ABSOLUTEMAX_TABLELOG) # error "HUFv06_MAX_TABLELOG is too large !" #endif /*! HUFv06_readStats() : Read compact Huffman tree, saved by HUFv06_writeCTable(). `huffWeight` is destination buffer. @return : size read from `src` */ MEM_STATIC size_t HUFv06_readStats(BYTE* huffWeight, size_t hwSize, U32* rankStats, U32* nbSymbolsPtr, U32* tableLogPtr, const void* src, size_t srcSize) { U32 weightTotal; const BYTE* ip = (const BYTE*) src; size_t iSize; size_t oSize; if (!srcSize) return ERROR(srcSize_wrong); iSize = ip[0]; /* memset(huffWeight, 0, hwSize); */ /* is not necessary, even though some analyzer complain ... 
*/ if (iSize >= 128) { /* special header */ if (iSize >= (242)) { /* RLE */ static U32 l[14] = { 1, 2, 3, 4, 7, 8, 15, 16, 31, 32, 63, 64, 127, 128 }; oSize = l[iSize-242]; memset(huffWeight, 1, hwSize); iSize = 0; } else { /* Incompressible */ oSize = iSize - 127; iSize = ((oSize+1)/2); if (iSize+1 > srcSize) return ERROR(srcSize_wrong); if (oSize >= hwSize) return ERROR(corruption_detected); ip += 1; { U32 n; for (n=0; n> 4; huffWeight[n+1] = ip[n/2] & 15; } } } } else { /* header compressed with FSE (normal case) */ if (iSize+1 > srcSize) return ERROR(srcSize_wrong); oSize = FSEv06_decompress(huffWeight, hwSize-1, ip+1, iSize); /* max (hwSize-1) values decoded, as last one is implied */ if (FSEv06_isError(oSize)) return oSize; } /* collect weight stats */ memset(rankStats, 0, (HUFv06_ABSOLUTEMAX_TABLELOG + 1) * sizeof(U32)); weightTotal = 0; { U32 n; for (n=0; n= HUFv06_ABSOLUTEMAX_TABLELOG) return ERROR(corruption_detected); rankStats[huffWeight[n]]++; weightTotal += (1 << huffWeight[n]) >> 1; } } if (weightTotal == 0) return ERROR(corruption_detected); /* get last non-null symbol weight (implied, total must be 2^n) */ { U32 const tableLog = BITv06_highbit32(weightTotal) + 1; if (tableLog > HUFv06_ABSOLUTEMAX_TABLELOG) return ERROR(corruption_detected); *tableLogPtr = tableLog; /* determine last weight */ { U32 const total = 1 << tableLog; U32 const rest = total - weightTotal; U32 const verif = 1 << BITv06_highbit32(rest); U32 const lastWeight = BITv06_highbit32(rest) + 1; if (verif != rest) return ERROR(corruption_detected); /* last value must be a clean power of 2 */ huffWeight[oSize] = (BYTE)lastWeight; rankStats[lastWeight]++; } } /* check tree construction validity */ if ((rankStats[1] < 2) || (rankStats[1] & 1)) return ERROR(corruption_detected); /* by construction : at least 2 elts of rank 1, must be even */ /* results */ *nbSymbolsPtr = (U32)(oSize+1); return iSize+1; } #if defined (__cplusplus) } #endif #endif /* HUFv06_STATIC_H */ /* ****************************************************************** Huffman decoder, part of New Generation Entropy library Copyright (C) 2013-2016, Yann Collet. BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
You can contact the author at : - FSE+HUF source repository : https://github.com/Cyan4973/FiniteStateEntropy - Public forum : https://groups.google.com/forum/#!forum/lz4c ****************************************************************** */ /* ************************************************************** * Compiler specifics ****************************************************************/ #if defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) /* inline is defined */ #elif defined(_MSC_VER) # define inline __inline #else # define inline /* disable inline */ #endif #ifdef _MSC_VER /* Visual Studio */ # pragma warning(disable : 4127) /* disable: C4127: conditional expression is constant */ #endif /* ************************************************************** * Error Management ****************************************************************/ #define HUFv06_STATIC_ASSERT(c) { enum { HUFv06_static_assert = 1/(int)(!!(c)) }; } /* use only *after* variable declarations */ /* ******************************************************* * HUF : Huffman block decompression *********************************************************/ typedef struct { BYTE byte; BYTE nbBits; } HUFv06_DEltX2; /* single-symbol decoding */ typedef struct { U16 sequence; BYTE nbBits; BYTE length; } HUFv06_DEltX4; /* double-symbols decoding */ typedef struct { BYTE symbol; BYTE weight; } sortedSymbol_t; /*-***************************/ /* single-symbol decoding */ /*-***************************/ size_t HUFv06_readDTableX2 (U16* DTable, const void* src, size_t srcSize) { BYTE huffWeight[HUFv06_MAX_SYMBOL_VALUE + 1]; U32 rankVal[HUFv06_ABSOLUTEMAX_TABLELOG + 1]; /* large enough for values from 0 to 16 */ U32 tableLog = 0; size_t iSize; U32 nbSymbols = 0; U32 n; U32 nextRankStart; void* const dtPtr = DTable + 1; HUFv06_DEltX2* const dt = (HUFv06_DEltX2*)dtPtr; HUFv06_STATIC_ASSERT(sizeof(HUFv06_DEltX2) == sizeof(U16)); /* if compilation fails here, assertion is false */ /* memset(huffWeight, 0, sizeof(huffWeight)); */ /* is not necessary, even though some analyzer complain ... 
*/ iSize = HUFv06_readStats(huffWeight, HUFv06_MAX_SYMBOL_VALUE + 1, rankVal, &nbSymbols, &tableLog, src, srcSize); if (HUFv06_isError(iSize)) return iSize; /* check result */ if (tableLog > DTable[0]) return ERROR(tableLog_tooLarge); /* DTable is too small */ DTable[0] = (U16)tableLog; /* maybe should separate sizeof allocated DTable, from used size of DTable, in case of re-use */ /* Prepare ranks */ nextRankStart = 0; for (n=1; n> 1; U32 i; HUFv06_DEltX2 D; D.byte = (BYTE)n; D.nbBits = (BYTE)(tableLog + 1 - w); for (i = rankVal[w]; i < rankVal[w] + length; i++) dt[i] = D; rankVal[w] += length; } return iSize; } static BYTE HUFv06_decodeSymbolX2(BITv06_DStream_t* Dstream, const HUFv06_DEltX2* dt, const U32 dtLog) { const size_t val = BITv06_lookBitsFast(Dstream, dtLog); /* note : dtLog >= 1 */ const BYTE c = dt[val].byte; BITv06_skipBits(Dstream, dt[val].nbBits); return c; } #define HUFv06_DECODE_SYMBOLX2_0(ptr, DStreamPtr) \ *ptr++ = HUFv06_decodeSymbolX2(DStreamPtr, dt, dtLog) #define HUFv06_DECODE_SYMBOLX2_1(ptr, DStreamPtr) \ if (MEM_64bits() || (HUFv06_MAX_TABLELOG<=12)) \ HUFv06_DECODE_SYMBOLX2_0(ptr, DStreamPtr) #define HUFv06_DECODE_SYMBOLX2_2(ptr, DStreamPtr) \ if (MEM_64bits()) \ HUFv06_DECODE_SYMBOLX2_0(ptr, DStreamPtr) static inline size_t HUFv06_decodeStreamX2(BYTE* p, BITv06_DStream_t* const bitDPtr, BYTE* const pEnd, const HUFv06_DEltX2* const dt, const U32 dtLog) { BYTE* const pStart = p; /* up to 4 symbols at a time */ while ((BITv06_reloadDStream(bitDPtr) == BITv06_DStream_unfinished) && (p <= pEnd-4)) { HUFv06_DECODE_SYMBOLX2_2(p, bitDPtr); HUFv06_DECODE_SYMBOLX2_1(p, bitDPtr); HUFv06_DECODE_SYMBOLX2_2(p, bitDPtr); HUFv06_DECODE_SYMBOLX2_0(p, bitDPtr); } /* closer to the end */ while ((BITv06_reloadDStream(bitDPtr) == BITv06_DStream_unfinished) && (p < pEnd)) HUFv06_DECODE_SYMBOLX2_0(p, bitDPtr); /* no more data to retrieve from bitstream, hence no need to reload */ while (p < pEnd) HUFv06_DECODE_SYMBOLX2_0(p, bitDPtr); return pEnd-pStart; } size_t HUFv06_decompress1X2_usingDTable( void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, const U16* DTable) { BYTE* op = (BYTE*)dst; BYTE* const oend = op + dstSize; const U32 dtLog = DTable[0]; const void* dtPtr = DTable; const HUFv06_DEltX2* const dt = ((const HUFv06_DEltX2*)dtPtr)+1; BITv06_DStream_t bitD; { size_t const errorCode = BITv06_initDStream(&bitD, cSrc, cSrcSize); if (HUFv06_isError(errorCode)) return errorCode; } HUFv06_decodeStreamX2(op, &bitD, oend, dt, dtLog); /* check */ if (!BITv06_endOfDStream(&bitD)) return ERROR(corruption_detected); return dstSize; } size_t HUFv06_decompress1X2 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize) { HUFv06_CREATE_STATIC_DTABLEX2(DTable, HUFv06_MAX_TABLELOG); const BYTE* ip = (const BYTE*) cSrc; size_t const errorCode = HUFv06_readDTableX2 (DTable, cSrc, cSrcSize); if (HUFv06_isError(errorCode)) return errorCode; if (errorCode >= cSrcSize) return ERROR(srcSize_wrong); ip += errorCode; cSrcSize -= errorCode; return HUFv06_decompress1X2_usingDTable (dst, dstSize, ip, cSrcSize, DTable); } size_t HUFv06_decompress4X2_usingDTable( void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, const U16* DTable) { /* Check */ if (cSrcSize < 10) return ERROR(corruption_detected); /* strict minimum : jump table + 1 byte per stream */ { const BYTE* const istart = (const BYTE*) cSrc; BYTE* const ostart = (BYTE*) dst; BYTE* const oend = ostart + dstSize; const void* const dtPtr = DTable; const HUFv06_DEltX2* const dt = ((const HUFv06_DEltX2*)dtPtr) +1; const U32 
dtLog = DTable[0]; size_t errorCode; /* Init */ BITv06_DStream_t bitD1; BITv06_DStream_t bitD2; BITv06_DStream_t bitD3; BITv06_DStream_t bitD4; const size_t length1 = MEM_readLE16(istart); const size_t length2 = MEM_readLE16(istart+2); const size_t length3 = MEM_readLE16(istart+4); size_t length4; const BYTE* const istart1 = istart + 6; /* jumpTable */ const BYTE* const istart2 = istart1 + length1; const BYTE* const istart3 = istart2 + length2; const BYTE* const istart4 = istart3 + length3; const size_t segmentSize = (dstSize+3) / 4; BYTE* const opStart2 = ostart + segmentSize; BYTE* const opStart3 = opStart2 + segmentSize; BYTE* const opStart4 = opStart3 + segmentSize; BYTE* op1 = ostart; BYTE* op2 = opStart2; BYTE* op3 = opStart3; BYTE* op4 = opStart4; U32 endSignal; length4 = cSrcSize - (length1 + length2 + length3 + 6); if (length4 > cSrcSize) return ERROR(corruption_detected); /* overflow */ errorCode = BITv06_initDStream(&bitD1, istart1, length1); if (HUFv06_isError(errorCode)) return errorCode; errorCode = BITv06_initDStream(&bitD2, istart2, length2); if (HUFv06_isError(errorCode)) return errorCode; errorCode = BITv06_initDStream(&bitD3, istart3, length3); if (HUFv06_isError(errorCode)) return errorCode; errorCode = BITv06_initDStream(&bitD4, istart4, length4); if (HUFv06_isError(errorCode)) return errorCode; /* 16-32 symbols per loop (4-8 symbols per stream) */ endSignal = BITv06_reloadDStream(&bitD1) | BITv06_reloadDStream(&bitD2) | BITv06_reloadDStream(&bitD3) | BITv06_reloadDStream(&bitD4); for ( ; (endSignal==BITv06_DStream_unfinished) && (op4<(oend-7)) ; ) { HUFv06_DECODE_SYMBOLX2_2(op1, &bitD1); HUFv06_DECODE_SYMBOLX2_2(op2, &bitD2); HUFv06_DECODE_SYMBOLX2_2(op3, &bitD3); HUFv06_DECODE_SYMBOLX2_2(op4, &bitD4); HUFv06_DECODE_SYMBOLX2_1(op1, &bitD1); HUFv06_DECODE_SYMBOLX2_1(op2, &bitD2); HUFv06_DECODE_SYMBOLX2_1(op3, &bitD3); HUFv06_DECODE_SYMBOLX2_1(op4, &bitD4); HUFv06_DECODE_SYMBOLX2_2(op1, &bitD1); HUFv06_DECODE_SYMBOLX2_2(op2, &bitD2); HUFv06_DECODE_SYMBOLX2_2(op3, &bitD3); HUFv06_DECODE_SYMBOLX2_2(op4, &bitD4); HUFv06_DECODE_SYMBOLX2_0(op1, &bitD1); HUFv06_DECODE_SYMBOLX2_0(op2, &bitD2); HUFv06_DECODE_SYMBOLX2_0(op3, &bitD3); HUFv06_DECODE_SYMBOLX2_0(op4, &bitD4); endSignal = BITv06_reloadDStream(&bitD1) | BITv06_reloadDStream(&bitD2) | BITv06_reloadDStream(&bitD3) | BITv06_reloadDStream(&bitD4); } /* check corruption */ if (op1 > opStart2) return ERROR(corruption_detected); if (op2 > opStart3) return ERROR(corruption_detected); if (op3 > opStart4) return ERROR(corruption_detected); /* note : op4 supposed already verified within main loop */ /* finish bitStreams one by one */ HUFv06_decodeStreamX2(op1, &bitD1, opStart2, dt, dtLog); HUFv06_decodeStreamX2(op2, &bitD2, opStart3, dt, dtLog); HUFv06_decodeStreamX2(op3, &bitD3, opStart4, dt, dtLog); HUFv06_decodeStreamX2(op4, &bitD4, oend, dt, dtLog); /* check */ endSignal = BITv06_endOfDStream(&bitD1) & BITv06_endOfDStream(&bitD2) & BITv06_endOfDStream(&bitD3) & BITv06_endOfDStream(&bitD4); if (!endSignal) return ERROR(corruption_detected); /* decoded size */ return dstSize; } } size_t HUFv06_decompress4X2 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize) { HUFv06_CREATE_STATIC_DTABLEX2(DTable, HUFv06_MAX_TABLELOG); const BYTE* ip = (const BYTE*) cSrc; size_t const errorCode = HUFv06_readDTableX2 (DTable, cSrc, cSrcSize); if (HUFv06_isError(errorCode)) return errorCode; if (errorCode >= cSrcSize) return ERROR(srcSize_wrong); ip += errorCode; cSrcSize -= errorCode; return HUFv06_decompress4X2_usingDTable (dst, 
dstSize, ip, cSrcSize, DTable); } /* *************************/ /* double-symbols decoding */ /* *************************/ static void HUFv06_fillDTableX4Level2(HUFv06_DEltX4* DTable, U32 sizeLog, const U32 consumed, const U32* rankValOrigin, const int minWeight, const sortedSymbol_t* sortedSymbols, const U32 sortedListSize, U32 nbBitsBaseline, U16 baseSeq) { HUFv06_DEltX4 DElt; U32 rankVal[HUFv06_ABSOLUTEMAX_TABLELOG + 1]; /* get pre-calculated rankVal */ memcpy(rankVal, rankValOrigin, sizeof(rankVal)); /* fill skipped values */ if (minWeight>1) { U32 i, skipSize = rankVal[minWeight]; MEM_writeLE16(&(DElt.sequence), baseSeq); DElt.nbBits = (BYTE)(consumed); DElt.length = 1; for (i = 0; i < skipSize; i++) DTable[i] = DElt; } /* fill DTable */ { U32 s; for (s=0; s= 1 */ rankVal[weight] += length; }} } typedef U32 rankVal_t[HUFv06_ABSOLUTEMAX_TABLELOG][HUFv06_ABSOLUTEMAX_TABLELOG + 1]; static void HUFv06_fillDTableX4(HUFv06_DEltX4* DTable, const U32 targetLog, const sortedSymbol_t* sortedList, const U32 sortedListSize, const U32* rankStart, rankVal_t rankValOrigin, const U32 maxWeight, const U32 nbBitsBaseline) { U32 rankVal[HUFv06_ABSOLUTEMAX_TABLELOG + 1]; const int scaleLog = nbBitsBaseline - targetLog; /* note : targetLog >= srcLog, hence scaleLog <= 1 */ const U32 minBits = nbBitsBaseline - maxWeight; U32 s; memcpy(rankVal, rankValOrigin, sizeof(rankVal)); /* fill DTable */ for (s=0; s= minBits) { /* enough room for a second symbol */ U32 sortedRank; int minWeight = nbBits + scaleLog; if (minWeight < 1) minWeight = 1; sortedRank = rankStart[minWeight]; HUFv06_fillDTableX4Level2(DTable+start, targetLog-nbBits, nbBits, rankValOrigin[nbBits], minWeight, sortedList+sortedRank, sortedListSize-sortedRank, nbBitsBaseline, symbol); } else { HUFv06_DEltX4 DElt; MEM_writeLE16(&(DElt.sequence), symbol); DElt.nbBits = (BYTE)(nbBits); DElt.length = 1; { U32 u; const U32 end = start + length; for (u = start; u < end; u++) DTable[u] = DElt; } } rankVal[weight] += length; } } size_t HUFv06_readDTableX4 (U32* DTable, const void* src, size_t srcSize) { BYTE weightList[HUFv06_MAX_SYMBOL_VALUE + 1]; sortedSymbol_t sortedSymbol[HUFv06_MAX_SYMBOL_VALUE + 1]; U32 rankStats[HUFv06_ABSOLUTEMAX_TABLELOG + 1] = { 0 }; U32 rankStart0[HUFv06_ABSOLUTEMAX_TABLELOG + 2] = { 0 }; U32* const rankStart = rankStart0+1; rankVal_t rankVal; U32 tableLog, maxW, sizeOfSort, nbSymbols; const U32 memLog = DTable[0]; size_t iSize; void* dtPtr = DTable; HUFv06_DEltX4* const dt = ((HUFv06_DEltX4*)dtPtr) + 1; HUFv06_STATIC_ASSERT(sizeof(HUFv06_DEltX4) == sizeof(U32)); /* if compilation fails here, assertion is false */ if (memLog > HUFv06_ABSOLUTEMAX_TABLELOG) return ERROR(tableLog_tooLarge); /* memset(weightList, 0, sizeof(weightList)); */ /* is not necessary, even though some analyzer complain ... 
*/ iSize = HUFv06_readStats(weightList, HUFv06_MAX_SYMBOL_VALUE + 1, rankStats, &nbSymbols, &tableLog, src, srcSize); if (HUFv06_isError(iSize)) return iSize; /* check result */ if (tableLog > memLog) return ERROR(tableLog_tooLarge); /* DTable can't fit code depth */ /* find maxWeight */ for (maxW = tableLog; rankStats[maxW]==0; maxW--) {} /* necessarily finds a solution before 0 */ /* Get start index of each weight */ { U32 w, nextRankStart = 0; for (w=1; w> consumed; } } } } HUFv06_fillDTableX4(dt, memLog, sortedSymbol, sizeOfSort, rankStart0, rankVal, maxW, tableLog+1); return iSize; } static U32 HUFv06_decodeSymbolX4(void* op, BITv06_DStream_t* DStream, const HUFv06_DEltX4* dt, const U32 dtLog) { const size_t val = BITv06_lookBitsFast(DStream, dtLog); /* note : dtLog >= 1 */ memcpy(op, dt+val, 2); BITv06_skipBits(DStream, dt[val].nbBits); return dt[val].length; } static U32 HUFv06_decodeLastSymbolX4(void* op, BITv06_DStream_t* DStream, const HUFv06_DEltX4* dt, const U32 dtLog) { const size_t val = BITv06_lookBitsFast(DStream, dtLog); /* note : dtLog >= 1 */ memcpy(op, dt+val, 1); if (dt[val].length==1) BITv06_skipBits(DStream, dt[val].nbBits); else { if (DStream->bitsConsumed < (sizeof(DStream->bitContainer)*8)) { BITv06_skipBits(DStream, dt[val].nbBits); if (DStream->bitsConsumed > (sizeof(DStream->bitContainer)*8)) DStream->bitsConsumed = (sizeof(DStream->bitContainer)*8); /* ugly hack; works only because it's the last symbol. Note : can't easily extract nbBits from just this symbol */ } } return 1; } #define HUFv06_DECODE_SYMBOLX4_0(ptr, DStreamPtr) \ ptr += HUFv06_decodeSymbolX4(ptr, DStreamPtr, dt, dtLog) #define HUFv06_DECODE_SYMBOLX4_1(ptr, DStreamPtr) \ if (MEM_64bits() || (HUFv06_MAX_TABLELOG<=12)) \ ptr += HUFv06_decodeSymbolX4(ptr, DStreamPtr, dt, dtLog) #define HUFv06_DECODE_SYMBOLX4_2(ptr, DStreamPtr) \ if (MEM_64bits()) \ ptr += HUFv06_decodeSymbolX4(ptr, DStreamPtr, dt, dtLog) static inline size_t HUFv06_decodeStreamX4(BYTE* p, BITv06_DStream_t* bitDPtr, BYTE* const pEnd, const HUFv06_DEltX4* const dt, const U32 dtLog) { BYTE* const pStart = p; /* up to 8 symbols at a time */ while ((BITv06_reloadDStream(bitDPtr) == BITv06_DStream_unfinished) && (p < pEnd-7)) { HUFv06_DECODE_SYMBOLX4_2(p, bitDPtr); HUFv06_DECODE_SYMBOLX4_1(p, bitDPtr); HUFv06_DECODE_SYMBOLX4_2(p, bitDPtr); HUFv06_DECODE_SYMBOLX4_0(p, bitDPtr); } /* closer to the end */ while ((BITv06_reloadDStream(bitDPtr) == BITv06_DStream_unfinished) && (p <= pEnd-2)) HUFv06_DECODE_SYMBOLX4_0(p, bitDPtr); while (p <= pEnd-2) HUFv06_DECODE_SYMBOLX4_0(p, bitDPtr); /* no need to reload : reached the end of DStream */ if (p < pEnd) p += HUFv06_decodeLastSymbolX4(p, bitDPtr, dt, dtLog); return p-pStart; } size_t HUFv06_decompress1X4_usingDTable( void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, const U32* DTable) { const BYTE* const istart = (const BYTE*) cSrc; BYTE* const ostart = (BYTE*) dst; BYTE* const oend = ostart + dstSize; const U32 dtLog = DTable[0]; const void* const dtPtr = DTable; const HUFv06_DEltX4* const dt = ((const HUFv06_DEltX4*)dtPtr) +1; /* Init */ BITv06_DStream_t bitD; { size_t const errorCode = BITv06_initDStream(&bitD, istart, cSrcSize); if (HUFv06_isError(errorCode)) return errorCode; } /* decode */ HUFv06_decodeStreamX4(ostart, &bitD, oend, dt, dtLog); /* check */ if (!BITv06_endOfDStream(&bitD)) return ERROR(corruption_detected); /* decoded size */ return dstSize; } size_t HUFv06_decompress1X4 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize) { 
HUFv06_CREATE_STATIC_DTABLEX4(DTable, HUFv06_MAX_TABLELOG); const BYTE* ip = (const BYTE*) cSrc; size_t const hSize = HUFv06_readDTableX4 (DTable, cSrc, cSrcSize); if (HUFv06_isError(hSize)) return hSize; if (hSize >= cSrcSize) return ERROR(srcSize_wrong); ip += hSize; cSrcSize -= hSize; return HUFv06_decompress1X4_usingDTable (dst, dstSize, ip, cSrcSize, DTable); } size_t HUFv06_decompress4X4_usingDTable( void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, const U32* DTable) { if (cSrcSize < 10) return ERROR(corruption_detected); /* strict minimum : jump table + 1 byte per stream */ { const BYTE* const istart = (const BYTE*) cSrc; BYTE* const ostart = (BYTE*) dst; BYTE* const oend = ostart + dstSize; const void* const dtPtr = DTable; const HUFv06_DEltX4* const dt = ((const HUFv06_DEltX4*)dtPtr) +1; const U32 dtLog = DTable[0]; size_t errorCode; /* Init */ BITv06_DStream_t bitD1; BITv06_DStream_t bitD2; BITv06_DStream_t bitD3; BITv06_DStream_t bitD4; const size_t length1 = MEM_readLE16(istart); const size_t length2 = MEM_readLE16(istart+2); const size_t length3 = MEM_readLE16(istart+4); size_t length4; const BYTE* const istart1 = istart + 6; /* jumpTable */ const BYTE* const istart2 = istart1 + length1; const BYTE* const istart3 = istart2 + length2; const BYTE* const istart4 = istart3 + length3; const size_t segmentSize = (dstSize+3) / 4; BYTE* const opStart2 = ostart + segmentSize; BYTE* const opStart3 = opStart2 + segmentSize; BYTE* const opStart4 = opStart3 + segmentSize; BYTE* op1 = ostart; BYTE* op2 = opStart2; BYTE* op3 = opStart3; BYTE* op4 = opStart4; U32 endSignal; length4 = cSrcSize - (length1 + length2 + length3 + 6); if (length4 > cSrcSize) return ERROR(corruption_detected); /* overflow */ errorCode = BITv06_initDStream(&bitD1, istart1, length1); if (HUFv06_isError(errorCode)) return errorCode; errorCode = BITv06_initDStream(&bitD2, istart2, length2); if (HUFv06_isError(errorCode)) return errorCode; errorCode = BITv06_initDStream(&bitD3, istart3, length3); if (HUFv06_isError(errorCode)) return errorCode; errorCode = BITv06_initDStream(&bitD4, istart4, length4); if (HUFv06_isError(errorCode)) return errorCode; /* 16-32 symbols per loop (4-8 symbols per stream) */ endSignal = BITv06_reloadDStream(&bitD1) | BITv06_reloadDStream(&bitD2) | BITv06_reloadDStream(&bitD3) | BITv06_reloadDStream(&bitD4); for ( ; (endSignal==BITv06_DStream_unfinished) && (op4<(oend-7)) ; ) { HUFv06_DECODE_SYMBOLX4_2(op1, &bitD1); HUFv06_DECODE_SYMBOLX4_2(op2, &bitD2); HUFv06_DECODE_SYMBOLX4_2(op3, &bitD3); HUFv06_DECODE_SYMBOLX4_2(op4, &bitD4); HUFv06_DECODE_SYMBOLX4_1(op1, &bitD1); HUFv06_DECODE_SYMBOLX4_1(op2, &bitD2); HUFv06_DECODE_SYMBOLX4_1(op3, &bitD3); HUFv06_DECODE_SYMBOLX4_1(op4, &bitD4); HUFv06_DECODE_SYMBOLX4_2(op1, &bitD1); HUFv06_DECODE_SYMBOLX4_2(op2, &bitD2); HUFv06_DECODE_SYMBOLX4_2(op3, &bitD3); HUFv06_DECODE_SYMBOLX4_2(op4, &bitD4); HUFv06_DECODE_SYMBOLX4_0(op1, &bitD1); HUFv06_DECODE_SYMBOLX4_0(op2, &bitD2); HUFv06_DECODE_SYMBOLX4_0(op3, &bitD3); HUFv06_DECODE_SYMBOLX4_0(op4, &bitD4); endSignal = BITv06_reloadDStream(&bitD1) | BITv06_reloadDStream(&bitD2) | BITv06_reloadDStream(&bitD3) | BITv06_reloadDStream(&bitD4); } /* check corruption */ if (op1 > opStart2) return ERROR(corruption_detected); if (op2 > opStart3) return ERROR(corruption_detected); if (op3 > opStart4) return ERROR(corruption_detected); /* note : op4 supposed already verified within main loop */ /* finish bitStreams one by one */ HUFv06_decodeStreamX4(op1, &bitD1, opStart2, dt, dtLog); 
HUFv06_decodeStreamX4(op2, &bitD2, opStart3, dt, dtLog); HUFv06_decodeStreamX4(op3, &bitD3, opStart4, dt, dtLog); HUFv06_decodeStreamX4(op4, &bitD4, oend, dt, dtLog); /* check */ endSignal = BITv06_endOfDStream(&bitD1) & BITv06_endOfDStream(&bitD2) & BITv06_endOfDStream(&bitD3) & BITv06_endOfDStream(&bitD4); if (!endSignal) return ERROR(corruption_detected); /* decoded size */ return dstSize; } } size_t HUFv06_decompress4X4 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize) { HUFv06_CREATE_STATIC_DTABLEX4(DTable, HUFv06_MAX_TABLELOG); const BYTE* ip = (const BYTE*) cSrc; size_t hSize = HUFv06_readDTableX4 (DTable, cSrc, cSrcSize); if (HUFv06_isError(hSize)) return hSize; if (hSize >= cSrcSize) return ERROR(srcSize_wrong); ip += hSize; cSrcSize -= hSize; return HUFv06_decompress4X4_usingDTable (dst, dstSize, ip, cSrcSize, DTable); } /* ********************************/ /* Generic decompression selector */ /* ********************************/ typedef struct { U32 tableTime; U32 decode256Time; } algo_time_t; static const algo_time_t algoTime[16 /* Quantization */][3 /* single, double, quad */] = { /* single, double, quad */ {{0,0}, {1,1}, {2,2}}, /* Q==0 : impossible */ {{0,0}, {1,1}, {2,2}}, /* Q==1 : impossible */ {{ 38,130}, {1313, 74}, {2151, 38}}, /* Q == 2 : 12-18% */ {{ 448,128}, {1353, 74}, {2238, 41}}, /* Q == 3 : 18-25% */ {{ 556,128}, {1353, 74}, {2238, 47}}, /* Q == 4 : 25-32% */ {{ 714,128}, {1418, 74}, {2436, 53}}, /* Q == 5 : 32-38% */ {{ 883,128}, {1437, 74}, {2464, 61}}, /* Q == 6 : 38-44% */ {{ 897,128}, {1515, 75}, {2622, 68}}, /* Q == 7 : 44-50% */ {{ 926,128}, {1613, 75}, {2730, 75}}, /* Q == 8 : 50-56% */ {{ 947,128}, {1729, 77}, {3359, 77}}, /* Q == 9 : 56-62% */ {{1107,128}, {2083, 81}, {4006, 84}}, /* Q ==10 : 62-69% */ {{1177,128}, {2379, 87}, {4785, 88}}, /* Q ==11 : 69-75% */ {{1242,128}, {2415, 93}, {5155, 84}}, /* Q ==12 : 75-81% */ {{1349,128}, {2644,106}, {5260,106}}, /* Q ==13 : 81-87% */ {{1455,128}, {2422,124}, {4174,124}}, /* Q ==14 : 87-93% */ {{ 722,128}, {1891,145}, {1936,146}}, /* Q ==15 : 93-99% */ }; typedef size_t (*decompressionAlgo)(void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); size_t HUFv06_decompress (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize) { static const decompressionAlgo decompress[3] = { HUFv06_decompress4X2, HUFv06_decompress4X4, NULL }; U32 Dtime[3]; /* decompression time estimation */ /* validation checks */ if (dstSize == 0) return ERROR(dstSize_tooSmall); if (cSrcSize > dstSize) return ERROR(corruption_detected); /* invalid */ if (cSrcSize == dstSize) { memcpy(dst, cSrc, dstSize); return dstSize; } /* not compressed */ if (cSrcSize == 1) { memset(dst, *(const BYTE*)cSrc, dstSize); return dstSize; } /* RLE */ /* decoder timing evaluation */ { U32 const Q = (U32)(cSrcSize * 16 / dstSize); /* Q < 16 since dstSize > cSrcSize */ U32 const D256 = (U32)(dstSize >> 8); U32 n; for (n=0; n<3; n++) Dtime[n] = algoTime[Q][n].tableTime + (algoTime[Q][n].decode256Time * D256); } Dtime[1] += Dtime[1] >> 4; Dtime[2] += Dtime[2] >> 3; /* advantage to algorithms using less memory, for cache eviction */ { U32 algoNb = 0; if (Dtime[1] < Dtime[0]) algoNb = 1; /* if (Dtime[2] < Dtime[algoNb]) algoNb = 2; */ /* current speed of HUFv06_decompress4X6 is not good */ return decompress[algoNb](dst, dstSize, cSrc, cSrcSize); } /* return HUFv06_decompress4X2(dst, dstSize, cSrc, cSrcSize); */ /* multi-streams single-symbol decoding */ /* return HUFv06_decompress4X4(dst, dstSize, cSrc, cSrcSize); */ /* multi-streams 
double-symbols decoding */ /* return HUFv06_decompress4X6(dst, dstSize, cSrc, cSrcSize); */ /* multi-streams quad-symbols decoding */ } /* Common functions of Zstd compression library Copyright (C) 2015-2016, Yann Collet. BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. You can contact the author at : - zstd homepage : http://www.zstd.net/ */ /*-**************************************** * Version ******************************************/ /*-**************************************** * ZSTD Error Management ******************************************/ /*! ZSTDv06_isError() : * tells if a return value is an error code */ unsigned ZSTDv06_isError(size_t code) { return ERR_isError(code); } /*! ZSTDv06_getErrorName() : * provides error code string from function result (useful for debugging) */ const char* ZSTDv06_getErrorName(size_t code) { return ERR_getErrorName(code); } /* ************************************************************** * ZBUFF Error Management ****************************************************************/ unsigned ZBUFFv06_isError(size_t errorCode) { return ERR_isError(errorCode); } const char* ZBUFFv06_getErrorName(size_t errorCode) { return ERR_getErrorName(errorCode); } /* zstd - standard compression library Copyright (C) 2014-2016, Yann Collet. BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. You can contact the author at : - zstd homepage : http://www.zstd.net */ /* *************************************************************** * Tuning parameters *****************************************************************/ /*! * HEAPMODE : * Select how default decompression function ZSTDv06_decompress() will allocate memory, * in memory stack (0), or in memory heap (1, requires malloc()) */ #ifndef ZSTDv06_HEAPMODE # define ZSTDv06_HEAPMODE 1 #endif /*-******************************************************* * Compiler specifics *********************************************************/ #ifdef _MSC_VER /* Visual Studio */ # include <intrin.h> /* For Visual 2005 */ # pragma warning(disable : 4127) /* disable: C4127: conditional expression is constant */ # pragma warning(disable : 4324) /* disable: C4324: padded structure */ #endif /*-************************************* * Macros ***************************************/ #define ZSTDv06_isError ERR_isError /* for inlining */ #define FSEv06_isError ERR_isError #define HUFv06_isError ERR_isError /*_******************************************************* * Memory operations **********************************************************/ static void ZSTDv06_copy4(void* dst, const void* src) { memcpy(dst, src, 4); } /*-************************************************************* * Context management ***************************************************************/ typedef enum { ZSTDds_getFrameHeaderSize, ZSTDds_decodeFrameHeader, ZSTDds_decodeBlockHeader, ZSTDds_decompressBlock } ZSTDv06_dStage; struct ZSTDv06_DCtx_s { FSEv06_DTable LLTable[FSEv06_DTABLE_SIZE_U32(LLFSELog)]; FSEv06_DTable OffTable[FSEv06_DTABLE_SIZE_U32(OffFSELog)]; FSEv06_DTable MLTable[FSEv06_DTABLE_SIZE_U32(MLFSELog)]; unsigned hufTableX4[HUFv06_DTABLE_SIZE(HufLog)]; const void* previousDstEnd; const void* base; const void* vBase; const void* dictEnd; size_t expected; size_t headerSize; ZSTDv06_frameParams fParams; blockType_t bType; /* used in ZSTDv06_decompressContinue(), to transfer blockType between header decoding and block decoding stages */ ZSTDv06_dStage stage; U32 flagRepeatTable; const BYTE* litPtr; size_t litSize; BYTE litBuffer[ZSTDv06_BLOCKSIZE_MAX + WILDCOPY_OVERLENGTH]; BYTE headerBuffer[ZSTDv06_FRAMEHEADERSIZE_MAX]; }; /* typedef'd to ZSTDv06_DCtx within "zstd_static.h" */ size_t ZSTDv06_sizeofDCtx (void); /* Hidden declaration */ size_t ZSTDv06_sizeofDCtx (void) { return sizeof(ZSTDv06_DCtx); } size_t ZSTDv06_decompressBegin(ZSTDv06_DCtx* dctx) { dctx->expected = ZSTDv06_frameHeaderSize_min; dctx->stage = ZSTDds_getFrameHeaderSize; dctx->previousDstEnd = NULL; dctx->base = NULL; dctx->vBase = NULL; dctx->dictEnd = NULL; dctx->hufTableX4[0] = HufLog; dctx->flagRepeatTable = 0; return 0; } ZSTDv06_DCtx* ZSTDv06_createDCtx(void) { ZSTDv06_DCtx* dctx = (ZSTDv06_DCtx*)malloc(sizeof(ZSTDv06_DCtx)); if (dctx==NULL) return NULL; ZSTDv06_decompressBegin(dctx); return dctx; } size_t ZSTDv06_freeDCtx(ZSTDv06_DCtx* dctx) { free(dctx); return 0; /* reserved as a potential error
code in the future */ } void ZSTDv06_copyDCtx(ZSTDv06_DCtx* dstDCtx, const ZSTDv06_DCtx* srcDCtx) { memcpy(dstDCtx, srcDCtx, sizeof(ZSTDv06_DCtx) - (ZSTDv06_BLOCKSIZE_MAX+WILDCOPY_OVERLENGTH + ZSTDv06_frameHeaderSize_max)); /* no need to copy workspace */ } /*-************************************************************* * Decompression section ***************************************************************/ /* Frame format description Frame Header - [ Block Header - Block ] - Frame End 1) Frame Header - 4 bytes - Magic Number : ZSTDv06_MAGICNUMBER (defined within zstd_static.h) - 1 byte - Frame Descriptor 2) Block Header - 3 bytes, starting with a 2-bits descriptor Uncompressed, Compressed, Frame End, unused 3) Block See Block Format Description 4) Frame End - 3 bytes, compatible with Block Header */ /* Frame descriptor 1 byte, using : bit 0-3 : windowLog - ZSTDv06_WINDOWLOG_ABSOLUTEMIN (see zstd_internal.h) bit 4 : minmatch 4(0) or 3(1) bit 5 : reserved (must be zero) bit 6-7 : Frame content size : unknown, 1 byte, 2 bytes, 8 bytes Optional : content size (0, 1, 2 or 8 bytes) 0 : unknown 1 : 0-255 bytes 2 : 256 - 65535+256 8 : up to 16 exa */ /* Compressed Block, format description Block = Literal Section - Sequences Section Prerequisite : size of (compressed) block, maximum size of regenerated data 1) Literal Section 1.1) Header : 1-5 bytes flags: 2 bits 00 compressed by Huff0 01 unused 10 is Raw (uncompressed) 11 is Rle Note : using 01 => Huff0 with precomputed table ? Note : delta map ? => compressed ? 1.1.1) Huff0-compressed literal block : 3-5 bytes srcSize < 1 KB => 3 bytes (2-2-10-10) => single stream srcSize < 1 KB => 3 bytes (2-2-10-10) srcSize < 16KB => 4 bytes (2-2-14-14) else => 5 bytes (2-2-18-18) big endian convention 1.1.2) Raw (uncompressed) literal block header : 1-3 bytes size : 5 bits: (IS_RAW<<6) + (0<<4) + size 12 bits: (IS_RAW<<6) + (2<<4) + (size>>8) size&255 20 bits: (IS_RAW<<6) + (3<<4) + (size>>16) size>>8&255 size&255 1.1.3) Rle (repeated single byte) literal block header : 1-3 bytes size : 5 bits: (IS_RLE<<6) + (0<<4) + size 12 bits: (IS_RLE<<6) + (2<<4) + (size>>8) size&255 20 bits: (IS_RLE<<6) + (3<<4) + (size>>16) size>>8&255 size&255 1.1.4) Huff0-compressed literal block, using precomputed CTables : 3-5 bytes srcSize < 1 KB => 3 bytes (2-2-10-10) => single stream srcSize < 1 KB => 3 bytes (2-2-10-10) srcSize < 16KB => 4 bytes (2-2-14-14) else => 5 bytes (2-2-18-18) big endian convention 1- CTable available (stored into workspace ?) 2- Small input (fast heuristic ? Full comparison ? depend on clevel ?) 1.2) Literal block content 1.2.1) Huff0 block, using sizes from header See Huff0 format 1.2.2) Huff0 block, using prepared table 1.2.3) Raw content 1.2.4) single byte 2) Sequences section TO DO */ /** ZSTDv06_frameHeaderSize() : * srcSize must be >= ZSTDv06_frameHeaderSize_min. * @return : size of the Frame Header */ static size_t ZSTDv06_frameHeaderSize(const void* src, size_t srcSize) { if (srcSize < ZSTDv06_frameHeaderSize_min) return ERROR(srcSize_wrong); { U32 const fcsId = (((const BYTE*)src)[4]) >> 6; return ZSTDv06_frameHeaderSize_min + ZSTDv06_fcs_fieldSize[fcsId]; } } /** ZSTDv06_getFrameParams() : * decode Frame Header, or provide expected `srcSize`. 
* @return : 0, `fparamsPtr` is correctly filled, * >0, `srcSize` is too small, result is expected `srcSize`, * or an error code, which can be tested using ZSTDv06_isError() */ size_t ZSTDv06_getFrameParams(ZSTDv06_frameParams* fparamsPtr, const void* src, size_t srcSize) { const BYTE* ip = (const BYTE*)src; if (srcSize < ZSTDv06_frameHeaderSize_min) return ZSTDv06_frameHeaderSize_min; if (MEM_readLE32(src) != ZSTDv06_MAGICNUMBER) return ERROR(prefix_unknown); /* ensure there is enough `srcSize` to fully read/decode frame header */ { size_t const fhsize = ZSTDv06_frameHeaderSize(src, srcSize); if (srcSize < fhsize) return fhsize; } memset(fparamsPtr, 0, sizeof(*fparamsPtr)); { BYTE const frameDesc = ip[4]; fparamsPtr->windowLog = (frameDesc & 0xF) + ZSTDv06_WINDOWLOG_ABSOLUTEMIN; if ((frameDesc & 0x20) != 0) return ERROR(frameParameter_unsupported); /* reserved 1 bit */ switch(frameDesc >> 6) /* fcsId */ { default: /* impossible */ case 0 : fparamsPtr->frameContentSize = 0; break; case 1 : fparamsPtr->frameContentSize = ip[5]; break; case 2 : fparamsPtr->frameContentSize = MEM_readLE16(ip+5)+256; break; case 3 : fparamsPtr->frameContentSize = MEM_readLE64(ip+5); break; } } return 0; } /** ZSTDv06_decodeFrameHeader() : * `srcSize` must be the size provided by ZSTDv06_frameHeaderSize(). * @return : 0 if success, or an error code, which can be tested using ZSTDv06_isError() */ static size_t ZSTDv06_decodeFrameHeader(ZSTDv06_DCtx* zc, const void* src, size_t srcSize) { size_t const result = ZSTDv06_getFrameParams(&(zc->fParams), src, srcSize); if ((MEM_32bits()) && (zc->fParams.windowLog > 25)) return ERROR(frameParameter_unsupported); return result; } typedef struct { blockType_t blockType; U32 origSize; } blockProperties_t; /*! ZSTDv06_getcBlockSize() : * Provides the size of compressed block from block header `src` */ static size_t ZSTDv06_getcBlockSize(const void* src, size_t srcSize, blockProperties_t* bpPtr) { const BYTE* const in = (const BYTE* const)src; U32 cSize; if (srcSize < ZSTDv06_blockHeaderSize) return ERROR(srcSize_wrong); bpPtr->blockType = (blockType_t)((*in) >> 6); cSize = in[2] + (in[1]<<8) + ((in[0] & 7)<<16); bpPtr->origSize = (bpPtr->blockType == bt_rle) ? cSize : 0; if (bpPtr->blockType == bt_end) return 0; if (bpPtr->blockType == bt_rle) return 1; return cSize; } static size_t ZSTDv06_copyRawBlock(void* dst, size_t dstCapacity, const void* src, size_t srcSize) { if (dst==NULL) return ERROR(dstSize_tooSmall); if (srcSize > dstCapacity) return ERROR(dstSize_tooSmall); memcpy(dst, src, srcSize); return srcSize; } /*! 
ZSTDv06_decodeLiteralsBlock() : @return : nb of bytes read from src (< srcSize ) */ static size_t ZSTDv06_decodeLiteralsBlock(ZSTDv06_DCtx* dctx, const void* src, size_t srcSize) /* note : srcSize < BLOCKSIZE */ { const BYTE* const istart = (const BYTE*) src; /* any compressed block with literals segment must be at least this size */ if (srcSize < MIN_CBLOCK_SIZE) return ERROR(corruption_detected); switch(istart[0]>> 6) { case IS_HUF: { size_t litSize, litCSize, singleStream=0; U32 lhSize = ((istart[0]) >> 4) & 3; if (srcSize < 5) return ERROR(corruption_detected); /* srcSize >= MIN_CBLOCK_SIZE == 3; here we need up to 5 for lhSize, + cSize (+nbSeq) */ switch(lhSize) { case 0: case 1: default: /* note : default is impossible, since lhSize into [0..3] */ /* 2 - 2 - 10 - 10 */ lhSize=3; singleStream = istart[0] & 16; litSize = ((istart[0] & 15) << 6) + (istart[1] >> 2); litCSize = ((istart[1] & 3) << 8) + istart[2]; break; case 2: /* 2 - 2 - 14 - 14 */ lhSize=4; litSize = ((istart[0] & 15) << 10) + (istart[1] << 2) + (istart[2] >> 6); litCSize = ((istart[2] & 63) << 8) + istart[3]; break; case 3: /* 2 - 2 - 18 - 18 */ lhSize=5; litSize = ((istart[0] & 15) << 14) + (istart[1] << 6) + (istart[2] >> 2); litCSize = ((istart[2] & 3) << 16) + (istart[3] << 8) + istart[4]; break; } if (litSize > ZSTDv06_BLOCKSIZE_MAX) return ERROR(corruption_detected); if (litCSize + lhSize > srcSize) return ERROR(corruption_detected); if (HUFv06_isError(singleStream ? HUFv06_decompress1X2(dctx->litBuffer, litSize, istart+lhSize, litCSize) : HUFv06_decompress (dctx->litBuffer, litSize, istart+lhSize, litCSize) )) return ERROR(corruption_detected); dctx->litPtr = dctx->litBuffer; dctx->litSize = litSize; memset(dctx->litBuffer + dctx->litSize, 0, WILDCOPY_OVERLENGTH); return litCSize + lhSize; } case IS_PCH: { size_t litSize, litCSize; U32 lhSize = ((istart[0]) >> 4) & 3; if (lhSize != 1) /* only case supported for now : small litSize, single stream */ return ERROR(corruption_detected); if (!dctx->flagRepeatTable) return ERROR(dictionary_corrupted); /* 2 - 2 - 10 - 10 */ lhSize=3; litSize = ((istart[0] & 15) << 6) + (istart[1] >> 2); litCSize = ((istart[1] & 3) << 8) + istart[2]; if (litCSize + lhSize > srcSize) return ERROR(corruption_detected); { size_t const errorCode = HUFv06_decompress1X4_usingDTable(dctx->litBuffer, litSize, istart+lhSize, litCSize, dctx->hufTableX4); if (HUFv06_isError(errorCode)) return ERROR(corruption_detected); } dctx->litPtr = dctx->litBuffer; dctx->litSize = litSize; memset(dctx->litBuffer + dctx->litSize, 0, WILDCOPY_OVERLENGTH); return litCSize + lhSize; } case IS_RAW: { size_t litSize; U32 lhSize = ((istart[0]) >> 4) & 3; switch(lhSize) { case 0: case 1: default: /* note : default is impossible, since lhSize into [0..3] */ lhSize=1; litSize = istart[0] & 31; break; case 2: litSize = ((istart[0] & 15) << 8) + istart[1]; break; case 3: litSize = ((istart[0] & 15) << 16) + (istart[1] << 8) + istart[2]; break; } if (lhSize+litSize+WILDCOPY_OVERLENGTH > srcSize) { /* risk reading beyond src buffer with wildcopy */ if (litSize+lhSize > srcSize) return ERROR(corruption_detected); memcpy(dctx->litBuffer, istart+lhSize, litSize); dctx->litPtr = dctx->litBuffer; dctx->litSize = litSize; memset(dctx->litBuffer + dctx->litSize, 0, WILDCOPY_OVERLENGTH); return lhSize+litSize; } /* direct reference into compressed stream */ dctx->litPtr = istart+lhSize; dctx->litSize = litSize; return lhSize+litSize; } case IS_RLE: { size_t litSize; U32 lhSize = ((istart[0]) >> 4) & 3; switch(lhSize) { case 0: 
case 1: default: /* note : default is impossible, since lhSize into [0..3] */ lhSize = 1; litSize = istart[0] & 31; break; case 2: litSize = ((istart[0] & 15) << 8) + istart[1]; break; case 3: litSize = ((istart[0] & 15) << 16) + (istart[1] << 8) + istart[2]; if (srcSize<4) return ERROR(corruption_detected); /* srcSize >= MIN_CBLOCK_SIZE == 3; here we need lhSize+1 = 4 */ break; } if (litSize > ZSTDv06_BLOCKSIZE_MAX) return ERROR(corruption_detected); memset(dctx->litBuffer, istart[lhSize], litSize + WILDCOPY_OVERLENGTH); dctx->litPtr = dctx->litBuffer; dctx->litSize = litSize; return lhSize+1; } default: return ERROR(corruption_detected); /* impossible */ } } /*! ZSTDv06_buildSeqTable() : @return : nb bytes read from src, or an error code if it fails, testable with ZSTDv06_isError() */ static size_t ZSTDv06_buildSeqTable(FSEv06_DTable* DTable, U32 type, U32 max, U32 maxLog, const void* src, size_t srcSize, const S16* defaultNorm, U32 defaultLog, U32 flagRepeatTable) { switch(type) { case FSEv06_ENCODING_RLE : if (!srcSize) return ERROR(srcSize_wrong); if ( (*(const BYTE*)src) > max) return ERROR(corruption_detected); FSEv06_buildDTable_rle(DTable, *(const BYTE*)src); /* if *src > max, data is corrupted */ return 1; case FSEv06_ENCODING_RAW : FSEv06_buildDTable(DTable, defaultNorm, max, defaultLog); return 0; case FSEv06_ENCODING_STATIC: if (!flagRepeatTable) return ERROR(corruption_detected); return 0; default : /* impossible */ case FSEv06_ENCODING_DYNAMIC : { U32 tableLog; S16 norm[MaxSeq+1]; size_t const headerSize = FSEv06_readNCount(norm, &max, &tableLog, src, srcSize); if (FSEv06_isError(headerSize)) return ERROR(corruption_detected); if (tableLog > maxLog) return ERROR(corruption_detected); FSEv06_buildDTable(DTable, norm, max, tableLog); return headerSize; } } } static size_t ZSTDv06_decodeSeqHeaders(int* nbSeqPtr, FSEv06_DTable* DTableLL, FSEv06_DTable* DTableML, FSEv06_DTable* DTableOffb, U32 flagRepeatTable, const void* src, size_t srcSize) { const BYTE* const istart = (const BYTE* const)src; const BYTE* const iend = istart + srcSize; const BYTE* ip = istart; /* check */ if (srcSize < MIN_SEQUENCES_SIZE) return ERROR(srcSize_wrong); /* SeqHead */ { int nbSeq = *ip++; if (!nbSeq) { *nbSeqPtr=0; return 1; } if (nbSeq > 0x7F) { if (nbSeq == 0xFF) { if (ip+2 > iend) return ERROR(srcSize_wrong); nbSeq = MEM_readLE16(ip) + LONGNBSEQ, ip+=2; } else { if (ip >= iend) return ERROR(srcSize_wrong); nbSeq = ((nbSeq-0x80)<<8) + *ip++; } } *nbSeqPtr = nbSeq; } /* FSE table descriptors */ if (ip + 4 > iend) return ERROR(srcSize_wrong); /* min : header byte + all 3 are "raw", hence no header, but at least xxLog bits per type */ { U32 const LLtype = *ip >> 6; U32 const Offtype = (*ip >> 4) & 3; U32 const MLtype = (*ip >> 2) & 3; ip++; /* Build DTables */ { size_t const bhSize = ZSTDv06_buildSeqTable(DTableLL, LLtype, MaxLL, LLFSELog, ip, iend-ip, LL_defaultNorm, LL_defaultNormLog, flagRepeatTable); if (ZSTDv06_isError(bhSize)) return ERROR(corruption_detected); ip += bhSize; } { size_t const bhSize = ZSTDv06_buildSeqTable(DTableOffb, Offtype, MaxOff, OffFSELog, ip, iend-ip, OF_defaultNorm, OF_defaultNormLog, flagRepeatTable); if (ZSTDv06_isError(bhSize)) return ERROR(corruption_detected); ip += bhSize; } { size_t const bhSize = ZSTDv06_buildSeqTable(DTableML, MLtype, MaxML, MLFSELog, ip, iend-ip, ML_defaultNorm, ML_defaultNormLog, flagRepeatTable); if (ZSTDv06_isError(bhSize)) return ERROR(corruption_detected); ip += bhSize; } } return ip-istart; } typedef struct { size_t litLength; size_t 
matchLength; size_t offset; } seq_t; typedef struct { BITv06_DStream_t DStream; FSEv06_DState_t stateLL; FSEv06_DState_t stateOffb; FSEv06_DState_t stateML; size_t prevOffset[ZSTDv06_REP_INIT]; } seqState_t; static void ZSTDv06_decodeSequence(seq_t* seq, seqState_t* seqState) { /* Literal length */ U32 const llCode = FSEv06_peekSymbol(&(seqState->stateLL)); U32 const mlCode = FSEv06_peekSymbol(&(seqState->stateML)); U32 const ofCode = FSEv06_peekSymbol(&(seqState->stateOffb)); /* <= maxOff, by table construction */ U32 const llBits = LL_bits[llCode]; U32 const mlBits = ML_bits[mlCode]; U32 const ofBits = ofCode; U32 const totalBits = llBits+mlBits+ofBits; static const U32 LL_base[MaxLL+1] = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 18, 20, 22, 24, 28, 32, 40, 48, 64, 0x80, 0x100, 0x200, 0x400, 0x800, 0x1000, 0x2000, 0x4000, 0x8000, 0x10000 }; static const U32 ML_base[MaxML+1] = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 34, 36, 38, 40, 44, 48, 56, 64, 80, 96, 0x80, 0x100, 0x200, 0x400, 0x800, 0x1000, 0x2000, 0x4000, 0x8000, 0x10000 }; static const U32 OF_base[MaxOff+1] = { 0, 1, 3, 7, 0xF, 0x1F, 0x3F, 0x7F, 0xFF, 0x1FF, 0x3FF, 0x7FF, 0xFFF, 0x1FFF, 0x3FFF, 0x7FFF, 0xFFFF, 0x1FFFF, 0x3FFFF, 0x7FFFF, 0xFFFFF, 0x1FFFFF, 0x3FFFFF, 0x7FFFFF, 0xFFFFFF, 0x1FFFFFF, 0x3FFFFFF, /*fake*/ 1, 1 }; /* sequence */ { size_t offset; if (!ofCode) offset = 0; else { offset = OF_base[ofCode] + BITv06_readBits(&(seqState->DStream), ofBits); /* <= 26 bits */ if (MEM_32bits()) BITv06_reloadDStream(&(seqState->DStream)); } if (offset < ZSTDv06_REP_NUM) { if (llCode == 0 && offset <= 1) offset = 1-offset; if (offset != 0) { size_t temp = seqState->prevOffset[offset]; if (offset != 1) { seqState->prevOffset[2] = seqState->prevOffset[1]; } seqState->prevOffset[1] = seqState->prevOffset[0]; seqState->prevOffset[0] = offset = temp; } else { offset = seqState->prevOffset[0]; } } else { offset -= ZSTDv06_REP_MOVE; seqState->prevOffset[2] = seqState->prevOffset[1]; seqState->prevOffset[1] = seqState->prevOffset[0]; seqState->prevOffset[0] = offset; } seq->offset = offset; } seq->matchLength = ML_base[mlCode] + MINMATCH + ((mlCode>31) ? BITv06_readBits(&(seqState->DStream), mlBits) : 0); /* <= 16 bits */ if (MEM_32bits() && (mlBits+llBits>24)) BITv06_reloadDStream(&(seqState->DStream)); seq->litLength = LL_base[llCode] + ((llCode>15) ? 
BITv06_readBits(&(seqState->DStream), llBits) : 0); /* <= 16 bits */ if (MEM_32bits() || (totalBits > 64 - 7 - (LLFSELog+MLFSELog+OffFSELog)) ) BITv06_reloadDStream(&(seqState->DStream)); /* ANS state update */ FSEv06_updateState(&(seqState->stateLL), &(seqState->DStream)); /* <= 9 bits */ FSEv06_updateState(&(seqState->stateML), &(seqState->DStream)); /* <= 9 bits */ if (MEM_32bits()) BITv06_reloadDStream(&(seqState->DStream)); /* <= 18 bits */ FSEv06_updateState(&(seqState->stateOffb), &(seqState->DStream)); /* <= 8 bits */ } static size_t ZSTDv06_execSequence(BYTE* op, BYTE* const oend, seq_t sequence, const BYTE** litPtr, const BYTE* const litLimit, const BYTE* const base, const BYTE* const vBase, const BYTE* const dictEnd) { BYTE* const oLitEnd = op + sequence.litLength; size_t const sequenceLength = sequence.litLength + sequence.matchLength; BYTE* const oMatchEnd = op + sequenceLength; /* risk : address space overflow (32-bits) */ BYTE* const oend_8 = oend-8; const BYTE* const iLitEnd = *litPtr + sequence.litLength; const BYTE* match = oLitEnd - sequence.offset; /* check */ if (oLitEnd > oend_8) return ERROR(dstSize_tooSmall); /* last match must start at a minimum distance of 8 from oend */ if (oMatchEnd > oend) return ERROR(dstSize_tooSmall); /* overwrite beyond dst buffer */ if (iLitEnd > litLimit) return ERROR(corruption_detected); /* over-read beyond lit buffer */ /* copy Literals */ ZSTDv06_wildcopy(op, *litPtr, sequence.litLength); /* note : oLitEnd <= oend-8 : no risk of overwrite beyond oend */ op = oLitEnd; *litPtr = iLitEnd; /* update for next sequence */ /* copy Match */ if (sequence.offset > (size_t)(oLitEnd - base)) { /* offset beyond prefix */ if (sequence.offset > (size_t)(oLitEnd - vBase)) return ERROR(corruption_detected); match = dictEnd - (base-match); if (match + sequence.matchLength <= dictEnd) { memmove(oLitEnd, match, sequence.matchLength); return sequenceLength; } /* span extDict & currentPrefixSegment */ { size_t const length1 = dictEnd - match; memmove(oLitEnd, match, length1); op = oLitEnd + length1; sequence.matchLength -= length1; match = base; if (op > oend_8 || sequence.matchLength < MINMATCH) { while (op < oMatchEnd) *op++ = *match++; return sequenceLength; } } } /* Requirement: op <= oend_8 */ /* match within prefix */ if (sequence.offset < 8) { /* close range match, overlap */ static const U32 dec32table[] = { 0, 1, 2, 1, 4, 4, 4, 4 }; /* added */ static const int dec64table[] = { 8, 8, 8, 7, 8, 9,10,11 }; /* subtracted */ int const sub2 = dec64table[sequence.offset]; op[0] = match[0]; op[1] = match[1]; op[2] = match[2]; op[3] = match[3]; match += dec32table[sequence.offset]; ZSTDv06_copy4(op+4, match); match -= sub2; } else { ZSTDv06_copy8(op, match); } op += 8; match += 8; if (oMatchEnd > oend-(16-MINMATCH)) { if (op < oend_8) { ZSTDv06_wildcopy(op, match, oend_8 - op); match += oend_8 - op; op = oend_8; } while (op < oMatchEnd) *op++ = *match++; } else { ZSTDv06_wildcopy(op, match, (ptrdiff_t)sequence.matchLength-8); /* works even if matchLength < 8 */ } return sequenceLength; } static size_t ZSTDv06_decompressSequences( ZSTDv06_DCtx* dctx, void* dst, size_t maxDstSize, const void* seqStart, size_t seqSize) { const BYTE* ip = (const BYTE*)seqStart; const BYTE* const iend = ip + seqSize; BYTE* const ostart = (BYTE* const)dst; BYTE* const oend = ostart + maxDstSize; BYTE* op = ostart; const BYTE* litPtr = dctx->litPtr; const BYTE* const litEnd = litPtr + dctx->litSize; FSEv06_DTable* DTableLL = dctx->LLTable; FSEv06_DTable* DTableML = dctx->MLTable; 
FSEv06_DTable* DTableOffb = dctx->OffTable;
    const BYTE* const base = (const BYTE*) (dctx->base);
    const BYTE* const vBase = (const BYTE*) (dctx->vBase);
    const BYTE* const dictEnd = (const BYTE*) (dctx->dictEnd);
    int nbSeq;

    /* Build Decoding Tables */
    {   size_t const seqHSize = ZSTDv06_decodeSeqHeaders(&nbSeq, DTableLL, DTableML, DTableOffb, dctx->flagRepeatTable, ip, seqSize);
        if (ZSTDv06_isError(seqHSize)) return seqHSize;
        ip += seqHSize;
        dctx->flagRepeatTable = 0;
    }

    /* Regen sequences */
    if (nbSeq) {
        seq_t sequence;
        seqState_t seqState;

        memset(&sequence, 0, sizeof(sequence));
        sequence.offset = REPCODE_STARTVALUE;
        { U32 i; for (i=0; i<ZSTDv06_REP_INIT; i++) seqState.prevOffset[i] = REPCODE_STARTVALUE; }
        { size_t const errorCode = BITv06_initDStream(&(seqState.DStream), ip, iend-ip);
          if (ZSTDv06_isError(errorCode)) return ERROR(corruption_detected); }
        FSEv06_initDState(&(seqState.stateLL), &(seqState.DStream), DTableLL);
        FSEv06_initDState(&(seqState.stateOffb), &(seqState.DStream), DTableOffb);
        FSEv06_initDState(&(seqState.stateML), &(seqState.DStream), DTableML);

        for ( ; (BITv06_reloadDStream(&(seqState.DStream)) <= BITv06_DStream_completed) && nbSeq ; ) {
            nbSeq--;
            ZSTDv06_decodeSequence(&sequence, &seqState);

#if 0  /* debug */
            static BYTE* start = NULL;
            if (start==NULL) start = op;
            size_t pos = (size_t)(op-start);
            if ((pos >= 5810037) && (pos < 5810400))
                printf("Dpos %6u :%5u literals & match %3u bytes at distance %6u \n",
                       pos, (U32)sequence.litLength, (U32)sequence.matchLength, (U32)sequence.offset);
#endif

            {   size_t const oneSeqSize = ZSTDv06_execSequence(op, oend, sequence, &litPtr, litEnd, base, vBase, dictEnd);
                if (ZSTDv06_isError(oneSeqSize)) return oneSeqSize;
                op += oneSeqSize;
        }   }

        /* check if reached exact end */
        if (nbSeq) return ERROR(corruption_detected);
    }

    /* last literal segment */
    {   size_t const lastLLSize = litEnd - litPtr;
        if (litPtr > litEnd) return ERROR(corruption_detected);   /* too many literals already used */
        if (op+lastLLSize > oend) return ERROR(dstSize_tooSmall);
        if (lastLLSize > 0) {
            memcpy(op, litPtr, lastLLSize);
            op += lastLLSize;
        }
    }

    return op-ostart;
}


static void ZSTDv06_checkContinuity(ZSTDv06_DCtx* dctx, const void* dst)
{
    if (dst != dctx->previousDstEnd) {   /* not contiguous */
        dctx->dictEnd = dctx->previousDstEnd;
        dctx->vBase = (const char*)dst - ((const char*)(dctx->previousDstEnd) - (const char*)(dctx->base));
        dctx->base = dst;
        dctx->previousDstEnd = dst;
    }
}


static size_t ZSTDv06_decompressBlock_internal(ZSTDv06_DCtx* dctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize)
{   /* blockType == blockCompressed */
    const BYTE* ip = (const BYTE*)src;

    if (srcSize >= ZSTDv06_BLOCKSIZE_MAX) return ERROR(srcSize_wrong);

    /* Decode literals sub-block */
    {   size_t const litCSize = ZSTDv06_decodeLiteralsBlock(dctx, src, srcSize);
        if (ZSTDv06_isError(litCSize)) return litCSize;
        ip += litCSize;
        srcSize -= litCSize;
    }

    return ZSTDv06_decompressSequences(dctx, dst, dstCapacity, ip, srcSize);
}


size_t ZSTDv06_decompressBlock(ZSTDv06_DCtx* dctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize)
{
    ZSTDv06_checkContinuity(dctx, dst);
    return ZSTDv06_decompressBlock_internal(dctx, dst, dstCapacity, src, srcSize);
}


/*!
ZSTDv06_decompressFrame() : * `dctx` must be properly initialized */ static size_t ZSTDv06_decompressFrame(ZSTDv06_DCtx* dctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize) { const BYTE* ip = (const BYTE*)src; const BYTE* const iend = ip + srcSize; BYTE* const ostart = (BYTE* const)dst; BYTE* op = ostart; BYTE* const oend = ostart + dstCapacity; size_t remainingSize = srcSize; blockProperties_t blockProperties = { bt_compressed, 0 }; /* check */ if (srcSize < ZSTDv06_frameHeaderSize_min+ZSTDv06_blockHeaderSize) return ERROR(srcSize_wrong); /* Frame Header */ { size_t const frameHeaderSize = ZSTDv06_frameHeaderSize(src, ZSTDv06_frameHeaderSize_min); if (ZSTDv06_isError(frameHeaderSize)) return frameHeaderSize; if (srcSize < frameHeaderSize+ZSTDv06_blockHeaderSize) return ERROR(srcSize_wrong); if (ZSTDv06_decodeFrameHeader(dctx, src, frameHeaderSize)) return ERROR(corruption_detected); ip += frameHeaderSize; remainingSize -= frameHeaderSize; } /* Loop on each block */ while (1) { size_t decodedSize=0; size_t const cBlockSize = ZSTDv06_getcBlockSize(ip, iend-ip, &blockProperties); if (ZSTDv06_isError(cBlockSize)) return cBlockSize; ip += ZSTDv06_blockHeaderSize; remainingSize -= ZSTDv06_blockHeaderSize; if (cBlockSize > remainingSize) return ERROR(srcSize_wrong); switch(blockProperties.blockType) { case bt_compressed: decodedSize = ZSTDv06_decompressBlock_internal(dctx, op, oend-op, ip, cBlockSize); break; case bt_raw : decodedSize = ZSTDv06_copyRawBlock(op, oend-op, ip, cBlockSize); break; case bt_rle : return ERROR(GENERIC); /* not yet supported */ break; case bt_end : /* end of frame */ if (remainingSize) return ERROR(srcSize_wrong); break; default: return ERROR(GENERIC); /* impossible */ } if (cBlockSize == 0) break; /* bt_end */ if (ZSTDv06_isError(decodedSize)) return decodedSize; op += decodedSize; ip += cBlockSize; remainingSize -= cBlockSize; } return op-ostart; } size_t ZSTDv06_decompress_usingPreparedDCtx(ZSTDv06_DCtx* dctx, const ZSTDv06_DCtx* refDCtx, void* dst, size_t dstCapacity, const void* src, size_t srcSize) { ZSTDv06_copyDCtx(dctx, refDCtx); ZSTDv06_checkContinuity(dctx, dst); return ZSTDv06_decompressFrame(dctx, dst, dstCapacity, src, srcSize); } size_t ZSTDv06_decompress_usingDict(ZSTDv06_DCtx* dctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize, const void* dict, size_t dictSize) { ZSTDv06_decompressBegin_usingDict(dctx, dict, dictSize); ZSTDv06_checkContinuity(dctx, dst); return ZSTDv06_decompressFrame(dctx, dst, dstCapacity, src, srcSize); } size_t ZSTDv06_decompressDCtx(ZSTDv06_DCtx* dctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize) { return ZSTDv06_decompress_usingDict(dctx, dst, dstCapacity, src, srcSize, NULL, 0); } size_t ZSTDv06_decompress(void* dst, size_t dstCapacity, const void* src, size_t srcSize) { #if defined(ZSTDv06_HEAPMODE) && (ZSTDv06_HEAPMODE==1) size_t regenSize; ZSTDv06_DCtx* dctx = ZSTDv06_createDCtx(); if (dctx==NULL) return ERROR(memory_allocation); regenSize = ZSTDv06_decompressDCtx(dctx, dst, dstCapacity, src, srcSize); ZSTDv06_freeDCtx(dctx); return regenSize; #else /* stack mode */ ZSTDv06_DCtx dctx; return ZSTDv06_decompressDCtx(&dctx, dst, dstCapacity, src, srcSize); #endif } /* ZSTD_errorFrameSizeInfoLegacy() : assumes `cSize` and `dBound` are _not_ NULL */ static void ZSTD_errorFrameSizeInfoLegacy(size_t* cSize, unsigned long long* dBound, size_t ret) { *cSize = ret; *dBound = ZSTD_CONTENTSIZE_ERROR; } void ZSTDv06_findFrameSizeInfoLegacy(const void *src, size_t srcSize, size_t* 
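/* Illustrative usage sketch (not part of the library) : how a caller might
 * probe a v0.6 frame with ZSTDv06_getFrameParams() to size the destination,
 * then decompress it in one call.  The helper name and the 1 MB fallback used
 * when the frame does not record its content size are assumptions of this
 * example, not something the library prescribes. */
#if 0
#include <stdlib.h>   /* malloc, free */

static size_t exampleDecompressV06(void** dstPtr, const void* cSrc, size_t cSrcSize)
{
    ZSTDv06_frameParams fp;
    size_t const hint = ZSTDv06_getFrameParams(&fp, cSrc, cSrcSize);
    if (ZSTDv06_isError(hint) || hint > 0) return ERROR(srcSize_wrong);   /* not a complete v0.6 frame header */

    /* fp.frameContentSize is 0 when the frame did not record its size */
    {   size_t const dstCapacity = fp.frameContentSize ? (size_t)fp.frameContentSize : ((size_t)1 << 20);
        void* const dst = malloc(dstCapacity);
        size_t dSize;
        if (dst == NULL) return ERROR(memory_allocation);
        dSize = ZSTDv06_decompress(dst, dstCapacity, cSrc, cSrcSize);
        if (ZSTDv06_isError(dSize)) { free(dst); return dSize; }
        *dstPtr = dst;   /* caller owns and frees the regenerated buffer */
        return dSize;
    }
}
#endif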
cSize, unsigned long long* dBound) { const BYTE* ip = (const BYTE*)src; size_t remainingSize = srcSize; size_t nbBlocks = 0; blockProperties_t blockProperties = { bt_compressed, 0 }; /* Frame Header */ { size_t const frameHeaderSize = ZSTDv06_frameHeaderSize(src, srcSize); if (ZSTDv06_isError(frameHeaderSize)) { ZSTD_errorFrameSizeInfoLegacy(cSize, dBound, frameHeaderSize); return; } if (MEM_readLE32(src) != ZSTDv06_MAGICNUMBER) { ZSTD_errorFrameSizeInfoLegacy(cSize, dBound, ERROR(prefix_unknown)); return; } if (srcSize < frameHeaderSize+ZSTDv06_blockHeaderSize) { ZSTD_errorFrameSizeInfoLegacy(cSize, dBound, ERROR(srcSize_wrong)); return; } ip += frameHeaderSize; remainingSize -= frameHeaderSize; } /* Loop on each block */ while (1) { size_t const cBlockSize = ZSTDv06_getcBlockSize(ip, remainingSize, &blockProperties); if (ZSTDv06_isError(cBlockSize)) { ZSTD_errorFrameSizeInfoLegacy(cSize, dBound, cBlockSize); return; } ip += ZSTDv06_blockHeaderSize; remainingSize -= ZSTDv06_blockHeaderSize; if (cBlockSize > remainingSize) { ZSTD_errorFrameSizeInfoLegacy(cSize, dBound, ERROR(srcSize_wrong)); return; } if (cBlockSize == 0) break; /* bt_end */ ip += cBlockSize; remainingSize -= cBlockSize; nbBlocks++; } *cSize = ip - (const BYTE*)src; *dBound = nbBlocks * ZSTDv06_BLOCKSIZE_MAX; } /*_****************************** * Streaming Decompression API ********************************/ size_t ZSTDv06_nextSrcSizeToDecompress(ZSTDv06_DCtx* dctx) { return dctx->expected; } size_t ZSTDv06_decompressContinue(ZSTDv06_DCtx* dctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize) { /* Sanity check */ if (srcSize != dctx->expected) return ERROR(srcSize_wrong); if (dstCapacity) ZSTDv06_checkContinuity(dctx, dst); /* Decompress : frame header; part 1 */ switch (dctx->stage) { case ZSTDds_getFrameHeaderSize : if (srcSize != ZSTDv06_frameHeaderSize_min) return ERROR(srcSize_wrong); /* impossible */ dctx->headerSize = ZSTDv06_frameHeaderSize(src, ZSTDv06_frameHeaderSize_min); if (ZSTDv06_isError(dctx->headerSize)) return dctx->headerSize; memcpy(dctx->headerBuffer, src, ZSTDv06_frameHeaderSize_min); if (dctx->headerSize > ZSTDv06_frameHeaderSize_min) { dctx->expected = dctx->headerSize - ZSTDv06_frameHeaderSize_min; dctx->stage = ZSTDds_decodeFrameHeader; return 0; } dctx->expected = 0; /* not necessary to copy more */ /* fall-through */ case ZSTDds_decodeFrameHeader: { size_t result; memcpy(dctx->headerBuffer + ZSTDv06_frameHeaderSize_min, src, dctx->expected); result = ZSTDv06_decodeFrameHeader(dctx, dctx->headerBuffer, dctx->headerSize); if (ZSTDv06_isError(result)) return result; dctx->expected = ZSTDv06_blockHeaderSize; dctx->stage = ZSTDds_decodeBlockHeader; return 0; } case ZSTDds_decodeBlockHeader: { blockProperties_t bp; size_t const cBlockSize = ZSTDv06_getcBlockSize(src, ZSTDv06_blockHeaderSize, &bp); if (ZSTDv06_isError(cBlockSize)) return cBlockSize; if (bp.blockType == bt_end) { dctx->expected = 0; dctx->stage = ZSTDds_getFrameHeaderSize; } else { dctx->expected = cBlockSize; dctx->bType = bp.blockType; dctx->stage = ZSTDds_decompressBlock; } return 0; } case ZSTDds_decompressBlock: { size_t rSize; switch(dctx->bType) { case bt_compressed: rSize = ZSTDv06_decompressBlock_internal(dctx, dst, dstCapacity, src, srcSize); break; case bt_raw : rSize = ZSTDv06_copyRawBlock(dst, dstCapacity, src, srcSize); break; case bt_rle : return ERROR(GENERIC); /* not yet handled */ break; case bt_end : /* should never happen (filtered at phase 1) */ rSize = 0; break; default: return ERROR(GENERIC); /* 
impossible */ } dctx->stage = ZSTDds_decodeBlockHeader; dctx->expected = ZSTDv06_blockHeaderSize; dctx->previousDstEnd = (char*)dst + rSize; return rSize; } default: return ERROR(GENERIC); /* impossible */ } } static void ZSTDv06_refDictContent(ZSTDv06_DCtx* dctx, const void* dict, size_t dictSize) { dctx->dictEnd = dctx->previousDstEnd; dctx->vBase = (const char*)dict - ((const char*)(dctx->previousDstEnd) - (const char*)(dctx->base)); dctx->base = dict; dctx->previousDstEnd = (const char*)dict + dictSize; } static size_t ZSTDv06_loadEntropy(ZSTDv06_DCtx* dctx, const void* dict, size_t dictSize) { size_t hSize, offcodeHeaderSize, matchlengthHeaderSize, litlengthHeaderSize; hSize = HUFv06_readDTableX4(dctx->hufTableX4, dict, dictSize); if (HUFv06_isError(hSize)) return ERROR(dictionary_corrupted); dict = (const char*)dict + hSize; dictSize -= hSize; { short offcodeNCount[MaxOff+1]; U32 offcodeMaxValue=MaxOff, offcodeLog; offcodeHeaderSize = FSEv06_readNCount(offcodeNCount, &offcodeMaxValue, &offcodeLog, dict, dictSize); if (FSEv06_isError(offcodeHeaderSize)) return ERROR(dictionary_corrupted); if (offcodeLog > OffFSELog) return ERROR(dictionary_corrupted); { size_t const errorCode = FSEv06_buildDTable(dctx->OffTable, offcodeNCount, offcodeMaxValue, offcodeLog); if (FSEv06_isError(errorCode)) return ERROR(dictionary_corrupted); } dict = (const char*)dict + offcodeHeaderSize; dictSize -= offcodeHeaderSize; } { short matchlengthNCount[MaxML+1]; unsigned matchlengthMaxValue = MaxML, matchlengthLog; matchlengthHeaderSize = FSEv06_readNCount(matchlengthNCount, &matchlengthMaxValue, &matchlengthLog, dict, dictSize); if (FSEv06_isError(matchlengthHeaderSize)) return ERROR(dictionary_corrupted); if (matchlengthLog > MLFSELog) return ERROR(dictionary_corrupted); { size_t const errorCode = FSEv06_buildDTable(dctx->MLTable, matchlengthNCount, matchlengthMaxValue, matchlengthLog); if (FSEv06_isError(errorCode)) return ERROR(dictionary_corrupted); } dict = (const char*)dict + matchlengthHeaderSize; dictSize -= matchlengthHeaderSize; } { short litlengthNCount[MaxLL+1]; unsigned litlengthMaxValue = MaxLL, litlengthLog; litlengthHeaderSize = FSEv06_readNCount(litlengthNCount, &litlengthMaxValue, &litlengthLog, dict, dictSize); if (FSEv06_isError(litlengthHeaderSize)) return ERROR(dictionary_corrupted); if (litlengthLog > LLFSELog) return ERROR(dictionary_corrupted); { size_t const errorCode = FSEv06_buildDTable(dctx->LLTable, litlengthNCount, litlengthMaxValue, litlengthLog); if (FSEv06_isError(errorCode)) return ERROR(dictionary_corrupted); } } dctx->flagRepeatTable = 1; return hSize + offcodeHeaderSize + matchlengthHeaderSize + litlengthHeaderSize; } static size_t ZSTDv06_decompress_insertDictionary(ZSTDv06_DCtx* dctx, const void* dict, size_t dictSize) { size_t eSize; U32 const magic = MEM_readLE32(dict); if (magic != ZSTDv06_DICT_MAGIC) { /* pure content mode */ ZSTDv06_refDictContent(dctx, dict, dictSize); return 0; } /* load entropy tables */ dict = (const char*)dict + 4; dictSize -= 4; eSize = ZSTDv06_loadEntropy(dctx, dict, dictSize); if (ZSTDv06_isError(eSize)) return ERROR(dictionary_corrupted); /* reference dictionary content */ dict = (const char*)dict + eSize; dictSize -= eSize; ZSTDv06_refDictContent(dctx, dict, dictSize); return 0; } size_t ZSTDv06_decompressBegin_usingDict(ZSTDv06_DCtx* dctx, const void* dict, size_t dictSize) { { size_t const errorCode = ZSTDv06_decompressBegin(dctx); if (ZSTDv06_isError(errorCode)) return errorCode; } if (dict && dictSize) { size_t const errorCode = 
ZSTDv06_decompress_insertDictionary(dctx, dict, dictSize); if (ZSTDv06_isError(errorCode)) return ERROR(dictionary_corrupted); } return 0; } /* Buffered version of Zstd compression library Copyright (C) 2015-2016, Yann Collet. BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. You can contact the author at : - zstd homepage : http://www.zstd.net/ */ /*-*************************************************************************** * Streaming decompression howto * * A ZBUFFv06_DCtx object is required to track streaming operations. * Use ZBUFFv06_createDCtx() and ZBUFFv06_freeDCtx() to create/release resources. * Use ZBUFFv06_decompressInit() to start a new decompression operation, * or ZBUFFv06_decompressInitDictionary() if decompression requires a dictionary. * Note that ZBUFFv06_DCtx objects can be re-init multiple times. * * Use ZBUFFv06_decompressContinue() repetitively to consume your input. * *srcSizePtr and *dstCapacityPtr can be any size. * The function will report how many bytes were read or written by modifying *srcSizePtr and *dstCapacityPtr. * Note that it may not consume the entire input, in which case it's up to the caller to present remaining input again. * The content of @dst will be overwritten (up to *dstCapacityPtr) at each function call, so save its content if it matters, or change @dst. * @return : a hint to preferred nb of bytes to use as input for next function call (it's only a hint, to help latency), * or 0 when a frame is completely decoded, * or an error code, which can be tested using ZBUFFv06_isError(). * * Hint : recommended buffer sizes (not compulsory) : ZBUFFv06_recommendedDInSize() and ZBUFFv06_recommendedDOutSize() * output : ZBUFFv06_recommendedDOutSize==128 KB block size is the internal unit, it ensures it's always possible to write a full block when decoded. * input : ZBUFFv06_recommendedDInSize == 128KB + 3; * just follow indications from ZBUFFv06_decompressContinue() to minimize latency. It should always be <= 128 KB + 3 . 
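*
* A caller loop might look like the following sketch (illustrative only :
* readInput() and writeOutput() stand for whatever I/O the application uses,
* and error handling is reduced to a bare minimum) :
*
*     ZBUFFv06_DCtx* const zbd = ZBUFFv06_createDCtx();
*     size_t const inSize  = ZBUFFv06_recommendedDInSize();
*     size_t const outSize = ZBUFFv06_recommendedDOutSize();
*     char* const inBuff  = (char*)malloc(inSize);
*     char* const outBuff = (char*)malloc(outSize);
*     size_t result = ZBUFFv06_decompressInit(zbd);       // 0 on success
*     int done = 0;
*     while (!done && !ZBUFFv06_isError(result)) {
*         size_t readSize = readInput(inBuff, inSize);    // 0 means no more input
*         const char* ip = inBuff;
*         if (readSize == 0) break;
*         while (readSize) {                              // re-present input until consumed
*             size_t srcSize = readSize;
*             size_t dstSize = outSize;
*             result = ZBUFFv06_decompressContinue(zbd, outBuff, &dstSize, ip, &srcSize);
*             if (ZBUFFv06_isError(result)) break;        // decoding error
*             writeOutput(outBuff, dstSize);              // dstSize bytes were regenerated
*             ip += srcSize; readSize -= srcSize;         // srcSize bytes were consumed
*             if (result == 0) { done = 1; break; }       // frame completely decoded
*         }
*     }
*     free(inBuff); free(outBuff); ZBUFFv06_freeDCtx(zbd);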
* *******************************************************************************/ typedef enum { ZBUFFds_init, ZBUFFds_loadHeader, ZBUFFds_read, ZBUFFds_load, ZBUFFds_flush } ZBUFFv06_dStage; /* *** Resource management *** */ struct ZBUFFv06_DCtx_s { ZSTDv06_DCtx* zd; ZSTDv06_frameParams fParams; ZBUFFv06_dStage stage; char* inBuff; size_t inBuffSize; size_t inPos; char* outBuff; size_t outBuffSize; size_t outStart; size_t outEnd; size_t blockSize; BYTE headerBuffer[ZSTDv06_FRAMEHEADERSIZE_MAX]; size_t lhSize; }; /* typedef'd to ZBUFFv06_DCtx within "zstd_buffered.h" */ ZBUFFv06_DCtx* ZBUFFv06_createDCtx(void) { ZBUFFv06_DCtx* zbd = (ZBUFFv06_DCtx*)malloc(sizeof(ZBUFFv06_DCtx)); if (zbd==NULL) return NULL; memset(zbd, 0, sizeof(*zbd)); zbd->zd = ZSTDv06_createDCtx(); zbd->stage = ZBUFFds_init; return zbd; } size_t ZBUFFv06_freeDCtx(ZBUFFv06_DCtx* zbd) { if (zbd==NULL) return 0; /* support free on null */ ZSTDv06_freeDCtx(zbd->zd); free(zbd->inBuff); free(zbd->outBuff); free(zbd); return 0; } /* *** Initialization *** */ size_t ZBUFFv06_decompressInitDictionary(ZBUFFv06_DCtx* zbd, const void* dict, size_t dictSize) { zbd->stage = ZBUFFds_loadHeader; zbd->lhSize = zbd->inPos = zbd->outStart = zbd->outEnd = 0; return ZSTDv06_decompressBegin_usingDict(zbd->zd, dict, dictSize); } size_t ZBUFFv06_decompressInit(ZBUFFv06_DCtx* zbd) { return ZBUFFv06_decompressInitDictionary(zbd, NULL, 0); } MEM_STATIC size_t ZBUFFv06_limitCopy(void* dst, size_t dstCapacity, const void* src, size_t srcSize) { size_t length = MIN(dstCapacity, srcSize); if (length > 0) { memcpy(dst, src, length); } return length; } /* *** Decompression *** */ size_t ZBUFFv06_decompressContinue(ZBUFFv06_DCtx* zbd, void* dst, size_t* dstCapacityPtr, const void* src, size_t* srcSizePtr) { const char* const istart = (const char*)src; const char* const iend = istart + *srcSizePtr; const char* ip = istart; char* const ostart = (char*)dst; char* const oend = ostart + *dstCapacityPtr; char* op = ostart; U32 notDone = 1; while (notDone) { switch(zbd->stage) { case ZBUFFds_init : return ERROR(init_missing); case ZBUFFds_loadHeader : { size_t const hSize = ZSTDv06_getFrameParams(&(zbd->fParams), zbd->headerBuffer, zbd->lhSize); if (hSize != 0) { size_t const toLoad = hSize - zbd->lhSize; /* if hSize!=0, hSize > zbd->lhSize */ if (ZSTDv06_isError(hSize)) return hSize; if (toLoad > (size_t)(iend-ip)) { /* not enough input to load full header */ memcpy(zbd->headerBuffer + zbd->lhSize, ip, iend-ip); zbd->lhSize += iend-ip; *dstCapacityPtr = 0; return (hSize - zbd->lhSize) + ZSTDv06_blockHeaderSize; /* remaining header bytes + next block header */ } memcpy(zbd->headerBuffer + zbd->lhSize, ip, toLoad); zbd->lhSize = hSize; ip += toLoad; break; } } /* Consume header */ { size_t const h1Size = ZSTDv06_nextSrcSizeToDecompress(zbd->zd); /* == ZSTDv06_frameHeaderSize_min */ size_t const h1Result = ZSTDv06_decompressContinue(zbd->zd, NULL, 0, zbd->headerBuffer, h1Size); if (ZSTDv06_isError(h1Result)) return h1Result; if (h1Size < zbd->lhSize) { /* long header */ size_t const h2Size = ZSTDv06_nextSrcSizeToDecompress(zbd->zd); size_t const h2Result = ZSTDv06_decompressContinue(zbd->zd, NULL, 0, zbd->headerBuffer+h1Size, h2Size); if (ZSTDv06_isError(h2Result)) return h2Result; } } /* Frame header instruct buffer sizes */ { size_t const blockSize = MIN(1 << zbd->fParams.windowLog, ZSTDv06_BLOCKSIZE_MAX); zbd->blockSize = blockSize; if (zbd->inBuffSize < blockSize) { free(zbd->inBuff); zbd->inBuffSize = blockSize; zbd->inBuff = (char*)malloc(blockSize); if 
(zbd->inBuff == NULL) return ERROR(memory_allocation); } { size_t const neededOutSize = ((size_t)1 << zbd->fParams.windowLog) + blockSize + WILDCOPY_OVERLENGTH * 2; if (zbd->outBuffSize < neededOutSize) { free(zbd->outBuff); zbd->outBuffSize = neededOutSize; zbd->outBuff = (char*)malloc(neededOutSize); if (zbd->outBuff == NULL) return ERROR(memory_allocation); } } } zbd->stage = ZBUFFds_read; /* fall-through */ case ZBUFFds_read: { size_t const neededInSize = ZSTDv06_nextSrcSizeToDecompress(zbd->zd); if (neededInSize==0) { /* end of frame */ zbd->stage = ZBUFFds_init; notDone = 0; break; } if ((size_t)(iend-ip) >= neededInSize) { /* decode directly from src */ size_t const decodedSize = ZSTDv06_decompressContinue(zbd->zd, zbd->outBuff + zbd->outStart, zbd->outBuffSize - zbd->outStart, ip, neededInSize); if (ZSTDv06_isError(decodedSize)) return decodedSize; ip += neededInSize; if (!decodedSize) break; /* this was just a header */ zbd->outEnd = zbd->outStart + decodedSize; zbd->stage = ZBUFFds_flush; break; } if (ip==iend) { notDone = 0; break; } /* no more input */ zbd->stage = ZBUFFds_load; } /* fall-through */ case ZBUFFds_load: { size_t const neededInSize = ZSTDv06_nextSrcSizeToDecompress(zbd->zd); size_t const toLoad = neededInSize - zbd->inPos; /* should always be <= remaining space within inBuff */ size_t loadedSize; if (toLoad > zbd->inBuffSize - zbd->inPos) return ERROR(corruption_detected); /* should never happen */ loadedSize = ZBUFFv06_limitCopy(zbd->inBuff + zbd->inPos, toLoad, ip, iend-ip); ip += loadedSize; zbd->inPos += loadedSize; if (loadedSize < toLoad) { notDone = 0; break; } /* not enough input, wait for more */ /* decode loaded input */ { size_t const decodedSize = ZSTDv06_decompressContinue(zbd->zd, zbd->outBuff + zbd->outStart, zbd->outBuffSize - zbd->outStart, zbd->inBuff, neededInSize); if (ZSTDv06_isError(decodedSize)) return decodedSize; zbd->inPos = 0; /* input is consumed */ if (!decodedSize) { zbd->stage = ZBUFFds_read; break; } /* this was just a header */ zbd->outEnd = zbd->outStart + decodedSize; zbd->stage = ZBUFFds_flush; /* break; */ /* ZBUFFds_flush follows */ } } /* fall-through */ case ZBUFFds_flush: { size_t const toFlushSize = zbd->outEnd - zbd->outStart; size_t const flushedSize = ZBUFFv06_limitCopy(op, oend-op, zbd->outBuff + zbd->outStart, toFlushSize); op += flushedSize; zbd->outStart += flushedSize; if (flushedSize == toFlushSize) { zbd->stage = ZBUFFds_read; if (zbd->outStart + zbd->blockSize > zbd->outBuffSize) zbd->outStart = zbd->outEnd = 0; break; } /* cannot flush everything */ notDone = 0; break; } default: return ERROR(GENERIC); /* impossible */ } } /* result */ *srcSizePtr = ip-istart; *dstCapacityPtr = op-ostart; { size_t nextSrcSizeHint = ZSTDv06_nextSrcSizeToDecompress(zbd->zd); if (nextSrcSizeHint > ZSTDv06_blockHeaderSize) nextSrcSizeHint+= ZSTDv06_blockHeaderSize; /* get following block header too */ nextSrcSizeHint -= zbd->inPos; /* already loaded*/ return nextSrcSizeHint; } } /* ************************************* * Tool functions ***************************************/ size_t ZBUFFv06_recommendedDInSize(void) { return ZSTDv06_BLOCKSIZE_MAX + ZSTDv06_blockHeaderSize /* block header size*/ ; } size_t ZBUFFv06_recommendedDOutSize(void) { return ZSTDv06_BLOCKSIZE_MAX; } borgbackup-1.1.15/src/borg/algorithms/zstd/lib/legacy/zstd_v04.h0000644000175000017500000001445313771325506024431 0ustar useruser00000000000000/* * Copyright (c) 2016-2020, Yann Collet, Facebook, Inc. * All rights reserved. 
* * This source code is licensed under both the BSD-style license (found in the * LICENSE file in the root directory of this source tree) and the GPLv2 (found * in the COPYING file in the root directory of this source tree). * You may select, at your option, one of the above-listed licenses. */ #ifndef ZSTD_V04_H_91868324769238 #define ZSTD_V04_H_91868324769238 #if defined (__cplusplus) extern "C" { #endif /* ************************************* * Includes ***************************************/ #include /* size_t */ /* ************************************* * Simple one-step function ***************************************/ /** ZSTDv04_decompress() : decompress ZSTD frames compliant with v0.4.x format compressedSize : is the exact source size maxOriginalSize : is the size of the 'dst' buffer, which must be already allocated. It must be equal or larger than originalSize, otherwise decompression will fail. return : the number of bytes decompressed into destination buffer (originalSize) or an errorCode if it fails (which can be tested using ZSTDv01_isError()) */ size_t ZSTDv04_decompress( void* dst, size_t maxOriginalSize, const void* src, size_t compressedSize); /** ZSTDv04_findFrameSizeInfoLegacy() : get the source length and decompressed bound of a ZSTD frame compliant with v0.4.x format srcSize : The size of the 'src' buffer, at least as large as the frame pointed to by 'src' cSize (output parameter) : the number of bytes that would be read to decompress this frame or an error code if it fails (which can be tested using ZSTDv01_isError()) dBound (output parameter) : an upper-bound for the decompressed size of the data in the frame or ZSTD_CONTENTSIZE_ERROR if an error occurs note : assumes `cSize` and `dBound` are _not_ NULL. */ void ZSTDv04_findFrameSizeInfoLegacy(const void *src, size_t srcSize, size_t* cSize, unsigned long long* dBound); /** ZSTDv04_isError() : tells if the result of ZSTDv04_decompress() is an error */ unsigned ZSTDv04_isError(size_t code); /* ************************************* * Advanced functions ***************************************/ typedef struct ZSTDv04_Dctx_s ZSTDv04_Dctx; ZSTDv04_Dctx* ZSTDv04_createDCtx(void); size_t ZSTDv04_freeDCtx(ZSTDv04_Dctx* dctx); size_t ZSTDv04_decompressDCtx(ZSTDv04_Dctx* dctx, void* dst, size_t maxOriginalSize, const void* src, size_t compressedSize); /* ************************************* * Direct Streaming ***************************************/ size_t ZSTDv04_resetDCtx(ZSTDv04_Dctx* dctx); size_t ZSTDv04_nextSrcSizeToDecompress(ZSTDv04_Dctx* dctx); size_t ZSTDv04_decompressContinue(ZSTDv04_Dctx* dctx, void* dst, size_t maxDstSize, const void* src, size_t srcSize); /** Use above functions alternatively. ZSTD_nextSrcSizeToDecompress() tells how much bytes to provide as 'srcSize' to ZSTD_decompressContinue(). ZSTD_decompressContinue() will use previous data blocks to improve compression if they are located prior to current block. Result is the number of bytes regenerated within 'dst'. It can be zero, which is not an error; it just means ZSTD_decompressContinue() has decoded some header. 
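
  For illustration, a caller might alternate these two calls as in the sketch
  below (assumptions of this example : readExact() is application I/O filling a
  buffer with exactly the requested number of bytes, srcBuff is large enough for
  any single block, and dst / dstRemaining describe one contiguous destination
  buffer of char, which must stay in place across calls) :

      ZSTDv04_Dctx* const dctx = ZSTDv04_createDCtx();
      ZSTDv04_resetDCtx(dctx);
      while (1) {
          size_t const toRead = ZSTDv04_nextSrcSizeToDecompress(dctx);
          size_t regenerated;
          if (toRead == 0) break;                          // frame is finished
          if (readExact(srcBuff, toRead) != toRead) break; // truncated input
          regenerated = ZSTDv04_decompressContinue(dctx, dst, dstRemaining, srcBuff, toRead);
          if (ZSTDv04_isError(regenerated)) break;         // decoding error
          dst += regenerated; dstRemaining -= regenerated; // may be 0 after a header
      }
      ZSTDv04_freeDCtx(dctx);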
*/ /* ************************************* * Buffered Streaming ***************************************/ typedef struct ZBUFFv04_DCtx_s ZBUFFv04_DCtx; ZBUFFv04_DCtx* ZBUFFv04_createDCtx(void); size_t ZBUFFv04_freeDCtx(ZBUFFv04_DCtx* dctx); size_t ZBUFFv04_decompressInit(ZBUFFv04_DCtx* dctx); size_t ZBUFFv04_decompressWithDictionary(ZBUFFv04_DCtx* dctx, const void* dict, size_t dictSize); size_t ZBUFFv04_decompressContinue(ZBUFFv04_DCtx* dctx, void* dst, size_t* maxDstSizePtr, const void* src, size_t* srcSizePtr); /** ************************************************ * Streaming decompression * * A ZBUFF_DCtx object is required to track streaming operation. * Use ZBUFF_createDCtx() and ZBUFF_freeDCtx() to create/release resources. * Use ZBUFF_decompressInit() to start a new decompression operation. * ZBUFF_DCtx objects can be reused multiple times. * * Optionally, a reference to a static dictionary can be set, using ZBUFF_decompressWithDictionary() * It must be the same content as the one set during compression phase. * Dictionary content must remain accessible during the decompression process. * * Use ZBUFF_decompressContinue() repetitively to consume your input. * *srcSizePtr and *maxDstSizePtr can be any size. * The function will report how many bytes were read or written by modifying *srcSizePtr and *maxDstSizePtr. * Note that it may not consume the entire input, in which case it's up to the caller to present remaining input again. * The content of dst will be overwritten (up to *maxDstSizePtr) at each function call, so save its content if it matters or change dst. * @return : a hint to preferred nb of bytes to use as input for next function call (it's only a hint, to improve latency) * or 0 when a frame is completely decoded * or an error code, which can be tested using ZBUFF_isError(). * * Hint : recommended buffer sizes (not compulsory) : ZBUFF_recommendedDInSize / ZBUFF_recommendedDOutSize * output : ZBUFF_recommendedDOutSize==128 KB block size is the internal unit, it ensures it's always possible to write a full block when it's decoded. * input : ZBUFF_recommendedDInSize==128Kb+3; just follow indications from ZBUFF_decompressContinue() to minimize latency. It should always be <= 128 KB + 3 . * **************************************************/ unsigned ZBUFFv04_isError(size_t errorCode); const char* ZBUFFv04_getErrorName(size_t errorCode); /** The below functions provide recommended buffer sizes for Compression or Decompression operations. * These sizes are not compulsory, they just tend to offer better latency */ size_t ZBUFFv04_recommendedDInSize(void); size_t ZBUFFv04_recommendedDOutSize(void); /* ************************************* * Prefix - version detection ***************************************/ #define ZSTDv04_magicNumber 0xFD2FB524 /* v0.4 */ #if defined (__cplusplus) } #endif #endif /* ZSTD_V04_H_91868324769238 */ borgbackup-1.1.15/src/borg/algorithms/zstd/lib/legacy/zstd_v06.h0000644000175000017500000001761713771325506024440 0ustar useruser00000000000000/* * Copyright (c) 2016-2020, Yann Collet, Facebook, Inc. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the * LICENSE file in the root directory of this source tree) and the GPLv2 (found * in the COPYING file in the root directory of this source tree). * You may select, at your option, one of the above-listed licenses. 
*/ #ifndef ZSTDv06_H #define ZSTDv06_H #if defined (__cplusplus) extern "C" { #endif /*====== Dependency ======*/ #include /* size_t */ /*====== Export for Windows ======*/ /*! * ZSTDv06_DLL_EXPORT : * Enable exporting of functions when building a Windows DLL */ #if defined(_WIN32) && defined(ZSTDv06_DLL_EXPORT) && (ZSTDv06_DLL_EXPORT==1) # define ZSTDLIBv06_API __declspec(dllexport) #else # define ZSTDLIBv06_API #endif /* ************************************* * Simple functions ***************************************/ /*! ZSTDv06_decompress() : `compressedSize` : is the _exact_ size of the compressed blob, otherwise decompression will fail. `dstCapacity` must be large enough, equal or larger than originalSize. @return : the number of bytes decompressed into `dst` (<= `dstCapacity`), or an errorCode if it fails (which can be tested using ZSTDv06_isError()) */ ZSTDLIBv06_API size_t ZSTDv06_decompress( void* dst, size_t dstCapacity, const void* src, size_t compressedSize); /** ZSTDv06_findFrameSizeInfoLegacy() : get the source length and decompressed bound of a ZSTD frame compliant with v0.6.x format srcSize : The size of the 'src' buffer, at least as large as the frame pointed to by 'src' cSize (output parameter) : the number of bytes that would be read to decompress this frame or an error code if it fails (which can be tested using ZSTDv01_isError()) dBound (output parameter) : an upper-bound for the decompressed size of the data in the frame or ZSTD_CONTENTSIZE_ERROR if an error occurs note : assumes `cSize` and `dBound` are _not_ NULL. */ void ZSTDv06_findFrameSizeInfoLegacy(const void *src, size_t srcSize, size_t* cSize, unsigned long long* dBound); /* ************************************* * Helper functions ***************************************/ ZSTDLIBv06_API size_t ZSTDv06_compressBound(size_t srcSize); /*!< maximum compressed size (worst case scenario) */ /* Error Management */ ZSTDLIBv06_API unsigned ZSTDv06_isError(size_t code); /*!< tells if a `size_t` function result is an error code */ ZSTDLIBv06_API const char* ZSTDv06_getErrorName(size_t code); /*!< provides readable string for an error code */ /* ************************************* * Explicit memory management ***************************************/ /** Decompression context */ typedef struct ZSTDv06_DCtx_s ZSTDv06_DCtx; ZSTDLIBv06_API ZSTDv06_DCtx* ZSTDv06_createDCtx(void); ZSTDLIBv06_API size_t ZSTDv06_freeDCtx(ZSTDv06_DCtx* dctx); /*!< @return : errorCode */ /** ZSTDv06_decompressDCtx() : * Same as ZSTDv06_decompress(), but requires an already allocated ZSTDv06_DCtx (see ZSTDv06_createDCtx()) */ ZSTDLIBv06_API size_t ZSTDv06_decompressDCtx(ZSTDv06_DCtx* ctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize); /*-*********************** * Dictionary API *************************/ /*! ZSTDv06_decompress_usingDict() : * Decompression using a pre-defined Dictionary content (see dictBuilder). * Dictionary must be identical to the one used during compression, otherwise regenerated data will be corrupted. 
* Note : dict can be NULL, in which case, it's equivalent to ZSTDv06_decompressDCtx() */ ZSTDLIBv06_API size_t ZSTDv06_decompress_usingDict(ZSTDv06_DCtx* dctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize, const void* dict,size_t dictSize); /*-************************ * Advanced Streaming API ***************************/ struct ZSTDv06_frameParams_s { unsigned long long frameContentSize; unsigned windowLog; }; typedef struct ZSTDv06_frameParams_s ZSTDv06_frameParams; ZSTDLIBv06_API size_t ZSTDv06_getFrameParams(ZSTDv06_frameParams* fparamsPtr, const void* src, size_t srcSize); /**< doesn't consume input */ ZSTDLIBv06_API size_t ZSTDv06_decompressBegin_usingDict(ZSTDv06_DCtx* dctx, const void* dict, size_t dictSize); ZSTDLIBv06_API void ZSTDv06_copyDCtx(ZSTDv06_DCtx* dctx, const ZSTDv06_DCtx* preparedDCtx); ZSTDLIBv06_API size_t ZSTDv06_nextSrcSizeToDecompress(ZSTDv06_DCtx* dctx); ZSTDLIBv06_API size_t ZSTDv06_decompressContinue(ZSTDv06_DCtx* dctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize); /* ************************************* * ZBUFF API ***************************************/ typedef struct ZBUFFv06_DCtx_s ZBUFFv06_DCtx; ZSTDLIBv06_API ZBUFFv06_DCtx* ZBUFFv06_createDCtx(void); ZSTDLIBv06_API size_t ZBUFFv06_freeDCtx(ZBUFFv06_DCtx* dctx); ZSTDLIBv06_API size_t ZBUFFv06_decompressInit(ZBUFFv06_DCtx* dctx); ZSTDLIBv06_API size_t ZBUFFv06_decompressInitDictionary(ZBUFFv06_DCtx* dctx, const void* dict, size_t dictSize); ZSTDLIBv06_API size_t ZBUFFv06_decompressContinue(ZBUFFv06_DCtx* dctx, void* dst, size_t* dstCapacityPtr, const void* src, size_t* srcSizePtr); /*-*************************************************************************** * Streaming decompression howto * * A ZBUFFv06_DCtx object is required to track streaming operations. * Use ZBUFFv06_createDCtx() and ZBUFFv06_freeDCtx() to create/release resources. * Use ZBUFFv06_decompressInit() to start a new decompression operation, * or ZBUFFv06_decompressInitDictionary() if decompression requires a dictionary. * Note that ZBUFFv06_DCtx objects can be re-init multiple times. * * Use ZBUFFv06_decompressContinue() repetitively to consume your input. * *srcSizePtr and *dstCapacityPtr can be any size. * The function will report how many bytes were read or written by modifying *srcSizePtr and *dstCapacityPtr. * Note that it may not consume the entire input, in which case it's up to the caller to present remaining input again. * The content of `dst` will be overwritten (up to *dstCapacityPtr) at each function call, so save its content if it matters, or change `dst`. * @return : a hint to preferred nb of bytes to use as input for next function call (it's only a hint, to help latency), * or 0 when a frame is completely decoded, * or an error code, which can be tested using ZBUFFv06_isError(). * * Hint : recommended buffer sizes (not compulsory) : ZBUFFv06_recommendedDInSize() and ZBUFFv06_recommendedDOutSize() * output : ZBUFFv06_recommendedDOutSize== 128 KB block size is the internal unit, it ensures it's always possible to write a full block when decoded. * input : ZBUFFv06_recommendedDInSize == 128KB + 3; * just follow indications from ZBUFFv06_decompressContinue() to minimize latency. It should always be <= 128 KB + 3 . 
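*
* If the frames were produced with a dictionary, only the initialization step
* of the loop described above changes : use ZBUFFv06_decompressInitDictionary()
* with the same dictionary that was used for compression (sketch ; dictBuff and
* dictSize are assumed to be provided by the application) :
*
*     size_t const initResult = ZBUFFv06_decompressInitDictionary(zbd, dictBuff, dictSize);
*     // initResult is 0 on success, or an error code testable with ZBUFFv06_isError()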
* *******************************************************************************/ /* ************************************* * Tool functions ***************************************/ ZSTDLIBv06_API unsigned ZBUFFv06_isError(size_t errorCode); ZSTDLIBv06_API const char* ZBUFFv06_getErrorName(size_t errorCode); /** Functions below provide recommended buffer sizes for Compression or Decompression operations. * These sizes are just hints, they tend to offer better latency */ ZSTDLIBv06_API size_t ZBUFFv06_recommendedDInSize(void); ZSTDLIBv06_API size_t ZBUFFv06_recommendedDOutSize(void); /*-************************************* * Constants ***************************************/ #define ZSTDv06_MAGICNUMBER 0xFD2FB526 /* v0.6 */ #if defined (__cplusplus) } #endif #endif /* ZSTDv06_BUFFERED_H */ borgbackup-1.1.15/src/borg/algorithms/zstd/lib/legacy/zstd_v02.c0000644000175000017500000037216613771325506024432 0ustar useruser00000000000000/* * Copyright (c) 2016-2020, Yann Collet, Facebook, Inc. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the * LICENSE file in the root directory of this source tree) and the GPLv2 (found * in the COPYING file in the root directory of this source tree). * You may select, at your option, one of the above-listed licenses. */ #include /* size_t, ptrdiff_t */ #include "zstd_v02.h" #include "../common/error_private.h" /****************************************** * Compiler-specific ******************************************/ #if defined(_MSC_VER) /* Visual Studio */ # include /* _byteswap_ulong */ # include /* _byteswap_* */ #endif /* ****************************************************************** mem.h low-level memory access routines Copyright (C) 2013-2015, Yann Collet. BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
You can contact the author at :
   - FSE source repository : https://github.com/Cyan4973/FiniteStateEntropy
   - Public forum : https://groups.google.com/forum/#!forum/lz4c
****************************************************************** */
#ifndef MEM_H_MODULE
#define MEM_H_MODULE

#if defined (__cplusplus)
extern "C" {
#endif

/******************************************
*  Includes
******************************************/
#include <stddef.h>    /* size_t, ptrdiff_t */
#include <string.h>    /* memcpy */


/******************************************
*  Compiler-specific
******************************************/
#if defined(__GNUC__)
# define MEM_STATIC static __attribute__((unused))
#elif defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */)
# define MEM_STATIC static inline
#elif defined(_MSC_VER)
# define MEM_STATIC static __inline
#else
# define MEM_STATIC static  /* this version may generate warnings for unused static functions; disable the relevant warning */
#endif


/****************************************************************
*  Basic Types
*****************************************************************/
#if defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */)
# include <stdint.h>
typedef uint8_t  BYTE;
typedef uint16_t U16;
typedef int16_t  S16;
typedef uint32_t U32;
typedef int32_t  S32;
typedef uint64_t U64;
typedef int64_t  S64;
#else
typedef unsigned char       BYTE;
typedef unsigned short      U16;
typedef signed short        S16;
typedef unsigned int        U32;
typedef signed int          S32;
typedef unsigned long long  U64;
typedef signed long long    S64;
#endif


/****************************************************************
*  Memory I/O
*****************************************************************/
/* MEM_FORCE_MEMORY_ACCESS
 * By default, access to unaligned memory is controlled by `memcpy()`, which is safe and portable.
 * Unfortunately, on some target/compiler combinations, the generated assembly is sub-optimal.
 * The below switch allows selecting a different access method for improved performance.
 * Method 0 (default) : use `memcpy()`. Safe and portable.
 * Method 1 : `__packed` statement. It depends on compiler extension (ie, not portable).
 *            This method is safe if your compiler supports it, and *generally* as fast or faster than `memcpy`.
 * Method 2 : direct access. This method is portable but violates the C standard.
 *            It can generate buggy code on targets generating assembly depending on alignment.
 *            But in some circumstances, it's the only known way to get the most performance (ie GCC + ARMv6)
 * See http://fastcompression.blogspot.fr/2015/08/accessing-unaligned-memory.html for details.
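 * For example, a build that has verified fast unaligned access on its target
 * can select a method from the compiler command line (illustrative invocation,
 * any of the method ids 0, 1 or 2 works the same way) :
 *     cc -DMEM_FORCE_MEMORY_ACCESS=1 -c zstd_v02.c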
* Prefer these methods in priority order (0 > 1 > 2) */ #ifndef MEM_FORCE_MEMORY_ACCESS /* can be defined externally, on command line for example */ # if defined(__GNUC__) && ( defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) || defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6Z__) || defined(__ARM_ARCH_6ZK__) || defined(__ARM_ARCH_6T2__) ) # define MEM_FORCE_MEMORY_ACCESS 2 # elif (defined(__INTEL_COMPILER) && !defined(WIN32)) || \ (defined(__GNUC__) && ( defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__) || defined(__ARM_ARCH_7R__) || defined(__ARM_ARCH_7M__) || defined(__ARM_ARCH_7S__) )) # define MEM_FORCE_MEMORY_ACCESS 1 # endif #endif MEM_STATIC unsigned MEM_32bits(void) { return sizeof(void*)==4; } MEM_STATIC unsigned MEM_64bits(void) { return sizeof(void*)==8; } MEM_STATIC unsigned MEM_isLittleEndian(void) { const union { U32 u; BYTE c[4]; } one = { 1 }; /* don't use static : performance detrimental */ return one.c[0]; } #if defined(MEM_FORCE_MEMORY_ACCESS) && (MEM_FORCE_MEMORY_ACCESS==2) /* violates C standard on structure alignment. Only use if no other choice to achieve best performance on target platform */ MEM_STATIC U16 MEM_read16(const void* memPtr) { return *(const U16*) memPtr; } MEM_STATIC U32 MEM_read32(const void* memPtr) { return *(const U32*) memPtr; } MEM_STATIC U64 MEM_read64(const void* memPtr) { return *(const U64*) memPtr; } MEM_STATIC void MEM_write16(void* memPtr, U16 value) { *(U16*)memPtr = value; } #elif defined(MEM_FORCE_MEMORY_ACCESS) && (MEM_FORCE_MEMORY_ACCESS==1) /* __pack instructions are safer, but compiler specific, hence potentially problematic for some compilers */ /* currently only defined for gcc and icc */ typedef union { U16 u16; U32 u32; U64 u64; } __attribute__((packed)) unalign; MEM_STATIC U16 MEM_read16(const void* ptr) { return ((const unalign*)ptr)->u16; } MEM_STATIC U32 MEM_read32(const void* ptr) { return ((const unalign*)ptr)->u32; } MEM_STATIC U64 MEM_read64(const void* ptr) { return ((const unalign*)ptr)->u64; } MEM_STATIC void MEM_write16(void* memPtr, U16 value) { ((unalign*)memPtr)->u16 = value; } #else /* default method, safe and standard. 
can sometimes prove slower */ MEM_STATIC U16 MEM_read16(const void* memPtr) { U16 val; memcpy(&val, memPtr, sizeof(val)); return val; } MEM_STATIC U32 MEM_read32(const void* memPtr) { U32 val; memcpy(&val, memPtr, sizeof(val)); return val; } MEM_STATIC U64 MEM_read64(const void* memPtr) { U64 val; memcpy(&val, memPtr, sizeof(val)); return val; } MEM_STATIC void MEM_write16(void* memPtr, U16 value) { memcpy(memPtr, &value, sizeof(value)); } #endif /* MEM_FORCE_MEMORY_ACCESS */ MEM_STATIC U16 MEM_readLE16(const void* memPtr) { if (MEM_isLittleEndian()) return MEM_read16(memPtr); else { const BYTE* p = (const BYTE*)memPtr; return (U16)(p[0] + (p[1]<<8)); } } MEM_STATIC void MEM_writeLE16(void* memPtr, U16 val) { if (MEM_isLittleEndian()) { MEM_write16(memPtr, val); } else { BYTE* p = (BYTE*)memPtr; p[0] = (BYTE)val; p[1] = (BYTE)(val>>8); } } MEM_STATIC U32 MEM_readLE24(const void* memPtr) { return MEM_readLE16(memPtr) + (((const BYTE*)memPtr)[2] << 16); } MEM_STATIC U32 MEM_readLE32(const void* memPtr) { if (MEM_isLittleEndian()) return MEM_read32(memPtr); else { const BYTE* p = (const BYTE*)memPtr; return (U32)((U32)p[0] + ((U32)p[1]<<8) + ((U32)p[2]<<16) + ((U32)p[3]<<24)); } } MEM_STATIC U64 MEM_readLE64(const void* memPtr) { if (MEM_isLittleEndian()) return MEM_read64(memPtr); else { const BYTE* p = (const BYTE*)memPtr; return (U64)((U64)p[0] + ((U64)p[1]<<8) + ((U64)p[2]<<16) + ((U64)p[3]<<24) + ((U64)p[4]<<32) + ((U64)p[5]<<40) + ((U64)p[6]<<48) + ((U64)p[7]<<56)); } } MEM_STATIC size_t MEM_readLEST(const void* memPtr) { if (MEM_32bits()) return (size_t)MEM_readLE32(memPtr); else return (size_t)MEM_readLE64(memPtr); } #if defined (__cplusplus) } #endif #endif /* MEM_H_MODULE */ /* ****************************************************************** bitstream Part of NewGen Entropy library header file (to include) Copyright (C) 2013-2015, Yann Collet. BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
You can contact the author at : - Source repository : https://github.com/Cyan4973/FiniteStateEntropy - Public forum : https://groups.google.com/forum/#!forum/lz4c ****************************************************************** */ #ifndef BITSTREAM_H_MODULE #define BITSTREAM_H_MODULE #if defined (__cplusplus) extern "C" { #endif /* * This API consists of small unitary functions, which highly benefit from being inlined. * Since link-time-optimization is not available for all compilers, * these functions are defined into a .h to be included. */ /********************************************** * bitStream decompression API (read backward) **********************************************/ typedef struct { size_t bitContainer; unsigned bitsConsumed; const char* ptr; const char* start; } BIT_DStream_t; typedef enum { BIT_DStream_unfinished = 0, BIT_DStream_endOfBuffer = 1, BIT_DStream_completed = 2, BIT_DStream_overflow = 3 } BIT_DStream_status; /* result of BIT_reloadDStream() */ /* 1,2,4,8 would be better for bitmap combinations, but slows down performance a bit ... :( */ MEM_STATIC size_t BIT_initDStream(BIT_DStream_t* bitD, const void* srcBuffer, size_t srcSize); MEM_STATIC size_t BIT_readBits(BIT_DStream_t* bitD, unsigned nbBits); MEM_STATIC BIT_DStream_status BIT_reloadDStream(BIT_DStream_t* bitD); MEM_STATIC unsigned BIT_endOfDStream(const BIT_DStream_t* bitD); /****************************************** * unsafe API ******************************************/ MEM_STATIC size_t BIT_readBitsFast(BIT_DStream_t* bitD, unsigned nbBits); /* faster, but works only if nbBits >= 1 */ /**************************************************************** * Helper functions ****************************************************************/ MEM_STATIC unsigned BIT_highbit32 (U32 val) { # if defined(_MSC_VER) /* Visual */ unsigned long r=0; _BitScanReverse ( &r, val ); return (unsigned) r; # elif defined(__GNUC__) && (__GNUC__ >= 3) /* Use GCC Intrinsic */ return __builtin_clz (val) ^ 31; # else /* Software version */ static const unsigned DeBruijnClz[32] = { 0, 9, 1, 10, 13, 21, 2, 29, 11, 14, 16, 18, 22, 25, 3, 30, 8, 12, 20, 28, 15, 17, 24, 7, 19, 27, 23, 6, 26, 5, 4, 31 }; U32 v = val; unsigned r; v |= v >> 1; v |= v >> 2; v |= v >> 4; v |= v >> 8; v |= v >> 16; r = DeBruijnClz[ (U32) (v * 0x07C4ACDDU) >> 27]; return r; # endif } /********************************************************** * bitStream decoding **********************************************************/ /*!BIT_initDStream * Initialize a BIT_DStream_t. 
* @bitD : a pointer to an already allocated BIT_DStream_t structure * @srcBuffer must point at the beginning of a bitStream * @srcSize must be the exact size of the bitStream * @result : size of stream (== srcSize) or an errorCode if a problem is detected */ MEM_STATIC size_t BIT_initDStream(BIT_DStream_t* bitD, const void* srcBuffer, size_t srcSize) { if (srcSize < 1) { memset(bitD, 0, sizeof(*bitD)); return ERROR(srcSize_wrong); } if (srcSize >= sizeof(size_t)) /* normal case */ { U32 contain32; bitD->start = (const char*)srcBuffer; bitD->ptr = (const char*)srcBuffer + srcSize - sizeof(size_t); bitD->bitContainer = MEM_readLEST(bitD->ptr); contain32 = ((const BYTE*)srcBuffer)[srcSize-1]; if (contain32 == 0) return ERROR(GENERIC); /* endMark not present */ bitD->bitsConsumed = 8 - BIT_highbit32(contain32); } else { U32 contain32; bitD->start = (const char*)srcBuffer; bitD->ptr = bitD->start; bitD->bitContainer = *(const BYTE*)(bitD->start); switch(srcSize) { case 7: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[6]) << (sizeof(size_t)*8 - 16); /* fallthrough */ case 6: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[5]) << (sizeof(size_t)*8 - 24); /* fallthrough */ case 5: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[4]) << (sizeof(size_t)*8 - 32); /* fallthrough */ case 4: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[3]) << 24; /* fallthrough */ case 3: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[2]) << 16; /* fallthrough */ case 2: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[1]) << 8; /* fallthrough */ default:; } contain32 = ((const BYTE*)srcBuffer)[srcSize-1]; if (contain32 == 0) return ERROR(GENERIC); /* endMark not present */ bitD->bitsConsumed = 8 - BIT_highbit32(contain32); bitD->bitsConsumed += (U32)(sizeof(size_t) - srcSize)*8; } return srcSize; } MEM_STATIC size_t BIT_lookBits(BIT_DStream_t* bitD, U32 nbBits) { const U32 bitMask = sizeof(bitD->bitContainer)*8 - 1; return ((bitD->bitContainer << (bitD->bitsConsumed & bitMask)) >> 1) >> ((bitMask-nbBits) & bitMask); } /*! 
BIT_lookBitsFast : * unsafe version; only works only if nbBits >= 1 */ MEM_STATIC size_t BIT_lookBitsFast(BIT_DStream_t* bitD, U32 nbBits) { const U32 bitMask = sizeof(bitD->bitContainer)*8 - 1; return (bitD->bitContainer << (bitD->bitsConsumed & bitMask)) >> (((bitMask+1)-nbBits) & bitMask); } MEM_STATIC void BIT_skipBits(BIT_DStream_t* bitD, U32 nbBits) { bitD->bitsConsumed += nbBits; } MEM_STATIC size_t BIT_readBits(BIT_DStream_t* bitD, U32 nbBits) { size_t value = BIT_lookBits(bitD, nbBits); BIT_skipBits(bitD, nbBits); return value; } /*!BIT_readBitsFast : * unsafe version; only works only if nbBits >= 1 */ MEM_STATIC size_t BIT_readBitsFast(BIT_DStream_t* bitD, U32 nbBits) { size_t value = BIT_lookBitsFast(bitD, nbBits); BIT_skipBits(bitD, nbBits); return value; } MEM_STATIC BIT_DStream_status BIT_reloadDStream(BIT_DStream_t* bitD) { if (bitD->bitsConsumed > (sizeof(bitD->bitContainer)*8)) /* should never happen */ return BIT_DStream_overflow; if (bitD->ptr >= bitD->start + sizeof(bitD->bitContainer)) { bitD->ptr -= bitD->bitsConsumed >> 3; bitD->bitsConsumed &= 7; bitD->bitContainer = MEM_readLEST(bitD->ptr); return BIT_DStream_unfinished; } if (bitD->ptr == bitD->start) { if (bitD->bitsConsumed < sizeof(bitD->bitContainer)*8) return BIT_DStream_endOfBuffer; return BIT_DStream_completed; } { U32 nbBytes = bitD->bitsConsumed >> 3; BIT_DStream_status result = BIT_DStream_unfinished; if (bitD->ptr - nbBytes < bitD->start) { nbBytes = (U32)(bitD->ptr - bitD->start); /* ptr > start */ result = BIT_DStream_endOfBuffer; } bitD->ptr -= nbBytes; bitD->bitsConsumed -= nbBytes*8; bitD->bitContainer = MEM_readLEST(bitD->ptr); /* reminder : srcSize > sizeof(bitD) */ return result; } } /*! BIT_endOfDStream * @return Tells if DStream has reached its exact end */ MEM_STATIC unsigned BIT_endOfDStream(const BIT_DStream_t* DStream) { return ((DStream->ptr == DStream->start) && (DStream->bitsConsumed == sizeof(DStream->bitContainer)*8)); } #if defined (__cplusplus) } #endif #endif /* BITSTREAM_H_MODULE */ /* ****************************************************************** Error codes and messages Copyright (C) 2013-2015, Yann Collet BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
You can contact the author at : - Source repository : https://github.com/Cyan4973/FiniteStateEntropy - Public forum : https://groups.google.com/forum/#!forum/lz4c ****************************************************************** */ #ifndef ERROR_H_MODULE #define ERROR_H_MODULE #if defined (__cplusplus) extern "C" { #endif /****************************************** * Compiler-specific ******************************************/ #if defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) # define ERR_STATIC static inline #elif defined(_MSC_VER) # define ERR_STATIC static __inline #elif defined(__GNUC__) # define ERR_STATIC static __attribute__((unused)) #else # define ERR_STATIC static /* this version may generate warnings for unused static functions; disable the relevant warning */ #endif /****************************************** * Error Management ******************************************/ #define PREFIX(name) ZSTD_error_##name #define ERROR(name) (size_t)-PREFIX(name) #define ERROR_LIST(ITEM) \ ITEM(PREFIX(No_Error)) ITEM(PREFIX(GENERIC)) \ ITEM(PREFIX(dstSize_tooSmall)) ITEM(PREFIX(srcSize_wrong)) \ ITEM(PREFIX(prefix_unknown)) ITEM(PREFIX(corruption_detected)) \ ITEM(PREFIX(tableLog_tooLarge)) ITEM(PREFIX(maxSymbolValue_tooLarge)) ITEM(PREFIX(maxSymbolValue_tooSmall)) \ ITEM(PREFIX(maxCode)) #define ERROR_GENERATE_ENUM(ENUM) ENUM, typedef enum { ERROR_LIST(ERROR_GENERATE_ENUM) } ERR_codes; /* enum is exposed, to detect & handle specific errors; compare function result to -enum value */ #define ERROR_CONVERTTOSTRING(STRING) #STRING, #define ERROR_GENERATE_STRING(EXPR) ERROR_CONVERTTOSTRING(EXPR) static const char* ERR_strings[] = { ERROR_LIST(ERROR_GENERATE_STRING) }; ERR_STATIC unsigned ERR_isError(size_t code) { return (code > ERROR(maxCode)); } ERR_STATIC const char* ERR_getErrorName(size_t code) { static const char* codeError = "Unspecified error code"; if (ERR_isError(code)) return ERR_strings[-(int)(code)]; return codeError; } #if defined (__cplusplus) } #endif #endif /* ERROR_H_MODULE */ /* Constructor and Destructor of type FSE_CTable Note that its size depends on 'tableLog' and 'maxSymbolValue' */ typedef unsigned FSE_CTable; /* don't allocate that. It's just a way to be more restrictive than void* */ typedef unsigned FSE_DTable; /* don't allocate that. It's just a way to be more restrictive than void* */ /* ****************************************************************** FSE : Finite State Entropy coder header file for static linking (only) Copyright (C) 2013-2015, Yann Collet BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. You can contact the author at : - Source repository : https://github.com/Cyan4973/FiniteStateEntropy - Public forum : https://groups.google.com/forum/#!forum/lz4c ****************************************************************** */ #if defined (__cplusplus) extern "C" { #endif /****************************************** * Static allocation ******************************************/ /* FSE buffer bounds */ #define FSE_NCOUNTBOUND 512 #define FSE_BLOCKBOUND(size) (size + (size>>7)) #define FSE_COMPRESSBOUND(size) (FSE_NCOUNTBOUND + FSE_BLOCKBOUND(size)) /* Macro version, useful for static allocation */ /* You can statically allocate FSE CTable/DTable as a table of unsigned using below macro */ #define FSE_CTABLE_SIZE_U32(maxTableLog, maxSymbolValue) (1 + (1<<(maxTableLog-1)) + ((maxSymbolValue+1)*2)) #define FSE_DTABLE_SIZE_U32(maxTableLog) (1 + (1<= 1 (otherwise, result will be corrupted) */ /****************************************** * Implementation of inline functions ******************************************/ /* decompression */ typedef struct { U16 tableLog; U16 fastMode; } FSE_DTableHeader; /* sizeof U32 */ typedef struct { unsigned short newState; unsigned char symbol; unsigned char nbBits; } FSE_decode_t; /* size == U32 */ MEM_STATIC void FSE_initDState(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD, const FSE_DTable* dt) { FSE_DTableHeader DTableH; memcpy(&DTableH, dt, sizeof(DTableH)); DStatePtr->state = BIT_readBits(bitD, DTableH.tableLog); BIT_reloadDStream(bitD); DStatePtr->table = dt + 1; } MEM_STATIC BYTE FSE_decodeSymbol(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD) { const FSE_decode_t DInfo = ((const FSE_decode_t*)(DStatePtr->table))[DStatePtr->state]; const U32 nbBits = DInfo.nbBits; BYTE symbol = DInfo.symbol; size_t lowBits = BIT_readBits(bitD, nbBits); DStatePtr->state = DInfo.newState + lowBits; return symbol; } MEM_STATIC BYTE FSE_decodeSymbolFast(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD) { const FSE_decode_t DInfo = ((const FSE_decode_t*)(DStatePtr->table))[DStatePtr->state]; const U32 nbBits = DInfo.nbBits; BYTE symbol = DInfo.symbol; size_t lowBits = BIT_readBitsFast(bitD, nbBits); DStatePtr->state = DInfo.newState + lowBits; return symbol; } MEM_STATIC unsigned FSE_endOfDState(const FSE_DState_t* DStatePtr) { return DStatePtr->state == 0; } #if defined (__cplusplus) } #endif /* ****************************************************************** Huff0 : Huffman coder, part of New Generation Entropy library header file for static linking (only) Copyright (C) 2013-2015, Yann Collet BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 
* Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. You can contact the author at : - Source repository : https://github.com/Cyan4973/FiniteStateEntropy - Public forum : https://groups.google.com/forum/#!forum/lz4c ****************************************************************** */ #if defined (__cplusplus) extern "C" { #endif /****************************************** * Static allocation macros ******************************************/ /* Huff0 buffer bounds */ #define HUF_CTABLEBOUND 129 #define HUF_BLOCKBOUND(size) (size + (size>>8) + 8) /* only true if incompressible pre-filtered with fast heuristic */ #define HUF_COMPRESSBOUND(size) (HUF_CTABLEBOUND + HUF_BLOCKBOUND(size)) /* Macro version, useful for static allocation */ /* static allocation of Huff0's DTable */ #define HUF_DTABLE_SIZE(maxTableLog) (1 + (1< /* size_t */ /* ************************************* * Version ***************************************/ #define ZSTD_VERSION_MAJOR 0 /* for breaking interface changes */ #define ZSTD_VERSION_MINOR 2 /* for new (non-breaking) interface capabilities */ #define ZSTD_VERSION_RELEASE 2 /* for tweaks, bug-fixes, or development */ #define ZSTD_VERSION_NUMBER (ZSTD_VERSION_MAJOR *100*100 + ZSTD_VERSION_MINOR *100 + ZSTD_VERSION_RELEASE) /* ************************************* * Advanced functions ***************************************/ typedef struct ZSTD_CCtx_s ZSTD_CCtx; /* incomplete type */ #if defined (__cplusplus) } #endif /* zstd - standard compression library Header File for static linking only Copyright (C) 2014-2015, Yann Collet. BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. You can contact the author at : - zstd source repository : https://github.com/Cyan4973/zstd - ztsd public forum : https://groups.google.com/forum/#!forum/lz4c */ /* The objects defined into this file should be considered experimental. * They are not labelled stable, as their prototype may change in the future. * You can use them for tests, provide feedback, or if you can endure risk of future changes. */ #if defined (__cplusplus) extern "C" { #endif /* ************************************* * Streaming functions ***************************************/ typedef struct ZSTD_DCtx_s ZSTD_DCtx; /* Use above functions alternatively. ZSTD_nextSrcSizeToDecompress() tells how much bytes to provide as 'srcSize' to ZSTD_decompressContinue(). ZSTD_decompressContinue() will use previous data blocks to improve compression if they are located prior to current block. Result is the number of bytes regenerated within 'dst'. It can be zero, which is not an error; it just means ZSTD_decompressContinue() has decoded some header. */ /* ************************************* * Prefix - version detection ***************************************/ #define ZSTD_magicNumber 0xFD2FB522 /* v0.2 (current)*/ #if defined (__cplusplus) } #endif /* ****************************************************************** FSE : Finite State Entropy coder Copyright (C) 2013-2015, Yann Collet. BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
You can contact the author at : - FSE source repository : https://github.com/Cyan4973/FiniteStateEntropy - Public forum : https://groups.google.com/forum/#!forum/lz4c ****************************************************************** */ #ifndef FSE_COMMONDEFS_ONLY /**************************************************************** * Tuning parameters ****************************************************************/ /* MEMORY_USAGE : * Memory usage formula : N->2^N Bytes (examples : 10 -> 1KB; 12 -> 4KB ; 16 -> 64KB; 20 -> 1MB; etc.) * Increasing memory usage improves compression ratio * Reduced memory usage can improve speed, due to cache effect * Recommended max value is 14, for 16KB, which nicely fits into Intel x86 L1 cache */ #define FSE_MAX_MEMORY_USAGE 14 #define FSE_DEFAULT_MEMORY_USAGE 13 /* FSE_MAX_SYMBOL_VALUE : * Maximum symbol value authorized. * Required for proper stack allocation */ #define FSE_MAX_SYMBOL_VALUE 255 /**************************************************************** * template functions type & suffix ****************************************************************/ #define FSE_FUNCTION_TYPE BYTE #define FSE_FUNCTION_EXTENSION /**************************************************************** * Byte symbol type ****************************************************************/ #endif /* !FSE_COMMONDEFS_ONLY */ /**************************************************************** * Compiler specifics ****************************************************************/ #ifdef _MSC_VER /* Visual Studio */ # define FORCE_INLINE static __forceinline # include /* For Visual 2005 */ # pragma warning(disable : 4127) /* disable: C4127: conditional expression is constant */ # pragma warning(disable : 4214) /* disable: C4214: non-int bitfields */ #else # if defined (__cplusplus) || defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L /* C99 */ # ifdef __GNUC__ # define FORCE_INLINE static inline __attribute__((always_inline)) # else # define FORCE_INLINE static inline # endif # else # define FORCE_INLINE static # endif /* __STDC_VERSION__ */ #endif /**************************************************************** * Includes ****************************************************************/ #include /* malloc, free, qsort */ #include /* memcpy, memset */ #include /* printf (debug) */ /**************************************************************** * Constants *****************************************************************/ #define FSE_MAX_TABLELOG (FSE_MAX_MEMORY_USAGE-2) #define FSE_MAX_TABLESIZE (1U< FSE_TABLELOG_ABSOLUTE_MAX #error "FSE_MAX_TABLELOG > FSE_TABLELOG_ABSOLUTE_MAX is not supported" #endif /**************************************************************** * Error Management ****************************************************************/ #define FSE_STATIC_ASSERT(c) { enum { FSE_static_assert = 1/(int)(!!(c)) }; } /* use only *after* variable declarations */ /**************************************************************** * Complex types ****************************************************************/ typedef U32 DTable_max_t[FSE_DTABLE_SIZE_U32(FSE_MAX_TABLELOG)]; /**************************************************************** * Templates ****************************************************************/ /* designed to be included for type-specific functions (template emulation in C) Objective is to write these functions only once, for improved maintenance */ /* safety checks */ #ifndef FSE_FUNCTION_EXTENSION # error "FSE_FUNCTION_EXTENSION must be defined" 
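/* Editor's note (illustrative sketch, not part of the original zstd/FSE source):
 * the FSE_CAT / FSE_FUNCTION_NAME macros defined just below emulate C++-style
 * templates with plain token pasting, so one template body can be compiled once
 * per symbol type. A hypothetical instantiation could look like this; the names
 * FSE_buildDTable_U16 and the _U16 suffix are assumptions for the example only,
 * not identifiers used by this file:
 *
 *   #define FSE_FUNCTION_TYPE      U16
 *   #define FSE_FUNCTION_EXTENSION _U16
 *   // FSE_FUNCTION_NAME(FSE_buildDTable, FSE_FUNCTION_EXTENSION)
 *   //   -> FSE_CAT(FSE_buildDTable, _U16)   (argument expanded first)
 *   //   -> FSE_buildDTable_U16              (via X##Y token pasting)
 */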
#endif #ifndef FSE_FUNCTION_TYPE # error "FSE_FUNCTION_TYPE must be defined" #endif /* Function names */ #define FSE_CAT(X,Y) X##Y #define FSE_FUNCTION_NAME(X,Y) FSE_CAT(X,Y) #define FSE_TYPE_NAME(X,Y) FSE_CAT(X,Y) /* Function templates */ #define FSE_DECODE_TYPE FSE_decode_t static U32 FSE_tableStep(U32 tableSize) { return (tableSize>>1) + (tableSize>>3) + 3; } static size_t FSE_buildDTable (FSE_DTable* dt, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog) { void* ptr = dt+1; FSE_DECODE_TYPE* const tableDecode = (FSE_DECODE_TYPE*)ptr; FSE_DTableHeader DTableH; const U32 tableSize = 1 << tableLog; const U32 tableMask = tableSize-1; const U32 step = FSE_tableStep(tableSize); U16 symbolNext[FSE_MAX_SYMBOL_VALUE+1]; U32 position = 0; U32 highThreshold = tableSize-1; const S16 largeLimit= (S16)(1 << (tableLog-1)); U32 noLarge = 1; U32 s; /* Sanity Checks */ if (maxSymbolValue > FSE_MAX_SYMBOL_VALUE) return ERROR(maxSymbolValue_tooLarge); if (tableLog > FSE_MAX_TABLELOG) return ERROR(tableLog_tooLarge); /* Init, lay down lowprob symbols */ DTableH.tableLog = (U16)tableLog; for (s=0; s<=maxSymbolValue; s++) { if (normalizedCounter[s]==-1) { tableDecode[highThreshold--].symbol = (FSE_FUNCTION_TYPE)s; symbolNext[s] = 1; } else { if (normalizedCounter[s] >= largeLimit) noLarge=0; symbolNext[s] = normalizedCounter[s]; } } /* Spread symbols */ for (s=0; s<=maxSymbolValue; s++) { int i; for (i=0; i highThreshold) position = (position + step) & tableMask; /* lowprob area */ } } if (position!=0) return ERROR(GENERIC); /* position must reach all cells once, otherwise normalizedCounter is incorrect */ /* Build Decoding table */ { U32 i; for (i=0; i FSE_TABLELOG_ABSOLUTE_MAX) return ERROR(tableLog_tooLarge); bitStream >>= 4; bitCount = 4; *tableLogPtr = nbBits; remaining = (1<1) && (charnum<=*maxSVPtr)) { if (previous0) { unsigned n0 = charnum; while ((bitStream & 0xFFFF) == 0xFFFF) { n0+=24; if (ip < iend-5) { ip+=2; bitStream = MEM_readLE32(ip) >> bitCount; } else { bitStream >>= 16; bitCount+=16; } } while ((bitStream & 3) == 3) { n0+=3; bitStream>>=2; bitCount+=2; } n0 += bitStream & 3; bitCount += 2; if (n0 > *maxSVPtr) return ERROR(maxSymbolValue_tooSmall); while (charnum < n0) normalizedCounter[charnum++] = 0; if ((ip <= iend-7) || (ip + (bitCount>>3) <= iend-4)) { ip += bitCount>>3; bitCount &= 7; bitStream = MEM_readLE32(ip) >> bitCount; } else bitStream >>= 2; } { const short max = (short)((2*threshold-1)-remaining); short count; if ((bitStream & (threshold-1)) < (U32)max) { count = (short)(bitStream & (threshold-1)); bitCount += nbBits-1; } else { count = (short)(bitStream & (2*threshold-1)); if (count >= threshold) count -= max; bitCount += nbBits; } count--; /* extra accuracy */ remaining -= FSE_abs(count); normalizedCounter[charnum++] = count; previous0 = !count; while (remaining < threshold) { nbBits--; threshold >>= 1; } { if ((ip <= iend-7) || (ip + (bitCount>>3) <= iend-4)) { ip += bitCount>>3; bitCount &= 7; } else { bitCount -= (int)(8 * (iend - 4 - ip)); ip = iend - 4; } bitStream = MEM_readLE32(ip) >> (bitCount & 31); } } } if (remaining != 1) return ERROR(GENERIC); *maxSVPtr = charnum-1; ip += (bitCount+7)>>3; if ((size_t)(ip-istart) > hbSize) return ERROR(srcSize_wrong); return ip-istart; } /********************************************************* * Decompression (Byte symbols) *********************************************************/ static size_t FSE_buildDTable_rle (FSE_DTable* dt, BYTE symbolValue) { void* ptr = dt; FSE_DTableHeader* const DTableH = 
(FSE_DTableHeader*)ptr; FSE_decode_t* const cell = (FSE_decode_t*)(ptr) + 1; /* because dt is unsigned */ DTableH->tableLog = 0; DTableH->fastMode = 0; cell->newState = 0; cell->symbol = symbolValue; cell->nbBits = 0; return 0; } static size_t FSE_buildDTable_raw (FSE_DTable* dt, unsigned nbBits) { void* ptr = dt; FSE_DTableHeader* const DTableH = (FSE_DTableHeader*)ptr; FSE_decode_t* const dinfo = (FSE_decode_t*)(ptr) + 1; /* because dt is unsigned */ const unsigned tableSize = 1 << nbBits; const unsigned tableMask = tableSize - 1; const unsigned maxSymbolValue = tableMask; unsigned s; /* Sanity checks */ if (nbBits < 1) return ERROR(GENERIC); /* min size */ /* Build Decoding Table */ DTableH->tableLog = (U16)nbBits; DTableH->fastMode = 1; for (s=0; s<=maxSymbolValue; s++) { dinfo[s].newState = 0; dinfo[s].symbol = (BYTE)s; dinfo[s].nbBits = (BYTE)nbBits; } return 0; } FORCE_INLINE size_t FSE_decompress_usingDTable_generic( void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const FSE_DTable* dt, const unsigned fast) { BYTE* const ostart = (BYTE*) dst; BYTE* op = ostart; BYTE* const omax = op + maxDstSize; BYTE* const olimit = omax-3; BIT_DStream_t bitD; FSE_DState_t state1; FSE_DState_t state2; size_t errorCode; /* Init */ errorCode = BIT_initDStream(&bitD, cSrc, cSrcSize); /* replaced last arg by maxCompressed Size */ if (FSE_isError(errorCode)) return errorCode; FSE_initDState(&state1, &bitD, dt); FSE_initDState(&state2, &bitD, dt); #define FSE_GETSYMBOL(statePtr) fast ? FSE_decodeSymbolFast(statePtr, &bitD) : FSE_decodeSymbol(statePtr, &bitD) /* 4 symbols per loop */ for ( ; (BIT_reloadDStream(&bitD)==BIT_DStream_unfinished) && (op sizeof(bitD.bitContainer)*8) /* This test must be static */ BIT_reloadDStream(&bitD); op[1] = FSE_GETSYMBOL(&state2); if (FSE_MAX_TABLELOG*4+7 > sizeof(bitD.bitContainer)*8) /* This test must be static */ { if (BIT_reloadDStream(&bitD) > BIT_DStream_unfinished) { op+=2; break; } } op[2] = FSE_GETSYMBOL(&state1); if (FSE_MAX_TABLELOG*2+7 > sizeof(bitD.bitContainer)*8) /* This test must be static */ BIT_reloadDStream(&bitD); op[3] = FSE_GETSYMBOL(&state2); } /* tail */ /* note : BIT_reloadDStream(&bitD) >= FSE_DStream_partiallyFilled; Ends at exactly BIT_DStream_completed */ while (1) { if ( (BIT_reloadDStream(&bitD)>BIT_DStream_completed) || (op==omax) || (BIT_endOfDStream(&bitD) && (fast || FSE_endOfDState(&state1))) ) break; *op++ = FSE_GETSYMBOL(&state1); if ( (BIT_reloadDStream(&bitD)>BIT_DStream_completed) || (op==omax) || (BIT_endOfDStream(&bitD) && (fast || FSE_endOfDState(&state2))) ) break; *op++ = FSE_GETSYMBOL(&state2); } /* end ? 
*/ if (BIT_endOfDStream(&bitD) && FSE_endOfDState(&state1) && FSE_endOfDState(&state2)) return op-ostart; if (op==omax) return ERROR(dstSize_tooSmall); /* dst buffer is full, but cSrc unfinished */ return ERROR(corruption_detected); } static size_t FSE_decompress_usingDTable(void* dst, size_t originalSize, const void* cSrc, size_t cSrcSize, const FSE_DTable* dt) { FSE_DTableHeader DTableH; memcpy(&DTableH, dt, sizeof(DTableH)); /* select fast mode (static) */ if (DTableH.fastMode) return FSE_decompress_usingDTable_generic(dst, originalSize, cSrc, cSrcSize, dt, 1); return FSE_decompress_usingDTable_generic(dst, originalSize, cSrc, cSrcSize, dt, 0); } static size_t FSE_decompress(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize) { const BYTE* const istart = (const BYTE*)cSrc; const BYTE* ip = istart; short counting[FSE_MAX_SYMBOL_VALUE+1]; DTable_max_t dt; /* Static analyzer seems unable to understand this table will be properly initialized later */ unsigned tableLog; unsigned maxSymbolValue = FSE_MAX_SYMBOL_VALUE; size_t errorCode; if (cSrcSize<2) return ERROR(srcSize_wrong); /* too small input size */ /* normal FSE decoding mode */ errorCode = FSE_readNCount (counting, &maxSymbolValue, &tableLog, istart, cSrcSize); if (FSE_isError(errorCode)) return errorCode; if (errorCode >= cSrcSize) return ERROR(srcSize_wrong); /* too small input size */ ip += errorCode; cSrcSize -= errorCode; errorCode = FSE_buildDTable (dt, counting, maxSymbolValue, tableLog); if (FSE_isError(errorCode)) return errorCode; /* always return, even if it is an error code */ return FSE_decompress_usingDTable (dst, maxDstSize, ip, cSrcSize, dt); } #endif /* FSE_COMMONDEFS_ONLY */ /* ****************************************************************** Huff0 : Huffman coder, part of New Generation Entropy library Copyright (C) 2013-2015, Yann Collet. BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
You can contact the author at : - FSE+Huff0 source repository : https://github.com/Cyan4973/FiniteStateEntropy - Public forum : https://groups.google.com/forum/#!forum/lz4c ****************************************************************** */ /**************************************************************** * Compiler specifics ****************************************************************/ #if defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) /* inline is defined */ #elif defined(_MSC_VER) # define inline __inline #else # define inline /* disable inline */ #endif #ifdef _MSC_VER /* Visual Studio */ # pragma warning(disable : 4127) /* disable: C4127: conditional expression is constant */ #endif /**************************************************************** * Includes ****************************************************************/ #include /* malloc, free, qsort */ #include /* memcpy, memset */ #include /* printf (debug) */ /**************************************************************** * Error Management ****************************************************************/ #define HUF_STATIC_ASSERT(c) { enum { HUF_static_assert = 1/(int)(!!(c)) }; } /* use only *after* variable declarations */ /****************************************** * Helper functions ******************************************/ static unsigned HUF_isError(size_t code) { return ERR_isError(code); } #define HUF_ABSOLUTEMAX_TABLELOG 16 /* absolute limit of HUF_MAX_TABLELOG. Beyond that value, code does not work */ #define HUF_MAX_TABLELOG 12 /* max configured tableLog (for static allocation); can be modified up to HUF_ABSOLUTEMAX_TABLELOG */ #define HUF_DEFAULT_TABLELOG HUF_MAX_TABLELOG /* tableLog by default, when not specified */ #define HUF_MAX_SYMBOL_VALUE 255 #if (HUF_MAX_TABLELOG > HUF_ABSOLUTEMAX_TABLELOG) # error "HUF_MAX_TABLELOG is too large !" #endif /********************************************************* * Huff0 : Huffman block decompression *********************************************************/ typedef struct { BYTE byte; BYTE nbBits; } HUF_DEltX2; /* single-symbol decoding */ typedef struct { U16 sequence; BYTE nbBits; BYTE length; } HUF_DEltX4; /* double-symbols decoding */ typedef struct { BYTE symbol; BYTE weight; } sortedSymbol_t; /*! HUF_readStats Read compact Huffman tree, saved by HUF_writeCTable @huffWeight : destination buffer @return : size read from `src` */ static size_t HUF_readStats(BYTE* huffWeight, size_t hwSize, U32* rankStats, U32* nbSymbolsPtr, U32* tableLogPtr, const void* src, size_t srcSize) { U32 weightTotal; U32 tableLog; const BYTE* ip = (const BYTE*) src; size_t iSize; size_t oSize; U32 n; if (!srcSize) return ERROR(srcSize_wrong); iSize = ip[0]; //memset(huffWeight, 0, hwSize); /* is not necessary, even though some analyzer complain ... 
*/ if (iSize >= 128) /* special header */ { if (iSize >= (242)) /* RLE */ { static int l[14] = { 1, 2, 3, 4, 7, 8, 15, 16, 31, 32, 63, 64, 127, 128 }; oSize = l[iSize-242]; memset(huffWeight, 1, hwSize); iSize = 0; } else /* Incompressible */ { oSize = iSize - 127; iSize = ((oSize+1)/2); if (iSize+1 > srcSize) return ERROR(srcSize_wrong); if (oSize >= hwSize) return ERROR(corruption_detected); ip += 1; for (n=0; n> 4; huffWeight[n+1] = ip[n/2] & 15; } } } else /* header compressed with FSE (normal case) */ { if (iSize+1 > srcSize) return ERROR(srcSize_wrong); oSize = FSE_decompress(huffWeight, hwSize-1, ip+1, iSize); /* max (hwSize-1) values decoded, as last one is implied */ if (FSE_isError(oSize)) return oSize; } /* collect weight stats */ memset(rankStats, 0, (HUF_ABSOLUTEMAX_TABLELOG + 1) * sizeof(U32)); weightTotal = 0; for (n=0; n= HUF_ABSOLUTEMAX_TABLELOG) return ERROR(corruption_detected); rankStats[huffWeight[n]]++; weightTotal += (1 << huffWeight[n]) >> 1; } if (weightTotal == 0) return ERROR(corruption_detected); /* get last non-null symbol weight (implied, total must be 2^n) */ tableLog = BIT_highbit32(weightTotal) + 1; if (tableLog > HUF_ABSOLUTEMAX_TABLELOG) return ERROR(corruption_detected); { U32 total = 1 << tableLog; U32 rest = total - weightTotal; U32 verif = 1 << BIT_highbit32(rest); U32 lastWeight = BIT_highbit32(rest) + 1; if (verif != rest) return ERROR(corruption_detected); /* last value must be a clean power of 2 */ huffWeight[oSize] = (BYTE)lastWeight; rankStats[lastWeight]++; } /* check tree construction validity */ if ((rankStats[1] < 2) || (rankStats[1] & 1)) return ERROR(corruption_detected); /* by construction : at least 2 elts of rank 1, must be even */ /* results */ *nbSymbolsPtr = (U32)(oSize+1); *tableLogPtr = tableLog; return iSize+1; } /**************************/ /* single-symbol decoding */ /**************************/ static size_t HUF_readDTableX2 (U16* DTable, const void* src, size_t srcSize) { BYTE huffWeight[HUF_MAX_SYMBOL_VALUE + 1]; U32 rankVal[HUF_ABSOLUTEMAX_TABLELOG + 1]; /* large enough for values from 0 to 16 */ U32 tableLog = 0; const BYTE* ip = (const BYTE*) src; size_t iSize = ip[0]; U32 nbSymbols = 0; U32 n; U32 nextRankStart; void* ptr = DTable+1; HUF_DEltX2* const dt = (HUF_DEltX2*)ptr; HUF_STATIC_ASSERT(sizeof(HUF_DEltX2) == sizeof(U16)); /* if compilation fails here, assertion is false */ //memset(huffWeight, 0, sizeof(huffWeight)); /* is not necessary, even though some analyzer complain ... 
*/ iSize = HUF_readStats(huffWeight, HUF_MAX_SYMBOL_VALUE + 1, rankVal, &nbSymbols, &tableLog, src, srcSize); if (HUF_isError(iSize)) return iSize; /* check result */ if (tableLog > DTable[0]) return ERROR(tableLog_tooLarge); /* DTable is too small */ DTable[0] = (U16)tableLog; /* maybe should separate sizeof DTable, as allocated, from used size of DTable, in case of DTable re-use */ /* Prepare ranks */ nextRankStart = 0; for (n=1; n<=tableLog; n++) { U32 current = nextRankStart; nextRankStart += (rankVal[n] << (n-1)); rankVal[n] = current; } /* fill DTable */ for (n=0; n> 1; U32 i; HUF_DEltX2 D; D.byte = (BYTE)n; D.nbBits = (BYTE)(tableLog + 1 - w); for (i = rankVal[w]; i < rankVal[w] + length; i++) dt[i] = D; rankVal[w] += length; } return iSize; } static BYTE HUF_decodeSymbolX2(BIT_DStream_t* Dstream, const HUF_DEltX2* dt, const U32 dtLog) { const size_t val = BIT_lookBitsFast(Dstream, dtLog); /* note : dtLog >= 1 */ const BYTE c = dt[val].byte; BIT_skipBits(Dstream, dt[val].nbBits); return c; } #define HUF_DECODE_SYMBOLX2_0(ptr, DStreamPtr) \ *ptr++ = HUF_decodeSymbolX2(DStreamPtr, dt, dtLog) #define HUF_DECODE_SYMBOLX2_1(ptr, DStreamPtr) \ if (MEM_64bits() || (HUF_MAX_TABLELOG<=12)) \ HUF_DECODE_SYMBOLX2_0(ptr, DStreamPtr) #define HUF_DECODE_SYMBOLX2_2(ptr, DStreamPtr) \ if (MEM_64bits()) \ HUF_DECODE_SYMBOLX2_0(ptr, DStreamPtr) static inline size_t HUF_decodeStreamX2(BYTE* p, BIT_DStream_t* const bitDPtr, BYTE* const pEnd, const HUF_DEltX2* const dt, const U32 dtLog) { BYTE* const pStart = p; /* up to 4 symbols at a time */ while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) && (p <= pEnd-4)) { HUF_DECODE_SYMBOLX2_2(p, bitDPtr); HUF_DECODE_SYMBOLX2_1(p, bitDPtr); HUF_DECODE_SYMBOLX2_2(p, bitDPtr); HUF_DECODE_SYMBOLX2_0(p, bitDPtr); } /* closer to the end */ while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) && (p < pEnd)) HUF_DECODE_SYMBOLX2_0(p, bitDPtr); /* no more data to retrieve from bitstream, hence no need to reload */ while (p < pEnd) HUF_DECODE_SYMBOLX2_0(p, bitDPtr); return pEnd-pStart; } static size_t HUF_decompress4X2_usingDTable( void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, const U16* DTable) { if (cSrcSize < 10) return ERROR(corruption_detected); /* strict minimum : jump table + 1 byte per stream */ { const BYTE* const istart = (const BYTE*) cSrc; BYTE* const ostart = (BYTE*) dst; BYTE* const oend = ostart + dstSize; const void* ptr = DTable; const HUF_DEltX2* const dt = ((const HUF_DEltX2*)ptr) +1; const U32 dtLog = DTable[0]; size_t errorCode; /* Init */ BIT_DStream_t bitD1; BIT_DStream_t bitD2; BIT_DStream_t bitD3; BIT_DStream_t bitD4; const size_t length1 = MEM_readLE16(istart); const size_t length2 = MEM_readLE16(istart+2); const size_t length3 = MEM_readLE16(istart+4); size_t length4; const BYTE* const istart1 = istart + 6; /* jumpTable */ const BYTE* const istart2 = istart1 + length1; const BYTE* const istart3 = istart2 + length2; const BYTE* const istart4 = istart3 + length3; const size_t segmentSize = (dstSize+3) / 4; BYTE* const opStart2 = ostart + segmentSize; BYTE* const opStart3 = opStart2 + segmentSize; BYTE* const opStart4 = opStart3 + segmentSize; BYTE* op1 = ostart; BYTE* op2 = opStart2; BYTE* op3 = opStart3; BYTE* op4 = opStart4; U32 endSignal; length4 = cSrcSize - (length1 + length2 + length3 + 6); if (length4 > cSrcSize) return ERROR(corruption_detected); /* overflow */ errorCode = BIT_initDStream(&bitD1, istart1, length1); if (HUF_isError(errorCode)) return errorCode; errorCode = BIT_initDStream(&bitD2, istart2, 
length2); if (HUF_isError(errorCode)) return errorCode; errorCode = BIT_initDStream(&bitD3, istart3, length3); if (HUF_isError(errorCode)) return errorCode; errorCode = BIT_initDStream(&bitD4, istart4, length4); if (HUF_isError(errorCode)) return errorCode; /* 16-32 symbols per loop (4-8 symbols per stream) */ endSignal = BIT_reloadDStream(&bitD1) | BIT_reloadDStream(&bitD2) | BIT_reloadDStream(&bitD3) | BIT_reloadDStream(&bitD4); for ( ; (endSignal==BIT_DStream_unfinished) && (op4<(oend-7)) ; ) { HUF_DECODE_SYMBOLX2_2(op1, &bitD1); HUF_DECODE_SYMBOLX2_2(op2, &bitD2); HUF_DECODE_SYMBOLX2_2(op3, &bitD3); HUF_DECODE_SYMBOLX2_2(op4, &bitD4); HUF_DECODE_SYMBOLX2_1(op1, &bitD1); HUF_DECODE_SYMBOLX2_1(op2, &bitD2); HUF_DECODE_SYMBOLX2_1(op3, &bitD3); HUF_DECODE_SYMBOLX2_1(op4, &bitD4); HUF_DECODE_SYMBOLX2_2(op1, &bitD1); HUF_DECODE_SYMBOLX2_2(op2, &bitD2); HUF_DECODE_SYMBOLX2_2(op3, &bitD3); HUF_DECODE_SYMBOLX2_2(op4, &bitD4); HUF_DECODE_SYMBOLX2_0(op1, &bitD1); HUF_DECODE_SYMBOLX2_0(op2, &bitD2); HUF_DECODE_SYMBOLX2_0(op3, &bitD3); HUF_DECODE_SYMBOLX2_0(op4, &bitD4); endSignal = BIT_reloadDStream(&bitD1) | BIT_reloadDStream(&bitD2) | BIT_reloadDStream(&bitD3) | BIT_reloadDStream(&bitD4); } /* check corruption */ if (op1 > opStart2) return ERROR(corruption_detected); if (op2 > opStart3) return ERROR(corruption_detected); if (op3 > opStart4) return ERROR(corruption_detected); /* note : op4 supposed already verified within main loop */ /* finish bitStreams one by one */ HUF_decodeStreamX2(op1, &bitD1, opStart2, dt, dtLog); HUF_decodeStreamX2(op2, &bitD2, opStart3, dt, dtLog); HUF_decodeStreamX2(op3, &bitD3, opStart4, dt, dtLog); HUF_decodeStreamX2(op4, &bitD4, oend, dt, dtLog); /* check */ endSignal = BIT_endOfDStream(&bitD1) & BIT_endOfDStream(&bitD2) & BIT_endOfDStream(&bitD3) & BIT_endOfDStream(&bitD4); if (!endSignal) return ERROR(corruption_detected); /* decoded size */ return dstSize; } } static size_t HUF_decompress4X2 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize) { HUF_CREATE_STATIC_DTABLEX2(DTable, HUF_MAX_TABLELOG); const BYTE* ip = (const BYTE*) cSrc; size_t errorCode; errorCode = HUF_readDTableX2 (DTable, cSrc, cSrcSize); if (HUF_isError(errorCode)) return errorCode; if (errorCode >= cSrcSize) return ERROR(srcSize_wrong); ip += errorCode; cSrcSize -= errorCode; return HUF_decompress4X2_usingDTable (dst, dstSize, ip, cSrcSize, DTable); } /***************************/ /* double-symbols decoding */ /***************************/ static void HUF_fillDTableX4Level2(HUF_DEltX4* DTable, U32 sizeLog, const U32 consumed, const U32* rankValOrigin, const int minWeight, const sortedSymbol_t* sortedSymbols, const U32 sortedListSize, U32 nbBitsBaseline, U16 baseSeq) { HUF_DEltX4 DElt; U32 rankVal[HUF_ABSOLUTEMAX_TABLELOG + 1]; U32 s; /* get pre-calculated rankVal */ memcpy(rankVal, rankValOrigin, sizeof(rankVal)); /* fill skipped values */ if (minWeight>1) { U32 i, skipSize = rankVal[minWeight]; MEM_writeLE16(&(DElt.sequence), baseSeq); DElt.nbBits = (BYTE)(consumed); DElt.length = 1; for (i = 0; i < skipSize; i++) DTable[i] = DElt; } /* fill DTable */ for (s=0; s= 1 */ rankVal[weight] += length; } } typedef U32 rankVal_t[HUF_ABSOLUTEMAX_TABLELOG][HUF_ABSOLUTEMAX_TABLELOG + 1]; static void HUF_fillDTableX4(HUF_DEltX4* DTable, const U32 targetLog, const sortedSymbol_t* sortedList, const U32 sortedListSize, const U32* rankStart, rankVal_t rankValOrigin, const U32 maxWeight, const U32 nbBitsBaseline) { U32 rankVal[HUF_ABSOLUTEMAX_TABLELOG + 1]; const int scaleLog = nbBitsBaseline - 
targetLog; /* note : targetLog >= srcLog, hence scaleLog <= 1 */ const U32 minBits = nbBitsBaseline - maxWeight; U32 s; memcpy(rankVal, rankValOrigin, sizeof(rankVal)); /* fill DTable */ for (s=0; s= minBits) /* enough room for a second symbol */ { U32 sortedRank; int minWeight = nbBits + scaleLog; if (minWeight < 1) minWeight = 1; sortedRank = rankStart[minWeight]; HUF_fillDTableX4Level2(DTable+start, targetLog-nbBits, nbBits, rankValOrigin[nbBits], minWeight, sortedList+sortedRank, sortedListSize-sortedRank, nbBitsBaseline, symbol); } else { U32 i; const U32 end = start + length; HUF_DEltX4 DElt; MEM_writeLE16(&(DElt.sequence), symbol); DElt.nbBits = (BYTE)(nbBits); DElt.length = 1; for (i = start; i < end; i++) DTable[i] = DElt; } rankVal[weight] += length; } } static size_t HUF_readDTableX4 (U32* DTable, const void* src, size_t srcSize) { BYTE weightList[HUF_MAX_SYMBOL_VALUE + 1]; sortedSymbol_t sortedSymbol[HUF_MAX_SYMBOL_VALUE + 1]; U32 rankStats[HUF_ABSOLUTEMAX_TABLELOG + 1] = { 0 }; U32 rankStart0[HUF_ABSOLUTEMAX_TABLELOG + 2] = { 0 }; U32* const rankStart = rankStart0+1; rankVal_t rankVal; U32 tableLog, maxW, sizeOfSort, nbSymbols; const U32 memLog = DTable[0]; const BYTE* ip = (const BYTE*) src; size_t iSize = ip[0]; void* ptr = DTable; HUF_DEltX4* const dt = ((HUF_DEltX4*)ptr) + 1; HUF_STATIC_ASSERT(sizeof(HUF_DEltX4) == sizeof(U32)); /* if compilation fails here, assertion is false */ if (memLog > HUF_ABSOLUTEMAX_TABLELOG) return ERROR(tableLog_tooLarge); //memset(weightList, 0, sizeof(weightList)); /* is not necessary, even though some analyzer complain ... */ iSize = HUF_readStats(weightList, HUF_MAX_SYMBOL_VALUE + 1, rankStats, &nbSymbols, &tableLog, src, srcSize); if (HUF_isError(iSize)) return iSize; /* check result */ if (tableLog > memLog) return ERROR(tableLog_tooLarge); /* DTable can't fit code depth */ /* find maxWeight */ for (maxW = tableLog; rankStats[maxW]==0; maxW--) {if (!maxW) return ERROR(GENERIC); } /* necessarily finds a solution before maxW==0 */ /* Get start index of each weight */ { U32 w, nextRankStart = 0; for (w=1; w<=maxW; w++) { U32 current = nextRankStart; nextRankStart += rankStats[w]; rankStart[w] = current; } rankStart[0] = nextRankStart; /* put all 0w symbols at the end of sorted list*/ sizeOfSort = nextRankStart; } /* sort symbols by weight */ { U32 s; for (s=0; s> consumed; } } } HUF_fillDTableX4(dt, memLog, sortedSymbol, sizeOfSort, rankStart0, rankVal, maxW, tableLog+1); return iSize; } static U32 HUF_decodeSymbolX4(void* op, BIT_DStream_t* DStream, const HUF_DEltX4* dt, const U32 dtLog) { const size_t val = BIT_lookBitsFast(DStream, dtLog); /* note : dtLog >= 1 */ memcpy(op, dt+val, 2); BIT_skipBits(DStream, dt[val].nbBits); return dt[val].length; } static U32 HUF_decodeLastSymbolX4(void* op, BIT_DStream_t* DStream, const HUF_DEltX4* dt, const U32 dtLog) { const size_t val = BIT_lookBitsFast(DStream, dtLog); /* note : dtLog >= 1 */ memcpy(op, dt+val, 1); if (dt[val].length==1) BIT_skipBits(DStream, dt[val].nbBits); else { if (DStream->bitsConsumed < (sizeof(DStream->bitContainer)*8)) { BIT_skipBits(DStream, dt[val].nbBits); if (DStream->bitsConsumed > (sizeof(DStream->bitContainer)*8)) DStream->bitsConsumed = (sizeof(DStream->bitContainer)*8); /* ugly hack; works only because it's the last symbol. 
Note : can't easily extract nbBits from just this symbol */ } } return 1; } #define HUF_DECODE_SYMBOLX4_0(ptr, DStreamPtr) \ ptr += HUF_decodeSymbolX4(ptr, DStreamPtr, dt, dtLog) #define HUF_DECODE_SYMBOLX4_1(ptr, DStreamPtr) \ if (MEM_64bits() || (HUF_MAX_TABLELOG<=12)) \ ptr += HUF_decodeSymbolX4(ptr, DStreamPtr, dt, dtLog) #define HUF_DECODE_SYMBOLX4_2(ptr, DStreamPtr) \ if (MEM_64bits()) \ ptr += HUF_decodeSymbolX4(ptr, DStreamPtr, dt, dtLog) static inline size_t HUF_decodeStreamX4(BYTE* p, BIT_DStream_t* bitDPtr, BYTE* const pEnd, const HUF_DEltX4* const dt, const U32 dtLog) { BYTE* const pStart = p; /* up to 8 symbols at a time */ while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) && (p < pEnd-7)) { HUF_DECODE_SYMBOLX4_2(p, bitDPtr); HUF_DECODE_SYMBOLX4_1(p, bitDPtr); HUF_DECODE_SYMBOLX4_2(p, bitDPtr); HUF_DECODE_SYMBOLX4_0(p, bitDPtr); } /* closer to the end */ while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) && (p <= pEnd-2)) HUF_DECODE_SYMBOLX4_0(p, bitDPtr); while (p <= pEnd-2) HUF_DECODE_SYMBOLX4_0(p, bitDPtr); /* no need to reload : reached the end of DStream */ if (p < pEnd) p += HUF_decodeLastSymbolX4(p, bitDPtr, dt, dtLog); return p-pStart; } static size_t HUF_decompress4X4_usingDTable( void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, const U32* DTable) { if (cSrcSize < 10) return ERROR(corruption_detected); /* strict minimum : jump table + 1 byte per stream */ { const BYTE* const istart = (const BYTE*) cSrc; BYTE* const ostart = (BYTE*) dst; BYTE* const oend = ostart + dstSize; const void* ptr = DTable; const HUF_DEltX4* const dt = ((const HUF_DEltX4*)ptr) +1; const U32 dtLog = DTable[0]; size_t errorCode; /* Init */ BIT_DStream_t bitD1; BIT_DStream_t bitD2; BIT_DStream_t bitD3; BIT_DStream_t bitD4; const size_t length1 = MEM_readLE16(istart); const size_t length2 = MEM_readLE16(istart+2); const size_t length3 = MEM_readLE16(istart+4); size_t length4; const BYTE* const istart1 = istart + 6; /* jumpTable */ const BYTE* const istart2 = istart1 + length1; const BYTE* const istart3 = istart2 + length2; const BYTE* const istart4 = istart3 + length3; const size_t segmentSize = (dstSize+3) / 4; BYTE* const opStart2 = ostart + segmentSize; BYTE* const opStart3 = opStart2 + segmentSize; BYTE* const opStart4 = opStart3 + segmentSize; BYTE* op1 = ostart; BYTE* op2 = opStart2; BYTE* op3 = opStart3; BYTE* op4 = opStart4; U32 endSignal; length4 = cSrcSize - (length1 + length2 + length3 + 6); if (length4 > cSrcSize) return ERROR(corruption_detected); /* overflow */ errorCode = BIT_initDStream(&bitD1, istart1, length1); if (HUF_isError(errorCode)) return errorCode; errorCode = BIT_initDStream(&bitD2, istart2, length2); if (HUF_isError(errorCode)) return errorCode; errorCode = BIT_initDStream(&bitD3, istart3, length3); if (HUF_isError(errorCode)) return errorCode; errorCode = BIT_initDStream(&bitD4, istart4, length4); if (HUF_isError(errorCode)) return errorCode; /* 16-32 symbols per loop (4-8 symbols per stream) */ endSignal = BIT_reloadDStream(&bitD1) | BIT_reloadDStream(&bitD2) | BIT_reloadDStream(&bitD3) | BIT_reloadDStream(&bitD4); for ( ; (endSignal==BIT_DStream_unfinished) && (op4<(oend-7)) ; ) { HUF_DECODE_SYMBOLX4_2(op1, &bitD1); HUF_DECODE_SYMBOLX4_2(op2, &bitD2); HUF_DECODE_SYMBOLX4_2(op3, &bitD3); HUF_DECODE_SYMBOLX4_2(op4, &bitD4); HUF_DECODE_SYMBOLX4_1(op1, &bitD1); HUF_DECODE_SYMBOLX4_1(op2, &bitD2); HUF_DECODE_SYMBOLX4_1(op3, &bitD3); HUF_DECODE_SYMBOLX4_1(op4, &bitD4); HUF_DECODE_SYMBOLX4_2(op1, &bitD1); HUF_DECODE_SYMBOLX4_2(op2, 
&bitD2); HUF_DECODE_SYMBOLX4_2(op3, &bitD3); HUF_DECODE_SYMBOLX4_2(op4, &bitD4); HUF_DECODE_SYMBOLX4_0(op1, &bitD1); HUF_DECODE_SYMBOLX4_0(op2, &bitD2); HUF_DECODE_SYMBOLX4_0(op3, &bitD3); HUF_DECODE_SYMBOLX4_0(op4, &bitD4); endSignal = BIT_reloadDStream(&bitD1) | BIT_reloadDStream(&bitD2) | BIT_reloadDStream(&bitD3) | BIT_reloadDStream(&bitD4); } /* check corruption */ if (op1 > opStart2) return ERROR(corruption_detected); if (op2 > opStart3) return ERROR(corruption_detected); if (op3 > opStart4) return ERROR(corruption_detected); /* note : op4 supposed already verified within main loop */ /* finish bitStreams one by one */ HUF_decodeStreamX4(op1, &bitD1, opStart2, dt, dtLog); HUF_decodeStreamX4(op2, &bitD2, opStart3, dt, dtLog); HUF_decodeStreamX4(op3, &bitD3, opStart4, dt, dtLog); HUF_decodeStreamX4(op4, &bitD4, oend, dt, dtLog); /* check */ endSignal = BIT_endOfDStream(&bitD1) & BIT_endOfDStream(&bitD2) & BIT_endOfDStream(&bitD3) & BIT_endOfDStream(&bitD4); if (!endSignal) return ERROR(corruption_detected); /* decoded size */ return dstSize; } } static size_t HUF_decompress4X4 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize) { HUF_CREATE_STATIC_DTABLEX4(DTable, HUF_MAX_TABLELOG); const BYTE* ip = (const BYTE*) cSrc; size_t hSize = HUF_readDTableX4 (DTable, cSrc, cSrcSize); if (HUF_isError(hSize)) return hSize; if (hSize >= cSrcSize) return ERROR(srcSize_wrong); ip += hSize; cSrcSize -= hSize; return HUF_decompress4X4_usingDTable (dst, dstSize, ip, cSrcSize, DTable); } /**********************************/ /* quad-symbol decoding */ /**********************************/ typedef struct { BYTE nbBits; BYTE nbBytes; } HUF_DDescX6; typedef union { BYTE byte[4]; U32 sequence; } HUF_DSeqX6; /* recursive, up to level 3; may benefit from